Add Ollama as LLM provider option (#494)
* Add support for Ollama as LLM provider
resolves #493
timothycarambat authored Dec 28, 2023
1 parent 24227e4 commit e0a0a89
Showing 15 changed files with 486 additions and 6 deletions.
1 change: 1 addition & 0 deletions .vscode/settings.json
@@ -1,5 +1,6 @@
{
"cSpell.words": [
"Ollama",
"openai",
"Qdrant",
"Weaviate"
1 change: 1 addition & 0 deletions README.md
@@ -59,6 +59,7 @@ Some cool features of AnythingLLM
- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service)
- [Anthropic ClaudeV2](https://www.anthropic.com/)
- [Google Gemini Pro](https://ai.google.dev/)
- [Ollama (chat models)](https://ollama.ai/)
- [LM Studio (all models)](https://lmstudio.ai)
- [LocalAi (all models)](https://localai.io/)

5 changes: 5 additions & 0 deletions docker/.env.example
@@ -35,6 +35,11 @@ GID='1000'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"

# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
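
The four commented variables above are everything needed to point a Docker deployment at Ollama: LLM_PROVIDER='ollama' selects the provider, OLLAMA_BASE_PATH is the server URL (http://host.docker.internal:11434 reaches the host machine from inside the container), OLLAMA_MODEL_PREF names the chat model, and OLLAMA_MODEL_TOKEN_LIMIT caps the context window. Below is a minimal reachability sketch — not part of this commit — that assumes Ollama's standard GET /api/tags endpoint for listing locally pulled models; the file name check-ollama.js is hypothetical:

// check-ollama.js — hypothetical pre-flight check, not included in this diff.
// Queries Ollama's /api/tags endpoint, which lists locally pulled models.
const basePath = process.env.OLLAMA_BASE_PATH || "http://127.0.0.1:11434";

fetch(`${basePath}/api/tags`)
  .then((res) => res.json())
  .then(({ models }) =>
    console.log("Ollama is reachable. Models:", models.map((m) => m.name))
  )
  .catch(() => console.error(`No Ollama server responding at ${basePath}`));

Running node check-ollama.js (Node 18+ for the global fetch) before starting the app confirms the configured base path is valid.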

###########################################
######## Embedding API SELECTION ##########
###########################################
120 changes: 120 additions & 0 deletions frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx
@@ -0,0 +1,120 @@
import { useEffect, useState } from "react";
import System from "@/models/system";

export default function OllamaLLMOptions({ settings }) {
const [basePathValue, setBasePathValue] = useState(
settings?.OllamaLLMBasePath
);
const [basePath, setBasePath] = useState(settings?.OllamaLLMBasePath);

return (
<div className="w-full flex flex-col gap-y-4">
<div className="w-full flex items-center gap-4">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Ollama Base URL
</label>
<input
type="url"
name="OllamaLLMBasePath"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="http://127.0.0.1:11434"
defaultValue={settings?.OllamaLLMBasePath}
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<OllamaLLMModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
</label>
<input
type="number"
name="OllamaLLMTokenLimit"
className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
placeholder="4096"
min={1}
onScroll={(e) => e.target.blur()}
defaultValue={settings?.OllamaLLMTokenLimit}
required={true}
autoComplete="off"
/>
</div>
</div>
</div>
);
}

function OllamaLLMModelSelection({ settings, basePath = null }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);

useEffect(() => {
async function findCustomModels() {
if (!basePath) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels("ollama", null, basePath);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [basePath]);

if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="OllamaLLMModelPref"
disabled={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{!!basePath
? "-- loading available models --"
: "-- waiting for URL --"}
</option>
</select>
</div>
);
}

return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="OllamaLLMModelPref"
required={true}
className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Your loaded models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings.OllamaLLMModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}
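
Two details in the component above are worth calling out. First, the base URL is held in two pieces of state: basePathValue tracks every keystroke, while basePath is only committed onBlur, so the model list refetches once when the user leaves the field rather than on every character typed. Second, OllamaLLMModelSelection asks the backend for the models available at that URL via System.customModels("ollama", null, basePath). The following is a hedged sketch of what such a lookup might do server-side — the real handler is not shown in this diff, and the sketch assumes Ollama's GET /api/tags response shape of { models: [{ name, ... }] }:

// Hypothetical server-side lookup behind System.customModels("ollama", ...).
// Not part of this diff; assumes Ollama's /api/tags listing endpoint.
async function ollamaModels(basePath) {
  try {
    const res = await fetch(`${basePath}/api/tags`);
    if (!res.ok) return { models: [] };
    const { models } = await res.json();
    // Normalize to { id } objects — the shape the <select> above renders.
    return { models: models.map((m) => ({ id: m.name })) };
  } catch {
    return { models: [] };
  }
}

A smaller touch: the onScroll={(e) => e.target.blur()} handler on the token-limit input drops focus on scroll, preventing the mouse wheel from silently changing a focused number field.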
Binary file added frontend/src/media/llmprovider/ollama.png
14 changes: 14 additions & 0 deletions frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -8,6 +8,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import PreLoader from "@/components/Preloader";
@@ -19,6 +20,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";

export default function GeneralLLMPreference() {
const [saving, setSaving] = useState(false);
@@ -163,6 +165,15 @@ export default function GeneralLLMPreference() {
image={LocalAiLogo}
onClick={updateLLMChoice}
/>
<LLMProviderOption
name="Ollama"
value="ollama"
link="ollama.ai"
description="Run LLMs locally on your own machine."
checked={llmChoice === "ollama"}
image={OllamaLogo}
onClick={updateLLMChoice}
/>
{!window.location.hostname.includes("useanything.com") && (
<LLMProviderOption
name="Custom Llama Model"
Expand Down Expand Up @@ -193,6 +204,9 @@ export default function GeneralLLMPreference() {
{llmChoice === "localai" && (
<LocalAiOptions settings={settings} showAlert={true} />
)}
{llmChoice === "ollama" && (
<OllamaLLMOptions settings={settings} />
)}
{llmChoice === "native" && (
<NativeLLMOptions settings={settings} />
)}
@@ -5,6 +5,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import ChromaLogo from "@/media/vectordbs/chroma.png";
@@ -61,6 +62,13 @@ const LLM_SELECTION_PRIVACY = {
],
logo: LocalAiLogo,
},
ollama: {
name: "Ollama",
description: [
"Your model and chats are only accessible on the machine running Ollama models",
],
logo: OllamaLogo,
},
native: {
name: "Custom Llama Model",
description: [
@@ -4,6 +4,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png";
import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import System from "@/models/system";
@@ -16,6 +17,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";

function LLMSelection({ nextStep, prevStep, currentStep }) {
const [llmChoice, setLLMChoice] = useState("openai");
@@ -124,13 +126,24 @@ function LLMSelection({ nextStep, prevStep, currentStep }) {
onClick={updateLLMChoice}
/>
<LLMProviderOption
name="Custom Llama Model"
value="native"
description="Use a downloaded custom Llama model for chatting on this AnythingLLM instance."
checked={llmChoice === "native"}
image={AnythingLLMIcon}
name="Ollama"
value="ollama"
link="ollama.ai"
description="Run LLMs locally on your own machine."
checked={llmChoice === "ollama"}
image={OllamaLogo}
onClick={updateLLMChoice}
/>
{!window.location.hostname.includes("useanything.com") && (
<LLMProviderOption
name="Custom Llama Model"
value="native"
description="Use a downloaded custom Llama model for chatting on this AnythingLLM instance."
checked={llmChoice === "native"}
image={AnythingLLMIcon}
onClick={updateLLMChoice}
/>
)}
</div>
<div className="mt-4 flex flex-wrap gap-4 max-w-[752px]">
{llmChoice === "openai" && <OpenAiOptions settings={settings} />}
@@ -143,6 +156,7 @@
<LMStudioOptions settings={settings} />
)}
{llmChoice === "localai" && <LocalAiOptions settings={settings} />}
{llmChoice === "ollama" && <OllamaLLMOptions settings={settings} />}
{llmChoice === "native" && <NativeLLMOptions settings={settings} />}
</div>
</div>
5 changes: 5 additions & 0 deletions server/.env.example
@@ -32,6 +32,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"

# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096

###########################################
######## Embedding API SELECTION ##########
###########################################
14 changes: 14 additions & 0 deletions server/models/systemSettings.js
@@ -126,6 +126,20 @@ AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),

...(llmProvider === "ollama"
? {
OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT,

// For embedding credentials when ollama is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
}
: {}),
...(llmProvider === "native"
? {
NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
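
The hunk above follows the same convention as the neighboring providers: non-secret values (base path, model preference, token limit) are sent to the frontend verbatim, while credentials such as OPEN_AI_KEY and AZURE_OPENAI_KEY are coerced to booleans with !!, so the client learns only whether a key is configured, never its value. The convention in isolation:

// The masking convention used in systemSettings.js above.
const settings = {
  OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH, // non-secret: passed through
  OpenAiKey: !!process.env.OPEN_AI_KEY,            // secret: presence only
};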