Rework failover agent invocation selection for agent and ephemeral agent (#2515)

* Rework failover agent invocation selection for agent and ephemeral agent

* update order of check

* update order of check

* lint
timothycarambat authored Oct 21, 2024
1 parent 11b7ccd commit c734742
Showing 2 changed files with 90 additions and 52 deletions.
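Taken together, the two diffs below establish a single resolution order for the agent's provider/model pair. As a condensed sketch of that behavior (not the committed code; `workspace` stands in for `this.#workspace` / `this.invocation.workspace`, and `providerDefault` is the method shown in the second file):

// Sketch: the selection order this commit establishes.
function resolveAgentLLM(workspace, providerDefault) {
  // 1. Explicit agent settings on the workspace win outright.
  if (workspace.agentProvider) {
    return {
      provider: workspace.agentProvider,
      model: workspace.agentModel ?? providerDefault(workspace.agentProvider),
    };
  }
  // 2. Fall back to the workspace chat provider/model pair.
  if (workspace.chatProvider && workspace.chatModel) {
    return { provider: workspace.chatProvider, model: workspace.chatModel };
  }
  // 3. Fall back to the system LLM_PROVIDER and its default model.
  const systemProvider = process.env.LLM_PROVIDER;
  const systemModel = providerDefault(systemProvider);
  if (systemProvider && systemModel) {
    return { provider: systemProvider, model: systemModel };
  }
  // 4. Nothing usable - the handlers throw a user-actionable error.
  throw new Error("No valid provider found for the agent.");
}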
59 changes: 49 additions & 10 deletions server/utils/agents/ephemeral.js
@@ -99,30 +99,69 @@ class EphemeralAgentHandler extends AgentHandler {
}
}

+  /**
+   * Attempts to find a fallback provider and model to use if the workspace
+   * does not have an explicit `agentProvider` and `agentModel` set.
+   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
+   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
+   * 3. Otherwise, return null - will likely throw an error the user can act on.
+   * @returns {object|null} - An object with provider and model keys.
+   */
+  #getFallbackProvider() {
+    // First, fallback to the workspace chat provider and model if they exist
+    if (this.#workspace.chatProvider && this.#workspace.chatModel) {
+      return {
+        provider: this.#workspace.chatProvider,
+        model: this.#workspace.chatModel,
+      };
+    }
+
+    // If workspace does not have chat provider and model fallback
+    // to system provider and try to load provider default model
+    const systemProvider = process.env.LLM_PROVIDER;
+    const systemModel = this.providerDefault(systemProvider);
+    if (systemProvider && systemModel) {
+      return {
+        provider: systemProvider,
+        model: systemModel,
+      };
+    }
+
+    return null;
+  }
+
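For illustration, roughly what the new fallback yields for a few hypothetical workspace shapes (values invented for the example):

// Assuming process.env.LLM_PROVIDER = "openai" and OPEN_MODEL_PREF is unset:
// { chatProvider: "anthropic", chatModel: "claude-3-sonnet-20240229" }
//   -> { provider: "anthropic", model: "claude-3-sonnet-20240229" }
// { chatProvider: "anthropic", chatModel: null }
//   -> { provider: "openai", model: "gpt-4o" }   (system fallback, base model)
// With LLM_PROVIDER also unset:
//   -> null, which makes #fetchModel() throw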
/**
* Finds or assumes the model preference value to use for API calls.
* If multi-model loading is supported, we use the workspace's agent model selection
* If not supported, we attempt to fallback to the system provider value for the LLM preference
* and if that fails - we assume a reasonable base model to exist.
-   * @returns {string} the model preference value to use in API calls
+   * @returns {string|null} the model preference value to use in API calls
*/
#fetchModel() {
-    if (!Object.keys(this.noProviderModelDefault).includes(this.provider))
-      return this.#workspace.agentModel || this.providerDefault();
+    // Provider was not explicitly set for workspace, so we are going to run our fallback logic
+    // that will set a provider and model for us to use.
+    if (!this.provider) {
+      const fallback = this.#getFallbackProvider();
+      if (!fallback) throw new Error("No valid provider found for the agent.");
+      this.provider = fallback.provider; // re-set the provider to the fallback provider so it is not null.
+      return fallback.model; // set its defined model based on fallback logic.
+    }

-    // Provider has no reliable default (cant load many models) - so we need to look at system
-    // for the model param.
-    const sysModelKey = this.noProviderModelDefault[this.provider];
-    if (!!sysModelKey)
-      return process.env[sysModelKey] ?? this.providerDefault();
+    // The provider was explicitly set, so check if the workspace has an agent model set.
+    if (this.invocation.workspace.agentModel)
+      return this.invocation.workspace.agentModel;

-    // If all else fails - look at the provider default list
+    // Otherwise, we have no model to use - so guess a default model to use via the provider
+    // and its system ENV params and if that fails - we return either a base model or null.
return this.providerDefault();
}
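In other words, the reworked #fetchModel() has exactly three exits (inputs hypothetical):

// this.provider == null               -> #getFallbackProvider() picks both;
//                                        a null result becomes a thrown error
// provider set, agentModel set        -> the workspace agentModel, verbatim
// provider set, agentModel missing    -> providerDefault(): ENV preference,
//                                        then a base model, then null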

#providerSetupAndCheck() {
-    this.provider = this.#workspace.agentProvider;
+    this.provider = this.#workspace.agentProvider ?? null;
    this.model = this.#fetchModel();
+
+    if (!this.provider)
+      throw new Error("No valid provider found for the agent.");
this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
this.checkSetup();
}
83 changes: 41 additions & 42 deletions server/utils/agents/index.js
@@ -11,13 +11,6 @@ const ImportedPlugin = require("./imported");
class AgentHandler {
#invocationUUID;
#funcsToLoad = [];
-  noProviderModelDefault = {
-    azure: "OPEN_MODEL_PREF",
-    lmstudio: "LMSTUDIO_MODEL_PREF",
-    textgenwebui: null, // does not even use `model` in API req
-    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
-    bedrock: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
-  };
invocation = null;
aibitat = null;
channel = null;
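Dropping noProviderModelDefault does not drop the ENV lookups it encoded; each mapping is folded into the matching case of providerDefault() below. A before/after sketch for one provider:

// before: process.env[this.noProviderModelDefault["lmstudio"]] ?? this.providerDefault();
// after:  this.providerDefault("lmstudio"); // reads LMSTUDIO_MODEL_PREF itself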
@@ -184,53 +177,70 @@ class AgentHandler {
}
}

+  /**
+   * Finds the default model for a given provider. If no default model is set for its associated ENV then
+   * it will return a reasonable base model for the provider if one exists.
+   * @param {string} provider - The provider to find the default model for.
+   * @returns {string|null} The default model for the provider.
+   */
  providerDefault(provider = this.provider) {
    switch (provider) {
      case "openai":
-        return "gpt-4o";
+        return process.env.OPEN_MODEL_PREF ?? "gpt-4o";
      case "anthropic":
-        return "claude-3-sonnet-20240229";
+        return process.env.ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229";
      case "lmstudio":
-        return "server-default";
+        return process.env.LMSTUDIO_MODEL_PREF ?? "server-default";
      case "ollama":
-        return "llama3:latest";
+        return process.env.OLLAMA_MODEL_PREF ?? "llama3:latest";
      case "groq":
-        return "llama3-70b-8192";
+        return process.env.GROQ_MODEL_PREF ?? "llama3-70b-8192";
      case "togetherai":
-        return "mistralai/Mixtral-8x7B-Instruct-v0.1";
+        return (
+          process.env.TOGETHER_AI_MODEL_PREF ??
+          "mistralai/Mixtral-8x7B-Instruct-v0.1"
+        );
      case "azure":
-        return "gpt-3.5-turbo";
-      case "koboldcpp":
        return null;
+      case "koboldcpp":
+        return process.env.KOBOLD_CPP_MODEL_PREF ?? null;
      case "gemini":
-        return "gemini-pro";
+        return process.env.GEMINI_MODEL_PREF ?? "gemini-pro";
      case "localai":
-        return null;
+        return process.env.LOCAL_AI_MODEL_PREF ?? null;
      case "openrouter":
-        return "openrouter/auto";
+        return process.env.OPENROUTER_MODEL_PREF ?? "openrouter/auto";
      case "mistral":
-        return "mistral-medium";
+        return process.env.MISTRAL_MODEL_PREF ?? "mistral-medium";
      case "generic-openai":
-        return null;
+        return process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
      case "perplexity":
-        return "sonar-small-online";
+        return process.env.PERPLEXITY_MODEL_PREF ?? "sonar-small-online";
      case "textgenwebui":
        return null;
      case "bedrock":
-        return null;
+        return process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null;
      case "fireworksai":
-        return null;
+        return process.env.FIREWORKS_AI_LLM_MODEL_PREF ?? null;
      case "deepseek":
-        return "deepseek-chat";
+        return process.env.DEEPSEEK_MODEL_PREF ?? "deepseek-chat";
      case "litellm":
-        return null;
+        return process.env.LITE_LLM_MODEL_PREF ?? null;
      case "apipie":
-        return null;
+        return process.env.APIPIE_LLM_MODEL_PREF ?? null;
      default:
-        return "unknown";
+        return null;
    }
  }
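Every case now prefers the provider's model-preference ENV variable and only then falls back to a hard-coded base model (or null). A quick illustration against a hypothetical handler instance:

process.env.OLLAMA_MODEL_PREF = "qwen2:7b";  // hypothetical override
handler.providerDefault("ollama");           // -> "qwen2:7b"
delete process.env.OLLAMA_MODEL_PREF;
handler.providerDefault("ollama");           // -> "llama3:latest" (base model)
handler.providerDefault("no-such-provider"); // -> null (previously "unknown")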

+  /**
+   * Attempts to find a fallback provider and model to use if the workspace
+   * does not have an explicit `agentProvider` and `agentModel` set.
+   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
+   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
+   * 3. Otherwise, return null - will likely throw an error the user can act on.
+   * @returns {object|null} - An object with provider and model keys.
+   */
+  #getFallbackProvider() {
+    // First, fallback to the workspace chat provider and model if they exist
+    if (
@@ -262,7 +272,7 @@ class AgentHandler {
* If multi-model loading is supported, we use the workspace's agent model selection
* If not supported, we attempt to fallback to the system provider value for the LLM preference
* and if that fails - we assume a reasonable base model to exist.
-   * @returns {string} the model preference value to use in API calls
+   * @returns {string|null} the model preference value to use in API calls
*/
#fetchModel() {
// Provider was not explicitly set for workspace, so we are going to run our fallback logic
@@ -275,21 +285,11 @@ class AgentHandler {
}

// The provider was explicitly set, so check if the workspace has an agent model set.
-    if (this.invocation.workspace.agentModel) {
+    if (this.invocation.workspace.agentModel)
      return this.invocation.workspace.agentModel;
-    }

-    // If the provider we are using is not supported or does not support multi-model loading
-    // then we use the default model for the provider.
-    if (!Object.keys(this.noProviderModelDefault).includes(this.provider)) {
-      return this.providerDefault();
-    }
-
-    // Load the model from the system environment variable for providers with no multi-model loading.
-    const sysModelKey = this.noProviderModelDefault[this.provider];
-    if (sysModelKey) return process.env[sysModelKey] ?? this.providerDefault();
-
-    // Otherwise, we have no model to use - so guess a default model to use.
+    // Otherwise, we have no model to use - so guess a default model to use via the provider
+    // and its system ENV params and if that fails - we return either a base model or null.
return this.providerDefault();
}

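The "update order of check" commits amount to this: an explicitly configured agent model is returned before providerDefault() is ever consulted. Sketched with hypothetical values:

// invocation.workspace = { agentProvider: "openai", agentModel: "gpt-4o-mini" }
//   -> #fetchModel() returns "gpt-4o-mini"; providerDefault() is never called
// invocation.workspace = { agentProvider: "openai", agentModel: null }
//   -> #fetchModel() returns providerDefault() (OPEN_MODEL_PREF ?? "gpt-4o")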
@@ -299,7 +299,6 @@

    if (!this.provider)
      throw new Error("No valid provider found for the agent.");
-
this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
this.checkSetup();
}
