refactor: adopt a modular architecture for LLM providers (#832)
* refactor: restructure providers so that each provider lives in its own module (see the usage sketch below)
* updated package and lock file
* added the Grok model back
* updated the registry system
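At a high level, every provider now lives in its own module under `app/lib/modules/llm/providers/`, extends `BaseProvider`, and is registered by the singleton `LLMManager` through `registry.ts`. A minimal sketch of how a model instance is resolved under the new layout, using only names that appear in the diff below (the real call site is `streamText` in `app/lib/.server/llm/stream-text.ts`, which looks the provider up via `PROVIDER_LIST`):

```ts
import { LLMManager } from '~/lib/modules/llm/manager';

// Sketch only: resolve a LanguageModelV1 through the new provider registry.
const manager = LLMManager.getInstance();

// Providers exported from registry.ts are registered when the manager is constructed.
const provider = manager.getProvider('Anthropic') ?? manager.getDefaultProvider();

// Each provider builds its own model instance from user API keys / provider settings.
const model = provider.getModelInstance({
  model: 'claude-3-5-sonnet-latest',
  serverEnv: {} as any, // assumption: real callers pass the Cloudflare `Env` here
  apiKeys: { Anthropic: process.env.ANTHROPIC_API_KEY ?? '' },
});
```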
- app/components/chat/BaseChat.tsx +2 -1
- app/components/chat/Chat.client.tsx +1 -1
- app/components/chat/ModelSelector.tsx +1 -1
- app/lib/.server/llm/api-key.ts +0 -111
- app/lib/.server/llm/model.ts +0 -190
- app/lib/.server/llm/stream-text.ts +9 -2
- app/lib/modules/llm/base-provider.ts +72 -0
- app/lib/modules/llm/manager.ts +116 -0
- app/lib/modules/llm/providers/anthropic.ts +58 -0
- app/lib/modules/llm/providers/cohere.ts +54 -0
- app/lib/modules/llm/providers/deepseek.ts +47 -0
- app/lib/modules/llm/providers/google.ts +51 -0
- app/lib/modules/llm/providers/groq.ts +51 -0
- app/lib/modules/llm/providers/huggingface.ts +69 -0
- app/lib/modules/llm/providers/lmstudio.ts +73 -0
- app/lib/modules/llm/providers/mistral.ts +53 -0
- app/lib/modules/llm/providers/ollama.ts +99 -0
- app/lib/modules/llm/providers/open-router.ts +132 -0
- app/lib/modules/llm/providers/openai-like.ts +77 -0
- app/lib/modules/llm/providers/openai.ts +48 -0
- app/lib/modules/llm/providers/perplexity.ts +63 -0
- app/lib/modules/llm/providers/together.ts +100 -0
- app/lib/modules/llm/providers/xai.ts +47 -0
- app/lib/modules/llm/registry.ts +33 -0
- app/lib/modules/llm/types.ts +32 -0
- app/types/model.ts +1 -1
- app/utils/constants.ts +334 -650
- app/utils/types.ts +0 -7
- pnpm-lock.yaml +1 -1
- worker-configuration.d.ts +1 -0
app/components/chat/BaseChat.tsx
CHANGED
@@ -160,6 +160,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       }

       initializeModelList({ apiKeys: parsedApiKeys, providerSettings }).then((modelList) => {
+        console.log('Model List: ', modelList);
         setModelList(modelList);
       });

@@ -359,7 +360,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
             modelList={modelList}
             provider={provider}
             setProvider={setProvider}
-            providerList={providerList || PROVIDER_LIST}
+            providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
             apiKeys={apiKeys}
           />
           {(providerList || []).length > 0 && provider && (
app/components/chat/Chat.client.tsx
CHANGED
@@ -122,7 +122,7 @@ export const ChatImpl = memo(
     });
     const [provider, setProvider] = useState(() => {
       const savedProvider = Cookies.get('selectedProvider');
-      return PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER;
+      return (PROVIDER_LIST.find((p) => p.name === savedProvider) || DEFAULT_PROVIDER) as ProviderInfo;
     });

     const { showChat } = useStore(chatStore);
app/components/chat/ModelSelector.tsx
CHANGED
@@ -1,6 +1,6 @@
 import type { ProviderInfo } from '~/types/model';
-import type { ModelInfo } from '~/utils/types';
 import { useEffect } from 'react';
+import type { ModelInfo } from '~/lib/modules/llm/types';

 interface ModelSelectorProps {
   model?: string;
app/lib/.server/llm/api-key.ts
DELETED
@@ -1,111 +0,0 @@
import { env } from 'node:process';
import type { IProviderSetting } from '~/types/model';
import { getProviderBaseUrlAndKey } from '~/utils/constants';

export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record<string, string>) {
  /**
   * The `cloudflareEnv` is only used when deployed or when previewing locally.
   * In development the environment variables are available through `env`.
   */

  // First check user-provided API keys
  if (userApiKeys?.[provider]) {
    return userApiKeys[provider];
  }

  const { apiKey } = getProviderBaseUrlAndKey({
    provider,
    apiKeys: userApiKeys,
    providerSettings: undefined,
    serverEnv: cloudflareEnv as any,
    defaultBaseUrlKey: '',
    defaultApiTokenKey: '',
  });

  if (apiKey) {
    return apiKey;
  }

  // Fall back to hardcoded environment variable names
  switch (provider) {
    case 'Anthropic':
      return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY;
    case 'OpenAI':
      return env.OPENAI_API_KEY || cloudflareEnv.OPENAI_API_KEY;
    case 'Google':
      return env.GOOGLE_GENERATIVE_AI_API_KEY || cloudflareEnv.GOOGLE_GENERATIVE_AI_API_KEY;
    case 'Groq':
      return env.GROQ_API_KEY || cloudflareEnv.GROQ_API_KEY;
    case 'HuggingFace':
      return env.HuggingFace_API_KEY || cloudflareEnv.HuggingFace_API_KEY;
    case 'OpenRouter':
      return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
    case 'Deepseek':
      return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
    case 'Mistral':
      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
    case 'OpenAILike':
      return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
    case 'Together':
      return env.TOGETHER_API_KEY || cloudflareEnv.TOGETHER_API_KEY;
    case 'xAI':
      return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
    case 'Perplexity':
      return env.PERPLEXITY_API_KEY || cloudflareEnv.PERPLEXITY_API_KEY;
    case 'Cohere':
      return env.COHERE_API_KEY;
    case 'AzureOpenAI':
      return env.AZURE_OPENAI_API_KEY;
    default:
      return '';
  }
}

export function getBaseURL(cloudflareEnv: Env, provider: string, providerSettings?: Record<string, IProviderSetting>) {
  const { baseUrl } = getProviderBaseUrlAndKey({
    provider,
    apiKeys: {},
    providerSettings,
    serverEnv: cloudflareEnv as any,
    defaultBaseUrlKey: '',
    defaultApiTokenKey: '',
  });

  if (baseUrl) {
    return baseUrl;
  }

  let settingBaseUrl = providerSettings?.[provider].baseUrl;

  if (settingBaseUrl && settingBaseUrl.length == 0) {
    settingBaseUrl = undefined;
  }

  switch (provider) {
    case 'Together':
      return (
        settingBaseUrl ||
        env.TOGETHER_API_BASE_URL ||
        cloudflareEnv.TOGETHER_API_BASE_URL ||
        'https://api.together.xyz/v1'
      );
    case 'OpenAILike':
      return settingBaseUrl || env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
    case 'LMStudio':
      return (
        settingBaseUrl || env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234'
      );
    case 'Ollama': {
      let baseUrl =
        settingBaseUrl || env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434';

      if (env.RUNNING_IN_DOCKER === 'true') {
        baseUrl = baseUrl.replace('localhost', 'host.docker.internal');
      }

      return baseUrl;
    }
    default:
      return '';
  }
}
app/lib/.server/llm/model.ts
DELETED
@@ -1,190 +0,0 @@
/*
 * @ts-nocheck
 * Preventing TS checks with files presented in the video for a better presentation.
 */
import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createOpenAI } from '@ai-sdk/openai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { ollama } from 'ollama-ai-provider';
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { createMistral } from '@ai-sdk/mistral';
import { createCohere } from '@ai-sdk/cohere';
import type { LanguageModelV1 } from 'ai';
import type { IProviderSetting } from '~/types/model';

export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;

type OptionalApiKey = string | undefined;

export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
  const anthropic = createAnthropic({
    apiKey,
  });

  return anthropic(model);
}
export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL,
    apiKey,
  });

  return openai(model);
}

export function getCohereAIModel(apiKey: OptionalApiKey, model: string) {
  const cohere = createCohere({
    apiKey,
  });

  return cohere(model);
}

export function getOpenAIModel(apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    apiKey,
  });

  return openai(model);
}

export function getMistralModel(apiKey: OptionalApiKey, model: string) {
  const mistral = createMistral({
    apiKey,
  });

  return mistral(model);
}

export function getGoogleModel(apiKey: OptionalApiKey, model: string) {
  const google = createGoogleGenerativeAI({
    apiKey,
  });

  return google(model);
}

export function getGroqModel(apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL: 'https://api.groq.com/openai/v1',
    apiKey,
  });

  return openai(model);
}

export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL: 'https://api-inference.huggingface.co/v1/',
    apiKey,
  });

  return openai(model);
}

export function getOllamaModel(baseURL: string, model: string) {
  const ollamaInstance = ollama(model, {
    numCtx: DEFAULT_NUM_CTX,
  }) as LanguageModelV1 & { config: any };

  ollamaInstance.config.baseURL = `${baseURL}/api`;

  return ollamaInstance;
}

export function getDeepseekModel(apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL: 'https://api.deepseek.com/beta',
    apiKey,
  });

  return openai(model);
}

export function getOpenRouterModel(apiKey: OptionalApiKey, model: string) {
  const openRouter = createOpenRouter({
    apiKey,
  });

  return openRouter.chat(model);
}

export function getLMStudioModel(baseURL: string, model: string) {
  const lmstudio = createOpenAI({
    baseUrl: `${baseURL}/v1`,
    apiKey: '',
  });

  return lmstudio(model);
}

export function getXAIModel(apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL: 'https://api.x.ai/v1',
    apiKey,
  });

  return openai(model);
}

export function getPerplexityModel(apiKey: OptionalApiKey, model: string) {
  const perplexity = createOpenAI({
    baseURL: 'https://api.perplexity.ai/',
    apiKey,
  });

  return perplexity(model);
}

export function getModel(
  provider: string,
  model: string,
  serverEnv: Env,
  apiKeys?: Record<string, string>,
  providerSettings?: Record<string, IProviderSetting>,
) {
  /*
   * let apiKey; // Declare first
   * let baseURL;
   */
  // console.log({provider,model});

  const apiKey = getAPIKey(serverEnv, provider, apiKeys); // Then assign
  const baseURL = getBaseURL(serverEnv, provider, providerSettings);

  // console.log({apiKey,baseURL});

  switch (provider) {
    case 'Anthropic':
      return getAnthropicModel(apiKey, model);
    case 'OpenAI':
      return getOpenAIModel(apiKey, model);
    case 'Groq':
      return getGroqModel(apiKey, model);
    case 'HuggingFace':
      return getHuggingFaceModel(apiKey, model);
    case 'OpenRouter':
      return getOpenRouterModel(apiKey, model);
    case 'Google':
      return getGoogleModel(apiKey, model);
    case 'OpenAILike':
      return getOpenAILikeModel(baseURL, apiKey, model);
    case 'Together':
      return getOpenAILikeModel(baseURL, apiKey, model);
    case 'Deepseek':
      return getDeepseekModel(apiKey, model);
    case 'Mistral':
      return getMistralModel(apiKey, model);
    case 'LMStudio':
      return getLMStudioModel(baseURL, model);
    case 'xAI':
      return getXAIModel(apiKey, model);
    case 'Cohere':
      return getCohereAIModel(apiKey, model);
    case 'Perplexity':
      return getPerplexityModel(apiKey, model);
    default:
      return getOllamaModel(baseURL, model);
  }
}
app/lib/.server/llm/stream-text.ts
CHANGED
@@ -1,5 +1,4 @@
 import { convertToCoreMessages, streamText as _streamText } from 'ai';
-import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
 import { getSystemPrompt } from '~/lib/common/prompts/prompts';
 import {
@@ -8,6 +7,7 @@ import {
   getModelList,
   MODEL_REGEX,
   MODIFICATIONS_TAG_NAME,
+  PROVIDER_LIST,
   PROVIDER_REGEX,
   WORK_DIR,
 } from '~/utils/constants';
@@ -184,6 +184,8 @@ export async function streamText(props: {

   const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

+  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
+
   let systemPrompt =
     PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
       cwd: WORK_DIR,
@@ -199,7 +201,12 @@
   }

   return _streamText({
-    model:
+    model: provider.getModelInstance({
+      model: currentModel,
+      serverEnv,
+      apiKeys,
+      providerSettings,
+    }),
     system: systemPrompt,
     maxTokens: dynamicMaxTokens,
     messages: convertToCoreMessages(processedMessages as any),
app/lib/modules/llm/base-provider.ts
ADDED
@@ -0,0 +1,72 @@
import type { LanguageModelV1 } from 'ai';
import type { ProviderInfo, ProviderConfig, ModelInfo } from './types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import { LLMManager } from './manager';

export abstract class BaseProvider implements ProviderInfo {
  abstract name: string;
  abstract staticModels: ModelInfo[];
  abstract config: ProviderConfig;

  getApiKeyLink?: string;
  labelForGetApiKey?: string;
  icon?: string;

  getProviderBaseUrlAndKey(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: IProviderSetting;
    serverEnv?: Record<string, string>;
    defaultBaseUrlKey: string;
    defaultApiTokenKey: string;
  }) {
    const { apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;
    let settingsBaseUrl = providerSettings?.baseUrl;
    const manager = LLMManager.getInstance();

    if (settingsBaseUrl && settingsBaseUrl.length == 0) {
      settingsBaseUrl = undefined;
    }

    const baseUrlKey = this.config.baseUrlKey || defaultBaseUrlKey;
    let baseUrl = settingsBaseUrl || serverEnv?.[baseUrlKey] || process?.env?.[baseUrlKey] || manager.env?.[baseUrlKey];

    if (baseUrl && baseUrl.endsWith('/')) {
      baseUrl = baseUrl.slice(0, -1);
    }

    const apiTokenKey = this.config.apiTokenKey || defaultApiTokenKey;
    const apiKey =
      apiKeys?.[this.name] || serverEnv?.[apiTokenKey] || process?.env?.[apiTokenKey] || manager.env?.[baseUrlKey];

    return {
      baseUrl,
      apiKey,
    };
  }

  // Declare the optional getDynamicModels method
  getDynamicModels?(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]>;

  abstract getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1;
}

type OptionalApiKey = string | undefined;

export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
  const openai = createOpenAI({
    baseURL,
    apiKey,
  });

  return openai(model);
}
app/lib/modules/llm/manager.ts
ADDED
@@ -0,0 +1,116 @@
import type { IProviderSetting } from '~/types/model';
import { BaseProvider } from './base-provider';
import type { ModelInfo, ProviderInfo } from './types';
import * as providers from './registry';

export class LLMManager {
  private static _instance: LLMManager;
  private _providers: Map<string, BaseProvider> = new Map();
  private _modelList: ModelInfo[] = [];
  private readonly _env: any = {};

  private constructor(_env: Record<string, string>) {
    this._registerProvidersFromDirectory();
    this._env = _env;
  }

  static getInstance(env: Record<string, string> = {}): LLMManager {
    if (!LLMManager._instance) {
      LLMManager._instance = new LLMManager(env);
    }

    return LLMManager._instance;
  }
  get env() {
    return this._env;
  }

  private async _registerProvidersFromDirectory() {
    try {
      /*
       * Dynamically import all files from the providers directory
       * const providerModules = import.meta.glob('./providers/*.ts', { eager: true });
       */

      // Look for exported classes that extend BaseProvider
      for (const exportedItem of Object.values(providers)) {
        if (typeof exportedItem === 'function' && exportedItem.prototype instanceof BaseProvider) {
          const provider = new exportedItem();

          try {
            this.registerProvider(provider);
          } catch (error: any) {
            console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
          }
        }
      }
    } catch (error) {
      console.error('Error registering providers:', error);
    }
  }

  registerProvider(provider: BaseProvider) {
    if (this._providers.has(provider.name)) {
      console.warn(`Provider ${provider.name} is already registered. Skipping.`);
      return;
    }

    console.log('Registering Provider: ', provider.name);
    this._providers.set(provider.name, provider);
    this._modelList = [...this._modelList, ...provider.staticModels];
  }

  getProvider(name: string): BaseProvider | undefined {
    return this._providers.get(name);
  }

  getAllProviders(): BaseProvider[] {
    return Array.from(this._providers.values());
  }

  getModelList(): ModelInfo[] {
    return this._modelList;
  }

  async updateModelList(options: {
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
    serverEnv?: Record<string, string>;
  }): Promise<ModelInfo[]> {
    const { apiKeys, providerSettings, serverEnv } = options;

    // Get dynamic models from all providers that support them
    const dynamicModels = await Promise.all(
      Array.from(this._providers.values())
        .filter(
          (provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
            !!provider.getDynamicModels,
        )
        .map((provider) =>
          provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
            console.error(`Error getting dynamic models ${provider.name} :`, err);
            return [];
          }),
        ),
    );

    // Combine static and dynamic models
    const modelList = [
      ...dynamicModels.flat(),
      ...Array.from(this._providers.values()).flatMap((p) => p.staticModels || []),
    ];
    this._modelList = modelList;

    return modelList;
  }

  getDefaultProvider(): BaseProvider {
    const firstProvider = this._providers.values().next().value;

    if (!firstProvider) {
      throw new Error('No providers registered');
    }

    return firstProvider;
  }
}
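For reference, a hedged sketch of how the manager is expected to be driven when refreshing the combined model list. In the app this sits behind the `initializeModelList({ apiKeys, providerSettings })` call seen in the BaseChat.tsx hunk above; the direct call here is illustrative only:

```ts
import { LLMManager } from '~/lib/modules/llm/manager';

// Sketch: refresh the model list (static models plus dynamically discovered ones).
async function refreshModels(apiKeys: Record<string, string>) {
  const manager = LLMManager.getInstance();

  // Providers that implement getDynamicModels (Ollama, LMStudio, OpenRouter, ...)
  // are queried; a failing provider falls back to an empty list rather than throwing.
  const models = await manager.updateModelList({ apiKeys, serverEnv: {} });

  return models;
}
```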
app/lib/modules/llm/providers/anthropic.ts
ADDED
@@ -0,0 +1,58 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { LanguageModelV1 } from 'ai';
import type { IProviderSetting } from '~/types/model';
import { createAnthropic } from '@ai-sdk/anthropic';

export default class AnthropicProvider extends BaseProvider {
  name = 'Anthropic';
  getApiKeyLink = 'https://console.anthropic.com/settings/keys';

  config = {
    apiTokenKey: 'ANTHROPIC_API_KEY',
  };

  staticModels: ModelInfo[] = [
    {
      name: 'claude-3-5-sonnet-latest',
      label: 'Claude 3.5 Sonnet (new)',
      provider: 'Anthropic',
      maxTokenAllowed: 8000,
    },
    {
      name: 'claude-3-5-sonnet-20240620',
      label: 'Claude 3.5 Sonnet (old)',
      provider: 'Anthropic',
      maxTokenAllowed: 8000,
    },
    {
      name: 'claude-3-5-haiku-latest',
      label: 'Claude 3.5 Haiku (new)',
      provider: 'Anthropic',
      maxTokenAllowed: 8000,
    },
    { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
    { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
    { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
  ];
  getModelInstance: (options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }) => LanguageModelV1 = (options) => {
    const { apiKeys, providerSettings, serverEnv, model } = options;
    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings,
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'ANTHROPIC_API_KEY',
    });
    const anthropic = createAnthropic({
      apiKey,
    });

    return anthropic(model);
  };
}
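`registry.ts` (+33 lines) is not shown in this section, but `manager.ts` imports it with `import * as providers from './registry'` and registers every exported class whose prototype extends `BaseProvider`. Its shape is therefore presumably a flat re-export of the provider classes, roughly:

```ts
// Hypothetical sketch of app/lib/modules/llm/registry.ts (contents not shown in this diff).
// Adding a new provider = drop a file in ./providers and re-export its default class here;
// LLMManager then picks it up automatically via Object.values(providers).
export { default as AnthropicProvider } from './providers/anthropic';
export { default as CohereProvider } from './providers/cohere';
export { default as DeepseekProvider } from './providers/deepseek';
// ...and so on for the remaining provider modules
```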
app/lib/modules/llm/providers/cohere.ts
ADDED
@@ -0,0 +1,54 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createCohere } from '@ai-sdk/cohere';

export default class CohereProvider extends BaseProvider {
  name = 'Cohere';
  getApiKeyLink = 'https://dashboard.cohere.com/api-keys';

  config = {
    apiTokenKey: 'COHERE_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
    { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'COHERE_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const cohere = createCohere({
      apiKey,
    });

    return cohere(model);
  }
}
app/lib/modules/llm/providers/deepseek.ts
ADDED
@@ -0,0 +1,47 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class DeepseekProvider extends BaseProvider {
  name = 'Deepseek';
  getApiKeyLink = 'https://platform.deepseek.com/apiKeys';

  config = {
    apiTokenKey: 'DEEPSEEK_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
    { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'DEEPSEEK_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const openai = createOpenAI({
      baseURL: 'https://api.deepseek.com/beta',
      apiKey,
    });

    return openai(model);
  }
}
app/lib/modules/llm/providers/google.ts
ADDED
@@ -0,0 +1,51 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';

export default class GoogleProvider extends BaseProvider {
  name = 'Google';
  getApiKeyLink = 'https://aistudio.google.com/app/apikey';

  config = {
    apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
    { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: any;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const google = createGoogleGenerativeAI({
      apiKey,
    });

    return google(model);
  }
}
app/lib/modules/llm/providers/groq.ts
ADDED
@@ -0,0 +1,51 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class GroqProvider extends BaseProvider {
  name = 'Groq';
  getApiKeyLink = 'https://console.groq.com/keys';

  config = {
    apiTokenKey: 'GROQ_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
    { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
    { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
    { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
    { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
    { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'GROQ_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const openai = createOpenAI({
      baseURL: 'https://api.groq.com/openai/v1',
      apiKey,
    });

    return openai(model);
  }
}
app/lib/modules/llm/providers/huggingface.ts
ADDED
@@ -0,0 +1,69 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

export default class HuggingFaceProvider extends BaseProvider {
  name = 'HuggingFace';
  getApiKeyLink = 'https://huggingface.co/settings/tokens';

  config = {
    apiTokenKey: 'HuggingFace_API_KEY',
  };

  staticModels: ModelInfo[] = [
    {
      name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
      label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
      provider: 'HuggingFace',
      maxTokenAllowed: 8000,
    },
    {
      name: '01-ai/Yi-1.5-34B-Chat',
      label: 'Yi-1.5-34B-Chat (HuggingFace)',
      provider: 'HuggingFace',
      maxTokenAllowed: 8000,
    },
    {
      name: 'meta-llama/Llama-3.1-70B-Instruct',
      label: 'Llama-3.1-70B-Instruct (HuggingFace)',
      provider: 'HuggingFace',
      maxTokenAllowed: 8000,
    },
    {
      name: 'meta-llama/Llama-3.1-405B',
      label: 'Llama-3.1-405B (HuggingFace)',
      provider: 'HuggingFace',
      maxTokenAllowed: 8000,
    },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'HuggingFace_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const openai = createOpenAI({
      baseURL: 'https://api-inference.huggingface.co/v1/',
      apiKey,
    });

    return openai(model);
  }
}
app/lib/modules/llm/providers/lmstudio.ts
ADDED
@@ -0,0 +1,73 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModelV1 } from 'ai';

export default class LMStudioProvider extends BaseProvider {
  name = 'LMStudio';
  getApiKeyLink = 'https://lmstudio.ai/';
  labelForGetApiKey = 'Get LMStudio';
  icon = 'i-ph:cloud-arrow-down';

  config = {
    baseUrlKey: 'LMSTUDIO_API_BASE_URL',
  };

  staticModels: ModelInfo[] = [];

  async getDynamicModels(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv: Record<string, string> = {},
  ): Promise<ModelInfo[]> {
    try {
      const { baseUrl } = this.getProviderBaseUrlAndKey({
        apiKeys,
        providerSettings: settings,
        serverEnv,
        defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
        defaultApiTokenKey: '',
      });

      if (!baseUrl) {
        return [];
      }

      const response = await fetch(`${baseUrl}/v1/models`);
      const data = (await response.json()) as { data: Array<{ id: string }> };

      return data.data.map((model) => ({
        name: model.id,
        label: model.id,
        provider: this.name,
        maxTokenAllowed: 8000,
      }));
    } catch (error: any) {
      console.log('Error getting LMStudio models:', error.message);

      return [];
    }
  }
  getModelInstance: (options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }) => LanguageModelV1 = (options) => {
    const { apiKeys, providerSettings, serverEnv, model } = options;
    const { baseUrl } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings,
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
      defaultApiTokenKey: '',
    });
    const lmstudio = createOpenAI({
      baseUrl: `${baseUrl}/v1`,
      apiKey: '',
    });

    return lmstudio(model);
  };
}
app/lib/modules/llm/providers/mistral.ts
ADDED
@@ -0,0 +1,53 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createMistral } from '@ai-sdk/mistral';

export default class MistralProvider extends BaseProvider {
  name = 'Mistral';
  getApiKeyLink = 'https://console.mistral.ai/api-keys/';

  config = {
    apiTokenKey: 'MISTRAL_API_KEY',
  };

  staticModels: ModelInfo[] = [
    { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
    { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
  ];

  getModelInstance(options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const { model, serverEnv, apiKeys, providerSettings } = options;

    const { apiKey } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: providerSettings?.[this.name],
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: '',
      defaultApiTokenKey: 'MISTRAL_API_KEY',
    });

    if (!apiKey) {
      throw new Error(`Missing API key for ${this.name} provider`);
    }

    const mistral = createMistral({
      apiKey,
    });

    return mistral(model);
  }
}
app/lib/modules/llm/providers/ollama.ts
ADDED
@@ -0,0 +1,99 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { ollama } from 'ollama-ai-provider';

interface OllamaModelDetails {
  parent_model: string;
  format: string;
  family: string;
  families: string[];
  parameter_size: string;
  quantization_level: string;
}

export interface OllamaModel {
  name: string;
  model: string;
  modified_at: string;
  size: number;
  digest: string;
  details: OllamaModelDetails;
}

export interface OllamaApiResponse {
  models: OllamaModel[];
}

export const DEFAULT_NUM_CTX = process?.env?.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;

export default class OllamaProvider extends BaseProvider {
  name = 'Ollama';
  getApiKeyLink = 'https://ollama.com/download';
  labelForGetApiKey = 'Download Ollama';
  icon = 'i-ph:cloud-arrow-down';

  config = {
    baseUrlKey: 'OLLAMA_API_BASE_URL',
  };

  staticModels: ModelInfo[] = [];

  async getDynamicModels(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv: Record<string, string> = {},
  ): Promise<ModelInfo[]> {
    try {
      const { baseUrl } = this.getProviderBaseUrlAndKey({
        apiKeys,
        providerSettings: settings,
        serverEnv,
        defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
        defaultApiTokenKey: '',
      });

      if (!baseUrl) {
        return [];
      }

      const response = await fetch(`${baseUrl}/api/tags`);
      const data = (await response.json()) as OllamaApiResponse;

      // console.log({ ollamamodels: data.models });

      return data.models.map((model: OllamaModel) => ({
        name: model.name,
        label: `${model.name} (${model.details.parameter_size})`,
        provider: this.name,
        maxTokenAllowed: 8000,
      }));
    } catch (e) {
      console.error('Failed to get Ollama models:', e);
      return [];
    }
  }
  getModelInstance: (options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }) => LanguageModelV1 = (options) => {
    const { apiKeys, providerSettings, serverEnv, model } = options;
    const { baseUrl } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings,
      serverEnv: serverEnv as any,
      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
      defaultApiTokenKey: '',
    });
    const ollamaInstance = ollama(model, {
      numCtx: DEFAULT_NUM_CTX,
    }) as LanguageModelV1 & { config: any };

    ollamaInstance.config.baseURL = `${baseUrl}/api`;

    return ollamaInstance;
  };
}
app/lib/modules/llm/providers/open-router.ts
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
|
| 6 |
+
|
| 7 |
+
interface OpenRouterModel {
|
| 8 |
+
name: string;
|
| 9 |
+
id: string;
|
| 10 |
+
context_length: number;
|
| 11 |
+
pricing: {
|
| 12 |
+
prompt: number;
|
| 13 |
+
completion: number;
|
| 14 |
+
};
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
interface OpenRouterModelsResponse {
|
| 18 |
+
data: OpenRouterModel[];
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
export default class OpenRouterProvider extends BaseProvider {
|
| 22 |
+
name = 'OpenRouter';
|
| 23 |
+
getApiKeyLink = 'https://openrouter.ai/settings/keys';
|
| 24 |
+
|
| 25 |
+
config = {
|
| 26 |
+
apiTokenKey: 'OPEN_ROUTER_API_KEY',
|
| 27 |
+
};
|
| 28 |
+
|
| 29 |
+
staticModels: ModelInfo[] = [
|
| 30 |
+
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 31 |
+
{
|
| 32 |
+
name: 'anthropic/claude-3.5-sonnet',
|
| 33 |
+
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
| 34 |
+
provider: 'OpenRouter',
|
| 35 |
+
maxTokenAllowed: 8000,
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
name: 'anthropic/claude-3-haiku',
|
| 39 |
+
label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
|
| 40 |
+
provider: 'OpenRouter',
|
| 41 |
+
maxTokenAllowed: 8000,
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
name: 'deepseek/deepseek-coder',
|
| 45 |
+
label: 'Deepseek-Coder V2 236B (OpenRouter)',
|
| 46 |
+
provider: 'OpenRouter',
|
| 47 |
+
maxTokenAllowed: 8000,
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
name: 'google/gemini-flash-1.5',
|
| 51 |
+
label: 'Google Gemini Flash 1.5 (OpenRouter)',
|
| 52 |
+
provider: 'OpenRouter',
|
| 53 |
+
maxTokenAllowed: 8000,
|
| 54 |
+
},
|
| 55 |
+
{
|
| 56 |
+
name: 'google/gemini-pro-1.5',
|
| 57 |
+
label: 'Google Gemini Pro 1.5 (OpenRouter)',
|
| 58 |
+
provider: 'OpenRouter',
|
| 59 |
+
maxTokenAllowed: 8000,
|
| 60 |
+
},
|
| 61 |
+
{ name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
|
| 62 |
+
{
|
| 63 |
+
name: 'mistralai/mistral-nemo',
|
| 64 |
+
label: 'OpenRouter Mistral Nemo (OpenRouter)',
|
| 65 |
+
provider: 'OpenRouter',
|
| 66 |
+
maxTokenAllowed: 8000,
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
name: 'qwen/qwen-110b-chat',
|
| 70 |
+
label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
|
| 71 |
+
provider: 'OpenRouter',
|
| 72 |
+
maxTokenAllowed: 8000,
|
| 73 |
+
},
|
| 74 |
+
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
|
| 75 |
+
];
|
| 76 |
+
|
| 77 |
+
async getDynamicModels(
|
| 78 |
+
_apiKeys?: Record<string, string>,
|
| 79 |
+
_settings?: IProviderSetting,
|
| 80 |
+
_serverEnv: Record<string, string> = {},
|
| 81 |
+
): Promise<ModelInfo[]> {
|
| 82 |
+
try {
|
| 83 |
+
const response = await fetch('https://openrouter.ai/api/v1/models', {
|
| 84 |
+
headers: {
|
| 85 |
+
'Content-Type': 'application/json',
|
| 86 |
+
},
|
| 87 |
+
});
|
| 88 |
+
|
| 89 |
+
const data = (await response.json()) as OpenRouterModelsResponse;
|
| 90 |
+
|
| 91 |
+
return data.data
|
| 92 |
+
.sort((a, b) => a.name.localeCompare(b.name))
|
| 93 |
+
.map((m) => ({
|
| 94 |
+
name: m.id,
|
| 95 |
+
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(2)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
| 96 |
+
provider: this.name,
|
| 97 |
+
maxTokenAllowed: 8000,
|
| 98 |
+
}));
|
| 99 |
+
} catch (error) {
|
| 100 |
+
console.error('Error getting OpenRouter models:', error);
|
| 101 |
+
return [];
|
| 102 |
+
}
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
getModelInstance(options: {
|
| 106 |
+
model: string;
|
| 107 |
+
serverEnv: Env;
|
| 108 |
+
apiKeys?: Record<string, string>;
|
| 109 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 110 |
+
}): LanguageModelV1 {
|
| 111 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 112 |
+
|
| 113 |
+
const { apiKey } = this.getProviderBaseUrlAndKey({
|
| 114 |
+
apiKeys,
|
| 115 |
+
providerSettings: providerSettings?.[this.name],
|
| 116 |
+
serverEnv: serverEnv as any,
|
| 117 |
+
defaultBaseUrlKey: '',
|
| 118 |
+
defaultApiTokenKey: 'OPEN_ROUTER_API_KEY',
|
| 119 |
+
});
|
| 120 |
+
|
| 121 |
+
if (!apiKey) {
|
| 122 |
+
throw new Error(`Missing API key for ${this.name} provider`);
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
const openRouter = createOpenRouter({
|
| 126 |
+
apiKey,
|
| 127 |
+
});
|
| 128 |
+
const instance = openRouter.chat(model) as LanguageModelV1;
|
| 129 |
+
|
| 130 |
+
return instance;
|
| 131 |
+
}
|
| 132 |
+
}
|
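A usage sketch for the OpenRouter provider above (illustrative only; the API key is a placeholder and the awaits assume an async context). The model catalog endpoint is public, so getDynamicModels needs no key, while getModelInstance requires OPEN_ROUTER_API_KEY from apiKeys, provider settings, or the environment:

import OpenRouterProvider from '~/lib/modules/llm/providers/open-router';

const openRouter = new OpenRouterProvider();

// Public catalog: sorted by name, with per-million-token pricing baked into the label.
const openRouterModels = await openRouter.getDynamicModels();

// Placeholder key for illustration only.
const sonnet = openRouter.getModelInstance({
  model: 'anthropic/claude-3.5-sonnet',
  serverEnv: { OPEN_ROUTER_API_KEY: 'sk-or-...' } as unknown as Env,
});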
app/lib/modules/llm/providers/openai-like.ts
ADDED
|
@@ -0,0 +1,77 @@
|
| 1 |
+
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
|
| 6 |
+
export default class OpenAILikeProvider extends BaseProvider {
|
| 7 |
+
name = 'OpenAILike';
|
| 8 |
+
getApiKeyLink = undefined;
|
| 9 |
+
|
| 10 |
+
config = {
|
| 11 |
+
baseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
| 12 |
+
apiTokenKey: 'OPENAI_LIKE_API_KEY',
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
staticModels: ModelInfo[] = [];
|
| 16 |
+
|
| 17 |
+
async getDynamicModels(
|
| 18 |
+
apiKeys?: Record<string, string>,
|
| 19 |
+
settings?: IProviderSetting,
|
| 20 |
+
serverEnv: Record<string, string> = {},
|
| 21 |
+
): Promise<ModelInfo[]> {
|
| 22 |
+
try {
|
| 23 |
+
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
| 24 |
+
apiKeys,
|
| 25 |
+
providerSettings: settings,
|
| 26 |
+
serverEnv,
|
| 27 |
+
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
| 28 |
+
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
|
| 29 |
+
});
|
| 30 |
+
|
| 31 |
+
if (!baseUrl || !apiKey) {
|
| 32 |
+
return [];
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
const response = await fetch(`${baseUrl}/models`, {
|
| 36 |
+
headers: {
|
| 37 |
+
Authorization: `Bearer ${apiKey}`,
|
| 38 |
+
},
|
| 39 |
+
});
|
| 40 |
+
|
| 41 |
+
const res = (await response.json()) as any;
|
| 42 |
+
|
| 43 |
+
return res.data.map((model: any) => ({
|
| 44 |
+
name: model.id,
|
| 45 |
+
label: model.id,
|
| 46 |
+
provider: this.name,
|
| 47 |
+
maxTokenAllowed: 8000,
|
| 48 |
+
}));
|
| 49 |
+
} catch (error) {
|
| 50 |
+
console.error('Error getting OpenAILike models:', error);
|
| 51 |
+
return [];
|
| 52 |
+
}
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
getModelInstance(options: {
|
| 56 |
+
model: string;
|
| 57 |
+
serverEnv: Env;
|
| 58 |
+
apiKeys?: Record<string, string>;
|
| 59 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 60 |
+
}): LanguageModelV1 {
|
| 61 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 62 |
+
|
| 63 |
+
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
| 64 |
+
apiKeys,
|
| 65 |
+
providerSettings: providerSettings?.[this.name],
|
| 66 |
+
serverEnv: serverEnv as any,
|
| 67 |
+
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
| 68 |
+
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
|
| 69 |
+
});
|
| 70 |
+
|
| 71 |
+
if (!baseUrl || !apiKey) {
|
| 72 |
+
throw new Error(`Missing configuration for ${this.name} provider`);
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
return getOpenAILikeModel(baseUrl, apiKey, model);
|
| 76 |
+
}
|
| 77 |
+
}
|
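OpenAILike is the generic adapter for any OpenAI-compatible endpoint: it lists `${baseUrl}/models` with a bearer token and builds instances through getOpenAILikeModel. A sketch pointing it at a self-hosted server (the URL, key, and fallback model id are placeholders; the await assumes an async context):

import OpenAILikeProvider from '~/lib/modules/llm/providers/openai-like';

const local = new OpenAILikeProvider();
const localEnv = {
  OPENAI_LIKE_API_BASE_URL: 'http://localhost:8000/v1', // placeholder endpoint
  OPENAI_LIKE_API_KEY: 'local-key', // placeholder key
};

const localModels = await local.getDynamicModels({}, undefined, localEnv);
const localChat = local.getModelInstance({
  model: localModels[0]?.name ?? 'my-local-model',
  serverEnv: localEnv as unknown as Env,
});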
app/lib/modules/llm/providers/openai.ts
ADDED
|
@@ -0,0 +1,48 @@
|
| 1 |
+
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
import { createOpenAI } from '@ai-sdk/openai';
|
| 6 |
+
|
| 7 |
+
export default class OpenAIProvider extends BaseProvider {
|
| 8 |
+
name = 'OpenAI';
|
| 9 |
+
getApiKeyLink = 'https://platform.openai.com/api-keys';
|
| 10 |
+
|
| 11 |
+
config = {
|
| 12 |
+
apiTokenKey: 'OPENAI_API_KEY',
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
staticModels: ModelInfo[] = [
|
| 16 |
+
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 17 |
+
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 18 |
+
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 19 |
+
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 20 |
+
];
|
| 21 |
+
|
| 22 |
+
getModelInstance(options: {
|
| 23 |
+
model: string;
|
| 24 |
+
serverEnv: Env;
|
| 25 |
+
apiKeys?: Record<string, string>;
|
| 26 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 27 |
+
}): LanguageModelV1 {
|
| 28 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 29 |
+
|
| 30 |
+
const { apiKey } = this.getProviderBaseUrlAndKey({
|
| 31 |
+
apiKeys,
|
| 32 |
+
providerSettings: providerSettings?.[this.name],
|
| 33 |
+
serverEnv: serverEnv as any,
|
| 34 |
+
defaultBaseUrlKey: '',
|
| 35 |
+
defaultApiTokenKey: 'OPENAI_API_KEY',
|
| 36 |
+
});
|
| 37 |
+
|
| 38 |
+
if (!apiKey) {
|
| 39 |
+
throw new Error(`Missing API key for ${this.name} provider`);
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
const openai = createOpenAI({
|
| 43 |
+
apiKey,
|
| 44 |
+
});
|
| 45 |
+
|
| 46 |
+
return openai(model);
|
| 47 |
+
}
|
| 48 |
+
}
|
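The OpenAI provider needs only OPENAI_API_KEY; model ids come from its staticModels list above. A one-call sketch (placeholder key):

import OpenAIProvider from '~/lib/modules/llm/providers/openai';

const gpt4oMini = new OpenAIProvider().getModelInstance({
  model: 'gpt-4o-mini',
  serverEnv: { OPENAI_API_KEY: 'sk-...' } as unknown as Env, // placeholder key
});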
app/lib/modules/llm/providers/perplexity.ts
ADDED
|
@@ -0,0 +1,63 @@
|
| 1 |
+
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
import { createOpenAI } from '@ai-sdk/openai';
|
| 6 |
+
|
| 7 |
+
export default class PerplexityProvider extends BaseProvider {
|
| 8 |
+
name = 'Perplexity';
|
| 9 |
+
getApiKeyLink = 'https://www.perplexity.ai/settings/api';
|
| 10 |
+
|
| 11 |
+
config = {
|
| 12 |
+
apiTokenKey: 'PERPLEXITY_API_KEY',
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
staticModels: ModelInfo[] = [
|
| 16 |
+
{
|
| 17 |
+
name: 'llama-3.1-sonar-small-128k-online',
|
| 18 |
+
label: 'Sonar Small Online',
|
| 19 |
+
provider: 'Perplexity',
|
| 20 |
+
maxTokenAllowed: 8192,
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
name: 'llama-3.1-sonar-large-128k-online',
|
| 24 |
+
label: 'Sonar Large Online',
|
| 25 |
+
provider: 'Perplexity',
|
| 26 |
+
maxTokenAllowed: 8192,
|
| 27 |
+
},
|
| 28 |
+
{
|
| 29 |
+
name: 'llama-3.1-sonar-huge-128k-online',
|
| 30 |
+
label: 'Sonar Huge Online',
|
| 31 |
+
provider: 'Perplexity',
|
| 32 |
+
maxTokenAllowed: 8192,
|
| 33 |
+
},
|
| 34 |
+
];
|
| 35 |
+
|
| 36 |
+
getModelInstance(options: {
|
| 37 |
+
model: string;
|
| 38 |
+
serverEnv: Env;
|
| 39 |
+
apiKeys?: Record<string, string>;
|
| 40 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 41 |
+
}): LanguageModelV1 {
|
| 42 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 43 |
+
|
| 44 |
+
const { apiKey } = this.getProviderBaseUrlAndKey({
|
| 45 |
+
apiKeys,
|
| 46 |
+
providerSettings: providerSettings?.[this.name],
|
| 47 |
+
serverEnv: serverEnv as any,
|
| 48 |
+
defaultBaseUrlKey: '',
|
| 49 |
+
defaultApiTokenKey: 'PERPLEXITY_API_KEY',
|
| 50 |
+
});
|
| 51 |
+
|
| 52 |
+
if (!apiKey) {
|
| 53 |
+
throw new Error(`Missing API key for ${this.name} provider`);
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
const perplexity = createOpenAI({
|
| 57 |
+
baseURL: 'https://api.perplexity.ai/',
|
| 58 |
+
apiKey,
|
| 59 |
+
});
|
| 60 |
+
|
| 61 |
+
return perplexity(model);
|
| 62 |
+
}
|
| 63 |
+
}
|
app/lib/modules/llm/providers/together.ts
ADDED
|
@@ -0,0 +1,100 @@
|
| 1 |
+
import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
|
| 6 |
+
export default class TogetherProvider extends BaseProvider {
|
| 7 |
+
name = 'Together';
|
| 8 |
+
getApiKeyLink = 'https://api.together.xyz/settings/api-keys';
|
| 9 |
+
|
| 10 |
+
config = {
|
| 11 |
+
baseUrlKey: 'TOGETHER_API_BASE_URL',
|
| 12 |
+
apiTokenKey: 'TOGETHER_API_KEY',
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
staticModels: ModelInfo[] = [
|
| 16 |
+
{
|
| 17 |
+
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 18 |
+
label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 19 |
+
provider: 'Together',
|
| 20 |
+
maxTokenAllowed: 8000,
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
| 24 |
+
label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
| 25 |
+
provider: 'Together',
|
| 26 |
+
maxTokenAllowed: 8000,
|
| 27 |
+
},
|
| 28 |
+
{
|
| 29 |
+
name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
|
| 30 |
+
label: 'Mixtral 8x7B Instruct',
|
| 31 |
+
provider: 'Together',
|
| 32 |
+
maxTokenAllowed: 8192,
|
| 33 |
+
},
|
| 34 |
+
];
|
| 35 |
+
|
| 36 |
+
async getDynamicModels(
|
| 37 |
+
apiKeys?: Record<string, string>,
|
| 38 |
+
settings?: IProviderSetting,
|
| 39 |
+
serverEnv: Record<string, string> = {},
|
| 40 |
+
): Promise<ModelInfo[]> {
|
| 41 |
+
try {
|
| 42 |
+
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
| 43 |
+
apiKeys,
|
| 44 |
+
providerSettings: settings,
|
| 45 |
+
serverEnv,
|
| 46 |
+
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
|
| 47 |
+
defaultApiTokenKey: 'TOGETHER_API_KEY',
|
| 48 |
+
});
|
| 49 |
+
const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
|
| 50 |
+
|
| 51 |
+
if (!baseUrl || !apiKey) {
|
| 52 |
+
return [];
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
// console.log({ baseUrl, apiKey });
|
| 56 |
+
|
| 57 |
+
const response = await fetch(`${baseUrl}/models`, {
|
| 58 |
+
headers: {
|
| 59 |
+
Authorization: `Bearer ${apiKey}`,
|
| 60 |
+
},
|
| 61 |
+
});
|
| 62 |
+
|
| 63 |
+
const res = (await response.json()) as any;
|
| 64 |
+
const data = (res || []).filter((model: any) => model.type === 'chat');
|
| 65 |
+
|
| 66 |
+
return data.map((m: any) => ({
|
| 67 |
+
name: m.id,
|
| 68 |
+
label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
| 69 |
+
provider: this.name,
|
| 70 |
+
maxTokenAllowed: 8000,
|
| 71 |
+
}));
|
| 72 |
+
} catch (error: any) {
|
| 73 |
+
console.error('Error getting Together models:', error.message);
|
| 74 |
+
return [];
|
| 75 |
+
}
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
getModelInstance(options: {
|
| 79 |
+
model: string;
|
| 80 |
+
serverEnv: Env;
|
| 81 |
+
apiKeys?: Record<string, string>;
|
| 82 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 83 |
+
}): LanguageModelV1 {
|
| 84 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 85 |
+
|
| 86 |
+
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
|
| 87 |
+
apiKeys,
|
| 88 |
+
providerSettings: providerSettings?.[this.name],
|
| 89 |
+
serverEnv: serverEnv as any,
|
| 90 |
+
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
|
| 91 |
+
defaultApiTokenKey: 'TOGETHER_API_KEY',
|
| 92 |
+
});
|
| 93 |
+
|
| 94 |
+
if (!baseUrl || !apiKey) {
|
| 95 |
+
throw new Error(`Missing configuration for ${this.name} provider`);
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
return getOpenAILikeModel(baseUrl, apiKey, model);
|
| 99 |
+
}
|
| 100 |
+
}
|
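Note that getDynamicModels above falls back to https://api.together.xyz/v1 when TOGETHER_API_BASE_URL is not configured, while getModelInstance still requires both a base URL and a key. A sketch of listing chat models with only a key (the key value, and the assumption that apiKeys is keyed by provider name, are illustrative; the await assumes an async context):

import TogetherProvider from '~/lib/modules/llm/providers/together';

const together = new TogetherProvider();
const togetherModels = await together.getDynamicModels(
  { Together: 'tgp_...' }, // placeholder key; assumed to be looked up by provider name
  undefined,
  {}, // no TOGETHER_API_BASE_URL here, so the public fallback URL is used
);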
app/lib/modules/llm/providers/xai.ts
ADDED
|
@@ -0,0 +1,47 @@
|
| 1 |
+
import { BaseProvider } from '~/lib/modules/llm/base-provider';
|
| 2 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 3 |
+
import type { IProviderSetting } from '~/types/model';
|
| 4 |
+
import type { LanguageModelV1 } from 'ai';
|
| 5 |
+
import { createOpenAI } from '@ai-sdk/openai';
|
| 6 |
+
|
| 7 |
+
export default class XAIProvider extends BaseProvider {
|
| 8 |
+
name = 'xAI';
|
| 9 |
+
getApiKeyLink = 'https://docs.x.ai/docs/quickstart#creating-an-api-key';
|
| 10 |
+
|
| 11 |
+
config = {
|
| 12 |
+
apiTokenKey: 'XAI_API_KEY',
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
staticModels: ModelInfo[] = [
|
| 16 |
+
{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 },
|
| 17 |
+
{ name: 'grok-2-1212', label: 'xAI Grok2 1212', provider: 'xAI', maxTokenAllowed: 8000 },
|
| 18 |
+
];
|
| 19 |
+
|
| 20 |
+
getModelInstance(options: {
|
| 21 |
+
model: string;
|
| 22 |
+
serverEnv: Env;
|
| 23 |
+
apiKeys?: Record<string, string>;
|
| 24 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 25 |
+
}): LanguageModelV1 {
|
| 26 |
+
const { model, serverEnv, apiKeys, providerSettings } = options;
|
| 27 |
+
|
| 28 |
+
const { apiKey } = this.getProviderBaseUrlAndKey({
|
| 29 |
+
apiKeys,
|
| 30 |
+
providerSettings: providerSettings?.[this.name],
|
| 31 |
+
serverEnv: serverEnv as any,
|
| 32 |
+
defaultBaseUrlKey: '',
|
| 33 |
+
defaultApiTokenKey: 'XAI_API_KEY',
|
| 34 |
+
});
|
| 35 |
+
|
| 36 |
+
if (!apiKey) {
|
| 37 |
+
throw new Error(`Missing API key for ${this.name} provider`);
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
const openai = createOpenAI({
|
| 41 |
+
baseURL: 'https://api.x.ai/v1',
|
| 42 |
+
apiKey,
|
| 43 |
+
});
|
| 44 |
+
|
| 45 |
+
return openai(model);
|
| 46 |
+
}
|
| 47 |
+
}
|
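Perplexity and xAI follow the same pattern: an OpenAI-compatible API consumed through createOpenAI with a fixed baseURL. A sketch for xAI (placeholder key):

import XAIProvider from '~/lib/modules/llm/providers/xai';

const grok = new XAIProvider().getModelInstance({
  model: 'grok-2-1212',
  serverEnv: { XAI_API_KEY: 'xai-...' } as unknown as Env, // placeholder key
});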
app/lib/modules/llm/registry.ts
ADDED
|
@@ -0,0 +1,33 @@
|
| 1 |
+
import AnthropicProvider from './providers/anthropic';
|
| 2 |
+
import CohereProvider from './providers/cohere';
|
| 3 |
+
import DeepseekProvider from './providers/deepseek';
|
| 4 |
+
import GoogleProvider from './providers/google';
|
| 5 |
+
import GroqProvider from './providers/groq';
|
| 6 |
+
import HuggingFaceProvider from './providers/huggingface';
|
| 7 |
+
import LMStudioProvider from './providers/lmstudio';
|
| 8 |
+
import MistralProvider from './providers/mistral';
|
| 9 |
+
import OllamaProvider from './providers/ollama';
|
| 10 |
+
import OpenRouterProvider from './providers/open-router';
|
| 11 |
+
import OpenAILikeProvider from './providers/openai-like';
|
| 12 |
+
import OpenAIProvider from './providers/openai';
|
| 13 |
+
import PerplexityProvider from './providers/perplexity';
|
| 14 |
+
import TogetherProvider from './providers/together';
|
| 15 |
+
import XAIProvider from './providers/xai';
|
| 16 |
+
|
| 17 |
+
export {
|
| 18 |
+
AnthropicProvider,
|
| 19 |
+
CohereProvider,
|
| 20 |
+
DeepseekProvider,
|
| 21 |
+
GoogleProvider,
|
| 22 |
+
GroqProvider,
|
| 23 |
+
HuggingFaceProvider,
|
| 24 |
+
MistralProvider,
|
| 25 |
+
OllamaProvider,
|
| 26 |
+
OpenAIProvider,
|
| 27 |
+
OpenRouterProvider,
|
| 28 |
+
OpenAILikeProvider,
|
| 29 |
+
PerplexityProvider,
|
| 30 |
+
XAIProvider,
|
| 31 |
+
TogetherProvider,
|
| 32 |
+
LMStudioProvider,
|
| 33 |
+
};
|
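The registry is a flat barrel of provider classes, so a manager can discover providers by enumerating the module's exports instead of hard-coding a list; the actual wiring lives in app/lib/modules/llm/manager.ts added by this PR. A sketch of that enumeration (illustrative only, not the manager's real code):

import * as registry from '~/lib/modules/llm/registry';
import type { BaseProvider } from '~/lib/modules/llm/base-provider';

const providerInstances = Object.values(registry).map(
  (ProviderClass) => new (ProviderClass as new () => BaseProvider)(),
);
providerInstances.forEach((p) => console.log('discovered provider:', p.name));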
app/lib/modules/llm/types.ts
ADDED
|
@@ -0,0 +1,32 @@
|
| 1 |
+
import type { LanguageModelV1 } from 'ai';
|
| 2 |
+
import type { IProviderSetting } from '~/types/model';
|
| 3 |
+
|
| 4 |
+
export interface ModelInfo {
|
| 5 |
+
name: string;
|
| 6 |
+
label: string;
|
| 7 |
+
provider: string;
|
| 8 |
+
maxTokenAllowed: number;
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
export interface ProviderInfo {
|
| 12 |
+
name: string;
|
| 13 |
+
staticModels: ModelInfo[];
|
| 14 |
+
getDynamicModels?: (
|
| 15 |
+
apiKeys?: Record<string, string>,
|
| 16 |
+
settings?: IProviderSetting,
|
| 17 |
+
serverEnv?: Record<string, string>,
|
| 18 |
+
) => Promise<ModelInfo[]>;
|
| 19 |
+
getModelInstance: (options: {
|
| 20 |
+
model: string;
|
| 21 |
+
serverEnv: Env;
|
| 22 |
+
apiKeys?: Record<string, string>;
|
| 23 |
+
providerSettings?: Record<string, IProviderSetting>;
|
| 24 |
+
}) => LanguageModelV1;
|
| 25 |
+
getApiKeyLink?: string;
|
| 26 |
+
labelForGetApiKey?: string;
|
| 27 |
+
icon?: string;
|
| 28 |
+
}
|
| 29 |
+
export interface ProviderConfig {
|
| 30 |
+
baseUrlKey?: string;
|
| 31 |
+
apiTokenKey?: string;
|
| 32 |
+
}
|
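Every provider module ultimately satisfies ProviderInfo (BaseProvider fills in the common plumbing). A minimal hand-rolled object conforming to the interface, purely illustrative with made-up names:

import type { ModelInfo, ProviderInfo } from '~/lib/modules/llm/types';

const exampleModels: ModelInfo[] = [
  { name: 'example-model', label: 'Example Model', provider: 'Example', maxTokenAllowed: 8000 },
];

const exampleProvider: ProviderInfo = {
  name: 'Example',
  staticModels: exampleModels,
  getModelInstance: () => {
    // Always throws; the `never` return is assignable to LanguageModelV1.
    throw new Error('Example provider is illustrative only');
  },
};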
app/types/model.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
import type { ModelInfo } from '~/
|
| 2 |
|
| 3 |
export type ProviderInfo = {
|
| 4 |
staticModels: ModelInfo[];
|
|
|
|
| 1 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
| 2 |
|
| 3 |
export type ProviderInfo = {
|
| 4 |
staticModels: ModelInfo[];
|
app/utils/constants.ts
CHANGED
|
@@ -1,8 +1,7 @@
|
|
| 1 |
-
import
|
| 2 |
-
|
| 3 |
-
import
|
| 4 |
-
import {
|
| 5 |
-
import { logStore } from '~/lib/stores/logs';
|
| 6 |
|
| 7 |
export const WORK_DIR_NAME = 'project';
|
| 8 |
export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
|
|
@@ -12,619 +11,334 @@ export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/;
|
|
| 12 |
export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
|
| 13 |
export const PROMPT_COOKIE_KEY = 'cachedPrompt';
|
| 14 |
|
| 15 |
-
const
|
| 16 |
-
|
| 17 |
-
const PROVIDER_LIST
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
{
|
| 28 |
-
name: 'claude-3-5-sonnet-
|
| 29 |
-
label: 'Claude 3.5 Sonnet (
|
| 30 |
-
provider: 'Anthropic',
|
| 31 |
-
maxTokenAllowed: 8000,
|
| 32 |
-
},
|
| 33 |
-
{
|
| 34 |
-
name: 'claude-3-5-
|
| 35 |
-
label: 'Claude 3.5
|
| 36 |
-
provider: 'Anthropic',
|
| 37 |
-
maxTokenAllowed: 8000,
|
| 38 |
-
},
|
| 39 |
-
{
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
getApiKeyLink: 'https://
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
{ name: 'command-
|
| 68 |
-
{ name: 'command-
|
| 69 |
-
{ name: '
|
| 70 |
-
{ name: '
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
},
|
| 84 |
-
{
|
| 85 |
-
name: 'anthropic/claude-3-
|
| 86 |
-
label: 'Anthropic: Claude 3
|
| 87 |
-
provider: 'OpenRouter',
|
| 88 |
-
maxTokenAllowed: 8000,
|
| 89 |
-
},
|
| 90 |
-
{
|
| 91 |
-
name: '
|
| 92 |
-
label: '
|
| 93 |
-
provider: 'OpenRouter',
|
| 94 |
-
maxTokenAllowed: 8000,
|
| 95 |
-
},
|
| 96 |
-
{
|
| 97 |
-
name: '
|
| 98 |
-
label: '
|
| 99 |
-
provider: 'OpenRouter',
|
| 100 |
-
maxTokenAllowed: 8000,
|
| 101 |
-
},
|
| 102 |
-
{
|
| 103 |
-
name: 'google/gemini-
|
| 104 |
-
label: 'Google Gemini
|
| 105 |
-
provider: 'OpenRouter',
|
| 106 |
-
maxTokenAllowed: 8000,
|
| 107 |
-
},
|
| 108 |
-
{
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
},
|
| 115 |
-
{
|
| 116 |
-
name: '
|
| 117 |
-
label: 'OpenRouter
|
| 118 |
-
provider: 'OpenRouter',
|
| 119 |
-
maxTokenAllowed: 8000,
|
| 120 |
-
},
|
| 121 |
-
{
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
{ name: 'gemini-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
{
|
| 161 |
-
name: '
|
| 162 |
-
label: '
|
| 163 |
-
provider: 'HuggingFace',
|
| 164 |
-
maxTokenAllowed: 8000,
|
| 165 |
-
},
|
| 166 |
-
{
|
| 167 |
-
name: '
|
| 168 |
-
label: '
|
| 169 |
-
provider: 'HuggingFace',
|
| 170 |
-
maxTokenAllowed: 8000,
|
| 171 |
-
},
|
| 172 |
-
{
|
| 173 |
-
name: '
|
| 174 |
-
label: '
|
| 175 |
-
provider: 'HuggingFace',
|
| 176 |
-
maxTokenAllowed: 8000,
|
| 177 |
-
},
|
| 178 |
-
{
|
| 179 |
-
name: '
|
| 180 |
-
label: '
|
| 181 |
-
provider: 'HuggingFace',
|
| 182 |
-
maxTokenAllowed: 8000,
|
| 183 |
-
},
|
| 184 |
-
{
|
| 185 |
-
name: 'Qwen/Qwen2.5-
|
| 186 |
-
label: 'Qwen2.5-
|
| 187 |
-
provider: 'HuggingFace',
|
| 188 |
-
maxTokenAllowed: 8000,
|
| 189 |
-
},
|
| 190 |
-
{
|
| 191 |
-
name: '
|
| 192 |
-
label: '
|
| 193 |
-
provider: 'HuggingFace',
|
| 194 |
-
maxTokenAllowed: 8000,
|
| 195 |
-
},
|
| 196 |
-
{
|
| 197 |
-
name: 'meta-llama/Llama-3.1-
|
| 198 |
-
label: 'Llama-3.1-
|
| 199 |
-
provider: 'HuggingFace',
|
| 200 |
-
maxTokenAllowed: 8000,
|
| 201 |
-
},
|
| 202 |
-
{
|
| 203 |
-
name: '
|
| 204 |
-
label: '
|
| 205 |
-
provider: 'HuggingFace',
|
| 206 |
-
maxTokenAllowed: 8000,
|
| 207 |
-
},
|
| 208 |
-
{
|
| 209 |
-
name: '
|
| 210 |
-
label: '
|
| 211 |
-
provider: 'HuggingFace',
|
| 212 |
-
maxTokenAllowed: 8000,
|
| 213 |
-
},
|
| 214 |
-
{
|
| 215 |
-
name: '
|
| 216 |
-
label: '
|
| 217 |
-
provider: 'HuggingFace',
|
| 218 |
-
maxTokenAllowed: 8000,
|
| 219 |
-
},
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
{ name: 'open-
|
| 256 |
-
{ name: 'open-
|
| 257 |
-
{ name: '
|
| 258 |
-
{ name: '
|
| 259 |
-
{ name: '
|
| 260 |
-
{ name: '
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
|
| 278 |
-
|
| 279 |
-
|
| 280 |
-
|
| 281 |
-
|
| 282 |
-
|
| 283 |
-
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
|
| 290 |
-
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
]
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
apiTokenKey:
|
| 333 |
-
},
|
| 334 |
-
HuggingFace: {
|
| 335 |
-
apiTokenKey: 'HuggingFace_API_KEY',
|
| 336 |
-
},
|
| 337 |
-
OpenRouter: {
|
| 338 |
-
apiTokenKey: 'OPEN_ROUTER_API_KEY',
|
| 339 |
-
},
|
| 340 |
-
Google: {
|
| 341 |
-
apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY',
|
| 342 |
-
},
|
| 343 |
-
OpenAILike: {
|
| 344 |
-
baseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
| 345 |
-
apiTokenKey: 'OPENAI_LIKE_API_KEY',
|
| 346 |
-
},
|
| 347 |
-
Together: {
|
| 348 |
-
baseUrlKey: 'TOGETHER_API_BASE_URL',
|
| 349 |
-
apiTokenKey: 'TOGETHER_API_KEY',
|
| 350 |
-
},
|
| 351 |
-
Deepseek: {
|
| 352 |
-
apiTokenKey: 'DEEPSEEK_API_KEY',
|
| 353 |
-
},
|
| 354 |
-
Mistral: {
|
| 355 |
-
apiTokenKey: 'MISTRAL_API_KEY',
|
| 356 |
-
},
|
| 357 |
-
LMStudio: {
|
| 358 |
-
baseUrlKey: 'LMSTUDIO_API_BASE_URL',
|
| 359 |
-
},
|
| 360 |
-
xAI: {
|
| 361 |
-
apiTokenKey: 'XAI_API_KEY',
|
| 362 |
-
},
|
| 363 |
-
Cohere: {
|
| 364 |
-
apiTokenKey: 'COHERE_API_KEY',
|
| 365 |
-
},
|
| 366 |
-
Perplexity: {
|
| 367 |
-
apiTokenKey: 'PERPLEXITY_API_KEY',
|
| 368 |
-
},
|
| 369 |
-
Ollama: {
|
| 370 |
-
baseUrlKey: 'OLLAMA_API_BASE_URL',
|
| 371 |
-
},
|
| 372 |
-
};
|
| 373 |
-
|
| 374 |
-
export const getProviderBaseUrlAndKey = (options: {
|
| 375 |
-
provider: string;
|
| 376 |
-
apiKeys?: Record<string, string>;
|
| 377 |
-
providerSettings?: IProviderSetting;
|
| 378 |
-
serverEnv?: Record<string, string>;
|
| 379 |
-
defaultBaseUrlKey: string;
|
| 380 |
-
defaultApiTokenKey: string;
|
| 381 |
-
}) => {
|
| 382 |
-
const { provider, apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options;
|
| 383 |
-
let settingsBaseUrl = providerSettings?.baseUrl;
|
| 384 |
-
|
| 385 |
-
if (settingsBaseUrl && settingsBaseUrl.length == 0) {
|
| 386 |
-
settingsBaseUrl = undefined;
|
| 387 |
-
}
|
| 388 |
-
|
| 389 |
-
const baseUrlKey = providerBaseUrlEnvKeys[provider]?.baseUrlKey || defaultBaseUrlKey;
|
| 390 |
-
const baseUrl = settingsBaseUrl || serverEnv?.[baseUrlKey] || process.env[baseUrlKey] || import.meta.env[baseUrlKey];
|
| 391 |
-
|
| 392 |
-
const apiTokenKey = providerBaseUrlEnvKeys[provider]?.apiTokenKey || defaultApiTokenKey;
|
| 393 |
-
const apiKey =
|
| 394 |
-
apiKeys?.[provider] || serverEnv?.[apiTokenKey] || process.env[apiTokenKey] || import.meta.env[apiTokenKey];
|
| 395 |
-
|
| 396 |
-
return {
|
| 397 |
-
baseUrl,
|
| 398 |
-
apiKey,
|
| 399 |
};
|
| 400 |
-
};
|
| 401 |
-
export const DEFAULT_PROVIDER = PROVIDER_LIST[0];
|
| 402 |
-
|
| 403 |
-
const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat();
|
| 404 |
-
|
| 405 |
-
export let MODEL_LIST: ModelInfo[] = [...staticModels];
|
| 406 |
|
|
|
|
| 407 |
export async function getModelList(options: {
|
| 408 |
apiKeys?: Record<string, string>;
|
| 409 |
providerSettings?: Record<string, IProviderSetting>;
|
| 410 |
serverEnv?: Record<string, string>;
|
| 411 |
}) {
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
MODEL_LIST = [
|
| 415 |
-
...(
|
| 416 |
-
await Promise.all(
|
| 417 |
-
PROVIDER_LIST.filter(
|
| 418 |
-
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
|
| 419 |
-
).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], serverEnv)),
|
| 420 |
-
)
|
| 421 |
-
).flat(),
|
| 422 |
-
...staticModels,
|
| 423 |
-
];
|
| 424 |
-
|
| 425 |
-
return MODEL_LIST;
|
| 426 |
-
}
|
| 427 |
-
|
| 428 |
-
async function getTogetherModels(
|
| 429 |
-
name: string,
|
| 430 |
-
apiKeys?: Record<string, string>,
|
| 431 |
-
settings?: IProviderSetting,
|
| 432 |
-
serverEnv: Record<string, string> = {},
|
| 433 |
-
): Promise<ModelInfo[]> {
|
| 434 |
-
try {
|
| 435 |
-
const { baseUrl, apiKey } = getProviderBaseUrlAndKey({
|
| 436 |
-
provider: name,
|
| 437 |
-
apiKeys,
|
| 438 |
-
providerSettings: settings,
|
| 439 |
-
serverEnv,
|
| 440 |
-
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
|
| 441 |
-
defaultApiTokenKey: 'TOGETHER_API_KEY',
|
| 442 |
-
});
|
| 443 |
-
|
| 444 |
-
if (!baseUrl) {
|
| 445 |
-
return [];
|
| 446 |
-
}
|
| 447 |
-
|
| 448 |
-
if (!apiKey) {
|
| 449 |
-
return [];
|
| 450 |
-
}
|
| 451 |
-
|
| 452 |
-
const response = await fetch(`${baseUrl}/models`, {
|
| 453 |
-
headers: {
|
| 454 |
-
Authorization: `Bearer ${apiKey}`,
|
| 455 |
-
},
|
| 456 |
-
});
|
| 457 |
-
const res = (await response.json()) as any;
|
| 458 |
-
const data: any[] = (res || []).filter((model: any) => model.type == 'chat');
|
| 459 |
-
|
| 460 |
-
return data.map((m: any) => ({
|
| 461 |
-
name: m.id,
|
| 462 |
-
label: `${m.display_name} - in:$${m.pricing.input.toFixed(
|
| 463 |
-
2,
|
| 464 |
-
)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
| 465 |
-
provider: name,
|
| 466 |
-
maxTokenAllowed: 8000,
|
| 467 |
-
}));
|
| 468 |
-
} catch (e) {
|
| 469 |
-
console.error('Error getting OpenAILike models:', e);
|
| 470 |
-
return [];
|
| 471 |
-
}
|
| 472 |
-
}
|
| 473 |
-
|
| 474 |
-
const getOllamaBaseUrl = (name: string, settings?: IProviderSetting, serverEnv: Record<string, string> = {}) => {
|
| 475 |
-
const { baseUrl } = getProviderBaseUrlAndKey({
|
| 476 |
-
provider: name,
|
| 477 |
-
providerSettings: settings,
|
| 478 |
-
serverEnv,
|
| 479 |
-
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
|
| 480 |
-
defaultApiTokenKey: '',
|
| 481 |
-
});
|
| 482 |
-
|
| 483 |
-
// Check if we're in the browser
|
| 484 |
-
if (typeof window !== 'undefined') {
|
| 485 |
-
// Frontend always uses localhost
|
| 486 |
-
return baseUrl;
|
| 487 |
-
}
|
| 488 |
-
|
| 489 |
-
// Backend: Check if we're running in Docker
|
| 490 |
-
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
|
| 491 |
-
|
| 492 |
-
return isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
|
| 493 |
-
};
|
| 494 |
-
|
| 495 |
-
async function getOllamaModels(
|
| 496 |
-
name: string,
|
| 497 |
-
_apiKeys?: Record<string, string>,
|
| 498 |
-
settings?: IProviderSetting,
|
| 499 |
-
serverEnv: Record<string, string> = {},
|
| 500 |
-
): Promise<ModelInfo[]> {
|
| 501 |
-
try {
|
| 502 |
-
const baseUrl = getOllamaBaseUrl(name, settings, serverEnv);
|
| 503 |
-
|
| 504 |
-
if (!baseUrl) {
|
| 505 |
-
return [];
|
| 506 |
-
}
|
| 507 |
-
|
| 508 |
-
const response = await fetch(`${baseUrl}/api/tags`);
|
| 509 |
-
const data = (await response.json()) as OllamaApiResponse;
|
| 510 |
-
|
| 511 |
-
return data.models.map((model: OllamaModel) => ({
|
| 512 |
-
name: model.name,
|
| 513 |
-
label: `${model.name} (${model.details.parameter_size})`,
|
| 514 |
-
provider: 'Ollama',
|
| 515 |
-
maxTokenAllowed: 8000,
|
| 516 |
-
}));
|
| 517 |
-
} catch (e: any) {
|
| 518 |
-
logStore.logError('Failed to get Ollama models', e, { baseUrl: settings?.baseUrl });
|
| 519 |
-
logger.warn('Failed to get Ollama models: ', e.message || '');
|
| 520 |
-
|
| 521 |
-
return [];
|
| 522 |
-
}
|
| 523 |
-
}
|
| 524 |
-
|
| 525 |
-
async function getOpenAILikeModels(
|
| 526 |
-
name: string,
|
| 527 |
-
apiKeys?: Record<string, string>,
|
| 528 |
-
settings?: IProviderSetting,
|
| 529 |
-
serverEnv: Record<string, string> = {},
|
| 530 |
-
): Promise<ModelInfo[]> {
|
| 531 |
-
try {
|
| 532 |
-
const { baseUrl, apiKey } = getProviderBaseUrlAndKey({
|
| 533 |
-
provider: name,
|
| 534 |
-
apiKeys,
|
| 535 |
-
providerSettings: settings,
|
| 536 |
-
serverEnv,
|
| 537 |
-
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
|
| 538 |
-
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
|
| 539 |
-
});
|
| 540 |
-
|
| 541 |
-
if (!baseUrl) {
|
| 542 |
-
return [];
|
| 543 |
-
}
|
| 544 |
-
|
| 545 |
-
const response = await fetch(`${baseUrl}/models`, {
|
| 546 |
-
headers: {
|
| 547 |
-
Authorization: `Bearer ${apiKey}`,
|
| 548 |
-
},
|
| 549 |
-
});
|
| 550 |
-
const res = (await response.json()) as any;
|
| 551 |
-
|
| 552 |
-
return res.data.map((model: any) => ({
|
| 553 |
-
name: model.id,
|
| 554 |
-
label: model.id,
|
| 555 |
-
provider: name,
|
| 556 |
-
}));
|
| 557 |
-
} catch (e) {
|
| 558 |
-
console.error('Error getting OpenAILike models:', e);
|
| 559 |
-
return [];
|
| 560 |
-
}
|
| 561 |
-
}
|
| 562 |
-
|
| 563 |
-
type OpenRouterModelsResponse = {
|
| 564 |
-
data: {
|
| 565 |
-
name: string;
|
| 566 |
-
id: string;
|
| 567 |
-
context_length: number;
|
| 568 |
-
pricing: {
|
| 569 |
-
prompt: number;
|
| 570 |
-
completion: number;
|
| 571 |
-
};
|
| 572 |
-
}[];
|
| 573 |
-
};
|
| 574 |
-
|
| 575 |
-
async function getOpenRouterModels(): Promise<ModelInfo[]> {
|
| 576 |
-
const data: OpenRouterModelsResponse = await (
|
| 577 |
-
await fetch('https://openrouter.ai/api/v1/models', {
|
| 578 |
-
headers: {
|
| 579 |
-
'Content-Type': 'application/json',
|
| 580 |
-
},
|
| 581 |
-
})
|
| 582 |
-
).json();
|
| 583 |
-
|
| 584 |
-
return data.data
|
| 585 |
-
.sort((a, b) => a.name.localeCompare(b.name))
|
| 586 |
-
.map((m) => ({
|
| 587 |
-
name: m.id,
|
| 588 |
-
label: `${m.name} - in:$${(m.pricing.prompt * 1_000_000).toFixed(
|
| 589 |
-
2,
|
| 590 |
-
)} out:$${(m.pricing.completion * 1_000_000).toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
|
| 591 |
-
provider: 'OpenRouter',
|
| 592 |
-
maxTokenAllowed: 8000,
|
| 593 |
-
}));
|
| 594 |
-
}
|
| 595 |
-
|
| 596 |
-
async function getLMStudioModels(
|
| 597 |
-
name: string,
|
| 598 |
-
apiKeys?: Record<string, string>,
|
| 599 |
-
settings?: IProviderSetting,
|
| 600 |
-
serverEnv: Record<string, string> = {},
|
| 601 |
-
): Promise<ModelInfo[]> {
|
| 602 |
-
try {
|
| 603 |
-
const { baseUrl } = getProviderBaseUrlAndKey({
|
| 604 |
-
provider: name,
|
| 605 |
-
apiKeys,
|
| 606 |
-
providerSettings: settings,
|
| 607 |
-
serverEnv,
|
| 608 |
-
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
|
| 609 |
-
defaultApiTokenKey: '',
|
| 610 |
-
});
|
| 611 |
-
|
| 612 |
-
if (!baseUrl) {
|
| 613 |
-
return [];
|
| 614 |
-
}
|
| 615 |
-
|
| 616 |
-
const response = await fetch(`${baseUrl}/v1/models`);
|
| 617 |
-
const data = (await response.json()) as any;
|
| 618 |
-
|
| 619 |
-
return data.data.map((model: any) => ({
|
| 620 |
-
name: model.id,
|
| 621 |
-
label: model.id,
|
| 622 |
-
provider: 'LMStudio',
|
| 623 |
-
}));
|
| 624 |
-
} catch (e: any) {
|
| 625 |
-
logStore.logError('Failed to get LMStudio models', e, { baseUrl: settings?.baseUrl });
|
| 626 |
-
return [];
|
| 627 |
-
}
|
| 628 |
}
|
| 629 |
|
| 630 |
async function initializeModelList(options: {
|
|
@@ -632,46 +346,16 @@ async function initializeModelList(options: {
|
|
| 632 |
providerSettings?: Record<string, IProviderSetting>;
|
| 633 |
apiKeys?: Record<string, string>;
|
| 634 |
}): Promise<ModelInfo[]> {
|
| 635 |
-
const { providerSettings, apiKeys
|
| 636 |
-
|
| 637 |
-
|
| 638 |
-
|
| 639 |
-
|
| 640 |
-
|
| 641 |
-
|
| 642 |
-
if (storedApiKeys) {
|
| 643 |
-
const parsedKeys = JSON.parse(storedApiKeys);
|
| 644 |
-
|
| 645 |
-
if (typeof parsedKeys === 'object' && parsedKeys !== null) {
|
| 646 |
-
apiKeys = parsedKeys;
|
| 647 |
-
}
|
| 648 |
-
}
|
| 649 |
-
} catch (error: any) {
|
| 650 |
-
logStore.logError('Failed to fetch API keys from cookies', error);
|
| 651 |
-
logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`);
|
| 652 |
-
}
|
| 653 |
-
}
|
| 654 |
-
|
| 655 |
-
MODEL_LIST = [
|
| 656 |
-
...(
|
| 657 |
-
await Promise.all(
|
| 658 |
-
PROVIDER_LIST.filter(
|
| 659 |
-
(p): p is ProviderInfo & { getDynamicModels: () => Promise<ModelInfo[]> } => !!p.getDynamicModels,
|
| 660 |
-
).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], env)),
|
| 661 |
-
)
|
| 662 |
-
).flat(),
|
| 663 |
-
...staticModels,
|
| 664 |
-
];
|
| 665 |
|
| 666 |
-
return
|
| 667 |
}
|
| 668 |
|
| 669 |
// initializeModelList({})
|
| 670 |
-
export {
|
| 671 |
-
getOllamaModels,
|
| 672 |
-
getOpenAILikeModels,
|
| 673 |
-
getLMStudioModels,
|
| 674 |
-
initializeModelList,
|
| 675 |
-
getOpenRouterModels,
|
| 676 |
-
PROVIDER_LIST,
|
| 677 |
-
};
|
|
|
|
| 1 |
+
import type { IProviderSetting } from '~/types/model';
|
| 2 |
+
|
| 3 |
+
import { LLMManager } from '~/lib/modules/llm/manager';
|
| 4 |
+
import type { ModelInfo } from '~/lib/modules/llm/types';
|
|
|
|
| 5 |
|
| 6 |
export const WORK_DIR_NAME = 'project';
|
| 7 |
export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
|
|
|
|
| 11 |
export const DEFAULT_MODEL = 'claude-3-5-sonnet-latest';
|
| 12 |
export const PROMPT_COOKIE_KEY = 'cachedPrompt';
|
| 13 |
|
| 14 |
+
const llmManager = LLMManager.getInstance(import.meta.env);
|
| 15 |
+
|
| 16 |
+
export const PROVIDER_LIST = llmManager.getAllProviders();
|
| 17 |
+
export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();
|
| 18 |
+
|
| 19 |
+
let MODEL_LIST = llmManager.getModelList();
|
| 20 |
+
|
| 21 |
+
/*
|
| 22 |
+
*const PROVIDER_LIST_OLD: ProviderInfo[] = [
|
| 23 |
+
* {
|
| 24 |
+
* name: 'Anthropic',
|
| 25 |
+
* staticModels: [
|
| 26 |
+
* {
|
| 27 |
+
* name: 'claude-3-5-sonnet-latest',
|
| 28 |
+
* label: 'Claude 3.5 Sonnet (new)',
|
| 29 |
+
* provider: 'Anthropic',
|
| 30 |
+
* maxTokenAllowed: 8000,
|
| 31 |
+
* },
|
| 32 |
+
* {
|
| 33 |
+
* name: 'claude-3-5-sonnet-20240620',
|
| 34 |
+
* label: 'Claude 3.5 Sonnet (old)',
|
| 35 |
+
* provider: 'Anthropic',
|
| 36 |
+
* maxTokenAllowed: 8000,
|
| 37 |
+
* },
|
| 38 |
+
* {
|
| 39 |
+
* name: 'claude-3-5-haiku-latest',
|
| 40 |
+
* label: 'Claude 3.5 Haiku (new)',
|
| 41 |
+
* provider: 'Anthropic',
|
| 42 |
+
* maxTokenAllowed: 8000,
|
| 43 |
+
* },
|
| 44 |
+
* { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
| 45 |
+
* { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
| 46 |
+
* { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
|
| 47 |
+
* ],
|
| 48 |
+
* getApiKeyLink: 'https://console.anthropic.com/settings/keys',
|
| 49 |
+
* },
|
| 50 |
+
* {
|
| 51 |
+
* name: 'Ollama',
|
| 52 |
+
* staticModels: [],
|
| 53 |
+
* getDynamicModels: getOllamaModels,
|
| 54 |
+
* getApiKeyLink: 'https://ollama.com/download',
|
| 55 |
+
* labelForGetApiKey: 'Download Ollama',
|
| 56 |
+
* icon: 'i-ph:cloud-arrow-down',
|
| 57 |
+
* },
|
| 58 |
+
* {
|
| 59 |
+
* name: 'OpenAILike',
|
| 60 |
+
* staticModels: [],
|
| 61 |
+
* getDynamicModels: getOpenAILikeModels,
|
| 62 |
+
* },
|
| 63 |
+
* {
|
| 64 |
+
* name: 'Cohere',
|
| 65 |
+
* staticModels: [
|
| 66 |
+
* { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 67 |
+
* { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 68 |
+
* { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 69 |
+
* { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 70 |
+
* { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 71 |
+
* { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 72 |
+
* { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 73 |
+
* { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 74 |
+
* { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 75 |
+
* { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
|
| 76 |
+
* ],
|
| 77 |
+
* getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
|
| 78 |
+
* },
|
| 79 |
+
* {
|
| 80 |
+
* name: 'OpenRouter',
|
| 81 |
+
* staticModels: [
|
| 82 |
+
* { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 83 |
+
* {
|
| 84 |
+
* name: 'anthropic/claude-3.5-sonnet',
|
| 85 |
+
* label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
|
| 86 |
+
* provider: 'OpenRouter',
|
| 87 |
+
* maxTokenAllowed: 8000,
|
| 88 |
+
* },
|
| 89 |
+
* {
|
| 90 |
+
* name: 'anthropic/claude-3-haiku',
|
| 91 |
+
* label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
|
| 92 |
+
* provider: 'OpenRouter',
|
| 93 |
+
* maxTokenAllowed: 8000,
|
| 94 |
+
* },
|
| 95 |
+
* {
|
| 96 |
+
* name: 'deepseek/deepseek-coder',
|
| 97 |
+
* label: 'Deepseek-Coder V2 236B (OpenRouter)',
|
| 98 |
+
* provider: 'OpenRouter',
|
| 99 |
+
* maxTokenAllowed: 8000,
|
| 100 |
+
* },
|
| 101 |
+
* {
|
| 102 |
+
* name: 'google/gemini-flash-1.5',
|
| 103 |
+
* label: 'Google Gemini Flash 1.5 (OpenRouter)',
|
| 104 |
+
* provider: 'OpenRouter',
|
| 105 |
+
* maxTokenAllowed: 8000,
|
| 106 |
+
* },
|
| 107 |
+
* {
|
| 108 |
+
* name: 'google/gemini-pro-1.5',
|
| 109 |
+
* label: 'Google Gemini Pro 1.5 (OpenRouter)',
|
| 110 |
+
* provider: 'OpenRouter',
|
| 111 |
+
* maxTokenAllowed: 8000,
|
| 112 |
+
* },
|
| 113 |
+
* { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
|
| 114 |
+
* {
|
| 115 |
+
* name: 'mistralai/mistral-nemo',
|
| 116 |
+
* label: 'OpenRouter Mistral Nemo (OpenRouter)',
|
| 117 |
+
* provider: 'OpenRouter',
|
| 118 |
+
* maxTokenAllowed: 8000,
|
| 119 |
+
* },
|
| 120 |
+
* {
|
| 121 |
+
* name: 'qwen/qwen-110b-chat',
|
| 122 |
+
* label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
|
| 123 |
+
* provider: 'OpenRouter',
|
| 124 |
+
* maxTokenAllowed: 8000,
|
| 125 |
+
* },
|
| 126 |
+
* { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
|
| 127 |
+
* ],
|
| 128 |
+
* getDynamicModels: getOpenRouterModels,
|
| 129 |
+
* getApiKeyLink: 'https://openrouter.ai/settings/keys',
|
| 130 |
+
* },
|
| 131 |
+
* {
|
| 132 |
+
* name: 'Google',
|
| 133 |
+
* staticModels: [
|
| 134 |
+
* { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
| 135 |
+
* { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
|
| 136 |
+
* { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
|
| 137 |
+
* { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
|
| 138 |
+
* { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
|
| 139 |
+
* { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
|
| 140 |
+
* { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
|
| 141 |
+
* ],
|
| 142 |
+
* getApiKeyLink: 'https://aistudio.google.com/app/apikey',
|
| 143 |
+
* },
|
| 144 |
+
* {
|
| 145 |
+
* name: 'Groq',
|
| 146 |
+
* staticModels: [
|
| 147 |
+
* { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 148 |
+
* { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 149 |
+
* { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 150 |
+
* { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 151 |
+
* { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 152 |
+
* { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
|
| 153 |
+
* ],
|
| 154 |
+
* getApiKeyLink: 'https://console.groq.com/keys',
|
| 155 |
+
* },
|
| 156 |
+
* {
|
| 157 |
+
* name: 'HuggingFace',
|
| 158 |
+
* staticModels: [
|
| 159 |
+
* {
|
| 160 |
+
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 161 |
+
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
| 162 |
+
* provider: 'HuggingFace',
|
| 163 |
+
* maxTokenAllowed: 8000,
|
| 164 |
+
* },
|
| 165 |
+
* {
|
| 166 |
+
* name: '01-ai/Yi-1.5-34B-Chat',
|
| 167 |
+
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
| 168 |
+
* provider: 'HuggingFace',
|
| 169 |
+
* maxTokenAllowed: 8000,
|
| 170 |
+
* },
|
| 171 |
+
* {
|
| 172 |
+
* name: 'codellama/CodeLlama-34b-Instruct-hf',
|
| 173 |
+
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
| 174 |
+
* provider: 'HuggingFace',
|
| 175 |
+
* maxTokenAllowed: 8000,
|
| 176 |
+
* },
|
| 177 |
+
* {
|
| 178 |
+
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
| 179 |
+
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
| 180 |
+
* provider: 'HuggingFace',
|
| 181 |
+
* maxTokenAllowed: 8000,
|
| 182 |
+
* },
|
| 183 |
+
* {
|
| 184 |
+
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 185 |
+
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
|
| 186 |
+
* provider: 'HuggingFace',
|
| 187 |
+
* maxTokenAllowed: 8000,
|
| 188 |
+
* },
|
| 189 |
+
* {
|
| 190 |
+
* name: 'Qwen/Qwen2.5-72B-Instruct',
|
| 191 |
+
* label: 'Qwen2.5-72B-Instruct (HuggingFace)',
|
| 192 |
+
* provider: 'HuggingFace',
|
| 193 |
+
* maxTokenAllowed: 8000,
|
| 194 |
+
* },
|
| 195 |
+
* {
|
| 196 |
+
* name: 'meta-llama/Llama-3.1-70B-Instruct',
|
| 197 |
+
* label: 'Llama-3.1-70B-Instruct (HuggingFace)',
|
| 198 |
+
* provider: 'HuggingFace',
|
| 199 |
+
* maxTokenAllowed: 8000,
|
| 200 |
+
* },
|
| 201 |
+
* {
|
| 202 |
+
* name: 'meta-llama/Llama-3.1-405B',
|
| 203 |
+
* label: 'Llama-3.1-405B (HuggingFace)',
|
| 204 |
+
* provider: 'HuggingFace',
|
| 205 |
+
* maxTokenAllowed: 8000,
|
| 206 |
+
* },
|
| 207 |
+
* {
|
| 208 |
+
* name: '01-ai/Yi-1.5-34B-Chat',
|
| 209 |
+
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
|
| 210 |
+
* provider: 'HuggingFace',
|
| 211 |
+
* maxTokenAllowed: 8000,
|
| 212 |
+
* },
|
| 213 |
+
* {
|
| 214 |
+
* name: 'codellama/CodeLlama-34b-Instruct-hf',
|
| 215 |
+
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
|
| 216 |
+
* provider: 'HuggingFace',
|
| 217 |
+
* maxTokenAllowed: 8000,
|
| 218 |
+
* },
|
| 219 |
+
* {
|
| 220 |
+
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
|
| 221 |
+
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
|
| 222 |
+
* provider: 'HuggingFace',
|
| 223 |
+
* maxTokenAllowed: 8000,
|
| 224 |
+
* },
|
| 225 |
+
* ],
|
| 226 |
+
* getApiKeyLink: 'https://huggingface.co/settings/tokens',
|
| 227 |
+
* },
|
| 228 |
+
* {
|
| 229 |
+
* name: 'OpenAI',
|
| 230 |
+
* staticModels: [
|
| 231 |
+
* { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 232 |
+
* { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 233 |
+
* { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 234 |
+
* { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
|
| 235 |
+
* ],
|
| 236 |
+
* getApiKeyLink: 'https://platform.openai.com/api-keys',
|
| 237 |
+
* },
|
| 238 |
+
* {
|
| 239 |
+
* name: 'xAI',
|
| 240 |
+
* staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
|
| 241 |
+
* getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
|
| 242 |
+
* },
|
| 243 |
+
* {
|
| 244 |
+
* name: 'Deepseek',
|
| 245 |
+
* staticModels: [
|
| 246 |
+
* { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
| 247 |
+
* { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
|
| 248 |
+
* ],
|
| 249 |
+
* getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
|
| 250 |
+
* },
|
| 251 |
+
* {
|
| 252 |
+
* name: 'Mistral',
|
| 253 |
+
* staticModels: [
|
| 254 |
+
* { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 255 |
+
* { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 256 |
+
* { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 257 |
+
* { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 258 |
+
* { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 259 |
+
* { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 260 |
+
* { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 261 |
+
* { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 262 |
+
* { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
|
| 263 |
+
* ],
|
| 264 |
+
* getApiKeyLink: 'https://console.mistral.ai/api-keys/',
|
| 265 |
+
* },
|
| 266 |
+
* {
|
| 267 |
+
* name: 'LMStudio',
|
| 268 |
+
* staticModels: [],
|
| 269 |
+
* getDynamicModels: getLMStudioModels,
|
| 270 |
+
* getApiKeyLink: 'https://lmstudio.ai/',
|
| 271 |
+
* labelForGetApiKey: 'Get LMStudio',
|
| 272 |
+
* icon: 'i-ph:cloud-arrow-down',
|
| 273 |
+
* },
|
| 274 |
+
* {
|
| 275 |
+
* name: 'Together',
|
| 276 |
+
* getDynamicModels: getTogetherModels,
|
| 277 |
+
* staticModels: [
|
| 278 |
+
* {
|
| 279 |
+
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 280 |
+
* label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
|
| 281 |
+
* provider: 'Together',
|
| 282 |
+
* maxTokenAllowed: 8000,
|
| 283 |
+
* },
|
| 284 |
+
* {
|
| 285 |
+
* name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
| 286 |
+
* label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
|
| 287 |
+
* provider: 'Together',
|
| 288 |
+
* maxTokenAllowed: 8000,
|
| 289 |
+
* },
|
| 290 |
+
*
|
| 291 |
+
* {
|
| 292 |
+
* name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
|
| 293 |
+
* label: 'Mixtral 8x7B Instruct',
|
| 294 |
+
* provider: 'Together',
|
| 295 |
+
* maxTokenAllowed: 8192,
|
| 296 |
+
* },
|
| 297 |
+
* ],
|
| 298 |
+
* getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
|
| 299 |
+
* },
|
| 300 |
+
* {
|
| 301 |
+
* name: 'Perplexity',
|
| 302 |
+
* staticModels: [
|
| 303 |
+
* {
|
| 304 |
+
* name: 'llama-3.1-sonar-small-128k-online',
|
| 305 |
+
* label: 'Sonar Small Online',
|
| 306 |
+
* provider: 'Perplexity',
|
| 307 |
+
* maxTokenAllowed: 8192,
|
| 308 |
+
* },
|
| 309 |
+
* {
|
| 310 |
+
* name: 'llama-3.1-sonar-large-128k-online',
|
| 311 |
+
* label: 'Sonar Large Online',
|
| 312 |
+
* provider: 'Perplexity',
|
| 313 |
+
* maxTokenAllowed: 8192,
|
| 314 |
+
* },
|
| 315 |
+
* {
|
| 316 |
+
* name: 'llama-3.1-sonar-huge-128k-online',
|
| 317 |
+
* label: 'Sonar Huge Online',
|
| 318 |
+
* provider: 'Perplexity',
|
| 319 |
+
* maxTokenAllowed: 8192,
|
| 320 |
+
* },
|
| 321 |
+
* ],
|
| 322 |
+
* getApiKeyLink: 'https://www.perplexity.ai/settings/api',
|
| 323 |
+
* },
|
| 324 |
+
*];
|
| 325 |
+
*/
|
| 326 |
+
|
| 327 |
+
const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
|
| 328 |
+
PROVIDER_LIST.forEach((provider) => {
|
| 329 |
+
providerBaseUrlEnvKeys[provider.name] = {
|
| 330 |
+
baseUrlKey: provider.config.baseUrlKey,
|
| 331 |
+
apiTokenKey: provider.config.apiTokenKey,
|
|
| 332 |
};
|
| 333 |
+
});
|
| 334 |
|
| 335 |
+
// Export the getModelList function using the manager
|
| 336 |
export async function getModelList(options: {
|
| 337 |
apiKeys?: Record<string, string>;
|
| 338 |
providerSettings?: Record<string, IProviderSetting>;
|
| 339 |
serverEnv?: Record<string, string>;
|
| 340 |
}) {
|
| 341 |
+
return await llmManager.updateModelList(options);
|
| 342 |
}
|
| 343 |
|
| 344 |
async function initializeModelList(options: {
|
|
|
|
| 346 |
providerSettings?: Record<string, IProviderSetting>;
|
| 347 |
apiKeys?: Record<string, string>;
|
| 348 |
}): Promise<ModelInfo[]> {
|
| 349 |
+
const { providerSettings, apiKeys, env } = options;
|
| 350 |
+
const list = await getModelList({
|
| 351 |
+
apiKeys,
|
| 352 |
+
providerSettings,
|
| 353 |
+
serverEnv: env,
|
| 354 |
+
});
|
| 355 |
+
MODEL_LIST = list || MODEL_LIST;
|
| 356 |
|
| 357 |
+
return list;
|
| 358 |
}
|
| 359 |
|
| 360 |
// initializeModelList({})
|
| 361 |
+
export { initializeModelList, providerBaseUrlEnvKeys, MODEL_LIST };
|
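With the manager in place, constants.ts becomes a thin facade: PROVIDER_LIST and DEFAULT_PROVIDER come from the LLMManager singleton, and the model list is refreshed through getModelList / initializeModelList. A client-side sketch (placeholder key; apiKeys is keyed by provider name; the await assumes an async context):

import { initializeModelList, MODEL_LIST } from '~/utils/constants';

const refreshed = await initializeModelList({
  apiKeys: { OpenAI: 'sk-...' }, // placeholder key
});
console.log(refreshed.length, MODEL_LIST.length);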
app/utils/types.ts
CHANGED
|
@@ -19,10 +19,3 @@ export interface OllamaModel {
|
|
| 19 |
export interface OllamaApiResponse {
|
| 20 |
models: OllamaModel[];
|
| 21 |
}
|
| 22 |
-
|
| 23 |
-
export interface ModelInfo {
|
| 24 |
-
name: string;
|
| 25 |
-
label: string;
|
| 26 |
-
provider: string;
|
| 27 |
-
maxTokenAllowed: number;
|
| 28 |
-
}
|
|
|
|
| 19 |
export interface OllamaApiResponse {
|
| 20 |
models: OllamaModel[];
|
| 21 |
}
|
pnpm-lock.yaml
CHANGED
|
@@ -11957,4 +11957,4 @@ snapshots:
|
|
| 11957 |
|
| 11958 |
zod@3.23.8: {}
|
| 11959 |
|
| 11960 |
-
zwitch@2.0.4: {}
|
|
|
|
| 11957 |
|
| 11958 |
zod@3.23.8: {}
|
| 11959 |
|
| 11960 |
+
zwitch@2.0.4: {}
|
worker-configuration.d.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
interface Env {
|
|
|
|
| 2 |
ANTHROPIC_API_KEY: string;
|
| 3 |
OPENAI_API_KEY: string;
|
| 4 |
GROQ_API_KEY: string;
|
|
|
|
| 1 |
interface Env {
|
| 2 |
+
DEFAULT_NUM_CTX:Settings;
|
| 3 |
ANTHROPIC_API_KEY: string;
|
| 4 |
OPENAI_API_KEY: string;
|
| 5 |
GROQ_API_KEY: string;
|