Remove deprecated oldModels support from codebase
Browse files

Eliminated all references to oldModels, including its import, configuration, and usage in model selection logic and API routes. This simplifies model management and removes unnecessary legacy code.
- chart/env/prod.yaml +1 -89
- src/lib/server/api/routes/groups/models.ts +1 -6
- src/lib/server/config.ts +1 -1
- src/lib/server/models.ts +0 -15
- src/lib/utils/models.ts +1 -7
- src/routes/+layout.ts +2 -3
chart/env/prod.yaml
CHANGED
|
@@ -54,103 +54,15 @@ envVars:
|
|
| 54 |
LOG_LEVEL: "debug"
|
| 55 |
NODE_ENV: "prod"
|
| 56 |
NODE_LOG_STRUCTURED_DATA: true
|
| 57 |
-
OLD_MODELS: >
|
| 58 |
-
[
|
| 59 |
-
{ "name": "bigcode/starcoder" },
|
| 60 |
-
{ "name": "OpenAssistant/oasst-sft-6-llama-30b-xor" },
|
| 61 |
-
{ "name": "HuggingFaceH4/zephyr-7b-alpha" },
|
| 62 |
-
{ "name": "openchat/openchat_3.5" },
|
| 63 |
-
{ "name": "openchat/openchat-3.5-1210" },
|
| 64 |
-
{ "name": "tiiuae/falcon-180B-chat" },
|
| 65 |
-
{ "name": "codellama/CodeLlama-34b-Instruct-hf" },
|
| 66 |
-
{ "name": "google/gemma-7b-it" },
|
| 67 |
-
{ "name": "meta-llama/Llama-2-70b-chat-hf" },
|
| 68 |
-
{ "name": "codellama/CodeLlama-70b-Instruct-hf" },
|
| 69 |
-
{ "name": "openchat/openchat-3.5-0106" },
|
| 70 |
-
{ "name": "meta-llama/Meta-Llama-3-70B-Instruct" },
|
| 71 |
-
{ "name": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8" },
|
| 72 |
-
{
|
| 73 |
-
"name": "CohereForAI/c4ai-command-r-plus-08-2024",
|
| 74 |
-
"transferTo": "CohereLabs/c4ai-command-r-plus-08-2024"
|
| 75 |
-
},
|
| 76 |
-
{
|
| 77 |
-
"name": "CohereForAI/c4ai-command-r-plus",
|
| 78 |
-
"transferTo": "CohereLabs/c4ai-command-r-plus-08-2024"
|
| 79 |
-
},
|
| 80 |
-
{
|
| 81 |
-
"name": "01-ai/Yi-1.5-34B-Chat",
|
| 82 |
-
"transferTo": "CohereLabs/c4ai-command-r-plus-08-2024"
|
| 83 |
-
},
|
| 84 |
-
{
|
| 85 |
-
"name": "mistralai/Mixtral-8x7B-Instruct-v0.1",
|
| 86 |
-
"transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
| 87 |
-
},
|
| 88 |
-
{
|
| 89 |
-
"name": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
|
| 90 |
-
"transferTo": "NousResearch/Hermes-3-Llama-3.1-8B"
|
| 91 |
-
},
|
| 92 |
-
{
|
| 93 |
-
"name": "mistralai/Mistral-7B-Instruct-v0.3",
|
| 94 |
-
"transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
| 95 |
-
},
|
| 96 |
-
{
|
| 97 |
-
"name": "microsoft/Phi-3-mini-4k-instruct",
|
| 98 |
-
"transferTo": "microsoft/Phi-4"
|
| 99 |
-
},
|
| 100 |
-
{
|
| 101 |
-
"name": "meta-llama/Meta-Llama-3.1-70B-Instruct",
|
| 102 |
-
"transferTo": "meta-llama/Llama-3.3-70B-Instruct"
|
| 103 |
-
},
|
| 104 |
-
{
|
| 105 |
-
"name": "Qwen/QwQ-32B-Preview",
|
| 106 |
-
"transferTo": "Qwen/QwQ-32B"
|
| 107 |
-
},
|
| 108 |
-
{
|
| 109 |
-
"name": "mistralai/Mistral-Nemo-Instruct-2407",
|
| 110 |
-
"transferTo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
| 111 |
-
},
|
| 112 |
-
{
|
| 113 |
-
"name": "microsoft/Phi-3.5-mini-instruct",
|
| 114 |
-
"transferTo": "microsoft/Phi-4"
|
| 115 |
-
},
|
| 116 |
-
{
|
| 117 |
-
"name": "Qwen/Qwen2.5-Coder-32B-Instruct",
|
| 118 |
-
"transferTo": "Qwen/QwQ-32B"
|
| 119 |
-
},
|
| 120 |
-
{
|
| 121 |
-
"name": "meta-llama/Llama-3.2-11B-Vision-Instruct",
|
| 122 |
-
"transferTo" : "Qwen/Qwen2.5-VL-32B-Instruct"
|
| 123 |
-
}
|
| 124 |
-
]
|
| 125 |
PUBLIC_ORIGIN: "https://huggingface.co"
|
| 126 |
PUBLIC_SHARE_PREFIX: "https://hf.co/chat"
|
| 127 |
-
PUBLIC_ANNOUNCEMENT_BANNERS: >
|
| 128 |
-
[
|
| 129 |
-
{
|
| 130 |
-
"title": "Qwen 3 235B is available!",
|
| 131 |
-
"linkTitle": "Try it out!",
|
| 132 |
-
"linkHref": "https://huggingface.co/chat/models/Qwen/Qwen3-235B-A22B"
|
| 133 |
-
}
|
| 134 |
-
]
|
| 135 |
PUBLIC_APP_NAME: "HuggingChat"
|
| 136 |
PUBLIC_APP_ASSETS: "huggingchat"
|
| 137 |
-
PUBLIC_APP_COLOR: "yellow"
|
| 138 |
PUBLIC_APP_DESCRIPTION: "Making the community's best AI chat models available to everyone."
|
| 139 |
PUBLIC_APP_GUEST_MESSAGE: "Sign in with a free Hugging Face account to continue using HuggingChat."
|
| 140 |
PUBLIC_APP_DATA_SHARING: 0
|
| 141 |
PUBLIC_PLAUSIBLE_SCRIPT_URL: "/js/script.js"
|
| 142 |
-
|
| 143 |
-
{
|
| 144 |
-
"name": "NousResearch/Hermes-3-Llama-3.1-8B",
|
| 145 |
-
"unlisted": true,
|
| 146 |
-
"endpoints": [{"type" : "inference-client"}],
|
| 147 |
-
"parameters": {
|
| 148 |
-
"temperature": 0.1,
|
| 149 |
-
"max_new_tokens": 256
|
| 150 |
-
}
|
| 151 |
-
}
|
| 152 |
-
|
| 153 |
-
|
| 154 |
HF_ORG_ADMIN: '644171cfbd0c97265298aa99'
|
| 155 |
HF_ORG_EARLY_ACCESS: '5e67bd5b1009063689407478'
|
| 156 |
infisical:
|
|
|
|
| 54 |
LOG_LEVEL: "debug"
|
| 55 |
NODE_ENV: "prod"
|
| 56 |
NODE_LOG_STRUCTURED_DATA: true
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
PUBLIC_ORIGIN: "https://huggingface.co"
|
| 58 |
PUBLIC_SHARE_PREFIX: "https://hf.co/chat"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
PUBLIC_APP_NAME: "HuggingChat"
|
| 60 |
PUBLIC_APP_ASSETS: "huggingchat"
|
|
|
|
| 61 |
PUBLIC_APP_DESCRIPTION: "Making the community's best AI chat models available to everyone."
|
| 62 |
PUBLIC_APP_GUEST_MESSAGE: "Sign in with a free Hugging Face account to continue using HuggingChat."
|
| 63 |
PUBLIC_APP_DATA_SHARING: 0
|
| 64 |
PUBLIC_PLAUSIBLE_SCRIPT_URL: "/js/script.js"
|
| 65 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
HF_ORG_ADMIN: '644171cfbd0c97265298aa99'
|
| 67 |
HF_ORG_EARLY_ACCESS: '5e67bd5b1009063689407478'
|
| 68 |
infisical:
|
src/lib/server/api/routes/groups/models.ts
CHANGED
|
@@ -70,12 +70,7 @@ export const modelGroup = new Elysia().group("/models", (app) =>
|
|
| 70 |
}
|
| 71 |
})
|
| 72 |
.get("/old", async () => {
|
| 73 |
-
try {
|
| 74 |
-
const { oldModels } = await import("$lib/server/models");
|
| 75 |
-
return oldModels satisfies GETOldModelsResponse;
|
| 76 |
-
} catch (e) {
|
| 77 |
-
return [] as GETOldModelsResponse;
|
| 78 |
-
}
|
| 79 |
})
|
| 80 |
.group("/:namespace/:model?", (app) =>
|
| 81 |
app
|
|
|
|
| 70 |
}
|
| 71 |
})
|
| 72 |
.get("/old", async () => {
|
| 73 |
+
return [] as GETOldModelsResponse;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 74 |
})
|
| 75 |
.group("/:namespace/:model?", (app) =>
|
| 76 |
app
|
src/lib/server/config.ts
CHANGED
|
@@ -151,7 +151,7 @@ export const ready = (async () => {
|
|
| 151 |
}
|
| 152 |
})();
|
| 153 |
|
| 154 |
-
type ExtraConfigKeys = "HF_TOKEN" | "OLD_MODELS" | "ENABLE_ASSISTANTS";
|
| 155 |
|
| 156 |
type ConfigProxy = ConfigManager & { [K in ConfigKey | ExtraConfigKeys]: string };
|
| 157 |
|
|
|
|
| 151 |
}
|
| 152 |
})();
|
| 153 |
|
| 154 |
+
type ExtraConfigKeys = "HF_TOKEN" | "ENABLE_ASSISTANTS";
|
| 155 |
|
| 156 |
type ConfigProxy = ConfigManager & { [K in ConfigKey | ExtraConfigKeys]: string };
|
| 157 |
|
src/lib/server/models.ts
CHANGED
|
@@ -340,21 +340,6 @@ export const validModelIdSchema = z.enum(models.map((m) => m.id) as [string, ...
|
|
| 340 |
|
| 341 |
export const defaultModel = models[0];
|
| 342 |
|
| 343 |
-
// Models that have been deprecated
|
| 344 |
-
export const oldModels = config.OLD_MODELS
|
| 345 |
-
? z
|
| 346 |
-
.array(
|
| 347 |
-
z.object({
|
| 348 |
-
id: z.string().optional(),
|
| 349 |
-
name: z.string().min(1),
|
| 350 |
-
displayName: z.string().min(1).optional(),
|
| 351 |
-
transferTo: validModelIdSchema.optional(),
|
| 352 |
-
})
|
| 353 |
-
)
|
| 354 |
-
.parse(JSON5.parse(sanitizeJSONEnv(config.OLD_MODELS, "[]")))
|
| 355 |
-
.map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name }))
|
| 356 |
-
: [];
|
| 357 |
-
|
| 358 |
export const validateModel = (_models: BackendModel[]) => {
|
| 359 |
// Zod enum function requires 2 parameters
|
| 360 |
return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
|
|
|
|
| 340 |
|
| 341 |
export const defaultModel = models[0];
|
| 342 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 343 |
export const validateModel = (_models: BackendModel[]) => {
|
| 344 |
// Zod enum function requires 2 parameters
|
| 345 |
return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]);
|
src/lib/utils/models.ts
CHANGED
|
@@ -2,18 +2,12 @@ import type { Model } from "$lib/types/Model";
|
|
| 2 |
|
| 3 |
export const findCurrentModel = (
|
| 4 |
models: Model[],
|
| 5 |
-
oldModels: { id: string; transferTo?: string }[],
|
| 6 |
id?: string
|
| 7 |
): Model => {
|
| 8 |
if (id) {
|
| 9 |
const direct = models.find((m) => m.id === id);
|
| 10 |
if (direct) return direct;
|
| 11 |
-
|
| 12 |
-
const legacy = oldModels.find((m) => m.id === id);
|
| 13 |
-
if (legacy?.transferTo) {
|
| 14 |
-
const mapped = models.find((m) => m.id === legacy.transferTo);
|
| 15 |
-
if (mapped) return mapped;
|
| 16 |
-
}
|
| 17 |
}
|
| 18 |
|
| 19 |
return models[0];
|
|
|
|
| 2 |
|
| 3 |
export const findCurrentModel = (
|
| 4 |
models: Model[],
|
| 5 |
+
_oldModels: { id: string; transferTo?: string }[] = [],
|
| 6 |
id?: string
|
| 7 |
): Model => {
|
| 8 |
if (id) {
|
| 9 |
const direct = models.find((m) => m.id === id);
|
| 10 |
if (direct) return direct;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
}
|
| 12 |
|
| 13 |
return models[0];
|
src/routes/+layout.ts
CHANGED
|
@@ -8,11 +8,10 @@ export const load = async ({ depends, fetch, url }) => {
|
|
| 8 |
|
| 9 |
const client = useAPIClient({ fetch, origin: url.origin });
|
| 10 |
|
| 11 |
-
const [settings, models, oldModels, user, publicConfig, featureFlags, conversationsData] =
|
| 12 |
await Promise.all([
|
| 13 |
client.user.settings.get().then(handleResponse),
|
| 14 |
client.models.get().then(handleResponse),
|
| 15 |
-
client.models.old.get().then(handleResponse),
|
| 16 |
client.user.get().then(handleResponse),
|
| 17 |
client["public-config"].get().then(handleResponse),
|
| 18 |
client["feature-flags"].get().then(handleResponse),
|
|
@@ -39,7 +38,7 @@ export const load = async ({ depends, fetch, url }) => {
|
|
| 39 |
nConversations,
|
| 40 |
conversations,
|
| 41 |
models,
|
| 42 |
-
oldModels,
|
| 43 |
user,
|
| 44 |
settings: {
|
| 45 |
...settings,
|
|
|
|
| 8 |
|
| 9 |
const client = useAPIClient({ fetch, origin: url.origin });
|
| 10 |
|
| 11 |
+
const [settings, models, user, publicConfig, featureFlags, conversationsData] =
|
| 12 |
await Promise.all([
|
| 13 |
client.user.settings.get().then(handleResponse),
|
| 14 |
client.models.get().then(handleResponse),
|
|
|
|
| 15 |
client.user.get().then(handleResponse),
|
| 16 |
client["public-config"].get().then(handleResponse),
|
| 17 |
client["feature-flags"].get().then(handleResponse),
|
|
|
|
| 38 |
nConversations,
|
| 39 |
conversations,
|
| 40 |
models,
|
| 41 |
+
oldModels: [],
|
| 42 |
user,
|
| 43 |
settings: {
|
| 44 |
...settings,
|