Manus AI committed on
Commit
8603e8c
·
1 Parent(s): 29b7599

Fix: Use direct Inference API URL for abliterated model as per successful example

Browse files
Files changed (1) hide show
  1. server/_core/llm.ts +2 -2
server/_core/llm.ts CHANGED
@@ -217,7 +217,7 @@ const resolveApiUrl = (model: string) => {
217
  }
218
 
219
  // Use the Hugging Face Router endpoint to leverage external providers
220
- return "https://router.huggingface.co/v1/chat/completions";
221
  };
222
 
223
  const assertApiKey = () => {
@@ -289,7 +289,7 @@ export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
289
 
290
  // Use a more widely supported model if the abliterated one fails,
291
  // or ensure the provider is correctly specified.
292
- const model = params.model || "Qwen/Qwen2.5-72B-Instruct";
293
 
294
  const payload: Record<string, unknown> = {
295
  model: model,
 
217
  }
218
 
219
  // Use the Hugging Face Router endpoint to leverage external providers
220
+ return "https://api-inference.huggingface.co/models/huihui-ai/Qwen2.5-72B-Instruct-abliterated/v1/chat/completions";
221
  };
222
 
223
  const assertApiKey = () => {
 
289
 
290
  // Use a more widely supported model if the abliterated one fails,
291
  // or ensure the provider is correctly specified.
292
+ const model = params.model || "huihui-ai/Qwen2.5-72B-Instruct-abliterated";
293
 
294
  const payload: Record<string, unknown> = {
295
  model: model,