import { ENV } from "./env";
/** Chat participant roles accepted by the chat-completions API. */
const ROLES = ["system", "user", "assistant", "tool", "function"] as const;
export type Role = (typeof ROLES)[number];
/** A single chat turn sent to, or received from, the model. */
export type Message = { role: Role; content: string };
/** Arguments accepted by {@link invokeLLM}. */
export type InvokeParams = {
  /** Conversation history to send, in order. */
  messages: Array<Message>;
  /** Optional model id; a default model is used when omitted. */
  model?: string;
};
export type InvokeResult = {
choices: Array<{
message: {
role: Role;
content: string;
};
}>;
};
/**
 * Sends a chat-completions request directly to the Hugging Face
 * serverless Inference API and returns the parsed response body.
 *
 * @param params - Conversation messages plus an optional model id.
 * @returns The raw chat-completions payload, typed as {@link InvokeResult}.
 *          NOTE(review): the JSON is not runtime-validated — the `as` cast
 *          trusts the API shape.
 * @throws Error when no API key is configured, or when the API responds
 *         with a non-2xx status (message includes status and body text).
 */
export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
  // Key resolution order: app config first, then HF environment variables.
  const apiKey = ENV.forgeApiKey || process.env.HF_TOKEN || process.env.HF_ACCESS_TOKEN;
  // Fail fast with a clear message instead of sending "Bearer undefined"
  // and surfacing an opaque 401 from the API.
  if (!apiKey) {
    throw new Error(
      "LLM invoke failed: no API key configured (set ENV.forgeApiKey, HF_TOKEN, or HF_ACCESS_TOKEN)"
    );
  }
  // Model selection: Qwen is the default; callers may pass another model id
  // (e.g. DeepSeek).
  const model = params.model || "huihui-ai/Qwen2.5-72B-Instruct-abliterated";
  // Direct serverless endpoint exposing an OpenAI-compatible chat route.
  const apiUrl = `https://api-inference.huggingface.co/models/${model}/v1/chat/completions`;
  console.log(`[LLM] Invoking ${model} directly at ${apiUrl}`);
  const response = await fetch(apiUrl, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: model,
      messages: params.messages,
      max_tokens: 2048,
      temperature: 0.8,
    }),
  });
  if (!response.ok) {
    // Read the body for diagnostics before throwing; the API usually
    // returns a JSON or text explanation of the failure.
    const errorText = await response.text();
    console.error(`[LLM Error] Status: ${response.status}, Body: ${errorText}`);
    throw new Error(`LLM invoke failed: ${response.status} - ${errorText}`);
  }
  return (await response.json()) as InvokeResult;
}