Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files
- server/_core/llm.ts +2 -1
- server/routers/chat.ts +3 -0
server/_core/llm.ts
CHANGED
|
@@ -57,6 +57,7 @@ export type ToolChoice =
|
|
| 57 |
|
| 58 |
export type InvokeParams = {
|
| 59 |
messages: Message[];
|
|
|
|
| 60 |
tools?: Tool[];
|
| 61 |
toolChoice?: ToolChoice;
|
| 62 |
tool_choice?: ToolChoice;
|
|
@@ -286,7 +287,7 @@ export async function invokeLLM(params: InvokeParams): Promise<InvokeResult> {
|
|
| 286 |
} = params;
|
| 287 |
|
| 288 |
const payload: Record<string, unknown> = {
|
| 289 |
-
model: process.env.HF_TOKEN && !process.env.BUILT_IN_FORGE_API_KEY ? "huihui-ai/Qwen2.5-72B-Instruct-abliterated" : "gemini-2.5-flash",
|
| 290 |
messages: messages.map(normalizeMessage),
|
| 291 |
};
|
| 292 |
|
|
|
|
| 57 |
|
| 58 |
export type InvokeParams = {
|
| 59 |
messages: Message[];
|
| 60 |
+
model?: string;
|
| 61 |
tools?: Tool[];
|
| 62 |
toolChoice?: ToolChoice;
|
| 63 |
tool_choice?: ToolChoice;
|
|
|
|
| 287 |
} = params;
|
| 288 |
|
| 289 |
const payload: Record<string, unknown> = {
|
| 290 |
+
model: params.model || (process.env.HF_TOKEN && !process.env.BUILT_IN_FORGE_API_KEY ? "huihui-ai/Qwen2.5-72B-Instruct-abliterated" : "gemini-2.5-flash"),
|
| 291 |
messages: messages.map(normalizeMessage),
|
| 292 |
};
|
| 293 |
|
server/routers/chat.ts
CHANGED
|
@@ -20,6 +20,7 @@ export const chatRouter = router({
|
|
| 20 |
|
| 21 |
if (input.mode === "qwen" || input.mode === "auto") {
|
| 22 |
response = await invokeLLM({
|
|
|
|
| 23 |
messages: [
|
| 24 |
{
|
| 25 |
role: "system",
|
|
@@ -35,6 +36,7 @@ Focus on: security, efficiency, and reliability.`,
|
|
| 35 |
});
|
| 36 |
} else if (input.mode === "deepseek") {
|
| 37 |
response = await invokeLLM({
|
|
|
|
| 38 |
messages: [
|
| 39 |
{
|
| 40 |
role: "system",
|
|
@@ -139,6 +141,7 @@ ${input.context ? `Context: ${input.context}` : ""}`,
|
|
| 139 |
|
| 140 |
// 2. Static Analysis & Multi-Criteria Scoring via LLM
|
| 141 |
const response = await invokeLLM({
|
|
|
|
| 142 |
messages: [
|
| 143 |
{
|
| 144 |
role: "system",
|
|
|
|
| 20 |
|
| 21 |
if (input.mode === "qwen" || input.mode === "auto") {
|
| 22 |
response = await invokeLLM({
|
| 23 |
+
model: "huihui-ai/Qwen2.5-72B-Instruct-abliterated",
|
| 24 |
messages: [
|
| 25 |
{
|
| 26 |
role: "system",
|
|
|
|
| 36 |
});
|
| 37 |
} else if (input.mode === "deepseek") {
|
| 38 |
response = await invokeLLM({
|
| 39 |
+
model: "huihui-ai/DeepSeek-R1-Distill-Qwen-32B-abliterated",
|
| 40 |
messages: [
|
| 41 |
{
|
| 42 |
role: "system",
|
|
|
|
| 141 |
|
| 142 |
// 2. Static Analysis & Multi-Criteria Scoring via LLM
|
| 143 |
const response = await invokeLLM({
|
| 144 |
+
model: "huihui-ai/DeepSeek-R1-Distill-Qwen-32B-abliterated",
|
| 145 |
messages: [
|
| 146 |
{
|
| 147 |
role: "system",
|