import { getProviderConfig } from "./providers.js";
/**
 * Send a single prompt to an LLM provider and return the generated text.
 *
 * Builds the provider-specific request payload (Gemini, DashScope, or the
 * OpenAI-compatible default), POSTs it to the provider's endpoint, and
 * normalizes the response to a plain string.
 *
 * @param {Object} args
 * @param {string} args.provider - Provider key passed to getProviderConfig.
 * @param {string} args.model - Model identifier passed to getProviderConfig.
 * @param {string} args.prompt - The user prompt to send.
 * @returns {Promise<string>} The generated text, or "" if the response
 *   contained no recognizable completion.
 * @throws {Error} On a non-2xx HTTP status (message includes status and raw
 *   body) or when a 2xx response body is not valid JSON (wrapped with cause).
 */
export async function callLLM({ provider, model, prompt }) {
  const cfg = getProviderConfig(provider, model);

  // Build the provider-specific request payload.
  let body;
  switch (cfg.name) {
    case "gemini":
      // Gemini generateContent shape: model is part of the URL, not the body.
      body = {
        contents: [{ parts: [{ text: prompt }] }]
      };
      break;
    case "dashscope":
      body = {
        model: cfg.model,
        input: { prompt }
      };
      break;
    default:
      // OpenAI-compatible chat-completions shape.
      body = {
        model: cfg.model,
        messages: [
          { role: "system", content: "You are a market research analyst." },
          { role: "user", content: prompt }
        ],
        temperature: 0.4
      };
  }

  const res = await fetch(cfg.baseUrl, {
    method: "POST",
    headers: cfg.headers,
    body: JSON.stringify(body)
  });

  // Read as text first so error responses (often non-JSON) can be reported verbatim.
  const text = await res.text();
  if (!res.ok) {
    throw new Error(`LLM error ${res.status}: ${text}`);
  }

  let json;
  try {
    json = JSON.parse(text);
  } catch (err) {
    // A 2xx status with a non-JSON body (proxy page, truncated stream, …):
    // surface a snippet instead of a bare SyntaxError with no context.
    throw new Error(`LLM error: non-JSON response body: ${text.slice(0, 200)}`, { cause: err });
  }

  // Normalize the provider-specific response shape to a plain string.
  if (cfg.name === "gemini") {
    return json.candidates?.[0]?.content?.parts?.[0]?.text || "";
  }
  if (cfg.name === "dashscope") {
    return json.output?.text || "";
  }
  return json.choices?.[0]?.message?.content || "";
}