// Hugging Face inference helpers: text embeddings and text generation.
| import { InferenceClient } from "@huggingface/inference"; | |
| const apiKey = process.env.HUGGING_FACE_API_TOKEN; | |
| if (!apiKey) { | |
| throw new Error("Missing HUGGING_FACE_API_TOKEN environment variable."); | |
| } | |
| const hf = new InferenceClient(apiKey); | |
| // Generate text embeddings | |
| export async function generateEmbedding(text: string): Promise<number[]> { | |
| try { | |
| const raw = await hf.featureExtraction({ | |
| model: "sentence-transformers/all-MiniLM-L6-v2", | |
| inputs: text, | |
| }); | |
| // Normalize HuggingFace outputs into flat number[] | |
| if (typeof raw === "number") { | |
| return [raw]; | |
| } | |
| if (Array.isArray(raw)) { | |
| // raw could be number[] OR number[][] | |
| if (typeof raw[0] === "number") { | |
| return raw as number[]; | |
| } | |
| // number[][] → flatten | |
| return (raw as number[][]).flat(); | |
| } | |
| throw new Error("Unexpected embedding format from HuggingFace."); | |
| } catch (error: unknown) { | |
| console.error("Embedding generation error:", error); | |
| throw error; | |
| } | |
| } | |
| // Text generation | |
| export async function runInference( | |
| modelId: string, | |
| inputs: string, | |
| parameters?: Record<string, unknown>, | |
| ) { | |
| try { | |
| const response = await hf.textGeneration({ | |
| model: modelId, | |
| inputs, | |
| parameters: { | |
| max_new_tokens: 250, | |
| temperature: 0.7, | |
| ...parameters, | |
| }, | |
| }); | |
| return response; | |
| } catch (error: unknown) { | |
| console.error("Hugging Face Inference Error:", error); | |
| throw error; | |
| } | |
| } | |