/**
 * Section 1: Backend Core - LLM Engine with NVIDIA API Integration
 *
 * This module handles:
 * - NVIDIA API client initialization
 * - Smart LLM fallback chain (Llama-3 70B primary)
 * - DeepSeek-style reasoning generation
 * - Error handling and retry logic
 */
import { invokeLLM } from "./_core/llm";

/**
 * NVIDIA Model Configuration
 * Defines the fallback chain for LLM models
 */
export const LLM_MODELS = {
  primary: "meta-llama/llama-3-70b-instruct",
  fallbacks: [
    "meta-llama/llama-2-70b-chat-hf",
    "mistralai/mistral-large",
    "meta-llama/llama-3-8b-instruct",
  ],
};
/**
 * Image Generation Models
 */
export const IMAGE_MODELS = {
  primary: "nvidia/sdxl",
  fallback: "black-forest-labs/flux-1-dev",
};

/**
 * Video Generation Model
 */
export const VIDEO_MODEL = "nvidia/video-generation";

/**
 * Interface for LLM response with reasoning
 */
export interface LLMResponseWithReasoning {
  reasoning: string;
  response: string;
  model: string;
  tokensUsed: number;
}
/**
 * Generate a response with optional reasoning (DeepSeek-style)
 *
 * @param userPrompt - The user's input message
 * @param searchResults - Optional search results to include in context
 * @param enableReasoning - Whether to generate internal reasoning first
 * @param conversationHistory - Previous messages for context
 * @returns Response with reasoning and final answer
 */
export async function generateResponseWithReasoning(
  userPrompt: string,
  searchResults?: string,
  enableReasoning: boolean = false,
  conversationHistory: Array<{ role: string; content: string }> = []
): Promise<LLMResponseWithReasoning> {
  try {
    let reasoning = "";

    // Step 1: Generate reasoning if enabled (DeepSeek-style)
    if (enableReasoning) {
      reasoning = await generateReasoning(userPrompt, searchResults);
    }

    // Step 2: Build the system prompt with context
    const systemPrompt = buildSystemPrompt(searchResults, reasoning);

    // Step 3: Prepare messages for the LLM
    const messages = [
      { role: "system", content: systemPrompt },
      ...conversationHistory.map((msg) => ({
        role: msg.role as "user" | "assistant",
        content: msg.content,
      })),
      { role: "user", content: userPrompt },
    ];

    // Step 4: Call the LLM with the fallback chain
    const response = await callLLMWithFallback(messages);

    return {
      reasoning,
      response: response.content,
      model: response.model,
      tokensUsed: response.tokensUsed || 0,
    };
  } catch (error) {
    console.error("Error generating response:", error);
    throw new Error("Failed to generate response from LLM");
  }
}
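/*
 * Illustrative usage (a sketch, not part of the module's public surface;
 * the conversation history shown here is hypothetical):
 *
 *   const result = await generateResponseWithReasoning(
 *     "Explain closures in JavaScript",
 *     undefined, // no search results
 *     true,      // generate DeepSeek-style reasoning first
 *     [
 *       { role: "user", content: "Hi" },
 *       { role: "assistant", content: "Hello! How can I help?" },
 *     ]
 *   );
 *   console.log(result.reasoning); // internal thought process (may be "")
 *   console.log(result.response);  // final answer text
 *   console.log(result.model);     // which model in the chain answered
 */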
/**
 * Generate internal reasoning (DeepSeek-style thought process)
 *
 * @param userPrompt - The user's input
 * @param searchResults - Optional search context
 * @returns Reasoning text
 */
async function generateReasoning(
  userPrompt: string,
  searchResults?: string
): Promise<string> {
  const reasoningPrompt = `You are an expert AI assistant. Analyze the following user request and provide your internal reasoning process (your thoughts on how to approach this).
User Request: "${userPrompt}"
${searchResults ? `\nSearch Context:\n${searchResults}` : ""}
Provide a concise internal reasoning (2-3 sentences) on how you will approach this request. Be direct and analytical.`;

  try {
    const response = await invokeLLM({
      messages: [
        {
          role: "system",
          content:
            "You are a reasoning engine. Provide concise internal thoughts.",
        },
        { role: "user", content: reasoningPrompt },
      ],
    });
    const content = response.choices?.[0]?.message?.content || "";
    return typeof content === "string" ? content : JSON.stringify(content);
  } catch (error) {
    console.warn("Failed to generate reasoning, continuing without it:", error);
    return "";
  }
}
/**
 * Build system prompt with optional search context and reasoning
 */
function buildSystemPrompt(
  searchResults?: string,
  reasoning?: string
): string {
  let prompt =
    "You are Domify Academy Bot, an expert AI assistant. Provide clear, concise, and accurate responses. ";

  if (searchResults) {
    prompt +=
      "\n\nYou have access to recent search results. Use them to provide up-to-date information. ";
    prompt += "Cite sources when relevant.";
  }

  if (reasoning) {
    prompt +=
      "\n\nYou have already analyzed this request. Use your reasoning to guide your response.";
  }

  prompt +=
    "\n\nWhen providing code, use proper markdown formatting with language specification (e.g., ```python). ";
  prompt +=
    "Highlight important concepts in your response using **bold** text.";

  return prompt;
}
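/*
 * For reference, buildSystemPrompt(undefined, "some reasoning") assembles
 * (paragraph breaks from the "\n\n" separators):
 *
 *   You are Domify Academy Bot, an expert AI assistant. Provide clear,
 *   concise, and accurate responses.
 *
 *   You have already analyzed this request. Use your reasoning to guide
 *   your response.
 *
 *   When providing code, use proper markdown formatting with language
 *   specification (e.g., ```python). Highlight important concepts in
 *   your response using **bold** text.
 */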
/**
 * Call the LLM with an intelligent fallback chain.
 * Tries the primary model first, then falls back to alternates when a
 * call fails.
 *
 * NOTE: invokeLLM is not passed a model name below, so every attempt
 * currently uses the platform's default model; the loop behaves as a
 * plain retry chain, and `model` records which entry in the chain was
 * intended (useful for logging). See the sketch after this function for
 * forwarding the model if ./_core/llm supports an override.
 */
async function callLLMWithFallback(
  messages: Array<{ role: string; content: string }>
): Promise<{ content: string; model: string; tokensUsed?: number }> {
  const models = [LLM_MODELS.primary, ...LLM_MODELS.fallbacks];

  for (let i = 0; i < models.length; i++) {
    const model = models[i]!;
    try {
      console.log(`Attempting LLM call with model: ${model}`);
      const response = await invokeLLM({
        messages: messages as any,
      });
      const content = response.choices?.[0]?.message?.content || "";
      const contentStr =
        typeof content === "string" ? content : JSON.stringify(content);
      return {
        content: contentStr,
        model,
        tokensUsed: (response.usage?.total_tokens as number) ?? 0,
      };
    } catch (error) {
      console.warn(`Model ${model} failed:`, error);
      if (i === models.length - 1) {
        throw new Error("All LLM models exhausted");
      }
    }
  }
  throw new Error("Failed to call any LLM model");
}
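/*
 * Sketch: if ./_core/llm's invokeLLM accepts a per-call model override
 * (an assumption -- verify against its actual signature), the loop above
 * could forward the selected model and pause briefly between attempts:
 *
 *   const response = await invokeLLM({
 *     messages: messages as any,
 *     model, // hypothetical option; only valid if invokeLLM supports it
 *   });
 *   // Simple linear backoff between fallback attempts (plain JS, no
 *   // extra dependencies):
 *   await new Promise((resolve) => setTimeout(resolve, 500 * (i + 1)));
 */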
/**
 * Generate an image using NVIDIA SDXL or Flux
 *
 * @param prompt - Image generation prompt
 * @returns Image URL
 */
export async function generateImage(prompt: string): Promise<string> {
  try {
    console.log("Generating image with prompt:", prompt);
    // Use the built-in image generation from Manus
    const { generateImage: builtInGenerateImage } = await import(
      "./_core/imageGeneration"
    );
    const result = await builtInGenerateImage({ prompt });
    return result.url || "";
  } catch (error) {
    console.error("Image generation failed:", error);
    throw new Error("Failed to generate image");
  }
}
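/*
 * Illustrative usage (hypothetical caller):
 *
 *   const url = await generateImage("a watercolor fox in autumn leaves");
 *   // The URL can then be embedded in a markdown reply: ![fox](url)
 */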
/**
 * Generate a video from an image (optional feature)
 *
 * @param imageUrl - URL of the image to convert
 * @param prompt - Optional prompt for video generation
 * @returns Video URL
 */
export async function generateVideo(
  imageUrl: string,
  prompt?: string
): Promise<string> {
  try {
    console.log("Generating video from image:", imageUrl);
    // This would call NVIDIA's video generation API (see VIDEO_MODEL).
    // For now, this is a stub that always throws; in production,
    // integrate with the NVIDIA video generation endpoint.
    throw new Error(
      "Video generation not yet implemented. Contact support for this feature."
    );
  } catch (error) {
    console.error("Video generation failed:", error);
    throw error;
  }
}