Spaces:
Paused
Paused
// OpenRouter Model Interfaces
/**
 * Architecture metadata reported for a model by the OpenRouter `/models` API.
 */
export interface OpenRouterModelArchitecture {
  // Modalities the model accepts / produces (e.g. "text", "image");
  // exact vocabulary comes from the API — confirm against OpenRouter docs.
  input_modalities: string[];
  output_modalities: string[];
  // Name of the tokenizer family used by the model.
  tokenizer: string;
  // Instruction-format identifier; null when the model has no instruct template.
  instruct_type: string | null;
}
/**
 * Per-model pricing as returned by OpenRouter.
 *
 * NOTE(review): all figures are serialized as strings by the API; units are
 * presumably USD per token / per request — confirm with the OpenRouter
 * pricing docs before doing arithmetic on them.
 */
export interface OpenRouterModelPricing {
  prompt: string;
  completion: string;
  request: string;
  image: string;
  web_search: string;
  internal_reasoning: string;
  input_cache_read: string;
  input_cache_write: string;
}
/**
 * Limits reported for the model's top-ranked provider.
 */
export interface OpenRouterModelTopProvider {
  // Context window (tokens) offered by this provider.
  context_length: number;
  // Upper bound on completion tokens for this provider.
  max_completion_tokens: number;
  is_moderated: boolean;
}
/**
 * A single model entry from the OpenRouter `/models` listing
 * (see fetchOpenRouterModels / OpenRouterModelsResponse).
 */
export interface OpenRouterModel {
  // Model identifier used in chat requests, e.g. "vendor/model-name".
  id: string;
  canonical_slug: string;
  name: string;
  // Creation timestamp; presumably Unix epoch seconds — TODO confirm with API docs.
  created: number;
  description: string;
  // Total context window in tokens.
  context_length: number;
  architecture: OpenRouterModelArchitecture;
  pricing: OpenRouterModelPricing;
  top_provider: OpenRouterModelTopProvider;
  // Free-form object; schema intentionally not modeled here.
  per_request_limits: Record<string, unknown>;
  supported_parameters: string[];
}
/** Envelope returned by GET https://openrouter.ai/api/v1/models. */
export interface OpenRouterModelsResponse {
  data: OpenRouterModel[];
}
// Existing interfaces
/** One chat message in OpenRouter's OpenAI-compatible format. */
export interface OpenRouterMessage {
  role: "system" | "user" | "assistant";
  content: string;
}
/** Request body for POST /api/v1/chat/completions (see callOpenRouter). */
export interface OpenRouterRequest {
  model: string;
  messages: OpenRouterMessage[];
  max_tokens?: number;
  temperature?: number;
  // NOTE: callOpenRouter unconditionally overrides this to true.
  stream?: boolean;
}
// Cache for model information to avoid repeated API calls.
// Module-level, so entries persist for the lifetime of the process;
// populated by getOpenRouterModelInfo(), keyed by model id.
const modelCache = new Map<string, OpenRouterModel>();
| // Fetch available models from OpenRouter | |
| export async function fetchOpenRouterModels( | |
| apiKey?: string | |
| ): Promise<OpenRouterModel[]> { | |
| console.log('๐ fetchOpenRouterModels called, API key provided:', !!apiKey); | |
| const headers: Record<string, string> = { | |
| "Content-Type": "application/json", | |
| "HTTP-Referer": process.env.NEXT_PUBLIC_SITE_URL || "http://localhost:3000", | |
| "X-Title": "DeepSite - AI Website Builder", | |
| }; | |
| // Add authorization header if API key is provided (for potentially better results) | |
| if (apiKey) { | |
| headers["Authorization"] = `Bearer ${apiKey}`; | |
| } | |
| console.log('๐ก Making request to OpenRouter API...'); | |
| console.log('๐ Headers:', Object.keys(headers)); | |
| const response = await fetch("https://openrouter.ai/api/v1/models", { | |
| method: "GET", | |
| headers, | |
| }); | |
| console.log('๐ฅ OpenRouter API response status:', response.status); | |
| if (!response.ok) { | |
| const errorText = await response.text(); | |
| console.error('โ OpenRouter API error:', response.status, errorText); | |
| throw new Error(`Failed to fetch OpenRouter models: ${response.statusText}`); | |
| } | |
| const data: OpenRouterModelsResponse = await response.json(); | |
| console.log('โ OpenRouter API returned', data.data.length, 'models'); | |
| return data.data; | |
| } | |
| export async function callOpenRouter( | |
| request: OpenRouterRequest, | |
| apiKey: string, | |
| signal?: AbortSignal | |
| ): Promise<Response> { | |
| console.log('๐ CallOpenRouter called with:', { | |
| model: request.model, | |
| apiKeyProvided: !!apiKey, | |
| apiKeyPrefix: apiKey ? apiKey.substring(0, 10) + '...' : 'none' | |
| }); | |
| const response = await fetch("https://openrouter.ai/api/v1/chat/completions", { | |
| method: "POST", | |
| headers: { | |
| "Authorization": `Bearer ${apiKey}`, | |
| "Content-Type": "application/json", | |
| "HTTP-Referer": process.env.NEXT_PUBLIC_SITE_URL || "http://localhost:3000", | |
| "X-Title": "DeepSite - AI Website Builder", | |
| }, | |
| body: JSON.stringify({ | |
| ...request, | |
| stream: true, // Always use streaming for consistency | |
| }), | |
| signal, | |
| }); | |
| console.log('๐ฅ OpenRouter chat response status:', response.status); | |
| if (!response.ok) { | |
| const errorData = await response.json().catch(() => ({})); | |
| console.error('โ OpenRouter error details:', { | |
| status: response.status, | |
| statusText: response.statusText, | |
| errorData | |
| }); | |
| // Handle specific OpenRouter error cases | |
| if (response.status === 401) { | |
| throw new Error("Invalid OpenRouter API key. Please check your API key and try again."); | |
| } else if (response.status === 429) { | |
| throw new Error("OpenRouter rate limit exceeded. Please try again later."); | |
| } else if (response.status === 402) { | |
| throw new Error("Insufficient credits in your OpenRouter account. Please add credits and try again."); | |
| } else if (response.status === 400) { | |
| throw new Error(errorData.error?.message || "Invalid request to OpenRouter API. Please check your model selection."); | |
| } | |
| throw new Error( | |
| errorData.error?.message || | |
| `OpenRouter API error: ${response.status} ${response.statusText}` | |
| ); | |
| } | |
| console.log('โ OpenRouter chat request successful'); | |
| return response; | |
| } | |
| export async function* parseOpenRouterStream(response: Response) { | |
| const reader = response.body?.getReader(); | |
| if (!reader) { | |
| throw new Error("No readable stream in OpenRouter response"); | |
| } | |
| const decoder = new TextDecoder(); | |
| let buffer = ""; | |
| let chunkCount = 0; | |
| let contentCount = 0; | |
| console.log('๐ Starting OpenRouter stream parsing...'); | |
| try { | |
| while (true) { | |
| const { done, value } = await reader.read(); | |
| if (done) { | |
| console.log('โ OpenRouter stream parsing completed:', { | |
| totalChunks: chunkCount, | |
| totalContentChunks: contentCount, | |
| bufferRemaining: buffer.length | |
| }); | |
| break; | |
| } | |
| chunkCount++; | |
| // Append new chunk to buffer | |
| buffer += decoder.decode(value, { stream: true }); | |
| // Process complete lines from buffer | |
| while (true) { | |
| const lineEnd = buffer.indexOf('\n'); | |
| if (lineEnd === -1) break; | |
| const line = buffer.slice(0, lineEnd).trim(); | |
| buffer = buffer.slice(lineEnd + 1); | |
| // Skip empty lines | |
| if (!line) continue; | |
| // Handle SSE comments (ignore them as per OpenRouter docs) | |
| if (line.startsWith(':')) { | |
| console.log('๐ฌ SSE comment:', line); | |
| continue; | |
| } | |
| if (line.startsWith('data: ')) { | |
| const data = line.slice(6); | |
| if (data === '[DONE]') { | |
| console.log('๐ Received [DONE] signal from OpenRouter'); | |
| return; | |
| } | |
| try { | |
| const parsed = JSON.parse(data); | |
| const content = parsed.choices?.[0]?.delta?.content; | |
| if (content) { | |
| contentCount++; | |
| console.log(`๐ Content chunk ${contentCount}:`, { | |
| length: content.length, | |
| preview: content.substring(0, 50) + (content.length > 50 ? '...' : '') | |
| }); | |
| yield content; | |
| } | |
| } catch (parseError) { | |
| console.warn('โ ๏ธ Failed to parse OpenRouter SSE data:', { | |
| data: data.substring(0, 100), | |
| error: parseError | |
| }); | |
| // Continue processing other lines instead of breaking | |
| } | |
| } | |
| } | |
| } | |
| } catch (streamError) { | |
| console.error('โ Error in OpenRouter stream parsing:', streamError); | |
| throw streamError; | |
| } finally { | |
| reader.releaseLock(); | |
| } | |
| } | |
| // Get model information from OpenRouter API with caching | |
| export async function getOpenRouterModelInfo( | |
| modelId: string, | |
| apiKey?: string | |
| ): Promise<OpenRouterModel | null> { | |
| // Check cache first | |
| if (modelCache.has(modelId)) { | |
| console.log('๐ Using cached model info for:', modelId); | |
| return modelCache.get(modelId) || null; | |
| } | |
| try { | |
| console.log('๐ Fetching model info for:', modelId); | |
| const models = await fetchOpenRouterModels(apiKey); | |
| // Cache all models for future use | |
| models.forEach(model => { | |
| modelCache.set(model.id, model); | |
| }); | |
| const modelInfo = models.find(model => model.id === modelId); | |
| if (!modelInfo) { | |
| console.warn('โ ๏ธ Model not found in OpenRouter API:', modelId); | |
| return null; | |
| } | |
| console.log('โ Found model info:', { | |
| id: modelInfo.id, | |
| contextLength: modelInfo.context_length, | |
| maxCompletionTokens: modelInfo.top_provider.max_completion_tokens | |
| }); | |
| return modelInfo; | |
| } catch (error) { | |
| console.error('โ Failed to fetch model info:', error); | |
| return null; | |
| } | |
| } | |
| // Calculate safe max_tokens based on model context length and estimated input tokens | |
| export function calculateSafeMaxTokens( | |
| contextLength: number, | |
| estimatedInputTokens: number, | |
| maxCompletionTokens?: number | |
| ): number { | |
| // Leave some buffer for safety (10% of context length or minimum 1000 tokens) | |
| const safetyBuffer = Math.max(Math.floor(contextLength * 0.1), 1000); | |
| // Calculate available tokens for output | |
| const availableTokens = contextLength - estimatedInputTokens - safetyBuffer; | |
| // Respect model's max completion tokens if available | |
| const modelMaxTokens = maxCompletionTokens || availableTokens; | |
| // Use the smaller of available tokens or model's max completion tokens | |
| const safeMaxTokens = Math.min(availableTokens, modelMaxTokens); | |
| // Ensure we don't go below a reasonable minimum | |
| const finalMaxTokens = Math.max(safeMaxTokens, 1000); | |
| console.log('๐งฎ Token calculation:', { | |
| contextLength, | |
| estimatedInputTokens, | |
| safetyBuffer, | |
| availableTokens, | |
| modelMaxTokens, | |
| finalMaxTokens | |
| }); | |
| return finalMaxTokens; | |
| } | |
| // Rough token estimation (1 token โ 4 characters for most models) | |
| export function estimateTokenCount(text: string): number { | |
| // More sophisticated estimation could use tiktoken library, but this is a reasonable approximation | |
| return Math.ceil(text.length / 4); | |
| } | |