File size: 9,506 Bytes
dcd5e1d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9e8594b
dcd5e1d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
// OpenRouter Model Interfaces
/** Modality/tokenizer metadata for a model as listed by the OpenRouter models API. */
export interface OpenRouterModelArchitecture {
  input_modalities: string[];   // input types the model accepts
  output_modalities: string[];  // output types the model produces
  tokenizer: string;
  instruct_type: string | null; // null when the model has no instruct format
}

/**
 * Per-unit prices for a model. All values are decimal strings as returned by
 * the API (not numbers) — parse before doing arithmetic.
 */
export interface OpenRouterModelPricing {
  prompt: string;             // price per prompt token
  completion: string;         // price per completion token
  request: string;            // flat per-request price
  image: string;
  web_search: string;
  internal_reasoning: string;
  input_cache_read: string;
  input_cache_write: string;
}

/** Context/limit information for the model's top-ranked provider. */
export interface OpenRouterModelTopProvider {
  context_length: number;         // provider-specific context window (tokens)
  max_completion_tokens: number;  // provider's cap on completion length
  is_moderated: boolean;
}

/** A single model entry from the OpenRouter models listing. */
export interface OpenRouterModel {
  id: string;                 // model identifier used in chat requests
  canonical_slug: string;
  name: string;               // human-readable display name
  created: number;            // creation timestamp (presumably unix epoch seconds — TODO confirm)
  description: string;
  context_length: number;     // model-level context window (tokens)
  architecture: OpenRouterModelArchitecture;
  pricing: OpenRouterModelPricing;
  top_provider: OpenRouterModelTopProvider;
  per_request_limits: Record<string, unknown>;
  supported_parameters: string[];
}

/** Envelope of the models-list response: the model array lives under `data`. */
export interface OpenRouterModelsResponse {
  data: OpenRouterModel[];
}

// Existing interfaces
/** A single chat message sent to the OpenRouter chat-completions endpoint. */
export interface OpenRouterMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

/** Request body for the OpenRouter chat-completions endpoint. */
export interface OpenRouterRequest {
  model: string;                  // OpenRouter model id (matches OpenRouterModel.id)
  messages: OpenRouterMessage[];
  max_tokens?: number;            // completion cap; see calculateSafeMaxTokens
  temperature?: number;
  stream?: boolean;               // note: callOpenRouter overrides this to true
}

// Module-level cache of model metadata keyed by model id. Populated by
// getOpenRouterModelInfo so repeated lookups avoid re-fetching the model list.
const modelCache = new Map<string, OpenRouterModel>();

// Fetch the list of available models from the OpenRouter API.
//
// The API key is optional for this endpoint; when provided it is sent as a
// Bearer token (may yield account-specific results).
// Throws an Error on a non-OK response. Fix vs. original: the thrown message
// now includes the status code and response body — `statusText` alone is
// frequently empty (e.g. under HTTP/2), which made failures undiagnosable
// even though the body was already fetched for logging.
export async function fetchOpenRouterModels(
  apiKey?: string
): Promise<OpenRouterModel[]> {
  console.log('๐Ÿ”„ fetchOpenRouterModels called, API key provided:', !!apiKey);

  const headers: Record<string, string> = {
    "Content-Type": "application/json",
    // OpenRouter uses these headers to attribute traffic to the calling app.
    "HTTP-Referer": process.env.NEXT_PUBLIC_SITE_URL || "http://localhost:3000",
    "X-Title": "DeepSite - AI Website Builder",
  };

  // Add authorization header if API key is provided (for potentially better results)
  if (apiKey) {
    headers["Authorization"] = `Bearer ${apiKey}`;
  }

  console.log('๐Ÿ“ก Making request to OpenRouter API...');
  console.log('๐Ÿ”— Headers:', Object.keys(headers));

  const response = await fetch("https://openrouter.ai/api/v1/models", {
    method: "GET",
    headers,
  });

  console.log('๐Ÿ“ฅ OpenRouter API response status:', response.status);

  if (!response.ok) {
    const errorText = await response.text();
    console.error('โŒ OpenRouter API error:', response.status, errorText);
    throw new Error(
      `Failed to fetch OpenRouter models: ${response.status} ${response.statusText}` +
      (errorText ? ` - ${errorText}` : "")
    );
  }

  const data: OpenRouterModelsResponse = await response.json();
  console.log('โœ… OpenRouter API returned', data.data.length, 'models');

  return data.data;
}

// Send a chat-completion request to OpenRouter and return the raw streaming
// Response. Streaming is always forced on so every downstream consumer uses
// the same SSE parsing path.
//
// Throws a user-friendly Error for known failure statuses (401/429/402/400)
// and a generic one (with the API's own message when present) otherwise.
export async function callOpenRouter(
  request: OpenRouterRequest,
  apiKey: string,
  signal?: AbortSignal
): Promise<Response> {
  console.log('๐Ÿ”‘ CallOpenRouter called with:', {
    model: request.model,
    apiKeyProvided: !!apiKey,
    apiKeyPrefix: apiKey ? apiKey.substring(0, 10) + '...' : 'none'
  });

  const requestHeaders = {
    "Authorization": `Bearer ${apiKey}`,
    "Content-Type": "application/json",
    "HTTP-Referer": process.env.NEXT_PUBLIC_SITE_URL || "http://localhost:3000",
    "X-Title": "DeepSite - AI Website Builder",
  };
  // Caller's stream flag is deliberately overridden: always stream.
  const payload = JSON.stringify({ ...request, stream: true });

  const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: requestHeaders,
    body: payload,
    signal,
  });

  console.log('๐Ÿ“ฅ OpenRouter chat response status:', response.status);

  if (response.ok) {
    console.log('โœ… OpenRouter chat request successful');
    return response;
  }

  const errorData = await response.json().catch(() => ({}));
  console.error('โŒ OpenRouter error details:', {
    status: response.status,
    statusText: response.statusText,
    errorData
  });

  // Known OpenRouter failure statuses mapped to user-facing messages.
  const knownErrors: Record<number, string> = {
    401: "Invalid OpenRouter API key. Please check your API key and try again.",
    429: "OpenRouter rate limit exceeded. Please try again later.",
    402: "Insufficient credits in your OpenRouter account. Please add credits and try again.",
  };
  const knownMessage = knownErrors[response.status];
  if (knownMessage) {
    throw new Error(knownMessage);
  }
  if (response.status === 400) {
    throw new Error(errorData.error?.message || "Invalid request to OpenRouter API. Please check your model selection.");
  }

  throw new Error(
    errorData.error?.message ||
    `OpenRouter API error: ${response.status} ${response.statusText}`
  );
}

// Async generator that parses an OpenRouter SSE stream and yields each
// content delta string as it arrives.
//
// Fix vs. original: when the stream ends without a trailing newline (e.g. a
// truncated response that never sent [DONE]), the final buffered line was
// silently dropped; the TextDecoder was also never flushed with a final
// decode(). Both are handled on the `done` path now.
export async function* parseOpenRouterStream(response: Response) {
  const reader = response.body?.getReader();
  if (!reader) {
    throw new Error("No readable stream in OpenRouter response");
  }

  const decoder = new TextDecoder();
  let buffer = "";
  let chunkCount = 0;
  let contentCount = 0;

  console.log('๐Ÿ”„ Starting OpenRouter stream parsing...');

  // Parse one trimmed, non-empty SSE line. Returns isDone=true for the
  // [DONE] sentinel, otherwise the content delta (or null when the line
  // carries none: comments, non-data lines, malformed JSON).
  const parseLine = (line: string): { isDone: boolean; content: string | null } => {
    // Handle SSE comments (ignore them as per OpenRouter docs)
    if (line.startsWith(':')) {
      console.log('๐Ÿ’ฌ SSE comment:', line);
      return { isDone: false, content: null };
    }
    if (!line.startsWith('data: ')) {
      return { isDone: false, content: null };
    }
    const data = line.slice(6);
    if (data === '[DONE]') {
      console.log('๐Ÿ Received [DONE] signal from OpenRouter');
      return { isDone: true, content: null };
    }
    try {
      const parsed = JSON.parse(data);
      const content = parsed.choices?.[0]?.delta?.content;
      return { isDone: false, content: content || null };
    } catch (parseError) {
      console.warn('โš ๏ธ Failed to parse OpenRouter SSE data:', {
        data: data.substring(0, 100),
        error: parseError
      });
      // Continue processing other lines instead of breaking
      return { isDone: false, content: null };
    }
  };

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        // Flush any bytes the decoder still holds, then process a trailing
        // line that arrived without a final newline so its content is not
        // silently dropped.
        buffer += decoder.decode();
        const tail = buffer.trim();
        buffer = "";
        if (tail) {
          const { content } = parseLine(tail);
          if (content) {
            contentCount++;
            yield content;
          }
        }
        console.log('โœ… OpenRouter stream parsing completed:', {
          totalChunks: chunkCount,
          totalContentChunks: contentCount,
          bufferRemaining: buffer.length
        });
        break;
      }

      chunkCount++;
      // Append new chunk to buffer
      buffer += decoder.decode(value, { stream: true });

      // Process complete lines from buffer
      while (true) {
        const lineEnd = buffer.indexOf('\n');
        if (lineEnd === -1) break;

        const line = buffer.slice(0, lineEnd).trim();
        buffer = buffer.slice(lineEnd + 1);

        // Skip empty lines
        if (!line) continue;

        const { isDone, content } = parseLine(line);
        if (isDone) return;
        if (content) {
          contentCount++;
          console.log(`๐Ÿ“ Content chunk ${contentCount}:`, {
            length: content.length,
            preview: content.substring(0, 50) + (content.length > 50 ? '...' : '')
          });
          yield content;
        }
      }
    }
  } catch (streamError) {
    console.error('โŒ Error in OpenRouter stream parsing:', streamError);
    throw streamError;
  } finally {
    reader.releaseLock();
  }
}

// Look up a single model's metadata by id, backed by the module-level
// modelCache. Fetching the full model list once populates the cache for all
// models. Returns null when the model is unknown or the API call fails.
export async function getOpenRouterModelInfo(
  modelId: string, 
  apiKey?: string
): Promise<OpenRouterModel | null> {
  // Fast path: answer from the cache when possible.
  if (modelCache.has(modelId)) {
    console.log('๐Ÿ“‹ Using cached model info for:', modelId);
    return modelCache.get(modelId) ?? null;
  }

  try {
    console.log('๐Ÿ” Fetching model info for:', modelId);
    const models = await fetchOpenRouterModels(apiKey);

    // Cache every model so later lookups skip the network entirely.
    for (const model of models) {
      modelCache.set(model.id, model);
    }

    const match = models.find((m) => m.id === modelId);
    if (!match) {
      console.warn('โš ๏ธ Model not found in OpenRouter API:', modelId);
      return null;
    }

    console.log('โœ… Found model info:', {
      id: match.id,
      contextLength: match.context_length,
      maxCompletionTokens: match.top_provider.max_completion_tokens
    });

    return match;
  } catch (error) {
    console.error('โŒ Failed to fetch model info:', error);
    return null;
  }
}

// Compute a max_tokens value that fits the model's context window: reserve a
// safety margin, subtract the estimated input, respect the model's own
// completion cap, and never return less than 1000.
export function calculateSafeMaxTokens(
  contextLength: number,
  estimatedInputTokens: number,
  maxCompletionTokens?: number
): number {
  // Safety margin: 10% of the context window, but at least 1000 tokens.
  const reserve = Math.max(Math.floor(contextLength * 0.1), 1000);

  // Tokens left over for the completion after input and margin.
  const remaining = contextLength - estimatedInputTokens - reserve;

  // Honor the model's own completion cap when one is known.
  const cap = maxCompletionTokens || remaining;

  // Clamp: never exceed either limit, never drop below the 1000-token floor.
  const result = Math.max(Math.min(remaining, cap), 1000);

  console.log('๐Ÿงฎ Token calculation:', {
    contextLength,
    estimatedInputTokens,
    safetyBuffer: reserve,
    availableTokens: remaining,
    modelMaxTokens: cap,
    finalMaxTokens: result
  });

  return result;
}

// Rough token estimate using the common ~4-characters-per-token heuristic.
// A real tokenizer (e.g. tiktoken) would be exact; this approximation is
// sufficient for the budget math in calculateSafeMaxTokens.
export function estimateTokenCount(text: string): number {
  const CHARS_PER_TOKEN = 4;
  return Math.ceil(text.length / CHARS_PER_TOKEN);
}