Leon4gr45 committed on
Commit
7c9579c
·
verified ·
1 Parent(s): 71c6167

Upload folder using huggingface_hub

Browse files
app/api/generate-ai-code-stream/route.ts CHANGED
@@ -1,8 +1,4 @@
1
  import { NextRequest, NextResponse } from 'next/server';
2
- import { createGroq } from '@ai-sdk/groq';
3
- import { createAnthropic } from '@ai-sdk/anthropic';
4
- import { createOpenAI } from '@ai-sdk/openai';
5
- import { createGoogleGenerativeAI } from '@ai-sdk/google';
6
  import { streamText } from 'ai';
7
  import type { SandboxState } from '@/types/sandbox';
8
  import { selectFilesForEdit, getFileContents, formatFilesForAI } from '@/lib/context-selector';
@@ -10,40 +6,11 @@ import { executeSearchPlan, formatSearchResultsForAI, selectTargetFile } from '@
10
  import { FileManifest } from '@/types/file-manifest';
11
  import type { ConversationState, ConversationMessage, ConversationEdit } from '@/types/conversation';
12
  import { appConfig } from '@/config/app.config';
 
13
 
14
  // Force dynamic route to enable streaming
15
  export const dynamic = 'force-dynamic';
16
 
17
- // Check if we're using Vercel AI Gateway
18
- const isUsingAIGateway = !!process.env.AI_GATEWAY_API_KEY;
19
- const aiGatewayBaseURL = 'https://ai-gateway.vercel.sh/v1';
20
-
21
- console.log('[generate-ai-code-stream] AI Gateway config:', {
22
- isUsingAIGateway,
23
- hasGroqKey: !!process.env.GROQ_API_KEY,
24
- hasAIGatewayKey: !!process.env.AI_GATEWAY_API_KEY
25
- });
26
-
27
- const groq = createGroq({
28
- apiKey: process.env.AI_GATEWAY_API_KEY ?? process.env.GROQ_API_KEY,
29
- baseURL: isUsingAIGateway ? aiGatewayBaseURL : undefined,
30
- });
31
-
32
- const anthropic = createAnthropic({
33
- apiKey: process.env.AI_GATEWAY_API_KEY ?? process.env.ANTHROPIC_API_KEY,
34
- baseURL: isUsingAIGateway ? aiGatewayBaseURL : (process.env.ANTHROPIC_BASE_URL || 'https://api.anthropic.com/v1'),
35
- });
36
-
37
- const googleGenerativeAI = createGoogleGenerativeAI({
38
- apiKey: process.env.AI_GATEWAY_API_KEY ?? process.env.GEMINI_API_KEY,
39
- baseURL: isUsingAIGateway ? aiGatewayBaseURL : undefined,
40
- });
41
-
42
- const openai = createOpenAI({
43
- apiKey: process.env.AI_GATEWAY_API_KEY ?? process.env.OPENAI_API_KEY,
44
- baseURL: isUsingAIGateway ? aiGatewayBaseURL : process.env.OPENAI_BASE_URL,
45
- });
46
-
47
  // Helper function to analyze user preferences from conversation history
48
  function analyzeUserPreferences(messages: ConversationMessage[]): {
49
  commonPatterns: string[];
@@ -1213,33 +1180,12 @@ MORPH FAST APPLY MODE (EDIT-ONLY):
1213
  const packagesToInstall: string[] = [];
1214
 
1215
  // Determine which provider to use based on model
 
1216
  const isAnthropic = model.startsWith('anthropic/');
1217
  const isGoogle = model.startsWith('google/');
1218
  const isOpenAI = model.startsWith('openai/');
1219
- const isKimiGroq = model === 'moonshotai/kimi-k2-instruct-0905';
1220
- const modelProvider = isAnthropic ? anthropic :
1221
- (isOpenAI ? openai :
1222
- (isGoogle ? googleGenerativeAI :
1223
- (isKimiGroq ? groq : groq)));
1224
-
1225
- // Fix model name transformation for different providers
1226
- let actualModel: string;
1227
- if (isAnthropic) {
1228
- actualModel = model.replace('anthropic/', '');
1229
- } else if (isOpenAI) {
1230
- actualModel = model.replace('openai/', '');
1231
- } else if (isKimiGroq) {
1232
- // Kimi on Groq - use full model string
1233
- actualModel = 'moonshotai/kimi-k2-instruct-0905';
1234
- } else if (isGoogle) {
1235
- // Google uses specific model names - convert our naming to theirs
1236
- actualModel = model.replace('google/', '');
1237
- } else {
1238
- actualModel = model;
1239
- }
1240
 
1241
- console.log(`[generate-ai-code-stream] Using provider: ${isAnthropic ? 'Anthropic' : isGoogle ? 'Google' : isOpenAI ? 'OpenAI' : 'Groq'}, model: ${actualModel}`);
1242
- console.log(`[generate-ai-code-stream] AI Gateway enabled: ${isUsingAIGateway}`);
1243
  console.log(`[generate-ai-code-stream] Model string: ${model}`);
1244
 
1245
  // Make streaming API call with appropriate provider
@@ -1336,8 +1282,6 @@ It's better to have 3 complete files than 10 incomplete files.`
1336
  } catch (streamError: any) {
1337
  console.error(`[generate-ai-code-stream] Error calling streamText (attempt ${retryCount + 1}/${maxRetries + 1}):`, streamError);
1338
 
1339
- // Check if this is a Groq service unavailable error
1340
- const isGroqServiceError = isKimiGroq && streamError.message?.includes('Service unavailable');
1341
  const isRetryableError = streamError.message?.includes('Service unavailable') ||
1342
  streamError.message?.includes('rate limit') ||
1343
  streamError.message?.includes('timeout');
@@ -1355,17 +1299,11 @@ It's better to have 3 complete files than 10 incomplete files.`
1355
  // Wait before retry with exponential backoff
1356
  await new Promise(resolve => setTimeout(resolve, retryCount * 2000));
1357
 
1358
- // If Groq fails, try switching to a fallback model
1359
- if (isGroqServiceError && retryCount === maxRetries) {
1360
- console.log('[generate-ai-code-stream] Groq service unavailable, falling back to GPT-4');
1361
- streamOptions.model = openai('gpt-4-turbo');
1362
- actualModel = 'gpt-4-turbo';
1363
- }
1364
  } else {
1365
  // Final error, send to user
1366
  await sendProgress({
1367
  type: 'error',
1368
- message: `Failed to initialize ${isGoogle ? 'Gemini' : isAnthropic ? 'Claude' : isOpenAI ? 'GPT-5' : isKimiGroq ? 'Kimi (Groq)' : 'Groq'} streaming: ${streamError.message}`
1369
  });
1370
 
1371
  // If this is a Google model error, provide helpful info
 
1
  import { NextRequest, NextResponse } from 'next/server';
 
 
 
 
2
  import { streamText } from 'ai';
3
  import type { SandboxState } from '@/types/sandbox';
4
  import { selectFilesForEdit, getFileContents, formatFilesForAI } from '@/lib/context-selector';
 
6
  import { FileManifest } from '@/types/file-manifest';
7
  import type { ConversationState, ConversationMessage, ConversationEdit } from '@/types/conversation';
8
  import { appConfig } from '@/config/app.config';
9
+ import { getProviderForModel } from '@/lib/ai/provider-manager';
10
 
11
  // Force dynamic route to enable streaming
12
  export const dynamic = 'force-dynamic';
13
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  // Helper function to analyze user preferences from conversation history
15
  function analyzeUserPreferences(messages: ConversationMessage[]): {
16
  commonPatterns: string[];
 
1180
  const packagesToInstall: string[] = [];
1181
 
1182
  // Determine which provider to use based on model
1183
+ const { client: modelProvider, actualModel } = getProviderForModel(model);
1184
  const isAnthropic = model.startsWith('anthropic/');
1185
  const isGoogle = model.startsWith('google/');
1186
  const isOpenAI = model.startsWith('openai/');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1187
 
1188
+ console.log(`[generate-ai-code-stream] Using provider for model: ${actualModel}`);
 
1189
  console.log(`[generate-ai-code-stream] Model string: ${model}`);
1190
 
1191
  // Make streaming API call with appropriate provider
 
1282
  } catch (streamError: any) {
1283
  console.error(`[generate-ai-code-stream] Error calling streamText (attempt ${retryCount + 1}/${maxRetries + 1}):`, streamError);
1284
 
 
 
1285
  const isRetryableError = streamError.message?.includes('Service unavailable') ||
1286
  streamError.message?.includes('rate limit') ||
1287
  streamError.message?.includes('timeout');
 
1299
  // Wait before retry with exponential backoff
1300
  await new Promise(resolve => setTimeout(resolve, retryCount * 2000));
1301
 
 
 
 
 
 
 
1302
  } else {
1303
  // Final error, send to user
1304
  await sendProgress({
1305
  type: 'error',
1306
+ message: `Failed to initialize ${isGoogle ? 'Gemini' : isAnthropic ? 'Claude' : isOpenAI ? 'GPT-5' : 'AI'} streaming: ${streamError.message}`
1307
  });
1308
 
1309
  // If this is a Google model error, provide helpful info
app/api/scrape-url-enhanced/route.ts CHANGED
@@ -18,7 +18,6 @@ function sanitizeQuotes(text: string): string {
18
 
19
  export async function POST(request: NextRequest) {
20
  try {
21
- console.log('--- APPLYING COMBINED FIX V1 ---');
22
  const { url } = await request.json();
23
 
24
  if (!url) {
 
18
 
19
  export async function POST(request: NextRequest) {
20
  try {
 
21
  const { url } = await request.json();
22
 
23
  if (!url) {