File size: 3,404 Bytes
cdf89b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
import { createHuggingFace } from '@ai-sdk/huggingface';
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { streamText, toTextStreamResponse } from 'ai';
import { NextRequest } from 'next/server';

// Create provider instances with proper configuration.
// NOTE(review): these are constructed at module load time, so a missing or
// invalid env var only surfaces when the provider is actually called during
// a request — confirm that deferred failure is intended.
const huggingface = createHuggingFace({
  apiKey: process.env.HUGGINGFACE_API_KEY,
  // Use the default base URL: https://router.huggingface.co/v1
});

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

const anthropic = createAnthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
});

const google = createGoogleGenerativeAI({
  apiKey: process.env.GOOGLE_API_KEY,
});

// Map a model identifier to the provider instance that should serve it.
// Matching is by substring of the model name; unrecognized models fall
// back to Hugging Face.
const getProviderForModel = (modelName: string) => {
  // Model-id fragments that indicate a Hugging Face-hosted model.
  const huggingFaceMarkers = [
    'deepseek-ai',
    'Qwen',
    'moonshotai',
    'zai-org',
    'MiniMaxAI',
    'meta-llama',
    'gemma',
  ];

  if (huggingFaceMarkers.some((marker) => modelName.includes(marker))) {
    return huggingface;
  }
  if (modelName.includes('gpt')) {
    return openai;
  }
  if (modelName.includes('claude')) {
    return anthropic;
  }
  if (modelName.includes('gemini')) {
    return google;
  }

  // Default to Hugging Face for unknown models.
  return huggingface;
};

/**
 * POST handler: streams a chat completion for the requested model.
 *
 * Expects a JSON body of `{ model: string, messages: {role, content}[] }`
 * and returns a text stream on success, or a JSON error payload (500) /
 * plain-text validation error (400) on failure.
 */
export async function POST(req: NextRequest) {
  // Declared outside the try block so the catch handler can report which
  // model failed. (The original code referenced `model` in the catch block
  // even though it was destructured inside `try` — an out-of-scope error.)
  let model: string | undefined;

  try {
    const body = await req.json();
    model = body.model;
    const messages = body.messages;

    if (!model) {
      return new Response('Model is required', { status: 400 });
    }

    if (!messages || !Array.isArray(messages)) {
      return new Response('Messages array is required', { status: 400 });
    }

    const provider = getProviderForModel(model);

    // streamText expects role/content message objects, not bare strings.
    // Pass any system prompt via the dedicated `system` option and forward
    // the rest of the conversation unchanged — this also preserves
    // assistant turns, which the previous join-only-user-messages approach
    // silently dropped.
    const systemMessage = messages.find((m: any) => m.role === 'system');
    const chatMessages = messages.filter((m: any) => m.role !== 'system');

    const result = await streamText({
      model: provider(model),
      system: systemMessage?.content,
      messages: chatMessages,
      maxTokens: 2000,
      temperature: 0.7,
      topP: 0.9,
      presencePenalty: 0.1,
      frequencyPenalty: 0.1,
      onError: ({ error }) => {
        console.error('Stream error:', error);
      },
    });

    // Return the streaming response
    return result.toTextStreamResponse();
  } catch (error) {
    console.error('AI API Error:', error);

    // Return a helpful error message
    const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';

    return new Response(JSON.stringify({
      error: 'Failed to process AI request',
      details: errorMessage,
      message: 'Please check your API keys and model configuration. Make sure you have a valid API key for the selected model provider.',
      model: model ?? 'unknown',
      // NOTE(review): the providers are factory functions, so
      // `.constructor.name` always reads 'Function' — the field is kept
      // only for response-shape compatibility; consider replacing it with
      // a real provider label.
      provider: model ? getProviderForModel(model).constructor.name : 'unknown',
    }), {
      status: 500,
      headers: {
        'Content-Type': 'application/json',
      },
    });
  }
}

// Support both POST and GET for compatibility.
// NOTE(review): GET requests normally carry no body, so the delegated POST
// handler's req.json() will reject and this route will answer with the 500
// error payload — confirm GET support is actually exercised, or return 405.
export async function GET(req: NextRequest) {
  return POST(req);
}