// Uploaded via huggingface_hub (commit cdf89b8) by likhonsheikh.
import { createHuggingFace } from '@ai-sdk/huggingface';
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { streamText, toTextStreamResponse } from 'ai';
import { NextRequest } from 'next/server';
// Provider instances, one per upstream vendor. Each reads its API key from
// the environment; the Hugging Face provider keeps the SDK's default base
// URL (https://router.huggingface.co/v1).
const huggingface = createHuggingFace({ apiKey: process.env.HUGGINGFACE_API_KEY });
const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
const anthropic = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const google = createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_API_KEY });
// Map a model name to the provider instance that serves it
const getProviderForModel = (modelName: string) => {
  // Name fragments identifying models routed through Hugging Face.
  const hfMarkers = [
    'deepseek-ai',
    'Qwen',
    'moonshotai',
    'zai-org',
    'MiniMaxAI',
    'meta-llama',
    'gemma',
  ];
  if (hfMarkers.some((marker) => modelName.includes(marker))) {
    return huggingface;
  }
  if (modelName.includes('gpt')) {
    return openai;
  }
  if (modelName.includes('claude')) {
    return anthropic;
  }
  if (modelName.includes('gemini')) {
    return google;
  }
  // Anything unrecognized falls back to Hugging Face.
  return huggingface;
};
/**
 * POST /api/... — stream a chat completion from the provider that serves the
 * requested model.
 *
 * Expects a JSON body of `{ model: string, messages: {role, content}[] }`.
 * Returns a plain-text token stream on success, 400 on a malformed request,
 * and a JSON error payload with status 500 on provider/runtime failure.
 */
export async function POST(req: NextRequest) {
  // Declared outside the try so the catch handler can report it. The original
  // destructured `model` inside the try and then referenced it in the catch,
  // where it was out of scope.
  let model: string | undefined;
  try {
    const body = await req.json();
    model = body.model;
    const messages = body.messages;

    if (!model) {
      return new Response('Model is required', { status: 400 });
    }
    if (!messages || !Array.isArray(messages)) {
      return new Response('Messages array is required', { status: 400 });
    }

    const provider = getProviderForModel(model);

    // Split the conversation into the SDK's `system` + `prompt` options.
    // The original passed bare strings through `messages`, which is not the
    // {role, content} message shape streamText expects.
    const systemMessage = messages.find((m: any) => m.role === 'system');
    const prompt = messages
      .filter((m: any) => m.role === 'user')
      .map((m: any) => m.content)
      .join('\n\n');

    const result = await streamText({
      model: provider(model),
      system: systemMessage?.content,
      prompt,
      maxTokens: 2000,
      temperature: 0.7,
      topP: 0.9,
      presencePenalty: 0.1,
      frequencyPenalty: 0.1,
      onError: ({ error }) => {
        console.error('Stream error:', error);
      },
    });

    // Stream the model output back to the client as plain text.
    return result.toTextStreamResponse();
  } catch (error) {
    console.error('AI API Error:', error);
    const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
    return new Response(JSON.stringify({
      error: 'Failed to process AI request',
      details: errorMessage,
      message: 'Please check your API keys and model configuration. Make sure you have a valid API key for the selected model provider.',
      model: model ?? 'unknown',
      // NOTE(review): constructor.name on a provider function yields a generic
      // name ('Function'); kept for response-shape compatibility.
      provider: model ? getProviderForModel(model).constructor.name : 'unknown',
    }), {
      status: 500,
      headers: {
        'Content-Type': 'application/json',
      },
    });
  }
}
// GET cannot carry the JSON body this route requires. The original forwarded
// GET to POST, so req.json() always rejected and every GET became a 500.
// Answer with 405 and advertise the supported method instead.
export async function GET(_req: NextRequest) {
  return new Response('Method Not Allowed. Use POST with a JSON body.', {
    status: 405,
    headers: { Allow: 'POST' },
  });
}