import OpenAI from 'openai';

// OpenAI-compatible client used to reach the (simulated) MedGemma endpoint.
//
// SECURITY: the key must come from a server-only environment variable.
// Next.js inlines every `NEXT_PUBLIC_*` variable into the browser bundle,
// so reading the key from NEXT_PUBLIC_OPENAI_API_KEY leaks the secret to
// every client. Prefer OPENAI_API_KEY; the old NEXT_PUBLIC_ name is kept
// only as a deprecated fallback so existing deployments keep working.
const openai = new OpenAI({
  apiKey:
    process.env.OPENAI_API_KEY ??
    process.env.NEXT_PUBLIC_OPENAI_API_KEY ?? // TODO: remove — exposed in client bundle
    'dummy-key-for-demo',
});

/**
 * Next.js API route: POST { prompt: string } -> 200 { reply: string }.
 *
 * Forwards the user's prompt to a chat-completions endpoint with a fixed
 * dental-assistant system prompt and returns the model's reply.
 *
 * Responses:
 *   405 { error } — non-POST method
 *   400 { error } — prompt missing, not a string, or whitespace-only
 *   500 { error } — upstream/API failure (details logged server-side only)
 *
 * @param {import('next').NextApiRequest} req
 * @param {import('next').NextApiResponse} res
 */
export default async function handler(req, res) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }

  const { prompt } = req.body;

  // Reject non-string and whitespace-only prompts explicitly. The original
  // truthiness check (`!prompt`) let numbers, objects, and "   " through to
  // the upstream API call.
  if (typeof prompt !== 'string' || prompt.trim() === '') {
    return res.status(400).json({ error: 'Prompt is required' });
  }

  try {
    // Simulate MedGemma response.
    // Replace this with actual MedGemma inference logic or API call.
    const response = await openai.chat.completions.create({
      model: 'medgemma-2-27b-it', // Example model name
      messages: [
        {
          role: 'system',
          content:
            'You are a specialized dental assistant trained on medical data. Provide accurate, professional, and empathetic dental advice. Always include a disclaimer that this is not a substitute for professional medical advice.',
        },
        { role: 'user', content: prompt },
      ],
      max_tokens: 500,
      temperature: 0.7,
    });

    // `||` (not `??`) is deliberate: an empty-string completion should also
    // fall back to the canned message.
    const reply =
      response.choices[0]?.message?.content ||
      'I am unable to generate a response at this time.';
    res.status(200).json({ reply });
  } catch (error) {
    // Log full details server-side; return only a generic message to the
    // client so upstream error internals are never exposed.
    console.error('API Error:', error);
    res.status(500).json({ error: 'Internal server error' });
  }
}