dvc890 commited on
Commit
38d2529
·
verified ·
1 Parent(s): 604195d

Upload 48 files

Browse files
Files changed (3) hide show
  1. ai-routes.js +606 -0
  2. pages/AIAssistant.tsx +10 -3
  3. server.js +16 -706
ai-routes.js ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ const express = require('express');
3
+ const router = express.Router();
4
+ const OpenAI = require('openai');
5
+ const { User, ConfigModel } = require('./models');
6
+
7
+ // --- AI Client Initialization ---
8
+
9
// Lazily-constructed OpenRouter client (OpenAI-compatible API); cached after first build.
let openAIClient = null;

/**
 * Return the shared OpenRouter client, creating it on first use.
 * @returns {OpenAI|null} The cached client, or null when OPENROUTER_API_KEY is unset.
 */
function getOpenRouter() {
  if (openAIClient !== null) {
    return openAIClient;
  }
  const apiKey = process.env.OPENROUTER_API_KEY;
  if (!apiKey) {
    return null;
  }
  openAIClient = new OpenAI({
    baseURL: "https://openrouter.ai/api/v1",
    apiKey,
    defaultHeaders: {
      "HTTP-Referer": "https://smart-school-ai.com", // Placeholder
      "X-Title": "Smart School System",
    },
  });
  return openAIClient;
}
24
+
25
// Cached Google GenAI context ({ ai, Type, Modality }); built once on demand.
let genAIContext = null;

/**
 * Lazily load the ESM-only @google/genai package (require() cannot be used
 * from this CommonJS file) and build the client context.
 * @returns {Promise<{ai: object, Type: object, Modality: object}>} cached context.
 */
async function getGenAI() {
  if (genAIContext !== null) {
    return genAIContext;
  }
  const { GoogleGenAI, Type, Modality } = await import("@google/genai");
  genAIContext = {
    ai: new GoogleGenAI({ apiKey: process.env.API_KEY }),
    Type,
    Modality,
  };
  return genAIContext;
}
33
+
34
+ // --- Helpers ---
35
+
36
// Pause execution for the given number of milliseconds.
const wait = (ms) =>
  new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
37
+
38
/**
 * Call aiModel.generateContent with simple exponential-backoff retries.
 *
 * @param {object} aiModel - Object exposing generateContent(params).
 * @param {object} params  - Params forwarded to generateContent.
 * @param {number} retries - Total number of attempts. Values < 1 are clamped
 *   to 1 (previously the loop never ran for retries <= 0 and the function
 *   silently resolved to undefined).
 * @returns {Promise<object>} The model response.
 * @throws The last error when every attempt fails; client errors
 *   (400/401/403) are never retried.
 */
async function callAIWithRetry(aiModel, params, retries = 1) {
  const attempts = Math.max(1, retries);
  for (let i = 0; i < attempts; i++) {
    try {
      return await aiModel.generateContent(params);
    } catch (e) {
      // Bad-request / auth errors will not succeed on retry.
      if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
      if (i < attempts - 1) {
        await wait(1000 * Math.pow(2, i)); // 1s, 2s, 4s, ...
        continue;
      }
      throw e;
    }
  }
}
52
+
53
/**
 * Adapt Gemini-style request params to an OpenAI chat.completions `messages` array.
 *
 * - config.systemInstruction becomes a leading system message.
 * - `contents` may be an array of content objects or a single bare content
 *   object (the /evaluate route sends `contents: { parts }`; previously that
 *   shape was silently dropped, so the OpenRouter fallback sent no messages).
 * - Text parts map to text entries; inline images map to data-URL image_url
 *   entries. Other part types (e.g. audio) are intentionally skipped.
 * - A message consisting of one text part is flattened to a plain string.
 *
 * @param {object} baseParams - Gemini request params ({ contents, config }).
 * @returns {Array<object>} OpenAI-style messages.
 */
function convertGeminiToOpenAI(baseParams) {
  const messages = [];
  if (baseParams.config?.systemInstruction) {
    messages.push({ role: 'system', content: baseParams.config.systemInstruction });
  }

  // Normalize contents: accept an array or one bare content object.
  let contentList = [];
  if (Array.isArray(baseParams.contents)) {
    contentList = baseParams.contents;
  } else if (baseParams.contents) {
    contentList = [baseParams.contents];
  }

  contentList.forEach(content => {
    const role = content.role === 'model' ? 'assistant' : 'user';
    const messageContent = [];
    if (content.parts && Array.isArray(content.parts)) {
      content.parts.forEach(p => {
        if (p.text) {
          messageContent.push({ type: 'text', text: p.text });
        } else if (p.inlineData && p.inlineData.mimeType.startsWith('image/')) {
          // Inline base64 image -> data-URL for vision-capable models.
          messageContent.push({
            type: 'image_url',
            image_url: {
              url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}`
            }
          });
        }
      });
    }
    if (messageContent.length > 0) {
      if (messageContent.length === 1 && messageContent[0].type === 'text') {
        messages.push({ role: role, content: messageContent[0].text });
      } else {
        messages.push({ role: role, content: messageContent });
      }
    }
  });
  return messages;
}
89
+
90
+ // --- Dynamic Provider Management ---
91
+
92
// Identifiers for the available AI backends.
const PROVIDERS = {
  GEMINI: 'GEMINI',
  OPENROUTER: 'OPENROUTER',
  GEMMA: 'GEMMA'
};

// Current preference order; mutated at runtime when a provider hits quota.
let activeProviderOrder = [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];

/**
 * Move a provider to the back of the rotation after quota failures.
 * No-op when the provider is already the last resort.
 */
function deprioritizeProvider(providerName) {
  const lastProvider = activeProviderOrder[activeProviderOrder.length - 1];
  if (lastProvider === providerName) return;
  console.log(`📉 [AI Debug] Performance Opt: Deprioritizing ${providerName} due to quota limits.`);
  const remaining = activeProviderOrder.filter((p) => p !== providerName);
  activeProviderOrder = [...remaining, providerName];
  console.log(`🔄 [AI Debug] New Provider Order: ${activeProviderOrder.join(' -> ')}`);
}
106
+
107
/**
 * Heuristic: does this error indicate quota exhaustion / rate limiting /
 * provider overload (i.e. worth falling back to another provider)?
 * Matches HTTP 429/503 plus well-known substrings in the error message.
 */
function isQuotaError(e) {
  if (e.status === 429 || e.status === 503) return true;
  const msg = e.message || '';
  const quotaMarkers = ['Quota', 'overloaded', 'RESOURCE_EXHAUSTED', 'Rate limit', 'credits'];
  return quotaMarkers.some((marker) => msg.includes(marker));
}
116
+
117
+ // --- Provider Callers ---
118
+
119
/**
 * Non-streaming call against the primary Gemini models, trying each in order.
 * Quota/overload failures fall through to the next model; any other error
 * aborts immediately. Throws the last error when every model is exhausted.
 */
async function callGeminiProvider(aiModelObj, baseParams) {
  const primaryModels = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
  let lastError = null;
  for (const modelName of primaryModels) {
    try {
      console.log(`🚀 [AI Debug] Calling Gemini non-stream: ${modelName}`);
      return await callAIWithRetry(aiModelObj, { ...baseParams, model: modelName }, 1);
    } catch (e) {
      lastError = e;
      console.error(`⚠️ [AI Debug] Gemini ${modelName} Error:`, e.status, e.message);
      if (!isQuotaError(e)) throw e; // non-quota failures are not retried on other models
      console.warn(`⚠️ [AI Debug] Gemini ${modelName} exhausted. Trying next internal model...`);
    }
  }
  throw lastError;
}
139
+
140
/**
 * Non-streaming completion via OpenRouter, walking a list of free models
 * until one returns a valid response. Resolves to { text } to mimic the
 * Gemini response shape expected by callers.
 */
async function callOpenRouterProvider(baseParams) {
  const openRouter = getOpenRouter();
  if (!openRouter) throw new Error("OpenRouter not configured");

  const openRouterModels = [
    'qwen/qwen3-coder:free',
    'openai/gpt-oss-120b:free',
    'qwen/qwen3-235b-a22b:free',
    'tngtech/deepseek-r1t-chimera:free'
  ];

  const openAIMessages = convertGeminiToOpenAI(baseParams);
  let lastError = null;

  for (const modelName of openRouterModels) {
    try {
      console.log(`🛡️ [AI Debug] Switching to OpenRouter Model: ${modelName}`);
      const completion = await openRouter.chat.completions.create({
        model: modelName,
        messages: openAIMessages,
      });
      const firstChoice = completion?.choices?.[0];
      if (!firstChoice) {
        throw new Error(`Invalid response structure from ${modelName}`);
      }
      return { text: firstChoice.message.content || "" };
    } catch (e) {
      lastError = e;
      console.warn(`⚠️ [AI Debug] OpenRouter Model ${modelName} failed.`, e.message);
    }
  }
  throw lastError || new Error("OpenRouter failed");
}
173
+
174
/**
 * Last-resort non-streaming fallback using Gemma 3 models (largest first).
 * Gemma does not accept a systemInstruction, so it is stripped from a copy of
 * the config before calling. Throws the last error when all models fail.
 */
async function callGemmaProvider(aiModelObj, baseParams) {
  const fallbackModels = ['gemma-3-27b-it', 'gemma-3-12b-it', 'gemma-3-4b-it'];
  const gemmaConfig = { ...baseParams.config };
  // Gemma models reject system instructions; drop the field when present.
  if (gemmaConfig.systemInstruction) delete gemmaConfig.systemInstruction;

  let lastError = null;
  for (const modelName of fallbackModels) {
    try {
      console.log(`🛡️ [AI Debug] Switching to Final Backup (Gemma 3): ${modelName}`);
      return await callAIWithRetry(
        aiModelObj,
        { ...baseParams, model: modelName, config: gemmaConfig },
        1
      );
    } catch (e) {
      lastError = e;
      console.warn(`⚠️ [AI Debug] Backup Model ${modelName} failed.`, e.message);
    }
  }
  throw lastError || new Error("Gemma failed");
}
196
+
197
+ // --- Streaming Helpers ---
198
+
199
/**
 * Stream a response from the primary Gemini models over Server-Sent Events.
 * Each text chunk is written to `res` as a `data: {"text": ...}` event; the
 * accumulated full text is returned. Quota/overload errors fall through to the
 * next model in the list; any other error is rethrown immediately.
 * NOTE(review): assumes the caller already set SSE headers on `res`.
 * NOTE(review): a mid-stream quota failure falls back after partial output has
 * already been written, so the client may see duplicated text — confirm intended.
 */
async function streamGemini(aiModelObj, baseParams, res) {
  const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
  let lastError = null;
  for (const modelName of models) {
    try {
      console.log(`🌊 [AI Debug] STREAMING Gemini model: ${modelName}`);
      const currentParams = { ...baseParams, model: modelName };
      const streamResult = await aiModelObj.generateContentStream(currentParams);
      let fullText = '';
      for await (const chunk of streamResult) {
        const text = chunk.text;
        if (text) {
          fullText += text;
          // Forward the chunk to the client as an SSE event.
          res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
        }
      }
      console.log(`✅ [AI Debug] Gemini ${modelName} stream complete.`);
      return fullText;
    } catch (e) {
      lastError = e;
      console.error(`❌ [AI Debug] Gemini Stream Error (${modelName}):`, e.status, e.message);
      if (isQuotaError(e)) {
        console.warn(`[AI Debug] Stream Gemini ${modelName} quota exhausted. Switching to next internal model...`);
        continue;
      }
      throw e;
    }
  }
  throw lastError || new Error("Gemini streaming failed after retrying internal models");
}
229
+
230
/**
 * Streaming fallback over the Gemma 3 models. Mirrors streamGemini but strips
 * the systemInstruction (Gemma rejects it) and falls through to the next model
 * on ANY error, not just quota errors. Returns the accumulated full text.
 * NOTE(review): partial output may already have been written to `res` before a
 * mid-stream failure triggers the next model — the client may see duplication.
 */
async function streamGemma(aiModelObj, baseParams, res) {
  const models = ['gemma-3-27b-it', 'gemma-3-12b-it', 'gemma-3-4b-it'];
  const gemmaConfig = { ...baseParams.config };
  // Gemma models reject system instructions; drop the field when present.
  if (gemmaConfig.systemInstruction) delete gemmaConfig.systemInstruction;

  let lastError = null;
  for (const modelName of models) {
    try {
      console.log(`🛡️ [AI Debug] Streaming Fallback to Gemma: ${modelName}`);
      const currentParams = { ...baseParams, model: modelName, config: gemmaConfig };
      const streamResult = await aiModelObj.generateContentStream(currentParams);
      let fullText = '';
      for await (const chunk of streamResult) {
        const text = chunk.text;
        if (text) {
          fullText += text;
          // Forward the chunk to the client as an SSE event.
          res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
        }
      }
      return fullText;
    } catch (e) {
      lastError = e;
      console.warn(`Stream Gemma ${modelName} failed: ${e.message}`);
    }
  }
  throw lastError || new Error("Gemma streaming failed");
}
257
+
258
/**
 * Streaming completion via OpenRouter's OpenAI-compatible API. Converts the
 * Gemini-style params to chat messages, then tries each free model in order,
 * writing delta chunks to `res` as SSE events and returning the full text.
 * Falls through to the next model on any error; throws the last error when all
 * models fail.
 */
async function streamOpenRouter(baseParams, res) {
  const openRouter = getOpenRouter();
  if (!openRouter) throw new Error("OpenRouter not configured");

  const openRouterModels = [
    'qwen/qwen3-coder:free',
    'openai/gpt-oss-120b:free',
    'qwen/qwen3-235b-a22b:free',
    'tngtech/deepseek-r1t-chimera:free'
  ];
  const messages = convertGeminiToOpenAI(baseParams);

  let lastError = null;
  for (const modelName of openRouterModels) {
    try {
      console.log(`🛡️ [AI Debug] Streaming via OpenRouter: ${modelName}`);
      const stream = await openRouter.chat.completions.create({
        model: modelName,
        messages: messages,
        stream: true
      });
      let fullText = '';
      for await (const chunk of stream) {
        // OpenAI streaming delta; may be empty for role/finish chunks.
        const text = chunk.choices[0]?.delta?.content || '';
        if (text) {
          fullText += text;
          // Forward the chunk to the client as an SSE event.
          res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
        }
      }
      return fullText;
    } catch (e) {
      lastError = e;
      console.warn(`[AI Debug] Stream OpenRouter ${modelName} failed`, e.message);
    }
  }
  throw lastError || new Error("All OpenRouter streams failed");
}
295
+
296
+ // --- Main Generation Functions ---
297
+
298
/**
 * Non-streaming generation with smart provider fallback.
 *
 * Requests containing inline audio are forced to Gemini (the only provider
 * here that accepts audio input). Otherwise providers are tried in the
 * current `activeProviderOrder`; a provider failing with a quota error is
 * deprioritized for future calls and the next provider is tried.
 *
 * @param {object} aiModelObj - Gemini `ai.models` handle.
 * @param {object} baseParams - Gemini-style request params ({ contents, config }).
 * @returns {Promise<object>} Provider response (Gemini response object, or
 *   { text } from the OpenRouter adapter).
 * @throws Error whose message starts with 'QUOTA_EXCEEDED' when the failure
 *   was quota-related (route handlers map that to HTTP 429), otherwise the
 *   last provider error. Previously the final throw discarded `finalError`
 *   and always raised a generic message, so an all-quota failure surfaced as
 *   SERVER_ERROR instead of QUOTA_EXCEEDED.
 */
async function generateContentWithSmartFallback(aiModelObj, baseParams) {
  // Detect inline audio parts; only Gemini can handle audio input.
  const contentList = Array.isArray(baseParams.contents) ? baseParams.contents : [];
  const hasAudio = contentList.some(c =>
    Array.isArray(c.parts) &&
    c.parts.some(p => p.inlineData && p.inlineData.mimeType.startsWith('audio/'))
  );

  if (hasAudio) {
    console.log("🎤 [AI Debug] Audio detected, forcing Gemini.");
    try {
      return await callGeminiProvider(aiModelObj, baseParams);
    } catch (e) {
      console.error("❌ [AI Debug] Audio Gemini Failed:", e.message);
      if (isQuotaError(e)) {
        deprioritizeProvider(PROVIDERS.GEMINI);
        throw new Error('QUOTA_EXCEEDED_AUDIO: 语音服务繁忙,请稍后再试或使用纯文本/图片提问。');
      }
      throw e;
    }
  }

  let finalError = null;
  for (const provider of activeProviderOrder) {
    try {
      if (provider === PROVIDERS.GEMINI) {
        return await callGeminiProvider(aiModelObj, baseParams);
      } else if (provider === PROVIDERS.OPENROUTER) {
        return await callOpenRouterProvider(baseParams);
      } else if (provider === PROVIDERS.GEMMA) {
        return await callGemmaProvider(aiModelObj, baseParams);
      }
    } catch (e) {
      finalError = e;
      if (isQuotaError(e)) {
        deprioritizeProvider(provider);
        continue;
      }
      console.warn(`⚠️ [AI Debug] ${provider} failed with non-quota error:`, e.message);
    }
  }
  // Preserve the quota signal so route handlers can answer 429 instead of a
  // generic 500 (they match on 'QUOTA_EXCEEDED' in the error message).
  if (finalError && isQuotaError(finalError)) {
    throw new Error('QUOTA_EXCEEDED: All AI models (Gemini, OpenRouter, Gemma) are currently unavailable.');
  }
  throw finalError || new Error('All AI models (Gemini, OpenRouter, Gemma) are currently unavailable.');
}
347
+
348
/**
 * Streaming generation with smart provider fallback (SSE written to `res`).
 *
 * Requests containing inline audio are forced to Gemini. Otherwise providers
 * are tried in `activeProviderOrder`; quota failures deprioritize the
 * provider and move on, other failures are logged and the next provider is
 * tried anyway. Returns the accumulated full text of whichever provider
 * completed the stream.
 *
 * Consistency fix: the audio branch previously converted ANY Gemini error
 * into QUOTA_EXCEEDED_AUDIO; it now checks isQuotaError first and rethrows
 * other errors unchanged, matching generateContentWithSmartFallback.
 *
 * @param {object} aiModelObj - Gemini `ai.models` handle.
 * @param {object} baseParams - Gemini-style request params ({ contents, config }).
 * @param {object} res - Express response already configured for SSE.
 * @returns {Promise<string>} Full streamed text.
 */
async function streamContentWithSmartFallback(aiModelObj, baseParams, res) {
  // Detect inline audio parts; only Gemini can handle audio input.
  const contentList = Array.isArray(baseParams.contents) ? baseParams.contents : [];
  const hasAudio = contentList.some(c =>
    Array.isArray(c.parts) &&
    c.parts.some(p => p.inlineData && p.inlineData.mimeType.startsWith('audio/'))
  );

  if (hasAudio) {
    try {
      return await streamGemini(aiModelObj, baseParams, res);
    } catch (e) {
      if (isQuotaError(e)) {
        deprioritizeProvider(PROVIDERS.GEMINI);
        throw new Error('QUOTA_EXCEEDED_AUDIO: 语音服务繁忙,请稍后再试或使用纯文本/图片提问。');
      }
      throw e;
    }
  }

  let finalError = null;
  console.log(`🚦 [AI Debug] Starting stream with order: ${activeProviderOrder.join(' -> ')}`);

  for (const provider of activeProviderOrder) {
    try {
      if (provider === PROVIDERS.GEMINI) {
        return await streamGemini(aiModelObj, baseParams, res);
      } else if (provider === PROVIDERS.OPENROUTER) {
        return await streamOpenRouter(baseParams, res);
      } else if (provider === PROVIDERS.GEMMA) {
        return await streamGemma(aiModelObj, baseParams, res);
      }
    } catch (e) {
      finalError = e;
      if (isQuotaError(e)) {
        deprioritizeProvider(provider);
        continue;
      }
      console.warn(`Streaming ${provider} failed:`, e.message);
    }
  }
  throw finalError || new Error('All streaming models unavailable.');
}
394
+
395
+ // --- Middleware ---
396
+
397
/**
 * Express middleware gating access to the AI routes.
 *
 * Auth context comes from the `x-user-username` / `x-user-role` headers.
 * - 401 when no username header is present.
 * - 503 when the global `enableAI` flag is off (admins bypass maintenance).
 * - Admins are otherwise always allowed.
 * - 404 for unknown users; 403 for STUDENT/PRINCIPAL roles or users without
 *   `aiAccess` enabled.
 * - 500 on unexpected DB failures (previously an unhandled promise rejection,
 *   since Express 4 does not catch async middleware errors).
 *
 * NOTE(review): these headers are client-supplied; unless upstream middleware
 * authenticates them, the role/username can be spoofed — confirm.
 */
const checkAIAccess = async (req, res, next) => {
  try {
    const username = req.headers['x-user-username'];
    const role = req.headers['x-user-role'];
    if (!username) return res.status(401).json({ error: 'Unauthorized' });

    const config = await ConfigModel.findOne({ key: 'main' });

    if (config && config.enableAI === false && role !== 'ADMIN') {
      return res.status(503).json({ error: 'MAINTENANCE', message: 'AI 功能正在维护中,请联系管理员。' });
    }

    if (role === 'ADMIN') return next();

    const user = await User.findOne({ username });
    if (!user) return res.status(404).json({ error: 'User not found' });

    if (user.role === 'STUDENT' || user.role === 'PRINCIPAL') return res.status(403).json({ error: 'Permission denied' });
    if (!user.aiAccess) return res.status(403).json({ error: 'AI Access not enabled for this user' });

    next();
  } catch (e) {
    console.error('checkAIAccess error:', e);
    res.status(500).json({ error: 'SERVER_ERROR', message: e.message });
  }
};
418
+
419
+ // --- Routes ---
420
+
421
+ // POST /api/ai/chat
422
// POST /api/ai/chat
// Streams a tutor-style chat answer over Server-Sent Events, then (best-effort)
// appends a TTS audio rendition of the answer.
// Body: { text?, audio?, history? } — `audio` is base64 WAV; `history` is
// [{ role, text }, ...] of prior turns. At least one of text/audio is required.
router.post('/chat', checkAIAccess, async (req, res) => {
  const { text, audio, history } = req.body;

  // SSE setup; every event below is written as `data: <json>\n\n`.
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.flushHeaders();

  try {
    const { ai, Modality } = await getGenAI();

    // Build the current user turn from the optional audio + text parts.
    const currentParts = [];
    if (audio) {
      currentParts.push({ inlineData: { mimeType: 'audio/wav', data: audio } });
    }
    if (text) {
      currentParts.push({ text: text });
    }
    if (currentParts.length === 0) {
      // Nothing to answer; report over SSE (headers were already flushed).
      res.write(`data: ${JSON.stringify({ error: 'No input' })}\n\n`);
      res.end();
      return;
    }

    // Replay prior turns (text only) ahead of the new user message.
    const fullContents = [];
    if (history && Array.isArray(history)) {
      history.forEach(msg => {
        const role = msg.role === 'user' ? 'user' : 'model';
        if (msg.text) {
          fullContents.push({ role: role, parts: [{ text: msg.text }] });
        }
      });
    }
    fullContents.push({ role: 'user', parts: currentParts });

    // Step 1: Thinking — stream the text answer with provider fallback;
    // chunks are written to `res` as they arrive, full text is returned.
    const answerText = await streamContentWithSmartFallback(ai.models, {
      contents: fullContents,
      config: {
        systemInstruction: "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。如果学生使用语音,你也应当在回答中体现出自然的口语风格。回复支持 Markdown 格式。",
      }
    }, res);

    const finalAnswer = answerText || "抱歉,我没有听清,请再说一遍。";

    // Step 2: Speaking — best-effort TTS of the answer; failures are swallowed
    // so the client still gets the text-only response.
    try {
      const ttsResponse = await ai.models.generateContent({
        model: "gemini-2.5-flash-preview-tts",
        contents: [{ parts: [{ text: finalAnswer }] }],
        config: {
          responseModalities: [Modality.AUDIO],
          speechConfig: {
            voiceConfig: {
              prebuiltVoiceConfig: { voiceName: 'Kore' },
            },
          },
        },
      });
      const audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
      if (audioBytes) {
        res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
      }
    } catch (ttsError) {
      console.warn("⚠️ TTS Generation skipped (Quota or Error). Returning text only.");
    }

    // Usage accounting: bump the call counter on the singleton config doc.
    await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });

    res.write('data: [DONE]\n\n');
    res.end();

  } catch (e) {
    console.error("AI Chat Error:", e);
    // Map internal errors onto client-visible SSE error events.
    const errPayload = { error: 'Unknown Error' };
    if (e.message?.includes('QUOTA_EXCEEDED_MEDIA') || e.message?.includes('QUOTA_EXCEEDED_AUDIO')) {
      errPayload.error = 'QUOTA_EXCEEDED_MEDIA';
      errPayload.message = e.message;
    } else if (e.status === 429 || e.message?.includes('QUOTA_EXCEEDED') || e.message?.includes('RESOURCE_EXHAUSTED')) {
      errPayload.error = 'QUOTA_EXCEEDED';
      errPayload.message = '所有AI模型(包括备用线路)的免费额度均已耗尽,请明天再试。';
    } else {
      errPayload.error = 'SERVER_ERROR';
      errPayload.message = e.message || 'AI Service Unavailable';
    }
    res.write(`data: ${JSON.stringify(errPayload)}\n\n`);
    res.end();
  }
});
511
+
512
+ // POST /api/ai/evaluate
513
// POST /api/ai/evaluate
// Grades a student's answer (spoken audio or photographed writing) against a
// question. Body: { question, audio?, image? } — audio is base64 WAV, image is
// base64 JPEG; at least one is required. Responds with JSON
// { score, feedback, transcription, audio? } where `audio` is an optional TTS
// reading of the feedback.
router.post('/evaluate', checkAIAccess, async (req, res) => {
  const { question, audio, image } = req.body;

  if (!question || (!audio && !image)) return res.status(400).json({ error: 'Missing question or input (audio/image)' });

  try {
    const { ai, Type, Modality } = await getGenAI();

    // Assemble the grading prompt: instructions + the student's submission.
    const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];
    if (audio) {
      evalParts.push({ text: "学生的回答在音频中。" });
      evalParts.push({ inlineData: { mimeType: 'audio/wav', data: audio } });
    }
    if (image) {
      evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
      evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
    }
    evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。返回 JSON: {score(0-100), feedback(简短评语), transcription(识别内容)}` });

    // 1. Analyze — structured-output request with provider fallback.
    // NOTE(review): `contents` here is a single object, not an array; the
    // OpenRouter fallback path depends on the adapter accepting that shape.
    const response = await generateContentWithSmartFallback(ai.models, {
      contents: { parts: evalParts },
      config: {
        responseMimeType: "application/json",
        responseSchema: {
          type: Type.OBJECT,
          properties: {
            score: { type: Type.NUMBER },
            feedback: { type: Type.STRING },
            transcription: { type: Type.STRING }
          },
          required: ["score", "feedback", "transcription"]
        }
      }
    });

    // Strip markdown code fences some models wrap around JSON output.
    let resultJson;
    let rawText = response.text || "{}";
    if (rawText.includes('```json')) {
      rawText = rawText.replace(/```json/g, '').replace(/```/g, '').trim();
    } else if (rawText.includes('```')) {
      rawText = rawText.replace(/```/g, '').trim();
    }

    try {
      resultJson = JSON.parse(rawText);
    } catch (jsonErr) {
      // Unparseable model output: degrade gracefully, surfacing the raw reply.
      resultJson = {
        score: 0,
        feedback: rawText,
        transcription: "(解析 JSON 失败,显示原始回复)"
      };
    }

    // 2. TTS — best-effort spoken feedback; failures leave audio null.
    let feedbackAudio = null;
    if (resultJson.feedback) {
      try {
        const ttsResponse = await ai.models.generateContent({
          model: "gemini-2.5-flash-preview-tts",
          contents: [{ parts: [{ text: resultJson.feedback }] }],
          config: {
            responseModalities: [Modality.AUDIO],
            speechConfig: {
              voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } },
            },
          },
        });
        feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
      } catch (ttsErr) {
        console.warn("⚠️ TTS Generation failed:", ttsErr.message);
      }
    }

    // Usage accounting: bump the call counter on the singleton config doc.
    await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });

    res.json({
      ...resultJson,
      audio: feedbackAudio
    });

  } catch (e) {
    console.error("AI Eval Error:", e);
    // Quota-flavored failures map to 429 with a user-facing message.
    if (e.message?.includes('QUOTA_EXCEEDED_MEDIA') || e.message?.includes('QUOTA_EXCEEDED_AUDIO')) {
      return res.status(429).json({ error: 'QUOTA_EXCEEDED', message: e.message });
    }
    if (e.status === 429 || e.message?.includes('QUOTA_EXCEEDED') || e.message?.includes('RESOURCE_EXHAUSTED')) {
      return res.status(429).json({ error: 'QUOTA_EXCEEDED', message: '所有AI模型的免费额度均已耗尽,请明天再试。' });
    }
    res.status(500).json({ error: e.message || 'AI Service Unavailable' });
  }
});
605
+
606
+ module.exports = router;
pages/AIAssistant.tsx CHANGED
@@ -224,15 +224,22 @@ export const AIAssistant: React.FC = () => {
224
  const reader = response.body.getReader();
225
  const decoder = new TextDecoder();
226
  let aiTextAccumulated = '';
 
227
 
228
  while (true) {
229
  const { done, value } = await reader.read();
230
  if (done) break;
231
 
232
- const chunk = decoder.decode(value, { stream: true });
233
- const lines = chunk.split('\n\n');
234
 
235
- for (const line of lines) {
 
 
 
 
 
 
236
  if (line.startsWith('data: ')) {
237
  const jsonStr = line.replace('data: ', '').trim();
238
  if (jsonStr === '[DONE]') break;
 
224
  const reader = response.body.getReader();
225
  const decoder = new TextDecoder();
226
  let aiTextAccumulated = '';
227
+ let buffer = ''; // Buffer for handling split chunks
228
 
229
  while (true) {
230
  const { done, value } = await reader.read();
231
  if (done) break;
232
 
233
+ // Append new data to buffer
234
+ buffer += decoder.decode(value, { stream: true });
235
 
236
+ // Split by double newline (SSE event separator)
237
+ const parts = buffer.split('\n\n');
238
+
239
+ // Keep the last part in buffer as it might be incomplete
240
+ buffer = parts.pop() || '';
241
+
242
+ for (const line of parts) {
243
  if (line.startsWith('data: ')) {
244
  const jsonStr = line.replace('data: ', '').trim();
245
  if (jsonStr === '[DONE]') break;
server.js CHANGED
@@ -6,35 +6,8 @@ const {
6
  WishModel, FeedbackModel
7
  } = require('./models');
8
 
9
- // Initialize OpenAI (OpenRouter) Client
10
- const OpenAI = require('openai');
11
- let openAIClient = null;
12
-
13
- function getOpenRouter() {
14
- if (openAIClient) return openAIClient;
15
- if (!process.env.OPENROUTER_API_KEY) return null;
16
-
17
- openAIClient = new OpenAI({
18
- baseURL: "https://openrouter.ai/api/v1",
19
- apiKey: process.env.OPENROUTER_API_KEY,
20
- defaultHeaders: {
21
- "HTTP-Referer": "https://smart-school-ai.com", // Placeholder
22
- "X-Title": "Smart School System",
23
- },
24
- });
25
- return openAIClient;
26
- }
27
-
28
- // Initialize Gemini via Dynamic Import (Helper)
29
- // @google/genai is an ESM-only package, so we cannot use require() in this CommonJS file.
30
- let genAIContext = null;
31
- async function getGenAI() {
32
- if (genAIContext) return genAIContext;
33
- const { GoogleGenAI, Type, Modality } = await import("@google/genai");
34
- const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
35
- genAIContext = { ai, Type, Modality };
36
- return genAIContext;
37
- }
38
 
39
  const express = require('express');
40
  const mongoose = require('mongoose');
@@ -48,7 +21,18 @@ const PORT = 7860;
48
  const MONGO_URI = 'mongodb+srv://dv890a:db8822723@chatpro.gw3v0v7.mongodb.net/chatpro?retryWrites=true&w=majority&appName=chatpro&authSource=admin';
49
 
50
  const app = express();
51
- app.use(compression());
 
 
 
 
 
 
 
 
 
 
 
52
  app.use(cors());
53
  app.use(bodyParser.json({ limit: '50mb' })); // Increased limit for audio
54
  app.use(express.static(path.join(__dirname, 'dist'), {
@@ -122,682 +106,8 @@ const generateStudentNo = async () => {
122
  return `${year}${random}`;
123
  };
124
 
125
- // --- Helper: Basic Retry Logic (Network blips) ---
126
- const wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
127
-
128
- async function callAIWithRetry(aiModel, params, retries = 1) {
129
- for (let i = 0; i < retries; i++) {
130
- try {
131
- return await aiModel.generateContent(params);
132
- } catch (e) {
133
- // If it's a critical auth error or bad request, don't retry locally
134
- if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
135
- if (i < retries - 1) {
136
- await wait(1000 * Math.pow(2, i));
137
- continue;
138
- }
139
- throw e;
140
- }
141
- }
142
- }
143
-
144
- // --- Adapter: Google Gemini Params -> OpenAI Messages ---
145
- function convertGeminiToOpenAI(baseParams) {
146
- const messages = [];
147
-
148
- // 1. System Instruction
149
- if (baseParams.config?.systemInstruction) {
150
- messages.push({ role: 'system', content: baseParams.config.systemInstruction });
151
- }
152
-
153
- // 2. Chat History & User Input
154
- if (baseParams.contents && Array.isArray(baseParams.contents)) {
155
- baseParams.contents.forEach(content => {
156
- let role = 'user';
157
- if (content.role === 'model') role = 'assistant';
158
-
159
- const messageContent = [];
160
-
161
- if (content.parts && Array.isArray(content.parts)) {
162
- content.parts.forEach(p => {
163
- if (p.text) {
164
- messageContent.push({ type: 'text', text: p.text });
165
- } else if (p.inlineData && p.inlineData.mimeType.startsWith('image/')) {
166
- // Support Image for OpenRouter models that support vision
167
- // Convert inline base64 to data URL
168
- messageContent.push({
169
- type: 'image_url',
170
- image_url: {
171
- url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}`
172
- }
173
- });
174
- }
175
- // Audio is intentionally ignored for OpenRouter as we handle audio constraint separately
176
- });
177
- }
178
-
179
- if (messageContent.length > 0) {
180
- // Optimization: If simple text, send as string (better compatibility with some models)
181
- if (messageContent.length === 1 && messageContent[0].type === 'text') {
182
- messages.push({ role: role, content: messageContent[0].text });
183
- } else {
184
- messages.push({ role: role, content: messageContent });
185
- }
186
- }
187
- });
188
- }
189
-
190
- return messages;
191
- }
192
-
193
- // --- DYNAMIC PROVIDER MANAGEMENT ---
194
- const PROVIDERS = {
195
- GEMINI: 'GEMINI',
196
- OPENROUTER: 'OPENROUTER',
197
- GEMMA: 'GEMMA'
198
- };
199
-
200
- // Initial Order. When a provider fails due to Quota, it gets moved to the back.
201
- let activeProviderOrder = [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
202
-
203
- function deprioritizeProvider(providerName) {
204
- // If it's already at the end, do nothing
205
- if (activeProviderOrder[activeProviderOrder.length - 1] === providerName) return;
206
-
207
- console.log(`📉 [AI Debug] Performance Opt: Deprioritizing ${providerName} due to quota limits.`);
208
- // Move to end
209
- activeProviderOrder = activeProviderOrder.filter(p => p !== providerName).concat(providerName);
210
- console.log(`🔄 [AI Debug] New Provider Order: ${activeProviderOrder.join(' -> ')}`);
211
- }
212
-
213
- function isQuotaError(e) {
214
- const msg = e.message || '';
215
- return e.status === 429 || e.status === 503 ||
216
- msg.includes('Quota') ||
217
- msg.includes('overloaded') ||
218
- msg.includes('RESOURCE_EXHAUSTED') ||
219
- msg.includes('Rate limit') ||
220
- msg.includes('credits');
221
- }
222
-
223
- // --- INDIVIDUAL PROVIDER CALLERS ---
224
-
225
- async function callGeminiProvider(aiModelObj, baseParams) {
226
- const primaryModels = [
227
- 'gemini-2.5-flash', // Standard (20 RPD) - Try first
228
- 'gemini-2.5-flash-lite' // Lite (20 RPD) - Separate quota bucket
229
- ];
230
-
231
- let lastError = null;
232
- for (const modelName of primaryModels) {
233
- try {
234
- console.log(`🚀 [AI Debug] Calling Gemini non-stream: ${modelName}`);
235
- const currentParams = { ...baseParams, model: modelName };
236
- return await callAIWithRetry(aiModelObj, currentParams, 1);
237
- } catch (e) {
238
- lastError = e;
239
- console.error(`⚠️ [AI Debug] Gemini ${modelName} Error:`, e.status, e.message);
240
- if (isQuotaError(e)) {
241
- // IMPORTANT: Continue to next internal model, do not throw yet
242
- console.warn(`⚠️ [AI Debug] Gemini ${modelName} exhausted. Trying next internal model...`);
243
- continue;
244
- }
245
- throw e; // Fail fast on non-quota errors
246
- }
247
- }
248
- throw lastError; // All internal models failed
249
- }
250
-
251
- async function callOpenRouterProvider(baseParams) {
252
- const openRouter = getOpenRouter();
253
- if (!openRouter) throw new Error("OpenRouter not configured");
254
-
255
- const openRouterModels = [
256
- 'qwen/qwen3-coder:free',
257
- 'openai/gpt-oss-120b:free',
258
- 'qwen/qwen3-235b-a22b:free',
259
- 'tngtech/deepseek-r1t-chimera:free'
260
- ];
261
-
262
- const openAIMessages = convertGeminiToOpenAI(baseParams);
263
- let lastError = null;
264
-
265
- for (const modelName of openRouterModels) {
266
- try {
267
- console.log(`🛡️ [AI Debug] Switching to OpenRouter Model: ${modelName}`);
268
- const completion = await openRouter.chat.completions.create({
269
- model: modelName,
270
- messages: openAIMessages,
271
- });
272
-
273
- // Defensive Check
274
- if (!completion || !completion.choices || !completion.choices[0] || !completion.choices[0].message) {
275
- throw new Error(`Invalid response structure from ${modelName}`);
276
- }
277
-
278
- const content = completion.choices[0].message.content || "";
279
- return { text: content };
280
-
281
- } catch (e) {
282
- lastError = e;
283
- console.warn(`⚠️ [AI Debug] OpenRouter Model ${modelName} failed.`, e.message);
284
- // Continue to next OpenRouter model
285
- }
286
- }
287
- throw lastError || new Error("OpenRouter failed");
288
- }
289
-
290
- async function callGemmaProvider(aiModelObj, baseParams) {
291
- const fallbackModels = [
292
- 'gemma-3-27b-it',
293
- 'gemma-3-12b-it',
294
- 'gemma-3-4b-it'
295
- ];
296
-
297
- // Strip system instruction for Gemma compatibility
298
- const gemmaConfig = { ...baseParams.config };
299
- if (gemmaConfig.systemInstruction) delete gemmaConfig.systemInstruction;
300
-
301
- let lastError = null;
302
- for (const modelName of fallbackModels) {
303
- try {
304
- console.log(`🛡️ [AI Debug] Switching to Final Backup (Gemma 3): ${modelName}`);
305
- const currentParams = {
306
- ...baseParams,
307
- model: modelName,
308
- config: gemmaConfig
309
- };
310
- return await callAIWithRetry(aiModelObj, currentParams, 1);
311
- } catch (e) {
312
- lastError = e;
313
- console.warn(`⚠️ [AI Debug] Backup Model ${modelName} failed.`, e.message);
314
- }
315
- }
316
- throw lastError || new Error("Gemma failed");
317
- }
318
-
319
- // --- STREAMING PROVIDER HELPERS ---
320
-
321
- async function streamGemini(aiModelObj, baseParams, res) {
322
- // Try multiple Flash models internally for quota resilience
323
- const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
324
-
325
- let lastError = null;
326
- for (const modelName of models) {
327
- try {
328
- console.log(`🌊 [AI Debug] STREAMING Gemini model: ${modelName}`);
329
- const currentParams = { ...baseParams, model: modelName };
330
- const streamResult = await aiModelObj.generateContentStream(currentParams);
331
-
332
- let fullText = '';
333
- for await (const chunk of streamResult) {
334
- const text = chunk.text;
335
- if (text) {
336
- fullText += text;
337
- res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
338
- }
339
- }
340
- console.log(`✅ [AI Debug] Gemini ${modelName} stream complete.`);
341
- return fullText; // Success
342
- } catch (e) {
343
- lastError = e;
344
- console.error(`❌ [AI Debug] Gemini Stream Error (${modelName}):`, e.status, e.message);
345
-
346
- if (isQuotaError(e)) {
347
- // IMPORTANT: Continue to next internal model
348
- console.warn(`[AI Debug] Stream Gemini ${modelName} quota exhausted. Switching to next internal model...`);
349
- continue;
350
- }
351
- throw e; // Non-quota error, fail fast
352
- }
353
- }
354
- throw lastError || new Error("Gemini streaming failed after retrying internal models");
355
- }
356
-
357
- async function streamGemma(aiModelObj, baseParams, res) {
358
- const models = ['gemma-3-27b-it', 'gemma-3-12b-it', 'gemma-3-4b-it'];
359
- const gemmaConfig = { ...baseParams.config };
360
- if (gemmaConfig.systemInstruction) delete gemmaConfig.systemInstruction;
361
-
362
- let lastError = null;
363
- for (const modelName of models) {
364
- try {
365
- console.log(`🛡️ [AI Debug] Streaming Fallback to Gemma: ${modelName}`);
366
- const currentParams = { ...baseParams, model: modelName, config: gemmaConfig };
367
- const streamResult = await aiModelObj.generateContentStream(currentParams);
368
-
369
- let fullText = '';
370
- for await (const chunk of streamResult) {
371
- const text = chunk.text;
372
- if (text) {
373
- fullText += text;
374
- res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
375
- }
376
- }
377
- return fullText;
378
- } catch (e) {
379
- lastError = e;
380
- console.warn(`Stream Gemma ${modelName} failed: ${e.message}`);
381
- // Continue to next gemma model
382
- }
383
- }
384
- throw lastError || new Error("Gemma streaming failed");
385
- }
386
-
387
- async function streamOpenRouter(baseParams, res) {
388
- const openRouter = getOpenRouter();
389
- if (!openRouter) throw new Error("OpenRouter not configured");
390
-
391
- // Updated free model list
392
- const openRouterModels = [
393
- 'qwen/qwen3-coder:free',
394
- 'openai/gpt-oss-120b:free',
395
- 'qwen/qwen3-235b-a22b:free',
396
- 'tngtech/deepseek-r1t-chimera:free'
397
- ];
398
- const messages = convertGeminiToOpenAI(baseParams);
399
-
400
- let lastError = null;
401
- for (const modelName of openRouterModels) {
402
- try {
403
- console.log(`🛡️ [AI Debug] Streaming via OpenRouter: ${modelName}`);
404
- const stream = await openRouter.chat.completions.create({
405
- model: modelName,
406
- messages: messages,
407
- stream: true
408
- });
409
-
410
- let fullText = '';
411
- for await (const chunk of stream) {
412
- const text = chunk.choices[0]?.delta?.content || '';
413
- if (text) {
414
- fullText += text;
415
- res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
416
- }
417
- }
418
- return fullText;
419
- } catch (e) {
420
- lastError = e;
421
- console.warn(`[AI Debug] Stream OpenRouter ${modelName} failed`, e.message);
422
- // CRITICAL FIX: Do NOT throw here. Continue loop to try next OpenRouter model.
423
- // Only throw if loop finishes.
424
- }
425
- }
426
- throw lastError || new Error("All OpenRouter streams failed");
427
- }
428
-
429
- // --- MAIN GENERATION FUNCTIONS ---
430
-
431
- async function generateContentWithSmartFallback(aiModelObj, baseParams) {
432
- // Check specifically for Audio (Gemini Only)
433
- let hasAudio = false;
434
- if (baseParams.contents && Array.isArray(baseParams.contents)) {
435
- for (const c of baseParams.contents) {
436
- if (c.parts) {
437
- for (const p of c.parts) {
438
- if (p.inlineData && p.inlineData.mimeType.startsWith('audio/')) {
439
- hasAudio = true;
440
- }
441
- }
442
- }
443
- }
444
- }
445
-
446
- // Constraint: Audio MUST use Gemini.
447
- if (hasAudio) {
448
- console.log("🎤 [AI Debug] Audio detected, forcing Gemini.");
449
- try {
450
- return await callGeminiProvider(aiModelObj, baseParams);
451
- } catch (e) {
452
- console.error("❌ [AI Debug] Audio Gemini Failed:", e.message);
453
- if (isQuotaError(e)) {
454
- // Critical: Even if we fail this request, deprioritize Gemini for future TEXT requests
455
- deprioritizeProvider(PROVIDERS.GEMINI);
456
- throw new Error('QUOTA_EXCEEDED_AUDIO: 语音服务繁忙,请稍后再试或使用纯文本/图片提问。');
457
- }
458
- throw e;
459
- }
460
- }
461
-
462
- // Text OR Image Requests: Iterate through dynamic order
463
- let finalError = null;
464
-
465
- for (const provider of activeProviderOrder) {
466
- try {
467
- if (provider === PROVIDERS.GEMINI) {
468
- return await callGeminiProvider(aiModelObj, baseParams);
469
- } else if (provider === PROVIDERS.OPENROUTER) {
470
- return await callOpenRouterProvider(baseParams);
471
- } else if (provider === PROVIDERS.GEMMA) {
472
- return await callGemmaProvider(aiModelObj, baseParams);
473
- }
474
- } catch (e) {
475
- finalError = e;
476
- if (isQuotaError(e)) {
477
- // Optimize: Move this provider to the back so next requests don't wait
478
- deprioritizeProvider(provider);
479
- continue; // Try next provider immediately
480
- }
481
- // If it's not a quota error (e.g. invalid input, 400 Bad Request due to image not supported by specific model),
482
- // we typically continue to the next provider to see if they can handle it.
483
- console.warn(`⚠️ [AI Debug] ${provider} failed with non-quota error:`, e.message);
484
- }
485
- }
486
-
487
- throw new Error('All AI models (Gemini, OpenRouter, Gemma) are currently unavailable.');
488
- }
489
-
490
- async function streamContentWithSmartFallback(aiModelObj, baseParams, res) {
491
- // Check for Audio Input (Gemini Only)
492
- let hasAudio = false;
493
- if (baseParams.contents && Array.isArray(baseParams.contents)) {
494
- for (const c of baseParams.contents) {
495
- if (c.parts) {
496
- for (const p of c.parts) {
497
- if (p.inlineData && p.inlineData.mimeType.startsWith('audio/')) {
498
- hasAudio = true;
499
- }
500
- }
501
- }
502
- }
503
- }
504
-
505
- if (hasAudio) {
506
- try {
507
- return await streamGemini(aiModelObj, baseParams, res);
508
- } catch(e) {
509
- deprioritizeProvider(PROVIDERS.GEMINI);
510
- throw new Error('QUOTA_EXCEEDED_AUDIO: 语音服务繁忙,请稍后再试或使用纯文本/图片提问。');
511
- }
512
- }
513
-
514
- let finalError = null;
515
- console.log(`🚦 [AI Debug] Starting stream with order: ${activeProviderOrder.join(' -> ')}`);
516
-
517
- for (const provider of activeProviderOrder) {
518
- try {
519
- if (provider === PROVIDERS.GEMINI) {
520
- return await streamGemini(aiModelObj, baseParams, res);
521
- } else if (provider === PROVIDERS.OPENROUTER) {
522
- return await streamOpenRouter(baseParams, res);
523
- } else if (provider === PROVIDERS.GEMMA) {
524
- // Now supports streaming fallback to Gemma
525
- return await streamGemma(aiModelObj, baseParams, res);
526
- }
527
- } catch (e) {
528
- finalError = e;
529
- if (isQuotaError(e)) {
530
- deprioritizeProvider(provider);
531
- continue;
532
- }
533
- console.warn(`Streaming ${provider} failed:`, e.message);
534
- }
535
- }
536
- throw finalError || new Error('All streaming models unavailable.');
537
- }
538
-
539
- // --- Middleware: Check AI Access ---
540
- const checkAIAccess = async (req, res, next) => {
541
- const username = req.headers['x-user-username'];
542
- const role = req.headers['x-user-role'];
543
- if (!username) return res.status(401).json({ error: 'Unauthorized' });
544
-
545
- // 1. Check Global Switch (Admins can bypass for management, but for calling AI endpoints we still check)
546
- // Actually, Admin dashboard does NOT call /chat, so this middleware is fine to block /chat for everyone if disabled.
547
- const config = await ConfigModel.findOne({ key: 'main' });
548
-
549
- // If Admin, they are always allowed to access these endpoints (e.g. for testing) unless they explicitly disabled it and we want to enforce it.
550
- // But usually Admins want to test. However, user said "if not open, ai is in maintenance".
551
- // Let's enforce Maintenance Mode for *Teachers*, but Admin might be exempt?
552
- // Requirement: "if enabled, teacher can use. if not, maintenance".
553
- // It implies Admin controls it. Let's block *Teachers* if disabled.
554
-
555
- if (config && config.enableAI === false && role !== 'ADMIN') {
556
- return res.status(503).json({ error: 'MAINTENANCE', message: 'AI 功能正在维护中,请联系管理员。' });
557
- }
558
-
559
- // Admins always have access to endpoint
560
- if (role === 'ADMIN') return next();
561
-
562
- const user = await User.findOne({ username });
563
- if (!user) return res.status(404).json({ error: 'User not found' });
564
-
565
- // Students/Principals NO ACCESS to chat endpoint (Double check)
566
- if (user.role === 'STUDENT' || user.role === 'PRINCIPAL') return res.status(403).json({ error: 'Permission denied' });
567
-
568
- // Teachers need enabled flag
569
- if (!user.aiAccess) return res.status(403).json({ error: 'AI Access not enabled for this user' });
570
-
571
- next();
572
- };
573
-
574
- // --- NEW AI ROUTES ---
575
-
576
- // Scenario 1: AI Chat (Audio/Text In -> AI Think -> Text + Audio Out)
577
- // Now Supports History for Context AND STREAMING
578
- app.post('/api/ai/chat', checkAIAccess, async (req, res) => {
579
- const { text, audio, history } = req.body;
580
-
581
- // Set headers for SSE
582
- res.setHeader('Content-Type', 'text/event-stream');
583
- res.setHeader('Cache-Control', 'no-cache');
584
- res.setHeader('Connection', 'keep-alive');
585
- res.flushHeaders();
586
-
587
- try {
588
- const { ai, Modality } = await getGenAI();
589
-
590
- // 1. Build Content parts for Current Turn
591
- const currentParts = [];
592
- if (audio) {
593
- currentParts.push({
594
- inlineData: {
595
- mimeType: 'audio/wav',
596
- data: audio
597
- }
598
- });
599
- }
600
- if (text) {
601
- currentParts.push({ text: text });
602
- }
603
- if (currentParts.length === 0) {
604
- res.write(`data: ${JSON.stringify({ error: 'No input' })}\n\n`);
605
- res.end();
606
- return;
607
- }
608
-
609
- // 2. Build Full Context (History + Current)
610
- const fullContents = [];
611
-
612
- // Add previous history if exists
613
- if (history && Array.isArray(history)) {
614
- history.forEach(msg => {
615
- const role = msg.role === 'user' ? 'user' : 'model';
616
- if (msg.text) {
617
- fullContents.push({
618
- role: role,
619
- parts: [{ text: msg.text }]
620
- });
621
- }
622
- });
623
- }
624
-
625
- // Add current message
626
- fullContents.push({
627
- role: 'user',
628
- parts: currentParts
629
- });
630
-
631
- // Step 1: Thinking (Streaming)
632
- // Use streamContentWithSmartFallback instead of generate...
633
- const answerText = await streamContentWithSmartFallback(ai.models, {
634
- contents: fullContents,
635
- config: {
636
- systemInstruction: "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。如果学生使用语音,你也应当在回答中体现出自然的口语风格。回复支持 Markdown 格式。",
637
- }
638
- }, res);
639
-
640
- // Ensure we got some text back
641
- const finalAnswer = answerText || "抱歉,我没有听清,请再说一遍。";
642
-
643
- // Step 2: Speaking (Gemini TTS) - Sent as final chunk
644
- // CRITICAL FIX: Wrap TTS in try-catch to allow graceful degradation.
645
- // If TTS fails (quota exceeded), we simply return text without audio.
646
- try {
647
- // Note: TTS does NOT support streaming response in this context easily via SSE for binary.
648
- // We wait for full audio and send as base64 in a data event.
649
- const ttsResponse = await ai.models.generateContent({
650
- model: "gemini-2.5-flash-preview-tts",
651
- contents: [{ parts: [{ text: finalAnswer }] }],
652
- config: {
653
- responseModalities: [Modality.AUDIO],
654
- speechConfig: {
655
- voiceConfig: {
656
- prebuiltVoiceConfig: { voiceName: 'Kore' },
657
- },
658
- },
659
- },
660
- });
661
- const audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
662
- if (audioBytes) {
663
- res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
664
- }
665
- } catch (ttsError) {
666
- console.warn("⚠️ TTS Generation skipped (Quota or Error). Returning text only.");
667
- }
668
-
669
- // Increment Counter
670
- await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });
671
-
672
- // End stream
673
- res.write('data: [DONE]\n\n');
674
- res.end();
675
-
676
- } catch (e) {
677
- console.error("AI Chat Error:", e);
678
- // If headers already sent, we must send error as event
679
- const errPayload = { error: 'Unknown Error' };
680
-
681
- if (e.message?.includes('QUOTA_EXCEEDED_MEDIA') || e.message?.includes('QUOTA_EXCEEDED_AUDIO')) {
682
- errPayload.error = 'QUOTA_EXCEEDED_MEDIA';
683
- errPayload.message = e.message;
684
- } else if (e.status === 429 || e.message?.includes('QUOTA_EXCEEDED') || e.message?.includes('RESOURCE_EXHAUSTED')) {
685
- errPayload.error = 'QUOTA_EXCEEDED';
686
- errPayload.message = '所有AI模型(包括备用线路)的免费额度均已耗尽,请明天再试。';
687
- } else {
688
- errPayload.error = 'SERVER_ERROR';
689
- errPayload.message = e.message || 'AI Service Unavailable';
690
- }
691
-
692
- res.write(`data: ${JSON.stringify(errPayload)}\n\n`);
693
- res.end();
694
- }
695
- });
696
-
697
- // Scenario 2: Evaluation (Question + Audio/Image Answer -> AI Score + Feedback + Audio)
698
- app.post('/api/ai/evaluate', checkAIAccess, async (req, res) => {
699
- const { question, audio, image } = req.body; // Image is base64
700
-
701
- if (!question || (!audio && !image)) return res.status(400).json({ error: 'Missing question or input (audio/image)' });
702
-
703
- try {
704
- const { ai, Type, Modality } = await getGenAI();
705
-
706
- const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];
707
-
708
- if (audio) {
709
- evalParts.push({ text: "学生的回答在音频中。" });
710
- evalParts.push({ inlineData: { mimeType: 'audio/wav', data: audio } });
711
- }
712
- if (image) {
713
- evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
714
- evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
715
- }
716
-
717
- evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。返回 JSON: {score(0-100), feedback(简短评语), transcription(识别内容)}` });
718
-
719
- // 1. Analyze (Using Multi-Stage Smart Fallback)
720
- // Note: Evaluation usually outputs JSON. Gemma might struggle with strict JSON schemas without system instruction.
721
- // We hope Primary Tier handles this. If falling back to Gemma, it might return raw text.
722
- // For stability, Evaluation heavily relies on Gemini.
723
- const response = await generateContentWithSmartFallback(ai.models, {
724
- contents: { parts: evalParts },
725
- config: {
726
- responseMimeType: "application/json",
727
- responseSchema: {
728
- type: Type.OBJECT,
729
- properties: {
730
- score: { type: Type.NUMBER },
731
- feedback: { type: Type.STRING },
732
- transcription: { type: Type.STRING }
733
- },
734
- required: ["score", "feedback", "transcription"]
735
- }
736
- }
737
- });
738
-
739
- let resultJson;
740
- // Attempt to parse JSON. OpenRouter models might wrap JSON in Markdown code blocks (```json ... ```).
741
- let rawText = response.text || "{}";
742
- if (rawText.includes('```json')) {
743
- rawText = rawText.replace(/```json/g, '').replace(/```/g, '').trim();
744
- } else if (rawText.includes('```')) {
745
- rawText = rawText.replace(/```/g, '').trim();
746
- }
747
-
748
- try {
749
- resultJson = JSON.parse(rawText);
750
- } catch (jsonErr) {
751
- // Fallback for models that return unstructured text
752
- resultJson = {
753
- score: 0,
754
- feedback: rawText,
755
- transcription: "(解析 JSON 失败,显示原始回复)"
756
- };
757
- }
758
-
759
- // 2. Generate Audio for the Feedback (TTS) - Graceful Degradation
760
- let feedbackAudio = null;
761
- if (resultJson.feedback) {
762
- try {
763
- const ttsResponse = await ai.models.generateContent({
764
- model: "gemini-2.5-flash-preview-tts",
765
- contents: [{ parts: [{ text: resultJson.feedback }] }],
766
- config: {
767
- responseModalities: [Modality.AUDIO],
768
- speechConfig: {
769
- voiceConfig: {
770
- prebuiltVoiceConfig: { voiceName: 'Kore' },
771
- },
772
- },
773
- },
774
- });
775
- feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
776
- } catch (ttsErr) {
777
- console.warn("⚠️ TTS Generation failed:", ttsErr.message);
778
- }
779
- }
780
-
781
- // Increment Counter
782
- await ConfigModel.findOneAndUpdate({ key: 'main' }, { $inc: { aiTotalCalls: 1 } }, { upsert: true });
783
-
784
- res.json({
785
- ...resultJson,
786
- audio: feedbackAudio
787
- });
788
-
789
- } catch (e) {
790
- console.error("AI Eval Error:", e);
791
- if (e.message?.includes('QUOTA_EXCEEDED_MEDIA') || e.message?.includes('QUOTA_EXCEEDED_AUDIO')) {
792
- return res.status(429).json({ error: 'QUOTA_EXCEEDED', message: e.message });
793
- }
794
- if (e.status === 429 || e.message?.includes('QUOTA_EXCEEDED') || e.message?.includes('RESOURCE_EXHAUSTED')) {
795
- return res.status(429).json({ error: 'QUOTA_EXCEEDED', message: '所有AI模型的免费额度均已耗尽,请明天再试。' });
796
- }
797
- res.status(500).json({ error: e.message || 'AI Service Unavailable' });
798
- }
799
- });
800
-
801
 
802
  // ... (Rest of Existing Routes) ...
803
  app.get('/api/classes/:className/teachers', async (req, res) => {
 
6
  WishModel, FeedbackModel
7
  } = require('./models');
8
 
9
+ // Import AI Routes
10
+ const aiRoutes = require('./ai-routes');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  const express = require('express');
13
  const mongoose = require('mongoose');
 
21
  const MONGO_URI = 'mongodb+srv://dv890a:db8822723@chatpro.gw3v0v7.mongodb.net/chatpro?retryWrites=true&w=majority&appName=chatpro&authSource=admin';
22
 
23
  const app = express();
24
+
25
+ // FIX: Disable compression for AI Chat SSE endpoint to allow real-time streaming
26
+ app.use(compression({
27
+ filter: (req, res) => {
28
+ // Note: The route is now mounted at /api/ai, so the full path contains /api/ai/chat
29
+ if (req.path.includes('/api/ai/chat')) {
30
+ return false; // Don't compress SSE streams
31
+ }
32
+ return compression.filter(req, res);
33
+ }
34
+ }));
35
+
36
  app.use(cors());
37
  app.use(bodyParser.json({ limit: '50mb' })); // Increased limit for audio
38
  app.use(express.static(path.join(__dirname, 'dist'), {
 
106
  return `${year}${random}`;
107
  };
108
 
109
+ // MOUNT AI ROUTES
110
+ app.use('/api/ai', aiRoutes);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
  // ... (Rest of Existing Routes) ...
113
  app.get('/api/classes/:className/teachers', async (req, res) => {