dvc890 committed on
Commit
8035c0c
·
verified ·
1 Parent(s): 78c5cf9

Upload 63 files

Browse files
Files changed (3) hide show
  1. ai-context.js +95 -57
  2. ai-routes.js +74 -47
  3. models.js +12 -1
ai-context.js CHANGED
@@ -1,7 +1,7 @@
1
 
2
  const {
3
  User, Student, Score, AttendanceModel, ClassModel,
4
- LeaveRequestModel, TodoModel, School
5
  } = require('./models');
6
 
7
  /**
@@ -75,26 +75,63 @@ async function buildStudentContext(username, schoolId) {
75
  }
76
 
77
  /**
78
- * 构建教师画像上下文 (增强版 - 全班详情)
79
  */
80
  async function buildTeacherContext(username, schoolId) {
81
  const user = await User.findOne({ username, schoolId });
82
  if (!user) return "无法找到该教师档案。";
83
 
84
- const className = user.homeroomClass;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  let prompt = `
86
  ### 当前用户身份:教师
87
  - **姓名**: ${user.trueName || username}
88
- - **任教科目**: ${user.teachingSubject || '未设置'}
89
  `;
90
 
91
- if (className) {
92
- // 1. 获取全班学生列表
 
 
 
 
 
 
 
 
 
93
  const students = await Student.find({ className, schoolId });
 
 
 
 
 
94
  const studentNos = students.map(s => s.studentNo);
95
  const studentIds = students.map(s => s._id.toString());
96
 
97
- // 2. 获取全班考勤统计 (Group by Student)
98
  const attendanceRaw = await AttendanceModel.aggregate([
99
  { $match: { studentId: { $in: studentIds }, status: { $in: ['Absent', 'Leave'] } } },
100
  { $group: { _id: "$studentId", absent: { $sum: { $cond: [{ $eq: ["$status", "Absent"] }, 1, 0] } }, leave: { $sum: { $cond: [{ $eq: ["$status", "Leave"] }, 1, 0] } } } }
@@ -102,62 +139,62 @@ async function buildTeacherContext(username, schoolId) {
102
  const attendanceMap = {};
103
  attendanceRaw.forEach(a => attendanceMap[a._id] = a);
104
 
105
- // 3. 获取全班近期成绩 (为了Token效率,我们只取每人最近3次考试或全班最近的100条成绩记录)
106
- const recentScores = await Score.find({ schoolId, studentNo: { $in: studentNos } }).sort({ _id: -1 }).limit(200);
107
- const scoreMap = {}; // studentNo -> [Score Obj]
108
- recentScores.forEach(s => {
109
- if (!scoreMap[s.studentNo]) scoreMap[s.studentNo] = [];
110
- scoreMap[s.studentNo].push(s);
111
- });
112
 
113
- // 4. 构建“全息花名册”字符串
114
- let rosterStr = "";
115
- const failingStudents = [];
116
-
117
- students.forEach(s => {
118
  const att = attendanceMap[s._id.toString()] || { absent: 0, leave: 0 };
119
- const myScores = scoreMap[s.studentNo] || [];
120
 
121
- // 计算个人概况
122
- let scoreStr = "暂无成绩";
123
- let avgScore = 0;
124
- if (myScores.length > 0) {
125
- // 只取最近3门
126
- const latest3 = myScores.slice(0, 3);
127
- scoreStr = latest3.map(sc => `${sc.courseName}:${sc.score}`).join(', ');
128
- avgScore = latest3.reduce((a,b)=>a+b.score,0) / latest3.length;
129
-
130
- // 检查不及格
131
- if (latest3.some(sc => sc.score < 60)) {
132
- failingStudents.push(s.name);
 
 
133
  }
 
 
 
 
 
 
 
 
 
 
134
  }
135
 
136
- // 格式: [姓名](座号): 考勤[缺x, 假y], 成绩[科目:分, ...], 积分: z
137
- rosterStr += `- **${s.name}** (座号:${s.seatNo || '-'}): 考勤[缺${att.absent}/假${att.leave}], 小红花:${s.flowerBalance}, 近期成绩[${scoreStr}]\n`;
 
 
 
138
  });
139
 
140
- // 5. 待办审批
141
- const pendingLeaves = await LeaveRequestModel.find({ className, schoolId, status: 'Pending' }).limit(5);
142
-
143
- prompt += `- **班级**: ${className} (共${students.length}人)\n`;
144
-
145
- if (pendingLeaves.length > 0) {
146
- prompt += `\n### 🔴 紧急待办\n`;
147
- prompt += `你有 ${pendingLeaves.length} 条请假申请待审批 (申请人: ${pendingLeaves.map(l => l.studentName).join(', ')})。\n`;
148
- }
149
 
150
- if (failingStudents.length > 0) {
151
- prompt += `\n### ⚠️ 学情预警\n`;
152
- prompt += `以下学生近期有不及格记录,请重点关注: ${failingStudents.join(', ')}\n`;
 
 
 
153
  }
154
-
155
- prompt += `\n### 📋 班级学生全息档案 (Roster)\n`;
156
- prompt += `(这是你班级所有学生的详细数据,当用户询问具体学生时,请在此处检索)\n`;
157
- prompt += rosterStr;
158
-
159
- } else {
160
- prompt += `- **班级**: 目前没有担任班主任,暂无详细学生数据。\n`;
161
  }
162
 
163
  return prompt;
@@ -220,10 +257,11 @@ async function buildUserContext(username, role, schoolId) {
220
  ${roleContext}
221
 
222
  【AI 行为准则】
223
- 1. 你拥有上述所有数据的“上帝视角”。当老师问“张三的情况”时,请直接从【班级学生全息档案】中提取张三的成绩、考勤和小红花数据进行回答,不要说“我不知道”。
224
- 2. 回答要具体。例如:不要说“他成绩一般”,要说“他最近数学考了60分,英语考了85分,属于偏科现象”。
225
- 3. 如果数据中显示学生有缺勤或不及格,请在回答末尾给出具体的教学干预建议。
226
- 4. 保持语气专业、辅助性强。
 
227
  ---
228
  `;
229
  } catch (e) {
 
1
 
2
  const {
3
  User, Student, Score, AttendanceModel, ClassModel,
4
+ LeaveRequestModel, TodoModel, School, Course
5
  } = require('./models');
6
 
7
  /**
 
75
  }
76
 
77
  /**
78
+ * 构建教师画像上下文 (增强版 - 智能区分班主任与科任视角)
79
  */
80
  async function buildTeacherContext(username, schoolId) {
81
  const user = await User.findOne({ username, schoolId });
82
  if (!user) return "无法找到该教师档案。";
83
 
84
+ // 1. 确定老师的身份范围
85
+ const homeroomClassName = user.homeroomClass; // 班主任班级
86
+
87
+ // 查找该老师任教的所有课程 (找出任教的其他班级)
88
+ const teachingCourses = await Course.find({
89
+ $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }],
90
+ schoolId
91
+ });
92
+
93
+ // 构建任教班级 -> 科目列表的映射 (e.g., "三年级(2)班": ["数学", "科学"])
94
+ const teachingMap = {};
95
+ teachingCourses.forEach(c => {
96
+ if (!teachingMap[c.className]) teachingMap[c.className] = new Set();
97
+ teachingMap[c.className].add(c.courseName);
98
+ });
99
+
100
+ // 合并所有相关班级 (班主任班级 + 任课班级)
101
+ const allClasses = new Set(Object.keys(teachingMap));
102
+ if (homeroomClassName) allClasses.add(homeroomClassName);
103
+
104
+ if (allClasses.size === 0) {
105
+ return `### 当前用户身份:教师\n- **姓名**: ${user.trueName || username}\n- **状态**: 暂未绑定任何班级或课程数据。`;
106
+ }
107
+
108
  let prompt = `
109
  ### 当前用户身份:教师
110
  - **姓名**: ${user.trueName || username}
111
+ - **负责班级**: ${Array.from(allClasses).join(', ')}
112
  `;
113
 
114
+ // 2. 遍历所有相关班级,构建详细数据
115
+ for (const className of allClasses) {
116
+ const isHomeroom = className === homeroomClassName;
117
+ const subjectsTaught = teachingMap[className] ? Array.from(teachingMap[className]) : [];
118
+
119
+ prompt += `\n#### 🏫 班级: ${className} (${isHomeroom ? '我是班主任' : '我是任课老师'})\n`;
120
+ if (!isHomeroom) {
121
+ prompt += `(非班主任视角:仅展示我任教的科目 [${subjectsTaught.join(', ')}] 的数据)\n`;
122
+ }
123
+
124
+ // 2.1 获取该班学生
125
  const students = await Student.find({ className, schoolId });
126
+ if (students.length === 0) {
127
+ prompt += `- 暂无学生数据\n`;
128
+ continue;
129
+ }
130
+
131
  const studentNos = students.map(s => s.studentNo);
132
  const studentIds = students.map(s => s._id.toString());
133
 
134
+ // 2.2 获取考勤 (全班)
135
  const attendanceRaw = await AttendanceModel.aggregate([
136
  { $match: { studentId: { $in: studentIds }, status: { $in: ['Absent', 'Leave'] } } },
137
  { $group: { _id: "$studentId", absent: { $sum: { $cond: [{ $eq: ["$status", "Absent"] }, 1, 0] } }, leave: { $sum: { $cond: [{ $eq: ["$status", "Leave"] }, 1, 0] } } } }
 
139
  const attendanceMap = {};
140
  attendanceRaw.forEach(a => attendanceMap[a._id] = a);
141
 
142
+ // 2.3 获取成绩 (按需获取)
143
+ // 查询该班级学生的所有成绩
144
+ // 为了性能,还是查出来再内存过滤,比多次DB查询快
145
+ const allScores = await Score.find({
146
+ schoolId,
147
+ studentNo: { $in: studentNos }
148
+ }).sort({ _id: -1 }); // 最新的在前
149
 
150
+ // 构建每个学生的成绩摘要
151
+ const studentDetails = students.map(s => {
 
 
 
152
  const att = attendanceMap[s._id.toString()] || { absent: 0, leave: 0 };
 
153
 
154
+ // 筛选该学生的成绩
155
+ let myScores = allScores.filter(sc => sc.studentNo === s.studentNo);
156
+
157
+ // 【关键逻辑】过滤显示哪些科目
158
+ if (!isHomeroom) {
159
+ // 如果不是班主任,只保留我教的科目的成绩
160
+ myScores = myScores.filter(sc => subjectsTaught.includes(sc.courseName));
161
+ }
162
+
163
+ // 【聚合逻辑】每个科目只取最近一次成绩 (去重)
164
+ const latestSubjectScores = {};
165
+ myScores.forEach(sc => {
166
+ if (!latestSubjectScores[sc.courseName]) {
167
+ latestSubjectScores[sc.courseName] = sc;
168
  }
169
+ });
170
+
171
+ const finalScores = Object.values(latestSubjectScores);
172
+
173
+ // 格式化成绩字符串
174
+ let scoreStr = "";
175
+ if (finalScores.length > 0) {
176
+ scoreStr = finalScores.map(sc => `${sc.courseName}:${sc.score}`).join(', ');
177
+ } else {
178
+ scoreStr = "无相关成绩";
179
  }
180
 
181
+ // 标记异常 (缺勤多 有不及格)
182
+ const hasIssue = att.absent > 0 || finalScores.some(sc => sc.score < 60);
183
+ const flag = hasIssue ? "⚠️" : "";
184
+
185
+ return `- ${flag} **${s.name}**: 考勤[缺${att.absent}/假${att.leave}], 小红花:${s.flowerBalance}, 最新成绩:[${scoreStr}]`;
186
  });
187
 
188
+ // 将学生列表加入 Prompt (限制长度,如果班级人太多,可能需要截断,但Gemini窗口大,通常没事)
189
+ prompt += studentDetails.join('\n') + '\n';
 
 
 
 
 
 
 
190
 
191
+ // 2.4 如果是班主任,额外显示待办
192
+ if (isHomeroom) {
193
+ const pendingLeaves = await LeaveRequestModel.countDocuments({ className, schoolId, status: 'Pending' });
194
+ if (pendingLeaves > 0) {
195
+ prompt += `> 🔴 班务提醒: 有 ${pendingLeaves} 条请假申请待审批。\n`;
196
+ }
197
  }
 
 
 
 
 
 
 
198
  }
199
 
200
  return prompt;
 
257
  ${roleContext}
258
 
259
  【AI 行为准则】
260
+ 1. 你拥有上述所有数据的“上帝视角”。
261
+ 2. **班主任视角**: 当用户是班主任时,你通过上下文已知晓全班所有科目的成绩。如果问“王五偏科吗”,请对比他的各科成绩作答。
262
+ 3. **任课老师视角**: 当用户非班主任时,你只能看到他所教科目的成绩。如果问“李华其他课怎么样”,请诚实回答“我只能看到您任教科目的数据,无法评价其他科目”。
263
+ 4. 回答要具体。不要说“他成绩一般”,要说“他最近数学考了60分,英语考了85分”。
264
+ 5. 数据格式说明: [科目:分数] 代表该科目最近一次录入的成绩。
265
  ---
266
  `;
267
  } catch (e) {
ai-routes.js CHANGED
@@ -2,10 +2,10 @@
2
  const express = require('express');
3
  const router = express.Router();
4
  const OpenAI = require('openai');
5
- const { ConfigModel, User, AIUsageModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
8
- // ... (Key Management, Usage Tracking, Helpers, Provider Management functions remain same as before)
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
@@ -42,7 +42,6 @@ function convertGeminiToOpenAI(baseParams) {
42
  const messages = [];
43
  if (baseParams.config?.systemInstruction) messages.push({ role: 'system', content: baseParams.config.systemInstruction });
44
 
45
- // Normalize contents to array if it's a single object (Gemini allows shorthand, OpenAI/Middleware needs array)
46
  let contents = baseParams.contents;
47
  if (contents && !Array.isArray(contents)) {
48
  contents = [contents];
@@ -50,12 +49,7 @@ function convertGeminiToOpenAI(baseParams) {
50
 
51
  if (contents && Array.isArray(contents)) {
52
  contents.forEach(content => {
53
- // Default to user role if not specified (common in short-hand calls)
54
  let role = (content.role === 'model' || content.role === 'assistant') ? 'assistant' : 'user';
55
-
56
- // Handle simple text shorthand if parts is missing but text exists (rare but possible in some SDK versions)
57
- // But standard Gemini is { parts: [...] }
58
-
59
  const messageContent = [];
60
  if (content.parts) {
61
  content.parts.forEach(p => {
@@ -65,9 +59,7 @@ function convertGeminiToOpenAI(baseParams) {
65
  }
66
  });
67
  }
68
-
69
  if (messageContent.length > 0) {
70
- // If only one text part, send as string (cleaner for some weaker models)
71
  if (messageContent.length === 1 && messageContent[0].type === 'text') {
72
  messages.push({ role: role, content: messageContent[0].text });
73
  } else {
@@ -86,11 +78,8 @@ const DEFAULT_OPENROUTER_MODELS = ['qwen/qwen3-coder:free', 'openai/gpt-oss-120b
86
  let runtimeProviderOrder = [];
87
 
88
  function deprioritizeProvider(providerName) {
89
- // If the provider is already last, do nothing
90
  if (runtimeProviderOrder.length > 0 && runtimeProviderOrder[runtimeProviderOrder.length - 1] === providerName) return;
91
-
92
  console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
93
- // Move to end
94
  runtimeProviderOrder = runtimeProviderOrder.filter(p => p !== providerName).concat(providerName);
95
  console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
96
  }
@@ -114,7 +103,6 @@ async function streamGemini(baseParams, res) {
114
  console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
115
  const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
116
 
117
- // First chunk check usually determines connection success
118
  let hasStarted = false;
119
  let fullText = "";
120
 
@@ -135,9 +123,9 @@ async function streamGemini(baseParams, res) {
135
  console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
136
  if (isQuotaError(e)) {
137
  console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
138
- continue; // Try next model or key
139
  }
140
- throw e; // Non-quota errors bubble up to switch provider
141
  }
142
  }
143
  }
@@ -157,17 +145,32 @@ async function streamOpenRouter(baseParams, res) {
157
 
158
  for (const apiKey of keys) {
159
  for (const modelName of models) {
160
- // Find specific model config to check for custom URL
161
  const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
162
  const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
163
  const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';
164
 
165
  const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });
166
 
 
 
 
 
 
 
 
 
 
 
 
167
  try {
168
  console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);
169
 
170
- const stream = await client.chat.completions.create({ model: modelName, messages, stream: true });
 
 
 
 
 
171
 
172
  console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
173
  recordUsage(modelName, PROVIDERS.OPENROUTER);
@@ -186,7 +189,7 @@ async function streamOpenRouter(baseParams, res) {
186
  console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
187
  if (isQuotaError(e)) {
188
  console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
189
- break; // Switch to next provider/model logic if implemented in loop, here break to next model in loop
190
  }
191
  }
192
  }
@@ -197,7 +200,7 @@ async function streamOpenRouter(baseParams, res) {
197
  async function streamGemma(baseParams, res) {
198
  const { GoogleGenAI } = await import("@google/genai");
199
  const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
200
- const keys = await getKeyPool('gemini'); // Gemma uses Gemini keys
201
  if (keys.length === 0) throw new Error("No keys for Gemma");
202
 
203
  for (const apiKey of keys) {
@@ -233,8 +236,6 @@ async function streamGemma(baseParams, res) {
233
 
234
  async function streamContentWithSmartFallback(baseParams, res) {
235
  let hasAudio = false;
236
-
237
- // Check if contents is array or object, handle accordingly
238
  const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
239
 
240
  contentsArray.forEach(c => {
@@ -243,7 +244,6 @@ async function streamContentWithSmartFallback(baseParams, res) {
243
  }
244
  });
245
 
246
- // Audio input currently forces Gemini
247
  if (hasAudio) {
248
  try {
249
  console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
@@ -255,37 +255,26 @@ async function streamContentWithSmartFallback(baseParams, res) {
255
  }
256
  }
257
 
258
- // FETCH CONFIG AND SET PROVIDER ORDER
259
  const config = await ConfigModel.findOne({ key: 'main' });
260
  const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
261
  ? config.aiProviderOrder
262
  : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
263
 
264
- // If runtime order is empty or contains different elements (e.g. config changed), reset it
265
  const runtimeSet = new Set(runtimeProviderOrder);
266
- const configSet = new Set(configuredOrder);
267
  if (runtimeProviderOrder.length === 0 || runtimeProviderOrder.length !== configuredOrder.length || !configuredOrder.every(p => runtimeSet.has(p))) {
268
- console.log(`[AI] 📋 Initializing Provider Order: ${configuredOrder.join(' -> ')}`);
269
  runtimeProviderOrder = [...configuredOrder];
270
- } else {
271
- console.log(`[AI] 📋 Current Provider Priority: ${runtimeProviderOrder.join(' -> ')}`);
272
  }
273
 
274
  let finalError = null;
275
-
276
- // Use runtimeProviderOrder which might have been adjusted due to quota errors in previous calls
277
  for (const provider of runtimeProviderOrder) {
278
  try {
279
  console.log(`[AI] 👉 Trying Provider: ${provider}...`);
280
-
281
  if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
282
  else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
283
  else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
284
-
285
  } catch (e) {
286
  console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
287
  finalError = e;
288
-
289
  if (isQuotaError(e)) {
290
  console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
291
  deprioritizeProvider(provider);
@@ -294,8 +283,6 @@ async function streamContentWithSmartFallback(baseParams, res) {
294
  continue;
295
  }
296
  }
297
-
298
- console.error(`[AI] 💀 All providers failed.`);
299
  throw finalError || new Error('All streaming models unavailable.');
300
  }
301
 
@@ -311,7 +298,6 @@ const checkAIAccess = async (req, res, next) => {
311
  next();
312
  };
313
 
314
- // NEW: Endpoint to provide a temporary key for Client-Side Live API
315
  router.get('/live-access', checkAIAccess, async (req, res) => {
316
  try {
317
  const keys = await getKeyPool('gemini');
@@ -339,13 +325,15 @@ router.get('/stats', checkAIAccess, async (req, res) => {
339
  });
340
 
341
  router.post('/reset-pool', checkAIAccess, (req, res) => {
342
- runtimeProviderOrder = []; // Will be re-initialized from DB on next call
343
  console.log('[AI] 🔄 Provider priority pool reset.');
344
  res.json({ success: true });
345
  });
346
 
 
 
347
  router.post('/chat', checkAIAccess, async (req, res) => {
348
- const { text, audio, history } = req.body;
349
 
350
  // Extract headers for context building
351
  const username = req.headers['x-user-username'];
@@ -356,16 +344,52 @@ router.post('/chat', checkAIAccess, async (req, res) => {
356
  res.setHeader('Cache-Control', 'no-cache');
357
  res.setHeader('Connection', 'keep-alive');
358
  res.flushHeaders();
 
359
  try {
360
- const fullContents = [];
361
- if (history && Array.isArray(history)) {
362
- history.forEach(msg => { if (msg.text) fullContents.push({ role: msg.role === 'user' ? 'user' : 'model', parts: [{ text: msg.text }] }); });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
  }
364
- const currentParts = [];
365
- if (audio) currentParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
366
- if (text) currentParts.push({ text: text });
367
- if (currentParts.length === 0) currentParts.push({ text: "Hello" });
368
- fullContents.push({ role: 'user', parts: currentParts });
369
 
370
  // --- NEW: Inject Context ---
371
  const contextPrompt = await buildUserContext(username, userRole, schoolId);
@@ -378,7 +402,10 @@ router.post('/chat', checkAIAccess, async (req, res) => {
378
  config: { systemInstruction: combinedSystemInstruction }
379
  }, res);
380
 
 
381
  if (answerText) {
 
 
382
  // Signal that text generation is done and TTS is starting
383
  res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
384
  try {
 
2
  const express = require('express');
3
  const router = express.Router();
4
  const OpenAI = require('openai');
5
+ const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
8
+ // ... (Key Management, Usage Tracking, Helpers remain same)
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
 
42
  const messages = [];
43
  if (baseParams.config?.systemInstruction) messages.push({ role: 'system', content: baseParams.config.systemInstruction });
44
 
 
45
  let contents = baseParams.contents;
46
  if (contents && !Array.isArray(contents)) {
47
  contents = [contents];
 
49
 
50
  if (contents && Array.isArray(contents)) {
51
  contents.forEach(content => {
 
52
  let role = (content.role === 'model' || content.role === 'assistant') ? 'assistant' : 'user';
 
 
 
 
53
  const messageContent = [];
54
  if (content.parts) {
55
  content.parts.forEach(p => {
 
59
  }
60
  });
61
  }
 
62
  if (messageContent.length > 0) {
 
63
  if (messageContent.length === 1 && messageContent[0].type === 'text') {
64
  messages.push({ role: role, content: messageContent[0].text });
65
  } else {
 
78
  let runtimeProviderOrder = [];
79
 
80
  function deprioritizeProvider(providerName) {
 
81
  if (runtimeProviderOrder.length > 0 && runtimeProviderOrder[runtimeProviderOrder.length - 1] === providerName) return;
 
82
  console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
 
83
  runtimeProviderOrder = runtimeProviderOrder.filter(p => p !== providerName).concat(providerName);
84
  console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
85
  }
 
103
  console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
104
  const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
105
 
 
106
  let hasStarted = false;
107
  let fullText = "";
108
 
 
123
  console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
124
  if (isQuotaError(e)) {
125
  console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
126
+ continue;
127
  }
128
+ throw e;
129
  }
130
  }
131
  }
 
145
 
146
  for (const apiKey of keys) {
147
  for (const modelName of models) {
 
148
  const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
149
  const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
150
  const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';
151
 
152
  const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });
153
 
154
+ // --- DOUBAO OPTIMIZATION (Context Caching) ---
155
+ const extraBody = {};
156
+ if (modelName.toLowerCase().includes('doubao')) {
157
+ console.log(`[AI] 💡 Activating Doubao Prefix Caching for ${modelName}`);
158
+ // Doubao-specific caching parameter
159
+ extraBody.caching = { type: "enabled", prefix: true };
160
+ // Disable thinking to save tokens/time if not needed (optional based on user pref, but here we prioritize speed for chat)
161
+ extraBody.thinking = { type: "disabled" };
162
+ }
163
+ // ---------------------------------------------
164
+
165
  try {
166
  console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);
167
 
168
+ const stream = await client.chat.completions.create({
169
+ model: modelName,
170
+ messages,
171
+ stream: true,
172
+ ...extraBody
173
+ });
174
 
175
  console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
176
  recordUsage(modelName, PROVIDERS.OPENROUTER);
 
189
  console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
190
  if (isQuotaError(e)) {
191
  console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
192
+ break;
193
  }
194
  }
195
  }
 
200
  async function streamGemma(baseParams, res) {
201
  const { GoogleGenAI } = await import("@google/genai");
202
  const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
203
+ const keys = await getKeyPool('gemini');
204
  if (keys.length === 0) throw new Error("No keys for Gemma");
205
 
206
  for (const apiKey of keys) {
 
236
 
237
  async function streamContentWithSmartFallback(baseParams, res) {
238
  let hasAudio = false;
 
 
239
  const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
240
 
241
  contentsArray.forEach(c => {
 
244
  }
245
  });
246
 
 
247
  if (hasAudio) {
248
  try {
249
  console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
 
255
  }
256
  }
257
 
 
258
  const config = await ConfigModel.findOne({ key: 'main' });
259
  const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
260
  ? config.aiProviderOrder
261
  : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
262
 
 
263
  const runtimeSet = new Set(runtimeProviderOrder);
 
264
  if (runtimeProviderOrder.length === 0 || runtimeProviderOrder.length !== configuredOrder.length || !configuredOrder.every(p => runtimeSet.has(p))) {
 
265
  runtimeProviderOrder = [...configuredOrder];
 
 
266
  }
267
 
268
  let finalError = null;
 
 
269
  for (const provider of runtimeProviderOrder) {
270
  try {
271
  console.log(`[AI] 👉 Trying Provider: ${provider}...`);
 
272
  if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
273
  else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
274
  else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
 
275
  } catch (e) {
276
  console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
277
  finalError = e;
 
278
  if (isQuotaError(e)) {
279
  console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
280
  deprioritizeProvider(provider);
 
283
  continue;
284
  }
285
  }
 
 
286
  throw finalError || new Error('All streaming models unavailable.');
287
  }
288
 
 
298
  next();
299
  };
300
 
 
301
  router.get('/live-access', checkAIAccess, async (req, res) => {
302
  try {
303
  const keys = await getKeyPool('gemini');
 
325
  });
326
 
327
  router.post('/reset-pool', checkAIAccess, (req, res) => {
328
+ runtimeProviderOrder = [];
329
  console.log('[AI] 🔄 Provider priority pool reset.');
330
  res.json({ success: true });
331
  });
332
 
333
+ // --- PERSISTENT CHAT HISTORY HANDLER ---
334
+ // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
335
  router.post('/chat', checkAIAccess, async (req, res) => {
336
+ const { text, audio } = req.body; // Ignore req.body.history for prompt generation
337
 
338
  // Extract headers for context building
339
  const username = req.headers['x-user-username'];
 
344
  res.setHeader('Cache-Control', 'no-cache');
345
  res.setHeader('Connection', 'keep-alive');
346
  res.flushHeaders();
347
+
348
  try {
349
+ const user = await User.findOne({ username });
350
+ if (!user) throw new Error('User not found');
351
+
352
+ // 1. SAVE USER MSG TO DB
353
+ const userMsgText = text || (audio ? '(Audio Message)' : '');
354
+ if (userMsgText) {
355
+ await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
356
+ }
357
+
358
+ // 2. FETCH HISTORY FROM DB (Long-term Memory)
359
+ // Retrieve last 30 messages for context
360
+ const dbHistory = await ChatHistoryModel.find({ userId: user._id })
361
+ .sort({ timestamp: -1 })
362
+ .limit(30);
363
+
364
+ // Re-order for API (oldest first)
365
+ const historyContext = dbHistory.reverse().map(msg => ({
366
+ role: msg.role === 'user' ? 'user' : 'model',
367
+ parts: [{ text: msg.text }]
368
+ }));
369
+
370
+ // 3. PREPARE REQUEST
371
+ // The last user message is already in DB and retrieved in historyContext.
372
+ // We need to separate "history" from "current message" for some APIs,
373
+ // but Google/OpenAI handle a list of messages fine.
374
+ // However, standard pattern is: History + Current.
375
+ // Since we fetched ALL (including current), we just pass historyContext as contents.
376
+ // NOTE: If audio is present, we must append it specifically as the "current" part
377
+ // because DB only stores text representation for now.
378
+
379
+ const fullContents = [...historyContext];
380
+
381
+ // If this request has audio, append it as a new part (since DB load only has text placeholder)
382
+ // We replace the last 'user' text message with the audio payload for the AI model
383
+ if (audio) {
384
+ // Remove the text placeholder we just loaded
385
+ if (fullContents.length > 0 && fullContents[fullContents.length - 1].role === 'user') {
386
+ fullContents.pop();
387
+ }
388
+ fullContents.push({
389
+ role: 'user',
390
+ parts: [{ inlineData: { mimeType: 'audio/webm', data: audio } }]
391
+ });
392
  }
 
 
 
 
 
393
 
394
  // --- NEW: Inject Context ---
395
  const contextPrompt = await buildUserContext(username, userRole, schoolId);
 
402
  config: { systemInstruction: combinedSystemInstruction }
403
  }, res);
404
 
405
+ // 4. SAVE AI RESPONSE TO DB
406
  if (answerText) {
407
+ await ChatHistoryModel.create({ userId: user._id, role: 'model', text: answerText });
408
+
409
  // Signal that text generation is done and TTS is starting
410
  res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
411
  try {
models.js CHANGED
@@ -278,9 +278,20 @@ const AIUsageSchema = new mongoose.Schema({
278
  AIUsageSchema.index({ date: 1, model: 1, provider: 1 }, { unique: true });
279
  const AIUsageModel = mongoose.model('AIUsage', AIUsageSchema);
280
 
 
 
 
 
 
 
 
 
 
 
 
281
  module.exports = {
282
  School, User, Student, Course, Score, ClassModel, SubjectModel, ExamModel, ScheduleModel,
283
  ConfigModel, NotificationModel, GameSessionModel, StudentRewardModel, LuckyDrawConfigModel, GameMonsterConfigModel, GameZenConfigModel,
284
  AchievementConfigModel, TeacherExchangeConfigModel, StudentAchievementModel, AttendanceModel, LeaveRequestModel, SchoolCalendarModel,
285
- WishModel, FeedbackModel, TodoModel, AIUsageModel
286
  };
 
278
  AIUsageSchema.index({ date: 1, model: 1, provider: 1 }, { unique: true });
279
  const AIUsageModel = mongoose.model('AIUsage', AIUsageSchema);
280
 
281
+ // NEW: Persistent Chat History
282
+ const ChatHistorySchema = new mongoose.Schema({
283
+ userId: { type: String, required: true, index: true },
284
+ role: { type: String, enum: ['user', 'model'], required: true },
285
+ text: { type: String, required: true },
286
+ timestamp: { type: Number, default: Date.now }
287
+ });
288
+ // Create index for fast retrieval of latest messages
289
+ ChatHistorySchema.index({ userId: 1, timestamp: -1 });
290
+ const ChatHistoryModel = mongoose.model('ChatHistory', ChatHistorySchema);
291
+
292
  module.exports = {
293
  School, User, Student, Course, Score, ClassModel, SubjectModel, ExamModel, ScheduleModel,
294
  ConfigModel, NotificationModel, GameSessionModel, StudentRewardModel, LuckyDrawConfigModel, GameMonsterConfigModel, GameZenConfigModel,
295
  AchievementConfigModel, TeacherExchangeConfigModel, StudentAchievementModel, AttendanceModel, LeaveRequestModel, SchoolCalendarModel,
296
+ WishModel, FeedbackModel, TodoModel, AIUsageModel, ChatHistoryModel
297
  };