dvc890 commited on
Commit
37ea041
·
verified ·
1 Parent(s): bdfccdc

Upload 63 files

Browse files
Files changed (2) hide show
  1. ai-context.js +129 -70
  2. ai-routes.js +416 -72
ai-context.js CHANGED
@@ -1,7 +1,7 @@
1
 
2
  const {
3
  User, Student, Score, AttendanceModel, ClassModel,
4
- LeaveRequestModel, TodoModel, School, Course
5
  } = require('./models');
6
 
7
  /**
@@ -13,6 +13,16 @@ const getCurrentDateInfo = () => {
13
  return `${now.getFullYear()}年${now.getMonth() + 1}月${now.getDate()}日 ${days[now.getDay()]}`;
14
  };
15
 
 
 
 
 
 
 
 
 
 
 
16
  /**
17
  * 构建学生画像上下文 (学生视角)
18
  */
@@ -24,13 +34,29 @@ async function buildStudentContext(username, schoolId) {
24
 
25
  if (!student) return "无法找到该学生的详细档案。";
26
 
27
- // 1. 获取成绩 (最近20条)
28
- const recentScores = await Score.find({
 
 
 
 
 
 
29
  studentNo: student.studentNo,
30
  schoolId
31
- }).sort({ _id: -1 }).limit(20);
 
 
 
 
 
 
 
 
 
 
32
 
33
- // 2. 获取考勤概况
34
  const attendanceStats = await AttendanceModel.aggregate([
35
  { $match: { studentId: student._id.toString() } },
36
  { $group: { _id: "$status", count: { $sum: 1 } } }
@@ -44,28 +70,33 @@ async function buildStudentContext(username, schoolId) {
44
  - **班级**: ${student.className}
45
  - **学号**: ${student.studentNo}
46
  - **积分(小红花)**: ${student.flowerBalance} 🌺
 
47
 
48
  ### 个人学习数据
49
  `;
50
 
51
  if (recentScores.length > 0) {
52
- prompt += `- **成绩历史**: ${recentScores.map(s => `${s.courseName}: ${s.score}分 (${s.examName||s.type})`).join('; ')}\n`;
 
 
 
 
 
53
  } else {
54
  prompt += `- **近期成绩**: 暂无记录\n`;
55
  }
56
 
57
  if (absentCount > 0 || leaveCount > 0) {
58
- prompt += `- **考勤异常**: 本学期缺勤 ${absentCount} 次,请假 ${leaveCount} 次。\n`;
59
  } else {
60
- prompt += `- **考勤状况**: 全勤,表现极佳。\n`;
61
  }
62
 
63
  return prompt;
64
  }
65
 
66
  /**
67
- * 构建教师画像上下文 (严格权限版)
68
- * 核心逻辑:只查自己教的班级,只查自己教的课(除非是班主任)
69
  */
70
  async function buildTeacherContext(username, schoolId) {
71
  const user = await User.findOne({ username, schoolId });
@@ -73,92 +104,119 @@ async function buildTeacherContext(username, schoolId) {
73
 
74
  const homeroomClass = user.homeroomClass;
75
 
76
- // 1. 查找所有任课信息
77
  const courses = await Course.find({
78
  schoolId,
79
  $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }]
80
  });
81
 
82
- // 2. 构建权限映射
83
- // authorizedClasses: Set<string> -> 老师有权限查看的班级列表
84
  const authorizedClasses = new Set();
85
- // teachingSubjects: Map<className, Set<subjectName>> -> 每个班级教哪些课
86
- const teachingSubjects = {};
87
 
88
- if (homeroomClass) authorizedClasses.add(homeroomClass);
 
 
 
89
 
 
90
  courses.forEach(c => {
91
- authorizedClasses.add(c.className);
92
- if (!teachingSubjects[c.className]) teachingSubjects[c.className] = new Set();
93
- teachingSubjects[c.className].add(c.courseName);
 
 
94
  });
95
 
96
  const classList = Array.from(authorizedClasses);
97
 
98
  if (classList.length === 0) {
99
- return `### 当前用户身份:教师\n- **姓名**: ${user.trueName || username}\n- **状态**: 暂未绑定任何班级或课程。请告知用户去“班级管理”或“课程安排”绑定。`;
100
  }
101
 
102
- // 3. 构建详细数据 Prompt
103
- let prompt = `
104
- ### 当前用户身份:教师
105
- - **姓名**: ${user.trueName || username}
106
- - **管理权限范围**: [${classList.join(', ')}]
107
- - **注意**: 你 **绝对不能** 回答关于上述班级以外的任何学生数据。如果用户问其他班级(如“四年级6班”),请明确拒绝,并告知用户系统记录显示他只负责上述班级。
108
-
109
- ### 详细班级数据
110
- `;
111
-
112
- // 4. 并行获取所有相关班级的学生
113
- // 移除 limit,获取全量学生
114
- const allStudents = await Student.find({
115
  schoolId,
116
  className: { $in: classList },
117
- status: 'Enrolled'
118
- }).sort({ seatNo: 1, studentNo: 1 });
 
 
 
 
 
 
 
 
 
 
 
119
 
120
- // 5. 获取这些学生的所有成绩
121
- const allStudentNos = allStudents.map(s => s.studentNo);
122
- const allScores = await Score.find({
123
  schoolId,
124
- studentNo: { $in: allStudentNos },
125
  status: 'Normal'
126
- });
 
 
 
 
 
 
 
 
127
 
128
- // 6. 按班级组装数据
129
- for (const clsName of classList) {
130
- const isHomeroom = clsName === homeroomClass;
131
- const subjectsTaught = teachingSubjects[clsName] ? Array.from(teachingSubjects[clsName]) : [];
132
-
133
- prompt += `\n#### 🏫 ${clsName} (${isHomeroom ? '我是班主任' : '我是任课老师'})\n`;
134
- if (!isHomeroom && subjectsTaught.length > 0) {
135
- prompt += `(非主任视角:我只教 ${subjectsTaught.join(', ')},仅展示这些科目的成绩)\n`;
136
- }
137
 
138
- const classStudents = allStudents.filter(s => s.className === clsName);
 
 
 
 
 
 
 
 
139
 
140
- if (classStudents.length === 0) {
141
- prompt += `- (暂无学生档案)\n`;
 
142
  continue;
143
  }
144
 
145
- // 组装每个学生的信息
146
- const studentLines = classStudents.map(s => {
147
- let stuScores = allScores.filter(sc => sc.studentNo === s.studentNo);
 
148
 
149
- // 权限过滤:如果不是班主任,过滤掉不教的科目
150
- if (!isHomeroom) {
151
- stuScores = stuScores.filter(sc => subjectsTaught.includes(sc.courseName));
152
  }
153
 
154
- // 格式化成绩: [数学:90, 语:85(期末)]
155
- const scoreStrs = stuScores.map(sc => `${sc.courseName}:${sc.score}${sc.examName && sc.examName!=='期末考试' ? '('+sc.examName+')' : ''}`);
156
- const scoreText = scoreStrs.length > 0 ? `成绩:[${scoreStrs.join(', ')}]` : "无相关成绩";
157
-
158
- return `- ${s.name}(${s.seatNo || '-'}号): ${scoreText}`;
159
- });
160
-
161
- prompt += studentLines.join('\n') + '\n';
 
 
 
 
 
 
 
162
  }
163
 
164
  return prompt;
@@ -195,10 +253,11 @@ async function buildUserContext(username, role, schoolId) {
195
  ${roleContext}
196
 
197
  【AI 行为准则】
198
- 1. **诚实原则**: 数据里有什么就说什么如果数据里没有(例如学生列表里没这个名字,或者成绩列表为空),直接说“系统里没有记录”。不要编造。
199
- 2. **权限原则**: 严格遵守管理权限范围用户问其他班级时,礼貌拒绝
200
- 3. **数据格式**: 上下文中的数据格式为 "姓: 成绩:[科目:分数...]"
201
- 4. **回答风格**: 位专业的教务助理简洁明了
 
202
  ---
203
  `;
204
  } catch (e) {
 
1
 
2
  const {
3
  User, Student, Score, AttendanceModel, ClassModel,
4
+ Course, ConfigModel
5
  } = require('./models');
6
 
7
  /**
 
13
  return `${now.getFullYear()}年${now.getMonth() + 1}月${now.getDate()}日 ${days[now.getDay()]}`;
14
  };
15
 
16
+ /**
17
+ * 辅助函数:解析学年前缀
18
+ * 例如 "2023-2024学年 第二学期" -> "2023-2024学年"
19
+ */
20
+ const getSchoolYearPrefix = (semester) => {
21
+ if (!semester) return null;
22
+ const match = semester.match(/^(.+?学年)/);
23
+ return match ? match[1] : null;
24
+ };
25
+
26
  /**
27
  * 构建学生画像上下文 (学生视角)
28
  */
 
34
 
35
  if (!student) return "无法找到该学生的详细档案。";
36
 
37
+ // 获取当前学配置
38
+ const config = await ConfigModel.findOne({ key: 'main' });
39
+ const currentSemester = config ? config.semester : null;
40
+
41
+ // 逻辑:尝试拉取本学年的所有数据 (第一学期 + 第二学期)
42
+ const schoolYearPrefix = getSchoolYearPrefix(currentSemester);
43
+
44
+ const scoreQuery = {
45
  studentNo: student.studentNo,
46
  schoolId
47
+ };
48
+
49
+ if (schoolYearPrefix) {
50
+ // 匹配该学年开头的所有学期
51
+ scoreQuery.semester = { $regex: new RegExp('^' + schoolYearPrefix) };
52
+ } else if (currentSemester) {
53
+ scoreQuery.semester = currentSemester;
54
+ }
55
+
56
+ // 获取成绩 (稍微放宽限制以容纳整年数据)
57
+ const recentScores = await Score.find(scoreQuery).sort({ semester: -1, _id: -1 }).limit(100);
58
 
59
+ // 获取考勤概况
60
  const attendanceStats = await AttendanceModel.aggregate([
61
  { $match: { studentId: student._id.toString() } },
62
  { $group: { _id: "$status", count: { $sum: 1 } } }
 
70
  - **班级**: ${student.className}
71
  - **学号**: ${student.studentNo}
72
  - **积分(小红花)**: ${student.flowerBalance} 🌺
73
+ - **当前学期**: ${currentSemester || '全部'}
74
 
75
  ### 个人学习数据
76
  `;
77
 
78
  if (recentScores.length > 0) {
79
+ // 格式化输出,带上学期标识
80
+ const scoreList = recentScores.map(s => {
81
+ const semShort = s.semester ? s.semester.replace(schoolYearPrefix || '', '').trim() : '';
82
+ return `${s.courseName}: ${s.score} (${semShort} ${s.examName||s.type})`;
83
+ }).join('\n');
84
+ prompt += `#### 成绩记录 (本学年):\n${scoreList}\n`;
85
  } else {
86
  prompt += `- **近期成绩**: 暂无记录\n`;
87
  }
88
 
89
  if (absentCount > 0 || leaveCount > 0) {
90
+ prompt += `- **考勤**: 缺勤 ${absentCount} 次,请假 ${leaveCount} 次。\n`;
91
  } else {
92
+ prompt += `- **考勤**: 全勤。\n`;
93
  }
94
 
95
  return prompt;
96
  }
97
 
98
  /**
99
+ * 构建教师画像上下文 (严格权限版 + 全量数据)
 
100
  */
101
  async function buildTeacherContext(username, schoolId) {
102
  const user = await User.findOne({ username, schoolId });
 
104
 
105
  const homeroomClass = user.homeroomClass;
106
 
107
+ // 1. 查找任信息 (确定科任权限)
108
  const courses = await Course.find({
109
  schoolId,
110
  $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }]
111
  });
112
 
113
+ // 2. 确定有权限的班级列表
 
114
  const authorizedClasses = new Set();
115
+ const teachingSubjectsMap = {}; // Map<ClassName, Set<SubjectName>>
 
116
 
117
+ // 班主任权限:拥有该班级所有数据权限
118
+ if (homeroomClass) {
119
+ authorizedClasses.add(homeroomClass);
120
+ }
121
 
122
+ // 科任权限:拥有特定班级的特定科目权限
123
  courses.forEach(c => {
124
+ if (c.className) {
125
+ authorizedClasses.add(c.className);
126
+ if (!teachingSubjectsMap[c.className]) teachingSubjectsMap[c.className] = new Set();
127
+ teachingSubjectsMap[c.className].add(c.courseName);
128
+ }
129
  });
130
 
131
  const classList = Array.from(authorizedClasses);
132
 
133
  if (classList.length === 0) {
134
+ return `### 当前用户身份:教师 (${user.trueName})\n目前系统显示您未绑定任何班级。请告知用户去“班级管理”或“课程安排”进行绑定。`;
135
  }
136
 
137
+ // 3. 全量拉取相关班级的学生
138
+ const students = await Student.find({
 
 
 
 
 
 
 
 
 
 
 
139
  schoolId,
140
  className: { $in: classList },
141
+ status: 'Enrolled'
142
+ }).sort({ seatNo: 1, studentNo: 1 }); // 按座号排序
143
+
144
+ if (students.length === 0) {
145
+ return `### 当前用户身份:教师\n管理班级: [${classList.join(', ')}]\n但系统未在这些班级找到学生档案。`;
146
+ }
147
+
148
+ const studentNos = students.map(s => s.studentNo);
149
+
150
+ // 4. 拉取这些学生的成绩 (限制为本学年:包含第一学期和第二学期)
151
+ const config = await ConfigModel.findOne({ key: 'main' });
152
+ const currentSemester = config ? config.semester : null;
153
+ const schoolYearPrefix = getSchoolYearPrefix(currentSemester);
154
 
155
+ const scoreQuery = {
 
 
156
  schoolId,
157
+ studentNo: { $in: studentNos },
158
  status: 'Normal'
159
+ };
160
+
161
+ // 关键修复:使用正则匹配整个学年 (例如 "2024-2025学年")
162
+ if (schoolYearPrefix) {
163
+ scoreQuery.semester = { $regex: new RegExp('^' + schoolYearPrefix) };
164
+ } else if (currentSemester) {
165
+ // 如果无法解析年份,回退到当前学期
166
+ scoreQuery.semester = currentSemester;
167
+ }
168
 
169
+ const allScores = await Score.find(scoreQuery);
170
+
171
+ // 5. 构建 Prompt
172
+ let prompt = `
173
+ ### 当前用户身份:教师 (${user.trueName || username})
174
+ ### 权限范围 (严格遵守)
175
+ 你只能回答下列班级的数据。如果用户询问其他班级(例如用户只教一年级,却问四年级),请礼貌拒绝,说明权限不足。
176
+ 管理级: [${classList.join(', ')}]
177
+ 数据范围: ${schoolYearPrefix ? schoolYearPrefix + " (全学年)" : (currentSemester || '所有历史')}
178
 
179
+ ### 详细班级数据
180
+ `;
181
+
182
+ for (const cls of classList) {
183
+ const isClassHomeroom = cls === homeroomClass;
184
+ const subjects = teachingSubjectsMap[cls] ? Array.from(teachingSubjectsMap[cls]) : [];
185
+ const roleText = isClassHomeroom ? "班主任 (全科权限)" : `任课老师 (科目: ${subjects.join(', ')})`;
186
+
187
+ prompt += `\n#### 🏫 ${cls} [${roleText}]\n`;
188
 
189
+ const clsStudents = students.filter(s => s.className === cls);
190
+ if (clsStudents.length === 0) {
191
+ prompt += "(暂无学生)\n";
192
  continue;
193
  }
194
 
195
+ prompt += `学生总数: ${clsStudents.length}人\n名单及成绩 (格式: [学期]科目:分数):\n`;
196
+
197
+ for (const s of clsStudents) {
198
+ let sScores = allScores.filter(sc => sc.studentNo === s.studentNo);
199
 
200
+ // 如果不是班主任,仅展示自己教的科目成绩
201
+ if (!isClassHomeroom && subjects.length > 0) {
202
+ sScores = sScores.filter(sc => subjects.includes(sc.courseName));
203
  }
204
 
205
+ // 格式: 张三(01号): [一]语文:90, [二]:85...
206
+ const scoreStr = sScores.length > 0
207
+ ? sScores.map(sc => {
208
+ // 简化学期显示,例如 "第一学期" -> "一", "第二学期" -> "二"
209
+ let semLabel = "";
210
+ if (sc.semester && schoolYearPrefix) {
211
+ if (sc.semester.includes("第一")) semLabel = "[上]";
212
+ else if (sc.semester.includes("第二")) semLabel = "[下]";
213
+ }
214
+ return `${semLabel}${sc.courseName}:${sc.score}`;
215
+ }).join(', ')
216
+ : (isClassHomeroom ? "暂无本学年成绩" : "无本科目成绩");
217
+
218
+ prompt += `- ${s.name} (${s.seatNo ? s.seatNo+'号' : '无座号'}): ${scoreStr}\n`;
219
+ }
220
  }
221
 
222
  return prompt;
 
253
  ${roleContext}
254
 
255
  【AI 行为准则】
256
+ 1. **数据优先**: 回答问题时,**必须**基于上述提供的具体数据。不要编造。
257
+ 2. **权限边界**: 只要下文中没有的数据(例如其他班级),一律视为权限”或“无记录”,并告知用户。
258
+ 3. **列表完整性**: 如果用户问“有哪些学生”,请列出数据中该班级的所有学生字,不要省略
259
+ 4. **成绩解释**: 数据中标注了 [上] 代表第学期[下] 代表第二学期如果用户问“本学期”,默认指第二学期(如果是下)或第一学期(如果是上)。
260
+ 5. **回答风格**: 简洁、专业、像一位教务助手。
261
  ---
262
  `;
263
  } catch (e) {
ai-routes.js CHANGED
@@ -5,6 +5,7 @@ const OpenAI = require('openai');
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
 
8
  // Fetch keys from DB + merge with ENV variables
9
  async function getKeyPool(type) {
10
  const config = await ConfigModel.findOne({ key: 'main' });
@@ -25,6 +26,266 @@ async function recordUsage(model, provider) {
25
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
26
  }
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  const checkAIAccess = async (req, res, next) => {
29
  const username = req.headers['x-user-username'];
30
  const role = req.headers['x-user-role'];
@@ -64,29 +325,21 @@ router.get('/stats', checkAIAccess, async (req, res) => {
64
  });
65
 
66
  router.post('/reset-pool', checkAIAccess, (req, res) => {
 
 
67
  res.json({ success: true });
68
  });
69
 
70
- function convertHistoryToOpenAI(history) {
71
- return history.map(msg => ({
72
- role: msg.role === 'model' ? 'assistant' : 'user',
73
- content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.text || '')
74
- }));
75
- }
76
-
77
- // --- SSE Protocol Helper ---
78
- const sendSSE = (res, data) => {
79
- res.write(`data: ${JSON.stringify(data)}\n\n`);
80
- };
81
-
82
- // --- STANDARD CHAT ROUTE (Context Injection Only) ---
83
  router.post('/chat', checkAIAccess, async (req, res) => {
84
- const { text, audio } = req.body;
 
 
85
  const username = req.headers['x-user-username'];
86
  const userRole = req.headers['x-user-role'];
87
  const schoolId = req.headers['x-school-id'];
88
 
89
- // SSE Setup
90
  res.setHeader('Content-Type', 'text/event-stream');
91
  res.setHeader('Cache-Control', 'no-cache');
92
  res.setHeader('Connection', 'keep-alive');
@@ -96,86 +349,177 @@ router.post('/chat', checkAIAccess, async (req, res) => {
96
  const user = await User.findOne({ username });
97
  if (!user) throw new Error('User not found');
98
 
99
- // 1. Save User Message
100
  const userMsgText = text || (audio ? '(Audio Message)' : '');
101
  if (userMsgText) {
102
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
103
  }
104
 
105
- const config = await ConfigModel.findOne({ key: 'main' });
 
 
 
 
106
 
107
- // 2. Build Context (The Heavy Lifting)
108
- const contextPrompt = await buildUserContext(username, userRole, schoolId);
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
- // Setup OpenAI Client
111
- const keys = await getKeyPool('openrouter');
112
- if (keys.length === 0) throw new Error("No API keys available");
113
 
114
- let modelName = 'qwen/qwen3-coder:free';
115
- let apiUrl = 'https://openrouter.ai/api/v1';
116
- if (config?.openRouterModels && config.openRouterModels.length > 0) {
117
- const m = config.openRouterModels[0];
118
- modelName = m.id;
119
- if (m.apiUrl) apiUrl = m.apiUrl;
 
 
 
 
 
120
  }
121
 
122
- console.log(`🤖 [Standard Chat] ${modelName} @ ${apiUrl}`);
123
-
124
- const client = new OpenAI({
125
- baseURL: apiUrl,
126
- apiKey: keys[0],
127
- defaultHeaders: { "HTTP-Referer": "https://smart.com" }
128
- });
129
-
130
- // 3. Build History
131
- const dbHistory = await ChatHistoryModel.find({ userId: user._id }).sort({ timestamp: -1 }).limit(10);
132
- let messages = [
133
- { role: 'system', content: contextPrompt },
134
- ...convertHistoryToOpenAI(dbHistory.reverse())
135
- ];
136
- if (text) messages.push({ role: 'user', content: text });
137
-
138
- // 4. Stream Response
139
- const stream = await client.chat.completions.create({
140
- model: modelName,
141
- messages: messages,
142
- stream: true
143
- });
144
 
145
- let finalResponseText = "";
 
 
 
146
 
147
- for await (const chunk of stream) {
148
- const delta = chunk.choices[0]?.delta?.content;
149
- if (delta) {
150
- finalResponseText += delta;
151
- sendSSE(res, { type: 'text', content: delta });
152
- }
153
- }
154
 
155
- // 5. Save Final Answer
156
- if (finalResponseText) {
157
- await ChatHistoryModel.create({ userId: user._id, role: 'model', text: finalResponseText });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
  }
159
- recordUsage('chat-response', 'STANDARD');
160
-
161
- sendSSE(res, { type: 'done' });
162
- res.end();
163
-
164
  } catch (e) {
165
- console.error("[AI Chat Error]", e);
166
- sendSSE(res, { type: 'error', message: e.message });
167
- res.end();
168
  }
169
  });
170
 
171
- // ... (Rest of the file: evaluate route, export)
172
  router.post('/evaluate', checkAIAccess, async (req, res) => {
173
  const { question, audio, image, images } = req.body;
174
  res.setHeader('Content-Type', 'text/event-stream');
175
  res.setHeader('Cache-Control', 'no-cache');
 
176
  res.flushHeaders();
177
- res.write(`data: ${JSON.stringify({ error: true, message: "Use Gemeni provider for multimodel evaluation" })}\n\n`);
178
- res.end();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  });
180
 
181
  module.exports = router;
 
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
8
+ // ... (Key Management, Usage Tracking, Helpers remain same)
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
 
26
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
27
  }
28
 
29
+ const wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
30
+ async function callAIWithRetry(aiModelCall, retries = 1) {
31
+ for (let i = 0; i < retries; i++) {
32
+ try { return await aiModelCall(); }
33
+ catch (e) {
34
+ if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
35
+ if (i < retries - 1) { await wait(1000 * Math.pow(2, i)); continue; }
36
+ throw e;
37
+ }
38
+ }
39
+ }
40
+
41
+ function convertGeminiToOpenAI(baseParams) {
42
+ const messages = [];
43
+ if (baseParams.config?.systemInstruction) messages.push({ role: 'system', content: baseParams.config.systemInstruction });
44
+
45
+ let contents = baseParams.contents;
46
+ if (contents && !Array.isArray(contents)) {
47
+ contents = [contents];
48
+ }
49
+
50
+ if (contents && Array.isArray(contents)) {
51
+ contents.forEach(content => {
52
+ let role = (content.role === 'model' || content.role === 'assistant') ? 'assistant' : 'user';
53
+ const messageContent = [];
54
+ if (content.parts) {
55
+ content.parts.forEach(p => {
56
+ if (p.text) messageContent.push({ type: 'text', text: p.text });
57
+ else if (p.inlineData && p.inlineData.mimeType.startsWith('image/')) {
58
+ messageContent.push({ type: 'image_url', image_url: { url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}` } });
59
+ }
60
+ });
61
+ }
62
+ if (messageContent.length > 0) {
63
+ if (messageContent.length === 1 && messageContent[0].type === 'text') {
64
+ messages.push({ role: role, content: messageContent[0].text });
65
+ } else {
66
+ messages.push({ role: role, content: messageContent });
67
+ }
68
+ }
69
+ });
70
+ }
71
+ return messages;
72
+ }
73
+
74
+ const PROVIDERS = { GEMINI: 'GEMINI', OPENROUTER: 'OPENROUTER', GEMMA: 'GEMMA' };
75
+ const DEFAULT_OPENROUTER_MODELS = ['qwen/qwen3-coder:free', 'openai/gpt-oss-120b:free', 'qwen/qwen3-235b-a22b:free', 'tngtech/deepseek-r1t-chimera:free'];
76
+
77
+ // Runtime override logic
78
+ let runtimeProviderOrder = [];
79
+
80
+ function deprioritizeProvider(providerName) {
81
+ if (runtimeProviderOrder.length > 0 && runtimeProviderOrder[runtimeProviderOrder.length - 1] === providerName) return;
82
+ console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
83
+ runtimeProviderOrder = runtimeProviderOrder.filter(p => p !== providerName).concat(providerName);
84
+ console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
85
+ }
86
+
87
+ function isQuotaError(e) {
88
+ const msg = (e.message || '').toLowerCase();
89
+ return e.status === 429 || e.status === 503 || msg.includes('quota') || msg.includes('overloaded') || msg.includes('resource_exhausted') || msg.includes('rate limit') || msg.includes('credits');
90
+ }
91
+
92
+ // Streaming Helpers
93
+ async function streamGemini(baseParams, res) {
94
+ const { GoogleGenAI } = await import("@google/genai");
95
+ const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
96
+ const keys = await getKeyPool('gemini');
97
+ if (keys.length === 0) throw new Error("No Gemini API keys");
98
+
99
+ for (const apiKey of keys) {
100
+ const client = new GoogleGenAI({ apiKey });
101
+ for (const modelName of models) {
102
+ try {
103
+ console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
104
+ const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
105
+
106
+ let hasStarted = false;
107
+ let fullText = "";
108
+
109
+ for await (const chunk of result) {
110
+ if (!hasStarted) {
111
+ console.log(`[AI] ✅ Connected to Gemini: ${modelName}`);
112
+ recordUsage(modelName, PROVIDERS.GEMINI);
113
+ hasStarted = true;
114
+ }
115
+ if (chunk.text) {
116
+ fullText += chunk.text;
117
+ res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
118
+ if (res.flush) res.flush();
119
+ }
120
+ }
121
+ return fullText;
122
+ } catch (e) {
123
+ console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
124
+ if (isQuotaError(e)) {
125
+ console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
126
+ continue;
127
+ }
128
+ throw e;
129
+ }
130
+ }
131
+ }
132
+ throw new Error("Gemini streaming failed (All keys/models exhausted)");
133
+ }
134
+
135
+ async function streamOpenRouter(baseParams, res) {
136
+ const config = await ConfigModel.findOne({ key: 'main' });
137
+ const models = (config && config.openRouterModels?.length) ? config.openRouterModels.map(m => m.id) : DEFAULT_OPENROUTER_MODELS;
138
+ const messages = convertGeminiToOpenAI(baseParams);
139
+ const keys = await getKeyPool('openrouter');
140
+ if (keys.length === 0) throw new Error("No OpenRouter API keys");
141
+
142
+ if (messages.length === 0) {
143
+ throw new Error("Conversion resulted in empty messages array. Check input format.");
144
+ }
145
+
146
+ for (const apiKey of keys) {
147
+ for (const modelName of models) {
148
+ const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
149
+ const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
150
+ const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';
151
+
152
+ const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });
153
+
154
+ // --- DOUBAO OPTIMIZATION (Context Caching) ---
155
+ const extraBody = {};
156
+ if (modelName.toLowerCase().includes('doubao')) {
157
+ console.log(`[AI] 💡 Activating Doubao Prefix Caching for ${modelName}`);
158
+ // Doubao-specific caching parameter
159
+ extraBody.caching = { type: "enabled", prefix: true };
160
+ // Disable thinking to save tokens/time if not needed (optional based on user pref, but here we prioritize speed for chat)
161
+ extraBody.thinking = { type: "disabled" };
162
+ }
163
+ // ---------------------------------------------
164
+
165
+ try {
166
+ console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);
167
+
168
+ const stream = await client.chat.completions.create({
169
+ model: modelName,
170
+ messages,
171
+ stream: true,
172
+ ...extraBody
173
+ });
174
+
175
+ console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
176
+ recordUsage(modelName, PROVIDERS.OPENROUTER);
177
+
178
+ let fullText = '';
179
+ for await (const chunk of stream) {
180
+ const text = chunk.choices[0]?.delta?.content || '';
181
+ if (text) {
182
+ fullText += text;
183
+ res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
184
+ if (res.flush) res.flush();
185
+ }
186
+ }
187
+ return fullText;
188
+ } catch (e) {
189
+ console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
190
+ if (isQuotaError(e)) {
191
+ console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
192
+ break;
193
+ }
194
+ }
195
+ }
196
+ }
197
+ throw new Error("OpenRouter/Custom stream failed (All models exhausted)");
198
+ }
199
+
200
+ async function streamGemma(baseParams, res) {
201
+ const { GoogleGenAI } = await import("@google/genai");
202
+ const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
203
+ const keys = await getKeyPool('gemini');
204
+ if (keys.length === 0) throw new Error("No keys for Gemma");
205
+
206
+ for (const apiKey of keys) {
207
+ const client = new GoogleGenAI({ apiKey });
208
+ for (const modelName of models) {
209
+ try {
210
+ console.log(`[AI] 🚀 Attempting Gemma Model: ${modelName}`);
211
+ const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
212
+
213
+ let hasStarted = false;
214
+ let fullText = "";
215
+ for await (const chunk of result) {
216
+ if (!hasStarted) {
217
+ console.log(`[AI] ✅ Connected to Gemma: ${modelName}`);
218
+ recordUsage(modelName, PROVIDERS.GEMMA);
219
+ hasStarted = true;
220
+ }
221
+ if (chunk.text) {
222
+ fullText += chunk.text;
223
+ res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
224
+ if (res.flush) res.flush();
225
+ }
226
+ }
227
+ return fullText;
228
+ } catch (e) {
229
+ console.warn(`[AI] ⚠️ Gemma ${modelName} Error: ${e.message}`);
230
+ if (isQuotaError(e)) continue;
231
+ }
232
+ }
233
+ }
234
+ throw new Error("Gemma stream failed");
235
+ }
236
+
237
+ async function streamContentWithSmartFallback(baseParams, res) {
238
+ let hasAudio = false;
239
+ const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
240
+
241
+ contentsArray.forEach(c => {
242
+ if (c && c.parts) {
243
+ c.parts.forEach(p => { if (p.inlineData && p.inlineData.mimeType.startsWith('audio/')) hasAudio = true; });
244
+ }
245
+ });
246
+
247
+ if (hasAudio) {
248
+ try {
249
+ console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
250
+ return await streamGemini(baseParams, res);
251
+ } catch(e) {
252
+ console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
253
+ deprioritizeProvider(PROVIDERS.GEMINI);
254
+ throw new Error('QUOTA_EXCEEDED_AUDIO');
255
+ }
256
+ }
257
+
258
+ const config = await ConfigModel.findOne({ key: 'main' });
259
+ const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
260
+ ? config.aiProviderOrder
261
+ : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
262
+
263
+ const runtimeSet = new Set(runtimeProviderOrder);
264
+ if (runtimeProviderOrder.length === 0 || runtimeProviderOrder.length !== configuredOrder.length || !configuredOrder.every(p => runtimeSet.has(p))) {
265
+ runtimeProviderOrder = [...configuredOrder];
266
+ }
267
+
268
+ let finalError = null;
269
+ for (const provider of runtimeProviderOrder) {
270
+ try {
271
+ console.log(`[AI] 👉 Trying Provider: ${provider}...`);
272
+ if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
273
+ else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
274
+ else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
275
+ } catch (e) {
276
+ console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
277
+ finalError = e;
278
+ if (isQuotaError(e)) {
279
+ console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
280
+ deprioritizeProvider(provider);
281
+ continue;
282
+ }
283
+ continue;
284
+ }
285
+ }
286
+ throw finalError || new Error('All streaming models unavailable.');
287
+ }
288
+
289
  const checkAIAccess = async (req, res, next) => {
290
  const username = req.headers['x-user-username'];
291
  const role = req.headers['x-user-role'];
 
325
  });
326
 
327
  router.post('/reset-pool', checkAIAccess, (req, res) => {
328
+ runtimeProviderOrder = [];
329
+ console.log('[AI] 🔄 Provider priority pool reset.');
330
  res.json({ success: true });
331
  });
332
 
333
+ // --- PERSISTENT CHAT HISTORY HANDLER ---
334
+ // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
 
 
 
 
 
 
 
 
 
 
 
335
  router.post('/chat', checkAIAccess, async (req, res) => {
336
+ const { text, audio } = req.body; // Ignore req.body.history for prompt generation
337
+
338
+ // Extract headers for context building
339
  const username = req.headers['x-user-username'];
340
  const userRole = req.headers['x-user-role'];
341
  const schoolId = req.headers['x-school-id'];
342
 
 
343
  res.setHeader('Content-Type', 'text/event-stream');
344
  res.setHeader('Cache-Control', 'no-cache');
345
  res.setHeader('Connection', 'keep-alive');
 
349
  const user = await User.findOne({ username });
350
  if (!user) throw new Error('User not found');
351
 
352
+ // 1. SAVE USER MSG TO DB
353
  const userMsgText = text || (audio ? '(Audio Message)' : '');
354
  if (userMsgText) {
355
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
356
  }
357
 
358
+ // 2. FETCH HISTORY FROM DB (Long-term Memory)
359
+ // Retrieve last 30 messages for context
360
+ const dbHistory = await ChatHistoryModel.find({ userId: user._id })
361
+ .sort({ timestamp: -1 })
362
+ .limit(30);
363
 
364
+ // Re-order for API (oldest first)
365
+ const historyContext = dbHistory.reverse().map(msg => ({
366
+ role: msg.role === 'user' ? 'user' : 'model',
367
+ parts: [{ text: msg.text }]
368
+ }));
369
+
370
+ // 3. PREPARE REQUEST
371
+ // The last user message is already in DB and retrieved in historyContext.
372
+ // We need to separate "history" from "current message" for some APIs,
373
+ // but Google/OpenAI handle a list of messages fine.
374
+ // However, standard pattern is: History + Current.
375
+ // Since we fetched ALL (including current), we just pass historyContext as contents.
376
+ // NOTE: If audio is present, we must append it specifically as the "current" part
377
+ // because DB only stores text representation for now.
378
 
379
+ const fullContents = [...historyContext];
 
 
380
 
381
+ // If this request has audio, append it as a new part (since DB load only has text placeholder)
382
+ // We replace the last 'user' text message with the audio payload for the AI model
383
+ if (audio) {
384
+ // Remove the text placeholder we just loaded
385
+ if (fullContents.length > 0 && fullContents[fullContents.length - 1].role === 'user') {
386
+ fullContents.pop();
387
+ }
388
+ fullContents.push({
389
+ role: 'user',
390
+ parts: [{ inlineData: { mimeType: 'audio/webm', data: audio } }]
391
+ });
392
  }
393
 
394
+ // --- NEW: Inject Context ---
395
+ const contextPrompt = await buildUserContext(username, userRole, schoolId);
396
+ const baseSystemInstruction = "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。回复支持 Markdown 格式。";
397
+ const combinedSystemInstruction = `${baseSystemInstruction}\n${contextPrompt}`;
398
+ // ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
399
 
400
+ const answerText = await streamContentWithSmartFallback({
401
+ contents: fullContents,
402
+ config: { systemInstruction: combinedSystemInstruction }
403
+ }, res);
404
 
405
+ // 4. SAVE AI RESPONSE TO DB
406
+ if (answerText) {
407
+ await ChatHistoryModel.create({ userId: user._id, role: 'model', text: answerText });
 
 
 
 
408
 
409
+ // Signal that text generation is done and TTS is starting
410
+ res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
411
+ try {
412
+ const { GoogleGenAI } = await import("@google/genai");
413
+ const keys = await getKeyPool('gemini');
414
+ let audioBytes = null;
415
+ for (const apiKey of keys) {
416
+ try {
417
+ const client = new GoogleGenAI({ apiKey });
418
+ const ttsResponse = await client.models.generateContent({
419
+ model: "gemini-2.5-flash-preview-tts",
420
+ contents: [{ parts: [{ text: answerText }] }],
421
+ config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
422
+ });
423
+ audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
424
+ if (audioBytes) break;
425
+ } catch(e) { if (isQuotaError(e)) continue; break; }
426
+ }
427
+ if (audioBytes) res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
428
+ else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
429
+ } catch (ttsError) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
430
  }
431
+ res.write('data: [DONE]\n\n'); res.end();
 
 
 
 
432
  } catch (e) {
433
+ console.error("[AI Chat Route Error]", e);
434
+ res.write(`data: ${JSON.stringify({ error: true, message: e.message })}\n\n`); res.end();
 
435
  }
436
  });
437
 
438
// STREAMING ASSESSMENT ENDPOINT
// Grades a student's answer (text prompt + optional audio / image(s)) over SSE:
// 1) streams the model's structured markdown verdict, 2) optionally synthesizes
// TTS audio for the "## Feedback" section, 3) terminates with `data: [DONE]`.
router.post('/evaluate', checkAIAccess, async (req, res) => {
  const { question, audio, image, images } = req.body;

  // Standard Server-Sent Events headers; flush immediately so the client
  // can start reading status events before generation finishes.
  res.setHeader('Content-Type', 'text/event-stream');
  res.setHeader('Cache-Control', 'no-cache');
  res.setHeader('Connection', 'keep-alive');
  res.flushHeaders();

  try {
    res.write(`data: ${JSON.stringify({ status: 'analyzing' })}\n\n`);

    // Build the multimodal prompt: grading instruction first, then the
    // student's answer as audio and/or image parts.
    const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];

    if (audio) {
      evalParts.push({ text: "学生的回答在音频中。" });
      evalParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
    }

    // Support multiple images (preferred); fall back to the legacy single
    // `image` field for older clients.
    if (images && Array.isArray(images) && images.length > 0) {
      evalParts.push({ text: "学生的回答写在以下图片中,请识别所有图片中的文字内容并进行批改:" });
      images.forEach(img => {
        if (img) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: img } });
      });
    } else if (image) {
      // Legacy single image support
      evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
      evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
    }

    // Force structured markdown output (Transcription / Feedback / Score
    // sections) so the streamed text can be parsed progressively client-side.
    evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。
必须严格按照以下格式输出(不要使用Markdown代码块包裹):

## Transcription
(在此处输出识别到的学生回答内容,如果是图片则为识别的文字)

## Feedback
(在此处输出简短的鼓励性评语和建议)

## Score
(在此处仅输出一个0-100的数字)` });

    // Stream the grading text to the client; the helper resolves with the
    // full accumulated text once streaming completes.
    const fullText = await streamContentWithSmartFallback({
      // CRITICAL FIX: Pass as array of objects for OpenRouter compatibility
      contents: [{ role: 'user', parts: evalParts }],
      // NO JSON MODE to allow progressive text streaming
    }, res);

    // Extract the Feedback section for TTS.
    // FIX: guard against a null/undefined stream result — the /chat route
    // guards its answerText the same way; calling .match on undefined here
    // would throw and turn a soft fallback into a hard SSE error event.
    const feedbackMatch = (fullText || '').match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i);
    const feedbackText = feedbackMatch ? feedbackMatch[1].trim() : "";

    // Generate TTS audio for the feedback, rotating through the Gemini key
    // pool on quota errors; any failure degrades gracefully to ttsSkipped.
    if (feedbackText) {
      res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
      try {
        const { GoogleGenAI } = await import("@google/genai");
        const keys = await getKeyPool('gemini');
        let feedbackAudio = null;
        for (const apiKey of keys) {
          try {
            const client = new GoogleGenAI({ apiKey });
            const ttsResponse = await client.models.generateContent({
              model: "gemini-2.5-flash-preview-tts",
              contents: [{ parts: [{ text: feedbackText }] }],
              config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
            });
            feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
            if (feedbackAudio) break;
          } catch (e) {
            // Quota exhaustion: try the next key; any other error: give up.
            if (isQuotaError(e)) continue;
            break;
          }
        }
        if (feedbackAudio) res.write(`data: ${JSON.stringify({ audio: feedbackAudio })}\n\n`);
        else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
      } catch (ttsErr) {
        // TTS is best-effort; never fail the whole evaluation over it.
        res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
      }
    }

    res.write('data: [DONE]\n\n');
    res.end();

  } catch (e) {
    console.error("AI Eval Error:", e);
    res.write(`data: ${JSON.stringify({ error: true, message: e.message || "Evaluation failed" })}\n\n`);
    res.end();
  }
});
524
 
525
// Export the configured Express router so the app can mount these AI routes.
module.exports = router;