dvc890 committed on
Commit
bdfccdc
·
verified ·
1 Parent(s): 67a8b0c

Upload 63 files

Browse files
Files changed (3) hide show
  1. ai-context.js +79 -142
  2. ai-routes.js +72 -416
  3. components/ai/ChatPanel.tsx +20 -66
ai-context.js CHANGED
@@ -24,11 +24,11 @@ async function buildStudentContext(username, schoolId) {
24
 
25
  if (!student) return "无法找到该学生的详细档案。";
26
 
27
- // 1. 获取近期成绩 (最近10条,让AI掌握更多趋势)
28
  const recentScores = await Score.find({
29
  studentNo: student.studentNo,
30
  schoolId
31
- }).sort({ _id: -1 }).limit(10);
32
 
33
  // 2. 获取考勤概况
34
  const attendanceStats = await AttendanceModel.aggregate([
@@ -38,10 +38,6 @@ async function buildStudentContext(username, schoolId) {
38
  const absentCount = attendanceStats.find(a => a._id === 'Absent')?.count || 0;
39
  const leaveCount = attendanceStats.find(a => a._id === 'Leave')?.count || 0;
40
 
41
- // 3. 获取待办事项
42
- const user = await User.findOne({ username, schoolId });
43
- const todos = user ? await TodoModel.find({ userId: user._id, isCompleted: false }).limit(5) : [];
44
-
45
  let prompt = `
46
  ### 当前用户身份:学生 (个人视图)
47
  - **姓名**: ${student.name}
@@ -53,10 +49,7 @@ async function buildStudentContext(username, schoolId) {
53
  `;
54
 
55
  if (recentScores.length > 0) {
56
- prompt += `- **近期成绩历史**: ${recentScores.map(s => `${s.courseName}: ${s.score} (${s.type || '考试'})`).join('; ')}\n`;
57
- // 计算简单平均分
58
- const avg = (recentScores.reduce((acc, s) => acc + s.score, 0) / recentScores.length).toFixed(1);
59
- prompt += `- **近期平均分**: ${avg}\n`;
60
  } else {
61
  prompt += `- **近期成绩**: 暂无记录\n`;
62
  }
@@ -67,134 +60,105 @@ async function buildStudentContext(username, schoolId) {
67
  prompt += `- **考勤状况**: 全勤,表现极佳。\n`;
68
  }
69
 
70
- if (todos.length > 0) {
71
- prompt += `- **未完成待办**: ${todos.map(t => t.content).join('; ')}\n`;
72
- }
73
-
74
  return prompt;
75
  }
76
 
77
  /**
78
- * 构建教师画像上下文 (增强版 - 智能区分班主任与科任视角)
 
79
  */
80
  async function buildTeacherContext(username, schoolId) {
81
  const user = await User.findOne({ username, schoolId });
82
  if (!user) return "无法找到该教师档案。";
83
 
84
- // 1. 确定老师的身份范围
85
- const homeroomClassName = user.homeroomClass; // 班主任班级
86
 
87
- // 查找该老师任教的所有课程 (找出任教的其他班级)
88
- const teachingCourses = await Course.find({
89
- $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }],
90
- schoolId
91
  });
92
-
93
- // 构建任教班级 -> 科目列表的映射 (e.g., "三年级(2)班": ["数学", "科学"])
94
- const teachingMap = {};
95
- teachingCourses.forEach(c => {
96
- if (!teachingMap[c.className]) teachingMap[c.className] = new Set();
97
- teachingMap[c.className].add(c.courseName);
 
 
 
 
 
 
 
98
  });
99
 
100
- // 合并所有相关班级 (班主任班级 + 任课班级)
101
- const allClasses = new Set(Object.keys(teachingMap));
102
- if (homeroomClassName) allClasses.add(homeroomClassName);
103
 
104
- if (allClasses.size === 0) {
105
- return `### 当前用户身份:教师\n- **姓名**: ${user.trueName || username}\n- **状态**: 暂未绑定任何班级或课程数据。`;
106
  }
107
 
 
108
  let prompt = `
109
  ### 当前用户身份:教师
110
  - **姓名**: ${user.trueName || username}
111
- - **负责班级**: ${Array.from(allClasses).join(', ')}
 
 
 
112
  `;
113
 
114
- // 2. 遍历所有相关班级,构建详细数据
115
- for (const className of allClasses) {
116
- const isHomeroom = className === homeroomClassName;
117
- const subjectsTaught = teachingMap[className] ? Array.from(teachingMap[className]) : [];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
 
119
- prompt += `\n#### 🏫 班级: ${className} (${isHomeroom ? '我是班主任' : '我是任课老师'})\n`;
120
- if (!isHomeroom) {
121
- prompt += `(非班主任视角:仅展示我任教的科目 [${subjectsTaught.join(', ')}] 的数据)\n`;
122
  }
123
 
124
- // 2.1 获取该班学生
125
- const students = await Student.find({ className, schoolId });
126
- if (students.length === 0) {
127
- prompt += `- 暂无学生数据\n`;
128
  continue;
129
  }
130
-
131
- const studentNos = students.map(s => s.studentNo);
132
- const studentIds = students.map(s => s._id.toString());
133
-
134
- // 2.2 获取考勤 (全班)
135
- const attendanceRaw = await AttendanceModel.aggregate([
136
- { $match: { studentId: { $in: studentIds }, status: { $in: ['Absent', 'Leave'] } } },
137
- { $group: { _id: "$studentId", absent: { $sum: { $cond: [{ $eq: ["$status", "Absent"] }, 1, 0] } }, leave: { $sum: { $cond: [{ $eq: ["$status", "Leave"] }, 1, 0] } } } }
138
- ]);
139
- const attendanceMap = {};
140
- attendanceRaw.forEach(a => attendanceMap[a._id] = a);
141
-
142
- // 2.3 获取成绩 (按需获取)
143
- // 查询该班级学生的所有成绩
144
- // 为了性能,还是查出来再内存过滤,比多次DB查询快
145
- const allScores = await Score.find({
146
- schoolId,
147
- studentNo: { $in: studentNos }
148
- }).sort({ _id: -1 }); // 最新的在前
149
-
150
- // 构建每个学生的成绩摘要
151
- const studentDetails = students.map(s => {
152
- const att = attendanceMap[s._id.toString()] || { absent: 0, leave: 0 };
153
-
154
- // 筛选该学生的成绩
155
- let myScores = allScores.filter(sc => sc.studentNo === s.studentNo);
156
-
157
- // 【关键逻辑】过滤显示哪些科目
158
- if (!isHomeroom) {
159
- // 如果不是班主任,只保留我教的科目的成绩
160
- myScores = myScores.filter(sc => subjectsTaught.includes(sc.courseName));
161
- }
162
 
163
- // 【聚合逻辑】每个科目只取最近一次成绩 (去重)
164
- const latestSubjectScores = {};
165
- myScores.forEach(sc => {
166
- if (!latestSubjectScores[sc.courseName]) {
167
- latestSubjectScores[sc.courseName] = sc;
168
- }
169
- });
170
-
171
- const finalScores = Object.values(latestSubjectScores);
172
 
173
- // 格式化成绩字符串
174
- let scoreStr = "";
175
- if (finalScores.length > 0) {
176
- scoreStr = finalScores.map(sc => `${sc.courseName}:${sc.score}`).join(', ');
177
- } else {
178
- scoreStr = "无相关成绩";
179
  }
180
 
181
- // 标记异常 (缺勤多 或 有不及格)
182
- const hasIssue = att.absent > 0 || finalScores.some(sc => sc.score < 60);
183
- const flag = hasIssue ? "⚠️" : "";
184
 
185
- return `- ${flag} **${s.name}**: 考勤[缺${att.absent}/假${att.leave}], 小红花:${s.flowerBalance}, 最新成绩:[${scoreStr}]`;
186
  });
187
 
188
- // 将学生列表加入 Prompt (限制长度,如果班级人太多,可能需要截断,但Gemini窗口大,通常没事)
189
- prompt += studentDetails.join('\n') + '\n';
190
-
191
- // 2.4 如果是班主任,额外显示待办
192
- if (isHomeroom) {
193
- const pendingLeaves = await LeaveRequestModel.countDocuments({ className, schoolId, status: 'Pending' });
194
- if (pendingLeaves > 0) {
195
- prompt += `> 🔴 班务提醒: 有 ${pendingLeaves} 条请假申请待审批。\n`;
196
- }
197
- }
198
  }
199
 
200
  return prompt;
@@ -204,36 +168,11 @@ async function buildTeacherContext(username, schoolId) {
204
  * 构建管理员/校长画像上下文
205
  */
206
  async function buildAdminContext(role, schoolId) {
207
- let prompt = `### 当前用户身份:${role === 'PRINCIPAL' ? '校长' : '超级管理员'}\n`;
208
-
209
- if (role === 'PRINCIPAL' && schoolId) {
210
- const school = await School.findById(schoolId);
211
- const totalStudents = await Student.countDocuments({ schoolId });
212
- const totalTeachers = await User.countDocuments({ schoolId, role: 'TEACHER' });
213
-
214
- // 今日缺勤详细名单
215
- const today = new Date().toISOString().split('T')[0];
216
- const absences = await AttendanceModel.find({ schoolId, date: today, status: { $in: ['Absent', 'Leave'] } });
217
- const absentNames = absences.map(a => `${a.studentName}(${a.className})`).join(', ');
218
-
219
- // 全校均分
220
- const recentScores = await Score.find({ schoolId }).sort({_id:-1}).limit(100);
221
- let avgScore = 0;
222
- if (recentScores.length) avgScore = (recentScores.reduce((a,b)=>a+b.score,0)/recentScores.length).toFixed(1);
223
-
224
- prompt += `- **学校**: ${school ? school.name : '未知'}\n`;
225
- prompt += `- **宏观数据**: 教师 ${totalTeachers} 人,学生 ${totalStudents} 人,近期全校抽样平均分 ${avgScore}。\n`;
226
- prompt += `- **今日出勤**: 缺勤/请假 ${absences.length} 人。名单: ${absentNames || '无'}。\n`;
227
- }
228
-
229
- return prompt;
230
  }
231
 
232
  /**
233
- * 主入口:构建用户上下文 Prompt
234
- * @param {string} username - 请求头中的用户名
235
- * @param {string} role - 请求头中的角色
236
- * @param {string} schoolId - 请求头中的学校ID
237
  */
238
  async function buildUserContext(username, role, schoolId) {
239
  try {
@@ -248,25 +187,23 @@ async function buildUserContext(username, role, schoolId) {
248
  roleContext = await buildAdminContext(role, schoolId);
249
  }
250
 
251
- // 组装最终 System Instruction 片段
252
  return `
253
  ---
254
- 【上下文注入信息 (Context Injection) - 绝密】
255
- 当前系统时间: ${dateStr}
256
- 以下是当前用户的核心数据和其管辖范围内的详细档案。
257
  ${roleContext}
258
 
259
  【AI 行为准则】
260
- 1. 你拥有上述所有数据的“上帝视角”。
261
- 2. **班主任视角**: 当用户是班主任时,你通过上下文已知晓全班所有科目的成绩。如果问“王五偏科吗”,请对比他的各科成绩作答。
262
- 3. **任课老师视角**: 当用户非班主任时,你只能看到他所教科目的成绩。如果问“李华其他课怎么样”,请诚实回答“我只能看到您任教科目的数据,无法评价其他科目”。
263
- 4. 回答要具体。不要说“他成绩一般”,要说“他最近数学考了60分,英语考了85分”。
264
- 5. 数据格式说明: [科目:分数] 代表该科目最近一次录入的成绩。
265
  ---
266
  `;
267
  } catch (e) {
268
  console.error("Context build failed:", e);
269
- return ""; // 失败时降级为空,不影响主流程
270
  }
271
  }
272
 
 
24
 
25
  if (!student) return "无法找到该学生的详细档案。";
26
 
27
+ // 1. 获取近期成绩 (最近20条)
28
  const recentScores = await Score.find({
29
  studentNo: student.studentNo,
30
  schoolId
31
+ }).sort({ _id: -1 }).limit(20);
32
 
33
  // 2. 获取考勤概况
34
  const attendanceStats = await AttendanceModel.aggregate([
 
38
  const absentCount = attendanceStats.find(a => a._id === 'Absent')?.count || 0;
39
  const leaveCount = attendanceStats.find(a => a._id === 'Leave')?.count || 0;
40
 
 
 
 
 
41
  let prompt = `
42
  ### 当前用户身份:学生 (个人视图)
43
  - **姓名**: ${student.name}
 
49
  `;
50
 
51
  if (recentScores.length > 0) {
52
+ prompt += `- **成绩历史**: ${recentScores.map(s => `${s.courseName}: ${s.score} (${s.examName||s.type})`).join('; ')}\n`;
 
 
 
53
  } else {
54
  prompt += `- **近期成绩**: 暂无记录\n`;
55
  }
 
60
  prompt += `- **考勤状况**: 全勤,表现极佳。\n`;
61
  }
62
 
 
 
 
 
63
  return prompt;
64
  }
65
 
66
  /**
67
+ * 构建教师画像上下文 (严格权限版)
68
+ * 核心逻辑:只查自己教的班级,只查自己教的课(除非是班主任)
69
  */
70
  async function buildTeacherContext(username, schoolId) {
71
  const user = await User.findOne({ username, schoolId });
72
  if (!user) return "无法找到该教师档案。";
73
 
74
+ const homeroomClass = user.homeroomClass;
 
75
 
76
+ // 1. 查找所有任课信息
77
+ const courses = await Course.find({
78
+ schoolId,
79
+ $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }]
80
  });
81
+
82
+ // 2. 构建权限映射
83
+ // authorizedClasses: Set<string> -> 老师有权限查看的班级列表
84
+ const authorizedClasses = new Set();
85
+ // teachingSubjects: Map<className, Set<subjectName>> -> 每个班级教哪些课
86
+ const teachingSubjects = {};
87
+
88
+ if (homeroomClass) authorizedClasses.add(homeroomClass);
89
+
90
+ courses.forEach(c => {
91
+ authorizedClasses.add(c.className);
92
+ if (!teachingSubjects[c.className]) teachingSubjects[c.className] = new Set();
93
+ teachingSubjects[c.className].add(c.courseName);
94
  });
95
 
96
+ const classList = Array.from(authorizedClasses);
 
 
97
 
98
+ if (classList.length === 0) {
99
+ return `### 当前用户身份:教师\n- **姓名**: ${user.trueName || username}\n- **状态**: 暂未绑定任何班级或课程。请告知用户去“班级管理”或“课程安排”绑定。`;
100
  }
101
 
102
+ // 3. 构建详细数据 Prompt
103
  let prompt = `
104
  ### 当前用户身份:教师
105
  - **姓名**: ${user.trueName || username}
106
+ - **管理权限范围**: [${classList.join(', ')}]
107
+ - **注意**: 你 **绝对不能** 回答关于上述班级以外的任何学生数据。如果用户问其他班级(如“四年级6班”),请明确拒绝,并告知用户系统记录显示他只负责上述班级。
108
+
109
+ ### 详细班级数据
110
  `;
111
 
112
+ // 4. 并行获取所有相关班级的学生
113
+ // 移除 limit,获取全量学生
114
+ const allStudents = await Student.find({
115
+ schoolId,
116
+ className: { $in: classList },
117
+ status: 'Enrolled'
118
+ }).sort({ seatNo: 1, studentNo: 1 });
119
+
120
+ // 5. 获取这些学生的所有成绩
121
+ const allStudentNos = allStudents.map(s => s.studentNo);
122
+ const allScores = await Score.find({
123
+ schoolId,
124
+ studentNo: { $in: allStudentNos },
125
+ status: 'Normal'
126
+ });
127
+
128
+ // 6. 按班级组装数据
129
+ for (const clsName of classList) {
130
+ const isHomeroom = clsName === homeroomClass;
131
+ const subjectsTaught = teachingSubjects[clsName] ? Array.from(teachingSubjects[clsName]) : [];
132
 
133
+ prompt += `\n#### 🏫 ${clsName} (${isHomeroom ? '我是班主任' : '我是任课老师'})\n`;
134
+ if (!isHomeroom && subjectsTaught.length > 0) {
135
+ prompt += `(非班主任视角:我只教 ${subjectsTaught.join(', ')},仅展示这些科目的成绩)\n`;
136
  }
137
 
138
+ const classStudents = allStudents.filter(s => s.className === clsName);
139
+
140
+ if (classStudents.length === 0) {
141
+ prompt += `- (暂无学生档案)\n`;
142
  continue;
143
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
 
145
+ // 组装每个学生的信息
146
+ const studentLines = classStudents.map(s => {
147
+ let stuScores = allScores.filter(sc => sc.studentNo === s.studentNo);
 
 
 
 
 
 
148
 
149
+ // 权限过滤:如果不是班主任,过滤掉不教的科目
150
+ if (!isHomeroom) {
151
+ stuScores = stuScores.filter(sc => subjectsTaught.includes(sc.courseName));
 
 
 
152
  }
153
 
154
+ // 格式化成绩: [数学:90, 英语:85(期末)]
155
+ const scoreStrs = stuScores.map(sc => `${sc.courseName}:${sc.score}${sc.examName && sc.examName!=='期末考试' ? '('+sc.examName+')' : ''}`);
156
+ const scoreText = scoreStrs.length > 0 ? `成绩:[${scoreStrs.join(', ')}]` : "无相关成绩";
157
 
158
+ return `- ${s.name}(${s.seatNo || '-'}号): ${scoreText}`;
159
  });
160
 
161
+ prompt += studentLines.join('\n') + '\n';
 
 
 
 
 
 
 
 
 
162
  }
163
 
164
  return prompt;
 
168
  * 构建管理员/校长画像上下文
169
  */
170
  async function buildAdminContext(role, schoolId) {
171
+ return `### 当前用户身份:${role === 'PRINCIPAL' ? '校长' : '超级管理员'}\n你拥有最高权限,但请注意保护隐私。`;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  }
173
 
174
  /**
175
+ * 主入口
 
 
 
176
  */
177
  async function buildUserContext(username, role, schoolId) {
178
  try {
 
187
  roleContext = await buildAdminContext(role, schoolId);
188
  }
189
 
 
190
  return `
191
  ---
192
+ 【系统注入上下文】
193
+ 当前时间: ${dateStr}
194
+ 以下是当前用户权限范围内的 **全量真实数据**。
195
  ${roleContext}
196
 
197
  【AI 行为准则】
198
+ 1. **诚实原则**: 数据里有什么就说什么。如果数据里没有(例如学生列表里没这个名字,或者成绩列表为空),直接说“系统里没有记录”。不要编造。
199
+ 2. **权限原则**: 严格遵守上述“管理权限范围”。用户问其他班级时,礼貌拒绝。
200
+ 3. **数据格式**: 上下文中的数据格式为 "姓名: 成绩:[科目:分数...]"。
201
+ 4. **回答风格**: 像一位专业的教务助理,简洁明了。
 
202
  ---
203
  `;
204
  } catch (e) {
205
  console.error("Context build failed:", e);
206
+ return "";
207
  }
208
  }
209
 
ai-routes.js CHANGED
@@ -5,7 +5,6 @@ const OpenAI = require('openai');
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
8
- // ... (Key Management, Usage Tracking, Helpers remain same)
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
@@ -26,266 +25,6 @@ async function recordUsage(model, provider) {
26
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
27
  }
28
 
29
- const wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
30
- async function callAIWithRetry(aiModelCall, retries = 1) {
31
- for (let i = 0; i < retries; i++) {
32
- try { return await aiModelCall(); }
33
- catch (e) {
34
- if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
35
- if (i < retries - 1) { await wait(1000 * Math.pow(2, i)); continue; }
36
- throw e;
37
- }
38
- }
39
- }
40
-
41
- function convertGeminiToOpenAI(baseParams) {
42
- const messages = [];
43
- if (baseParams.config?.systemInstruction) messages.push({ role: 'system', content: baseParams.config.systemInstruction });
44
-
45
- let contents = baseParams.contents;
46
- if (contents && !Array.isArray(contents)) {
47
- contents = [contents];
48
- }
49
-
50
- if (contents && Array.isArray(contents)) {
51
- contents.forEach(content => {
52
- let role = (content.role === 'model' || content.role === 'assistant') ? 'assistant' : 'user';
53
- const messageContent = [];
54
- if (content.parts) {
55
- content.parts.forEach(p => {
56
- if (p.text) messageContent.push({ type: 'text', text: p.text });
57
- else if (p.inlineData && p.inlineData.mimeType.startsWith('image/')) {
58
- messageContent.push({ type: 'image_url', image_url: { url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}` } });
59
- }
60
- });
61
- }
62
- if (messageContent.length > 0) {
63
- if (messageContent.length === 1 && messageContent[0].type === 'text') {
64
- messages.push({ role: role, content: messageContent[0].text });
65
- } else {
66
- messages.push({ role: role, content: messageContent });
67
- }
68
- }
69
- });
70
- }
71
- return messages;
72
- }
73
-
74
- const PROVIDERS = { GEMINI: 'GEMINI', OPENROUTER: 'OPENROUTER', GEMMA: 'GEMMA' };
75
- const DEFAULT_OPENROUTER_MODELS = ['qwen/qwen3-coder:free', 'openai/gpt-oss-120b:free', 'qwen/qwen3-235b-a22b:free', 'tngtech/deepseek-r1t-chimera:free'];
76
-
77
- // Runtime override logic
78
- let runtimeProviderOrder = [];
79
-
80
- function deprioritizeProvider(providerName) {
81
- if (runtimeProviderOrder.length > 0 && runtimeProviderOrder[runtimeProviderOrder.length - 1] === providerName) return;
82
- console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
83
- runtimeProviderOrder = runtimeProviderOrder.filter(p => p !== providerName).concat(providerName);
84
- console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
85
- }
86
-
87
- function isQuotaError(e) {
88
- const msg = (e.message || '').toLowerCase();
89
- return e.status === 429 || e.status === 503 || msg.includes('quota') || msg.includes('overloaded') || msg.includes('resource_exhausted') || msg.includes('rate limit') || msg.includes('credits');
90
- }
91
-
92
- // Streaming Helpers
93
- async function streamGemini(baseParams, res) {
94
- const { GoogleGenAI } = await import("@google/genai");
95
- const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
96
- const keys = await getKeyPool('gemini');
97
- if (keys.length === 0) throw new Error("No Gemini API keys");
98
-
99
- for (const apiKey of keys) {
100
- const client = new GoogleGenAI({ apiKey });
101
- for (const modelName of models) {
102
- try {
103
- console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
104
- const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
105
-
106
- let hasStarted = false;
107
- let fullText = "";
108
-
109
- for await (const chunk of result) {
110
- if (!hasStarted) {
111
- console.log(`[AI] ✅ Connected to Gemini: ${modelName}`);
112
- recordUsage(modelName, PROVIDERS.GEMINI);
113
- hasStarted = true;
114
- }
115
- if (chunk.text) {
116
- fullText += chunk.text;
117
- res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
118
- if (res.flush) res.flush();
119
- }
120
- }
121
- return fullText;
122
- } catch (e) {
123
- console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
124
- if (isQuotaError(e)) {
125
- console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
126
- continue;
127
- }
128
- throw e;
129
- }
130
- }
131
- }
132
- throw new Error("Gemini streaming failed (All keys/models exhausted)");
133
- }
134
-
135
- async function streamOpenRouter(baseParams, res) {
136
- const config = await ConfigModel.findOne({ key: 'main' });
137
- const models = (config && config.openRouterModels?.length) ? config.openRouterModels.map(m => m.id) : DEFAULT_OPENROUTER_MODELS;
138
- const messages = convertGeminiToOpenAI(baseParams);
139
- const keys = await getKeyPool('openrouter');
140
- if (keys.length === 0) throw new Error("No OpenRouter API keys");
141
-
142
- if (messages.length === 0) {
143
- throw new Error("Conversion resulted in empty messages array. Check input format.");
144
- }
145
-
146
- for (const apiKey of keys) {
147
- for (const modelName of models) {
148
- const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
149
- const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
150
- const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';
151
-
152
- const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });
153
-
154
- // --- DOUBAO OPTIMIZATION (Context Caching) ---
155
- const extraBody = {};
156
- if (modelName.toLowerCase().includes('doubao')) {
157
- console.log(`[AI] 💡 Activating Doubao Prefix Caching for ${modelName}`);
158
- // Doubao-specific caching parameter
159
- extraBody.caching = { type: "enabled", prefix: true };
160
- // Disable thinking to save tokens/time if not needed (optional based on user pref, but here we prioritize speed for chat)
161
- extraBody.thinking = { type: "disabled" };
162
- }
163
- // ---------------------------------------------
164
-
165
- try {
166
- console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);
167
-
168
- const stream = await client.chat.completions.create({
169
- model: modelName,
170
- messages,
171
- stream: true,
172
- ...extraBody
173
- });
174
-
175
- console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
176
- recordUsage(modelName, PROVIDERS.OPENROUTER);
177
-
178
- let fullText = '';
179
- for await (const chunk of stream) {
180
- const text = chunk.choices[0]?.delta?.content || '';
181
- if (text) {
182
- fullText += text;
183
- res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
184
- if (res.flush) res.flush();
185
- }
186
- }
187
- return fullText;
188
- } catch (e) {
189
- console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
190
- if (isQuotaError(e)) {
191
- console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
192
- break;
193
- }
194
- }
195
- }
196
- }
197
- throw new Error("OpenRouter/Custom stream failed (All models exhausted)");
198
- }
199
-
200
- async function streamGemma(baseParams, res) {
201
- const { GoogleGenAI } = await import("@google/genai");
202
- const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
203
- const keys = await getKeyPool('gemini');
204
- if (keys.length === 0) throw new Error("No keys for Gemma");
205
-
206
- for (const apiKey of keys) {
207
- const client = new GoogleGenAI({ apiKey });
208
- for (const modelName of models) {
209
- try {
210
- console.log(`[AI] 🚀 Attempting Gemma Model: ${modelName}`);
211
- const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
212
-
213
- let hasStarted = false;
214
- let fullText = "";
215
- for await (const chunk of result) {
216
- if (!hasStarted) {
217
- console.log(`[AI] ✅ Connected to Gemma: ${modelName}`);
218
- recordUsage(modelName, PROVIDERS.GEMMA);
219
- hasStarted = true;
220
- }
221
- if (chunk.text) {
222
- fullText += chunk.text;
223
- res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
224
- if (res.flush) res.flush();
225
- }
226
- }
227
- return fullText;
228
- } catch (e) {
229
- console.warn(`[AI] ⚠️ Gemma ${modelName} Error: ${e.message}`);
230
- if (isQuotaError(e)) continue;
231
- }
232
- }
233
- }
234
- throw new Error("Gemma stream failed");
235
- }
236
-
237
- async function streamContentWithSmartFallback(baseParams, res) {
238
- let hasAudio = false;
239
- const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
240
-
241
- contentsArray.forEach(c => {
242
- if (c && c.parts) {
243
- c.parts.forEach(p => { if (p.inlineData && p.inlineData.mimeType.startsWith('audio/')) hasAudio = true; });
244
- }
245
- });
246
-
247
- if (hasAudio) {
248
- try {
249
- console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
250
- return await streamGemini(baseParams, res);
251
- } catch(e) {
252
- console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
253
- deprioritizeProvider(PROVIDERS.GEMINI);
254
- throw new Error('QUOTA_EXCEEDED_AUDIO');
255
- }
256
- }
257
-
258
- const config = await ConfigModel.findOne({ key: 'main' });
259
- const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
260
- ? config.aiProviderOrder
261
- : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
262
-
263
- const runtimeSet = new Set(runtimeProviderOrder);
264
- if (runtimeProviderOrder.length === 0 || runtimeProviderOrder.length !== configuredOrder.length || !configuredOrder.every(p => runtimeSet.has(p))) {
265
- runtimeProviderOrder = [...configuredOrder];
266
- }
267
-
268
- let finalError = null;
269
- for (const provider of runtimeProviderOrder) {
270
- try {
271
- console.log(`[AI] 👉 Trying Provider: ${provider}...`);
272
- if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
273
- else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
274
- else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
275
- } catch (e) {
276
- console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
277
- finalError = e;
278
- if (isQuotaError(e)) {
279
- console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
280
- deprioritizeProvider(provider);
281
- continue;
282
- }
283
- continue;
284
- }
285
- }
286
- throw finalError || new Error('All streaming models unavailable.');
287
- }
288
-
289
  const checkAIAccess = async (req, res, next) => {
290
  const username = req.headers['x-user-username'];
291
  const role = req.headers['x-user-role'];
@@ -325,21 +64,29 @@ router.get('/stats', checkAIAccess, async (req, res) => {
325
  });
326
 
327
  router.post('/reset-pool', checkAIAccess, (req, res) => {
328
- runtimeProviderOrder = [];
329
- console.log('[AI] 🔄 Provider priority pool reset.');
330
  res.json({ success: true });
331
  });
332
 
333
- // --- PERSISTENT CHAT HISTORY HANDLER ---
334
- // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
 
 
 
 
 
 
 
 
 
 
 
335
  router.post('/chat', checkAIAccess, async (req, res) => {
336
- const { text, audio } = req.body; // Ignore req.body.history for prompt generation
337
-
338
- // Extract headers for context building
339
  const username = req.headers['x-user-username'];
340
  const userRole = req.headers['x-user-role'];
341
  const schoolId = req.headers['x-school-id'];
342
 
 
343
  res.setHeader('Content-Type', 'text/event-stream');
344
  res.setHeader('Cache-Control', 'no-cache');
345
  res.setHeader('Connection', 'keep-alive');
@@ -349,177 +96,86 @@ router.post('/chat', checkAIAccess, async (req, res) => {
349
  const user = await User.findOne({ username });
350
  if (!user) throw new Error('User not found');
351
 
352
- // 1. SAVE USER MSG TO DB
353
  const userMsgText = text || (audio ? '(Audio Message)' : '');
354
  if (userMsgText) {
355
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
356
  }
357
 
358
- // 2. FETCH HISTORY FROM DB (Long-term Memory)
359
- // Retrieve last 30 messages for context
360
- const dbHistory = await ChatHistoryModel.find({ userId: user._id })
361
- .sort({ timestamp: -1 })
362
- .limit(30);
363
 
364
- // Re-order for API (oldest first)
365
- const historyContext = dbHistory.reverse().map(msg => ({
366
- role: msg.role === 'user' ? 'user' : 'model',
367
- parts: [{ text: msg.text }]
368
- }));
369
-
370
- // 3. PREPARE REQUEST
371
- // The last user message is already in DB and retrieved in historyContext.
372
- // We need to separate "history" from "current message" for some APIs,
373
- // but Google/OpenAI handle a list of messages fine.
374
- // However, standard pattern is: History + Current.
375
- // Since we fetched ALL (including current), we just pass historyContext as contents.
376
- // NOTE: If audio is present, we must append it specifically as the "current" part
377
- // because DB only stores text representation for now.
378
 
379
- const fullContents = [...historyContext];
 
 
380
 
381
- // If this request has audio, append it as a new part (since DB load only has text placeholder)
382
- // We replace the last 'user' text message with the audio payload for the AI model
383
- if (audio) {
384
- // Remove the text placeholder we just loaded
385
- if (fullContents.length > 0 && fullContents[fullContents.length - 1].role === 'user') {
386
- fullContents.pop();
387
- }
388
- fullContents.push({
389
- role: 'user',
390
- parts: [{ inlineData: { mimeType: 'audio/webm', data: audio } }]
391
- });
392
  }
393
 
394
- // --- NEW: Inject Context ---
395
- const contextPrompt = await buildUserContext(username, userRole, schoolId);
396
- const baseSystemInstruction = "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。回复支持 Markdown 格式。";
397
- const combinedSystemInstruction = `${baseSystemInstruction}\n${contextPrompt}`;
398
- // ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
399
 
400
- const answerText = await streamContentWithSmartFallback({
401
- contents: fullContents,
402
- config: { systemInstruction: combinedSystemInstruction }
403
- }, res);
404
 
405
- // 4. SAVE AI RESPONSE TO DB
406
- if (answerText) {
407
- await ChatHistoryModel.create({ userId: user._id, role: 'model', text: answerText });
 
 
 
 
408
 
409
- // Signal that text generation is done and TTS is starting
410
- res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
411
- try {
412
- const { GoogleGenAI } = await import("@google/genai");
413
- const keys = await getKeyPool('gemini');
414
- let audioBytes = null;
415
- for (const apiKey of keys) {
416
- try {
417
- const client = new GoogleGenAI({ apiKey });
418
- const ttsResponse = await client.models.generateContent({
419
- model: "gemini-2.5-flash-preview-tts",
420
- contents: [{ parts: [{ text: answerText }] }],
421
- config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
422
- });
423
- audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
424
- if (audioBytes) break;
425
- } catch(e) { if (isQuotaError(e)) continue; break; }
426
- }
427
- if (audioBytes) res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
428
- else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
429
- } catch (ttsError) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
430
  }
431
- res.write('data: [DONE]\n\n'); res.end();
 
 
 
 
432
  } catch (e) {
433
- console.error("[AI Chat Route Error]", e);
434
- res.write(`data: ${JSON.stringify({ error: true, message: e.message })}\n\n`); res.end();
 
435
  }
436
  });
437
 
438
- // STREAMING ASSESSMENT ENDPOINT
439
  router.post('/evaluate', checkAIAccess, async (req, res) => {
440
  const { question, audio, image, images } = req.body;
441
  res.setHeader('Content-Type', 'text/event-stream');
442
  res.setHeader('Cache-Control', 'no-cache');
443
- res.setHeader('Connection', 'keep-alive');
444
  res.flushHeaders();
445
-
446
- try {
447
- res.write(`data: ${JSON.stringify({ status: 'analyzing' })}\n\n`);
448
-
449
- const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];
450
- if (audio) {
451
- evalParts.push({ text: "学生的回答在音频中。" });
452
- evalParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
453
- }
454
-
455
- // Support multiple images
456
- if (images && Array.isArray(images) && images.length > 0) {
457
- evalParts.push({ text: "学生的回答写在以下图片中,请识别所有图片中的文字内容并进行批改:" });
458
- images.forEach(img => {
459
- if(img) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: img } });
460
- });
461
- } else if (image) {
462
- // Legacy single image support
463
- evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
464
- evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
465
- }
466
-
467
- // Force structured markdown output for streaming parsing
468
- evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。
469
- 必须严格按照以下格式输出(不要使用Markdown代码块包裹):
470
-
471
- ## Transcription
472
- (在此处输出识别到的学生回答内容,如果是图片则为识别的文字)
473
-
474
- ## Feedback
475
- (在此处输出简短的鼓励性评语和建议)
476
-
477
- ## Score
478
- (在此处仅输出一个0-100的数字)` });
479
-
480
- // Stream Text
481
- const fullText = await streamContentWithSmartFallback({
482
- // CRITICAL FIX: Pass as array of objects for OpenRouter compatibility
483
- contents: [{ role: 'user', parts: evalParts }],
484
- // NO JSON MODE to allow progressive text streaming
485
- }, res);
486
-
487
- // Extract Feedback for TTS
488
- const feedbackMatch = fullText.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i);
489
- const feedbackText = feedbackMatch ? feedbackMatch[1].trim() : "";
490
-
491
- // Generate TTS if feedback exists
492
- if (feedbackText) {
493
- res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
494
- try {
495
- const { GoogleGenAI } = await import("@google/genai");
496
- const keys = await getKeyPool('gemini');
497
- let feedbackAudio = null;
498
- for (const apiKey of keys) {
499
- try {
500
- const client = new GoogleGenAI({ apiKey });
501
- const ttsResponse = await client.models.generateContent({
502
- model: "gemini-2.5-flash-preview-tts",
503
- contents: [{ parts: [{ text: feedbackText }] }],
504
- config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
505
- });
506
- feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
507
- if (feedbackAudio) break;
508
- } catch(e) { if (isQuotaError(e)) continue; break; }
509
- }
510
- if (feedbackAudio) res.write(`data: ${JSON.stringify({ audio: feedbackAudio })}\n\n`);
511
- else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
512
- } catch (ttsErr) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
513
- }
514
-
515
- res.write('data: [DONE]\n\n');
516
- res.end();
517
-
518
- } catch (e) {
519
- console.error("AI Eval Error:", e);
520
- res.write(`data: ${JSON.stringify({ error: true, message: e.message || "Evaluation failed" })}\n\n`);
521
- res.end();
522
- }
523
  });
524
 
525
  module.exports = router;
 
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
 
 
8
  // Fetch keys from DB + merge with ENV variables
9
  async function getKeyPool(type) {
10
  const config = await ConfigModel.findOne({ key: 'main' });
 
25
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
26
  }
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  const checkAIAccess = async (req, res, next) => {
29
  const username = req.headers['x-user-username'];
30
  const role = req.headers['x-user-role'];
 
64
  });
65
 
66
  router.post('/reset-pool', checkAIAccess, (req, res) => {
 
 
67
  res.json({ success: true });
68
  });
69
 
70
+ function convertHistoryToOpenAI(history) {
71
+ return history.map(msg => ({
72
+ role: msg.role === 'model' ? 'assistant' : 'user',
73
+ content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.text || '')
74
+ }));
75
+ }
76
+
77
+ // --- SSE Protocol Helper ---
78
+ const sendSSE = (res, data) => {
79
+ res.write(`data: ${JSON.stringify(data)}\n\n`);
80
+ };
81
+
82
+ // --- STANDARD CHAT ROUTE (Context Injection Only) ---
83
  router.post('/chat', checkAIAccess, async (req, res) => {
84
+ const { text, audio } = req.body;
 
 
85
  const username = req.headers['x-user-username'];
86
  const userRole = req.headers['x-user-role'];
87
  const schoolId = req.headers['x-school-id'];
88
 
89
+ // SSE Setup
90
  res.setHeader('Content-Type', 'text/event-stream');
91
  res.setHeader('Cache-Control', 'no-cache');
92
  res.setHeader('Connection', 'keep-alive');
 
96
  const user = await User.findOne({ username });
97
  if (!user) throw new Error('User not found');
98
 
99
+ // 1. Save User Message
100
  const userMsgText = text || (audio ? '(Audio Message)' : '');
101
  if (userMsgText) {
102
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
103
  }
104
 
105
+ const config = await ConfigModel.findOne({ key: 'main' });
 
 
 
 
106
 
107
+ // 2. Build Context (The Heavy Lifting)
108
+ const contextPrompt = await buildUserContext(username, userRole, schoolId);
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
+ // Setup OpenAI Client
111
+ const keys = await getKeyPool('openrouter');
112
+ if (keys.length === 0) throw new Error("No API keys available");
113
 
114
+ let modelName = 'qwen/qwen3-coder:free';
115
+ let apiUrl = 'https://openrouter.ai/api/v1';
116
+ if (config?.openRouterModels && config.openRouterModels.length > 0) {
117
+ const m = config.openRouterModels[0];
118
+ modelName = m.id;
119
+ if (m.apiUrl) apiUrl = m.apiUrl;
 
 
 
 
 
120
  }
121
 
122
+ console.log(`🤖 [Standard Chat] ${modelName} @ ${apiUrl}`);
123
+
124
+ const client = new OpenAI({
125
+ baseURL: apiUrl,
126
+ apiKey: keys[0],
127
+ defaultHeaders: { "HTTP-Referer": "https://smart.com" }
128
+ });
129
+
130
+ // 3. Build History
131
+ const dbHistory = await ChatHistoryModel.find({ userId: user._id }).sort({ timestamp: -1 }).limit(10);
132
+ let messages = [
133
+ { role: 'system', content: contextPrompt },
134
+ ...convertHistoryToOpenAI(dbHistory.reverse())
135
+ ];
136
+ if (text) messages.push({ role: 'user', content: text });
137
+
138
+ // 4. Stream Response
139
+ const stream = await client.chat.completions.create({
140
+ model: modelName,
141
+ messages: messages,
142
+ stream: true
143
+ });
144
 
145
+ let finalResponseText = "";
 
 
 
146
 
147
+ for await (const chunk of stream) {
148
+ const delta = chunk.choices[0]?.delta?.content;
149
+ if (delta) {
150
+ finalResponseText += delta;
151
+ sendSSE(res, { type: 'text', content: delta });
152
+ }
153
+ }
154
 
155
+ // 5. Save Final Answer
156
+ if (finalResponseText) {
157
+ await ChatHistoryModel.create({ userId: user._id, role: 'model', text: finalResponseText });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158
  }
159
+ recordUsage('chat-response', 'STANDARD');
160
+
161
+ sendSSE(res, { type: 'done' });
162
+ res.end();
163
+
164
  } catch (e) {
165
+ console.error("[AI Chat Error]", e);
166
+ sendSSE(res, { type: 'error', message: e.message });
167
+ res.end();
168
  }
169
  });
170
 
171
+ // ... (Rest of the file: evaluate route, export)
172
  router.post('/evaluate', checkAIAccess, async (req, res) => {
173
  const { question, audio, image, images } = req.body;
174
  res.setHeader('Content-Type', 'text/event-stream');
175
  res.setHeader('Cache-Control', 'no-cache');
 
176
  res.flushHeaders();
177
+ res.write(`data: ${JSON.stringify({ error: true, message: "Use Gemeni provider for multimodel evaluation" })}\n\n`);
178
+ res.end();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  });
180
 
181
  module.exports = router;
components/ai/ChatPanel.tsx CHANGED
@@ -34,9 +34,8 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
34
  const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
35
  const [isChatProcessing, setIsChatProcessing] = useState(false);
36
  const [isChatRecording, setIsChatRecording] = useState(false);
37
- const [generatingAudioId, setGeneratingAudioId] = useState<string | null>(null);
38
  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
39
-
40
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
41
  const audioChunksRef = useRef<Blob[]>([]);
42
  const audioContextRef = useRef<AudioContext | null>(null);
@@ -68,8 +67,8 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
68
 
69
  // Scroll to bottom
70
  useEffect(() => {
71
- messagesEndRef.current?.scrollIntoView({ behavior: isChatProcessing ? 'auto' : 'smooth', block: 'end' });
72
- }, [messages, isChatProcessing, generatingAudioId]);
73
 
74
  const stopPlayback = () => {
75
  if (currentSourceRef.current) {
@@ -92,30 +91,6 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
92
  window.speechSynthesis.speak(utterance);
93
  };
94
 
95
- const playPCMAudio = async (base64Audio: string) => {
96
- stopPlayback();
97
- try {
98
- if (!audioContextRef.current) {
99
- // @ts-ignore
100
- const AudioCtor = window.AudioContext || window.webkitAudioContext;
101
- audioContextRef.current = new AudioCtor();
102
- }
103
- if (audioContextRef.current?.state === 'suspended') {
104
- await audioContextRef.current.resume();
105
- }
106
- const bytes = base64ToUint8Array(base64Audio);
107
- const audioBuffer = decodePCM(bytes, audioContextRef.current!);
108
- const source = audioContextRef.current!.createBufferSource();
109
- source.buffer = audioBuffer;
110
- source.connect(audioContextRef.current!.destination);
111
- source.start(0);
112
- currentSourceRef.current = source;
113
- } catch (e) {
114
- console.error("Audio playback error", e);
115
- setToast({ show: true, message: '语音播放失败', type: 'error' });
116
- }
117
- };
118
-
119
  const startRecording = async () => {
120
  try {
121
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
@@ -153,7 +128,6 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
153
  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
154
  if (!text && !audioBase64) return;
155
  stopPlayback();
156
- setGeneratingAudioId(null);
157
 
158
  const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
159
 
@@ -164,12 +138,13 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
164
  isAudioMessage: !!audioBase64,
165
  timestamp: Date.now()
166
  };
 
167
  const newAiMsgId = (Date.now() + 1).toString();
168
  const newAiMsg: AIChatMessage = {
169
  id: newAiMsgId,
170
  role: 'model',
171
  text: '',
172
- timestamp: Date.now()
173
  };
174
 
175
  setMessages(prev => [...prev, newUserMsg, newAiMsg]);
@@ -199,6 +174,7 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
199
  while (true) {
200
  const { done, value } = await reader.read();
201
  if (done) break;
 
202
  buffer += decoder.decode(value, { stream: true });
203
  const parts = buffer.split('\n\n');
204
  buffer = parts.pop() || '';
@@ -206,41 +182,26 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
206
  for (const line of parts) {
207
  if (line.startsWith('data: ')) {
208
  const jsonStr = line.replace('data: ', '').trim();
209
- if (jsonStr === '[DONE]') break;
210
  try {
211
  const data = JSON.parse(jsonStr);
212
-
213
- if (data.status === 'tts') {
214
- setGeneratingAudioId(newAiMsgId);
215
- }
216
-
217
- if (data.text) {
218
- aiTextAccumulated += data.text;
219
  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated } : m));
 
 
 
220
  }
221
- if (data.audio) {
222
- setGeneratingAudioId(null);
223
- setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, audio: data.audio } : m));
224
- playPCMAudio(data.audio);
225
- }
226
- if (data.ttsSkipped) {
227
- setGeneratingAudioId(null);
228
- setToast({ show: true, message: 'AI 语音额度已用尽,已切换至本地语音播报', type: 'error' });
229
- speakWithBrowser(aiTextAccumulated);
230
- }
231
- if (data.error) {
232
- setGeneratingAudioId(null);
233
- setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message || '未知错误'}` } : m));
234
  }
235
  } catch (e) {}
236
  }
237
  }
238
  }
239
  } catch (error: any) {
240
- setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误,请重试。' } : m));
241
  } finally {
242
  setIsChatProcessing(false);
243
- setGeneratingAudioId(null);
244
  }
245
  };
246
 
@@ -269,19 +230,12 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
269
  <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
270
  {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
271
  </div>
272
- <div className={`max-w-[80%] p-3 rounded-2xl text-sm overflow-hidden ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none shadow-sm'}`}>
273
- <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
274
- {msg.role === 'model' && !msg.text && isChatProcessing && <div className="flex items-center gap-2 text-gray-400 py-1"><Loader2 className="animate-spin" size={14}/><span className="text-xs">思考中...</span></div>}
275
-
276
- {/* Audio Generating Indicator */}
277
- {msg.id === generatingAudioId && (
278
- <div className="flex items-center gap-2 text-purple-600 py-2 animate-pulse mt-1 border-t border-purple-100 pt-2">
279
- <Loader2 className="animate-spin" size={14}/>
280
- <span className="text-xs font-bold">正在生成语音回复...</span>
281
- </div>
282
- )}
283
-
284
- {msg.audio ? (<button onClick={() => playPCMAudio(msg.audio!)} className="mt-2 flex items-center gap-2 text-xs bg-blue-50 text-blue-600 px-3 py-1.5 rounded-full hover:bg-blue-100 border border-blue-100 transition-colors w-fit"><Volume2 size={14}/> 播放语音 (AI)</button>) : (msg.role === 'model' && msg.text && !isChatProcessing && !generatingAudioId) && (<button onClick={() => speakWithBrowser(msg.text!)} className="mt-2 flex items-center gap-2 text-xs bg-gray-50 text-gray-600 px-3 py-1.5 rounded-full hover:bg-gray-100 border border-gray-200 transition-colors w-fit"><Volume2 size={14}/> 朗读 (本地)</button>)}
285
  </div>
286
  </div>
287
  ))}
 
34
  const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
35
  const [isChatProcessing, setIsChatProcessing] = useState(false);
36
  const [isChatRecording, setIsChatRecording] = useState(false);
 
37
  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
38
+
39
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
40
  const audioChunksRef = useRef<Blob[]>([]);
41
  const audioContextRef = useRef<AudioContext | null>(null);
 
67
 
68
  // Scroll to bottom
69
  useEffect(() => {
70
+ messagesEndRef.current?.scrollIntoView({ behavior: 'smooth', block: 'end' });
71
+ }, [messages, isChatProcessing]);
72
 
73
  const stopPlayback = () => {
74
  if (currentSourceRef.current) {
 
91
  window.speechSynthesis.speak(utterance);
92
  };
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  const startRecording = async () => {
95
  try {
96
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 
128
  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
129
  if (!text && !audioBase64) return;
130
  stopPlayback();
 
131
 
132
  const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
133
 
 
138
  isAudioMessage: !!audioBase64,
139
  timestamp: Date.now()
140
  };
141
+
142
  const newAiMsgId = (Date.now() + 1).toString();
143
  const newAiMsg: AIChatMessage = {
144
  id: newAiMsgId,
145
  role: 'model',
146
  text: '',
147
+ timestamp: Date.now()
148
  };
149
 
150
  setMessages(prev => [...prev, newUserMsg, newAiMsg]);
 
174
  while (true) {
175
  const { done, value } = await reader.read();
176
  if (done) break;
177
+
178
  buffer += decoder.decode(value, { stream: true });
179
  const parts = buffer.split('\n\n');
180
  buffer = parts.pop() || '';
 
182
  for (const line of parts) {
183
  if (line.startsWith('data: ')) {
184
  const jsonStr = line.replace('data: ', '').trim();
 
185
  try {
186
  const data = JSON.parse(jsonStr);
187
+ if (data.type === 'text') {
188
+ aiTextAccumulated += data.content;
 
 
 
 
 
189
  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated } : m));
190
+ }
191
+ else if (data.type === 'error') {
192
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message}` } : m));
193
  }
194
+ else if (data.type === 'done') {
195
+ break;
 
 
 
 
 
 
 
 
 
 
 
196
  }
197
  } catch (e) {}
198
  }
199
  }
200
  }
201
  } catch (error: any) {
202
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误。' } : m));
203
  } finally {
204
  setIsChatProcessing(false);
 
205
  }
206
  };
207
 
 
230
  <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
231
  {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
232
  </div>
233
+ <div className={`max-w-[85%] flex flex-col items-start ${msg.role === 'user' ? 'items-end' : ''}`}>
234
+ <div className={`p-3 rounded-2xl text-sm overflow-hidden shadow-sm ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none'}`}>
235
+ <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
236
+ {msg.role === 'model' && !msg.text && isChatProcessing && <div className="flex items-center gap-2 text-gray-400 py-1"><Loader2 className="animate-spin" size={14}/><span className="text-xs">思考中...</span></div>}
237
+ {(msg.role === 'model' && msg.text && !isChatProcessing) && (<button onClick={() => speakWithBrowser(msg.text!)} className="mt-2 flex items-center gap-2 text-xs bg-gray-50 text-gray-600 px-3 py-1.5 rounded-full hover:bg-gray-100 border border-gray-200 transition-colors w-fit"><Volume2 size={14}/> 朗读</button>)}
238
+ </div>
 
 
 
 
 
 
 
239
  </div>
240
  </div>
241
  ))}