dvc890 committed on
Commit
67a8b0c
·
verified ·
1 Parent(s): 9d69f4f

Upload 63 files

Browse files
Files changed (4) hide show
  1. ai-context.js +242 -30
  2. ai-routes.js +414 -150
  3. components/ai/ChatPanel.tsx +60 -64
  4. types.ts +0 -1
ai-context.js CHANGED
@@ -1,5 +1,8 @@
1
 
2
- const { User, Student, School } = require('./models');
 
 
 
3
 
4
  /**
5
  * 格式化当前日期
@@ -11,50 +14,259 @@ const getCurrentDateInfo = () => {
11
  };
12
 
13
  /**
14
- * 构建用户上下文 - Agentic版 (精简)
15
- * 既然 AI 现在有了 query_database 工具,我们不需要把所有数据都塞进 System Prompt。
16
- * 我们只需要告诉它:“你是谁”,“用户是谁”,以及“你有查库的能力”。
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  */
18
  async function buildUserContext(username, role, schoolId) {
19
  try {
20
  const dateStr = getCurrentDateInfo();
21
- let userProfile = "";
22
 
23
- // 基础用户信息
24
  if (role === 'STUDENT') {
25
- const student = await Student.findOne({
26
- $or: [{ studentNo: username }, { name: username }],
27
- schoolId
28
- });
29
- if (student) {
30
- userProfile = `用户是学生:${student.name} (班级: ${student.className}, 学号: ${student.studentNo})`;
31
- }
32
  } else if (role === 'TEACHER') {
33
- const user = await User.findOne({ username, schoolId });
34
- if (user) {
35
- userProfile = `用户是教师:${user.trueName || username} (任教: ${user.teachingSubject || '无'}, 班主任: ${user.homeroomClass || '否'})`;
36
- }
37
- } else {
38
- userProfile = `用户是管理员/校长。`;
39
  }
40
 
 
41
  return `
42
  ---
43
- 系统信息】
44
- 当前时间: ${dateStr}
45
- ${userProfile}
46
-
47
- 【能力说明】
48
- 1. 你是一个拥有“本地数据库查询权限”的智能助教。
49
- 2. 工具执行环境:**工具是在用户的本地服务器上执行的**。可以通过 Function Calling 获取内网数据,**不需要**公网访问权限
50
- 3. 如果用户问“张三考了多少分或“我们班谁考勤不好”,请**务必**大胆调用 \`query_database\` 工具
51
- 4. 不要回答“我无法访问数据库”因为工具会帮你完成访问并将结果传回给你
52
- 5. 如果查询结果返 JSON 请将其整理为通俗易懂的自然言回答用户
 
53
  ---
54
  `;
55
  } catch (e) {
56
  console.error("Context build failed:", e);
57
- return "";
58
  }
59
  }
60
 
 
1
 
2
+ const {
3
+ User, Student, Score, AttendanceModel, ClassModel,
4
+ LeaveRequestModel, TodoModel, School, Course
5
+ } = require('./models');
6
 
7
  /**
8
  * 格式化当前日期
 
14
  };
15
 
16
  /**
17
+ * 构建学生画像上下文 (学生视角)
18
+ */
19
/**
 * Build the "student profile" context block (the student's own view).
 * Looks the student up by student number or display name within the school,
 * then gathers recent scores, attendance aggregates and open todos.
 * Returns a Markdown snippet that is injected into the system prompt.
 *
 * @param {string} username - student number or name from the request headers
 * @param {string} schoolId - tenant scope for every query
 * @returns {Promise<string>} Markdown context, or a fallback line when no
 *          matching student record exists
 */
async function buildStudentContext(username, schoolId) {
  const student = await Student.findOne({
    $or: [{ studentNo: username }, { name: username }],
    schoolId
  });

  if (!student) return "无法找到该学生的详细档案。";

  // The three lookups below are independent of each other — run them in
  // parallel instead of the original sequential awaits.
  const [recentScores, attendanceStats, user] = await Promise.all([
    // Last 10 scores (newest first) so the AI can see a trend.
    Score.find({ studentNo: student.studentNo, schoolId }).sort({ _id: -1 }).limit(10),
    // Attendance counts grouped by status (e.g. Absent / Leave).
    AttendanceModel.aggregate([
      { $match: { studentId: student._id.toString() } },
      { $group: { _id: "$status", count: { $sum: 1 } } }
    ]),
    // Account record — only needed to locate the student's todo list.
    User.findOne({ username, schoolId })
  ]);

  const absentCount = attendanceStats.find(a => a._id === 'Absent')?.count || 0;
  const leaveCount = attendanceStats.find(a => a._id === 'Leave')?.count || 0;

  // Open todo items belong to the User account (if one exists).
  const todos = user ? await TodoModel.find({ userId: user._id, isCompleted: false }).limit(5) : [];

  let prompt = `
### 当前用户身份:学生 (个人视图)
- **姓名**: ${student.name}
- **班级**: ${student.className}
- **学号**: ${student.studentNo}
- **积分(小红花)**: ${student.flowerBalance ?? 0} 🌺

### 个人学习数据
`;

  if (recentScores.length > 0) {
    prompt += `- **近期成绩历史**: ${recentScores.map(s => `${s.courseName}: ${s.score} (${s.type || '考试'})`).join('; ')}\n`;
    // Simple average. Number() guards against scores stored as strings,
    // where `acc + s.score` would concatenate instead of add.
    // TODO(review): confirm the Score.score type in the schema.
    const avg = (recentScores.reduce((acc, s) => acc + Number(s.score), 0) / recentScores.length).toFixed(1);
    prompt += `- **近期平均分**: ${avg}\n`;
  } else {
    prompt += `- **近期成绩**: 暂无记录\n`;
  }

  if (absentCount > 0 || leaveCount > 0) {
    prompt += `- **考勤异常**: 本学期缺勤 ${absentCount} 次,请假 ${leaveCount} 次。\n`;
  } else {
    prompt += `- **考勤状况}**`.length && false ? "" : `- **考勤状况**: 全勤,表现极佳。\n`;
  }

  if (todos.length > 0) {
    prompt += `- **未完成待办**: ${todos.map(t => t.content).join('; ')}\n`;
  }

  return prompt;
}
76
+
77
+ /**
78
+ * 构建教师画像上下文 (增强版 - 智能区分班主任与科任视角)
79
+ */
80
/**
 * Build the "teacher profile" context block.
 * Two visibility scopes are distinguished per class:
 *   - homeroom teacher: sees every subject's scores for the class
 *   - subject teacher: sees only the subjects they teach there
 * Returns a Markdown snippet listing each class roster with attendance,
 * flower balance and each student's latest score per visible subject.
 *
 * @param {string} username - teacher login name from the request headers
 * @param {string} schoolId - tenant scope for every query
 * @returns {Promise<string>} Markdown context, or a fallback line when the
 *          teacher record cannot be found
 */
async function buildTeacherContext(username, schoolId) {
  const user = await User.findOne({ username, schoolId });
  if (!user) return "无法找到该教师档案。";

  // 1. Determine scope: homeroom class (if any) plus every class where the
  //    teacher has a course (matched by id or by display name).
  const homeroomClassName = user.homeroomClass;
  const teachingCourses = await Course.find({
    $or: [{ teacherId: user._id }, { teacherName: user.trueName || user.username }],
    schoolId
  });

  // className -> Set of subjects taught there, e.g. "三年级(2)班" -> {"数学","科学"}
  const teachingMap = {};
  teachingCourses.forEach(c => {
    if (!teachingMap[c.className]) teachingMap[c.className] = new Set();
    teachingMap[c.className].add(c.courseName);
  });

  // Union of taught classes and the homeroom class.
  const allClasses = new Set(Object.keys(teachingMap));
  if (homeroomClassName) allClasses.add(homeroomClassName);

  if (allClasses.size === 0) {
    return `### 当前用户身份:教师\n- **姓名**: ${user.trueName || username}\n- **状态**: 暂未绑定任何班级或课程数据。`;
  }

  let prompt = `
### 当前用户身份:教师
- **姓名**: ${user.trueName || username}
- **负责班级**: ${Array.from(allClasses).join(', ')}
`;

  // 2. Build detailed data for every related class.
  for (const className of allClasses) {
    const isHomeroom = className === homeroomClassName;
    const subjectsTaught = teachingMap[className] ? Array.from(teachingMap[className]) : [];

    prompt += `\n#### 🏫 班级: ${className} (${isHomeroom ? '我是班主任' : '我是任课老师'})\n`;
    if (!isHomeroom) {
      prompt += `(非班主任视角:仅展示我任教的科目 [${subjectsTaught.join(', ')}] 的数据)\n`;
    }

    // 2.1 Class roster.
    const students = await Student.find({ className, schoolId });
    if (students.length === 0) {
      prompt += `- 暂无学生数据\n`;
      continue;
    }

    const studentNos = students.map(s => s.studentNo);
    const studentIds = students.map(s => s._id.toString());

    // 2.2 + 2.3 Attendance aggregate and score fetch are independent —
    // run them in parallel (the original awaited them sequentially).
    const [attendanceRaw, allScores] = await Promise.all([
      AttendanceModel.aggregate([
        { $match: { studentId: { $in: studentIds }, status: { $in: ['Absent', 'Leave'] } } },
        { $group: { _id: "$studentId", absent: { $sum: { $cond: [{ $eq: ["$status", "Absent"] }, 1, 0] } }, leave: { $sum: { $cond: [{ $eq: ["$status", "Leave"] }, 1, 0] } } } }
      ]),
      // One query for the whole class, filtered in memory afterwards —
      // cheaper than issuing one query per student.
      Score.find({ schoolId, studentNo: { $in: studentNos } }).sort({ _id: -1 }) // newest first
    ]);

    const attendanceMap = {};
    attendanceRaw.forEach(a => attendanceMap[a._id] = a);

    // Build one summary line per student.
    const studentDetails = students.map(s => {
      const att = attendanceMap[s._id.toString()] || { absent: 0, leave: 0 };

      let myScores = allScores.filter(sc => sc.studentNo === s.studentNo);

      // Visibility rule: non-homeroom teachers only see their own subjects.
      if (!isHomeroom) {
        myScores = myScores.filter(sc => subjectsTaught.includes(sc.courseName));
      }

      // Keep only the latest score per subject (list is newest-first, so the
      // first occurrence wins).
      const latestSubjectScores = {};
      myScores.forEach(sc => {
        if (!latestSubjectScores[sc.courseName]) {
          latestSubjectScores[sc.courseName] = sc;
        }
      });
      const finalScores = Object.values(latestSubjectScores);

      const scoreStr = finalScores.length > 0
        ? finalScores.map(sc => `${sc.courseName}:${sc.score}`).join(', ')
        : "无相关成绩";

      // Flag students with any absence or a failing (<60) latest score.
      const hasIssue = att.absent > 0 || finalScores.some(sc => sc.score < 60);
      const flag = hasIssue ? "⚠️" : "";

      return `- ${flag} **${s.name}**: 考勤[缺${att.absent}/假${att.leave}], 小红花:${s.flowerBalance}, 最新成绩:[${scoreStr}]`;
    });

    // No truncation — very large classes rely on the model's context window.
    prompt += studentDetails.join('\n') + '\n';

    // 2.4 Homeroom extras: pending leave approvals.
    if (isHomeroom) {
      const pendingLeaves = await LeaveRequestModel.countDocuments({ className, schoolId, status: 'Pending' });
      if (pendingLeaves > 0) {
        prompt += `> 🔴 班务提醒: 有 ${pendingLeaves} 条请假申请待审批。\n`;
      }
    }
  }

  return prompt;
}
202
+
203
+ /**
204
+ * 构建管理员/校长画像上下文
205
+ */
206
/**
 * Build the admin/principal context block.
 * Principals get school-wide stats: headcounts, today's absences and a
 * sampled average over the 100 most recent scores. Super admins only get
 * the identity line.
 *
 * @param {string} role - 'PRINCIPAL' or any admin role
 * @param {string} schoolId - required for the principal's school stats
 * @returns {Promise<string>} Markdown context snippet
 */
async function buildAdminContext(role, schoolId) {
  let prompt = `### 当前用户身份:${role === 'PRINCIPAL' ? '校长' : '超级管理员'}\n`;

  if (role === 'PRINCIPAL' && schoolId) {
    // NOTE(review): toISOString() yields the UTC calendar date — for a UTC+8
    // school this is "yesterday" before 08:00 local time. Confirm how the
    // attendance `date` field is stored before changing.
    const today = new Date().toISOString().split('T')[0];

    // All five queries are independent — run them in parallel
    // (the original awaited them one after another).
    const [school, totalStudents, totalTeachers, absences, recentScores] = await Promise.all([
      School.findById(schoolId),
      Student.countDocuments({ schoolId }),
      User.countDocuments({ schoolId, role: 'TEACHER' }),
      // Today's detailed absence/leave roster.
      AttendanceModel.find({ schoolId, date: today, status: { $in: ['Absent', 'Leave'] } }),
      // Sample of the most recent 100 scores for a school-wide average.
      Score.find({ schoolId }).sort({ _id: -1 }).limit(100)
    ]);

    const absentNames = absences.map(a => `${a.studentName}(${a.className})`).join(', ');

    let avgScore = 0;
    if (recentScores.length) avgScore = (recentScores.reduce((a, b) => a + b.score, 0) / recentScores.length).toFixed(1);

    prompt += `- **学校**: ${school ? school.name : '未知'}\n`;
    prompt += `- **宏观数据**: 教师 ${totalTeachers} 人,学生 ${totalStudents} 人,近期全校抽样平均分 ${avgScore}。\n`;
    prompt += `- **今日出勤**: 缺勤/请假 ${absences.length} 人。名单: ${absentNames || '无'}。\n`;
  }

  return prompt;
}
231
+
232
+ /**
233
+ * 主入口:构建用户上下文 Prompt
234
+ * @param {string} username - 请求头中的用户名
235
+ * @param {string} role - 请求头中的角色
236
+ * @param {string} schoolId - 请求头中的学校ID
237
  */
238
/**
 * Entry point: assemble the context fragment for the current user.
 * Dispatches to the role-specific builder, then wraps the result with the
 * current time and the AI behaviour rules. Degrades to "" on any failure so
 * context building can never break the main chat flow.
 *
 * @param {string} username - username from the request headers
 * @param {string} role - role from the request headers
 * @param {string} schoolId - school id from the request headers
 * @returns {Promise<string>} system-instruction fragment (or "" on error)
 */
async function buildUserContext(username, role, schoolId) {
  try {
    const nowStr = getCurrentDateInfo();

    let roleContext = "";
    switch (role) {
      case 'STUDENT':
        roleContext = await buildStudentContext(username, schoolId);
        break;
      case 'TEACHER':
        roleContext = await buildTeacherContext(username, schoolId);
        break;
      case 'ADMIN':
      case 'PRINCIPAL':
        roleContext = await buildAdminContext(role, schoolId);
        break;
    }

    // Assemble the final system-instruction fragment.
    return `
---
上下文注入信息 (Context Injection) - 绝密
当前系统时间: ${nowStr}
以下是当前用户的核心数据和其管辖范围内的详细档案。
${roleContext}

【AI 行为准则】
1. 你拥有上述所有数据的“上帝视角”
2. **班主任视角**: 当用户是班主任时,你通过上下文已知晓全班所有科目的成绩。如果问“王五偏科吗”,请对比他的各科成绩作答
3. **任课老师视角**: 当用户非班主任时,你只能看到他所教科目的成绩。如果问“李华其他课怎么样”,请诚实回答“我只能看到您任教科目的数据,无法评价其他科目”
4. 回答要具体。不要说“他成绩一般”,要说“他最近学考了60分考了85分”
5. 数据格式说明: [科目:分数] 代表该科目最近一次录入的成绩。
---
`;
  } catch (e) {
    console.error("Context build failed:", e);
    return ""; // degrade to empty context rather than failing the request
  }
}
272
 
ai-routes.js CHANGED
@@ -4,8 +4,8 @@ const router = express.Router();
4
  const OpenAI = require('openai');
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
7
- const { mongoTools, getOpenAITools, executeMongoTool } = require('./ai-tools');
8
 
 
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
@@ -26,6 +26,266 @@ async function recordUsage(model, provider) {
26
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
27
  }
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  const checkAIAccess = async (req, res, next) => {
30
  const username = req.headers['x-user-username'];
31
  const role = req.headers['x-user-role'];
@@ -65,30 +325,21 @@ router.get('/stats', checkAIAccess, async (req, res) => {
65
  });
66
 
67
  router.post('/reset-pool', checkAIAccess, (req, res) => {
 
 
68
  res.json({ success: true });
69
  });
70
 
71
- function convertHistoryToOpenAI(history) {
72
- return history.map(msg => ({
73
- role: msg.role === 'model' ? 'assistant' : 'user',
74
- content: msg.parts ? msg.parts.map(p => p.text).join('') : (msg.text || '')
75
- }));
76
- }
77
-
78
- // --- SSE Protocol Helper ---
79
- // Sends structured events to client: { type: 'text'|'thought'|'done'|'error', content?: string }
80
- const sendSSE = (res, data) => {
81
- res.write(`data: ${JSON.stringify(data)}\n\n`);
82
- };
83
-
84
- // --- REAL STREAMING CHAT ROUTE ---
85
  router.post('/chat', checkAIAccess, async (req, res) => {
86
- const { text, audio } = req.body;
 
 
87
  const username = req.headers['x-user-username'];
88
  const userRole = req.headers['x-user-role'];
89
  const schoolId = req.headers['x-school-id'];
90
 
91
- // SSE Setup
92
  res.setHeader('Content-Type', 'text/event-stream');
93
  res.setHeader('Cache-Control', 'no-cache');
94
  res.setHeader('Connection', 'keep-alive');
@@ -98,164 +349,177 @@ router.post('/chat', checkAIAccess, async (req, res) => {
98
  const user = await User.findOne({ username });
99
  if (!user) throw new Error('User not found');
100
 
101
- // 1. Save User Message
102
  const userMsgText = text || (audio ? '(Audio Message)' : '');
103
  if (userMsgText) {
104
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
105
  }
106
 
107
- const config = await ConfigModel.findOne({ key: 'main' });
108
- const contextPrompt = await buildUserContext(username, userRole, schoolId);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
 
110
- // Setup OpenAI Client (Used for both Doubao and OpenRouter)
111
- const keys = await getKeyPool('openrouter');
112
- if (keys.length === 0) throw new Error("No API keys available");
113
 
114
- let modelName = 'qwen/qwen3-coder:free';
115
- let apiUrl = 'https://openrouter.ai/api/v1';
116
- if (config?.openRouterModels && config.openRouterModels.length > 0) {
117
- const m = config.openRouterModels[0];
118
- modelName = m.id;
119
- if (m.apiUrl) apiUrl = m.apiUrl;
 
 
 
 
 
120
  }
121
 
122
- console.log(`🤖 [Streaming Agent] ${modelName} @ ${apiUrl}`);
123
-
124
- const client = new OpenAI({
125
- baseURL: apiUrl,
126
- apiKey: keys[0],
127
- defaultHeaders: { "HTTP-Referer": "https://smart.com" }
128
- });
129
 
130
- // 2. Build History
131
- const dbHistory = await ChatHistoryModel.find({ userId: user._id }).sort({ timestamp: -1 }).limit(10);
132
- let messages = [
133
- { role: 'system', content: `${contextPrompt}\n\n重要:如果用户查询具体数据,请使用 query_database 工具。` },
134
- ...convertHistoryToOpenAI(dbHistory.reverse())
135
- ];
136
- if (text) messages.push({ role: 'user', content: text });
137
-
138
- // 3. Recursive Agent Loop
139
- let finalResponseText = "";
140
- let turnCount = 0;
141
- const MAX_TURNS = 5;
142
-
143
- // Loop handles: LLM -> Tool Call -> Tool Result -> LLM -> Answer
144
- while (turnCount < MAX_TURNS) {
145
-
146
- // Start Stream for this turn
147
- const stream = await client.chat.completions.create({
148
- model: modelName,
149
- messages: messages,
150
- tools: getOpenAITools(),
151
- tool_choice: "auto",
152
- stream: true // Enable REAL streaming
153
- });
154
 
155
- let toolCallBuffer = []; // To accumulate tool call chunks
156
- let currentContent = "";
 
157
 
158
- for await (const chunk of stream) {
159
- const delta = chunk.choices[0]?.delta;
160
-
161
- // A. Handle Text Content
162
- if (delta?.content) {
163
- currentContent += delta.content;
164
- finalResponseText += delta.content;
165
- // Directly stream text to client
166
- sendSSE(res, { type: 'text', content: delta.content });
 
 
 
 
 
 
 
 
167
  }
 
 
 
 
 
 
 
 
 
 
168
 
169
- // B. Handle Tool Calls (Accumulate args)
170
- if (delta?.tool_calls) {
171
- const toolCalls = delta.tool_calls;
172
- for (const toolCall of toolCalls) {
173
- const index = toolCall.index;
174
- if (!toolCallBuffer[index]) {
175
- toolCallBuffer[index] = {
176
- id: toolCall.id,
177
- name: toolCall.function?.name || "",
178
- arguments: ""
179
- };
180
- }
181
- if (toolCall.function?.name) toolCallBuffer[index].name = toolCall.function.name;
182
- if (toolCall.function?.arguments) toolCallBuffer[index].arguments += toolCall.function.arguments;
183
- }
184
- }
185
- }
186
 
187
- // End of stream for this turn.
188
- // Check if we have tool calls to execute.
189
- if (toolCallBuffer.length > 0) {
190
- // Add the assistant's "intent" message to history
191
- // Note: We reconstruct the message object as if it wasn't streamed
192
- messages.push({
193
- role: 'assistant',
194
- content: currentContent || null, // Content might be null if only calling tools
195
- tool_calls: toolCallBuffer.map(tc => ({
196
- id: tc.id || `call_${Date.now()}`,
197
- type: 'function',
198
- function: { name: tc.name, arguments: tc.arguments }
199
- }))
200
- });
201
 
202
- // Notify Frontend: Tool Execution Started
203
- sendSSE(res, { type: 'thought', content: `🤔 正在调用工具: ${toolCallBuffer.map(t => t.name).join(', ')} ...` });
204
-
205
- // Execute Tools
206
- for (const toolCall of toolCallBuffer) {
207
- const toolResult = await executeMongoTool({
208
- name: toolCall.name,
209
- args: undefined,
210
- arguments: toolCall.arguments
211
- }, user, userRole, schoolId);
212
-
213
- // Add result to history
214
- messages.push({
215
- role: "tool",
216
- tool_call_id: toolCall.id || `call_${Date.now()}`,
217
- content: JSON.stringify(toolResult)
218
- });
219
-
220
- // Notify Frontend: Tool Result
221
- const shortResult = JSON.stringify(toolResult).substring(0, 50) + "...";
222
- sendSSE(res, { type: 'thought', content: `✅ 工具执行完成: ${shortResult}` });
223
- }
224
-
225
- // Continue loop to let LLM generate answer based on tool result
226
- turnCount++;
227
- } else {
228
- // No tool calls, we are done.
229
- break;
230
- }
231
  }
232
-
233
- // 4. Save Final Answer (Prevent Empty Text Error)
234
- if (finalResponseText && finalResponseText.trim().length > 0) {
235
- await ChatHistoryModel.create({ userId: user._id, role: 'model', text: finalResponseText });
 
 
 
 
 
 
 
236
  }
 
 
 
 
 
 
 
 
 
 
237
 
238
- recordUsage('agent-response', 'AGENT');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
- // 5. Send Done Signal
241
- sendSSE(res, { type: 'done' });
242
  res.end();
243
 
244
  } catch (e) {
245
- console.error("[AI Chat Error]", e);
246
- sendSSE(res, { type: 'error', message: e.message });
247
  res.end();
248
  }
249
  });
250
 
251
- // ... (Rest of the file: evaluate route, export)
252
- router.post('/evaluate', checkAIAccess, async (req, res) => {
253
- const { question, audio, image, images } = req.body;
254
- res.setHeader('Content-Type', 'text/event-stream');
255
- res.setHeader('Cache-Control', 'no-cache');
256
- res.flushHeaders();
257
- res.write(`data: ${JSON.stringify({ error: true, message: "Use Gemeni provider for multimodel evaluation" })}\n\n`);
258
- res.end();
259
- });
260
-
261
  module.exports = router;
 
4
  const OpenAI = require('openai');
5
  const { ConfigModel, User, AIUsageModel, ChatHistoryModel } = require('./models');
6
  const { buildUserContext } = require('./ai-context');
 
7
 
8
+ // ... (Key Management, Usage Tracking, Helpers remain same)
9
  // Fetch keys from DB + merge with ENV variables
10
  async function getKeyPool(type) {
11
  const config = await ConfigModel.findOne({ key: 'main' });
 
26
  } catch (e) { console.error("Failed to record AI usage stats:", e); }
27
  }
28
 
29
// Promise-based sleep used for retry backoff.
const wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));

/**
 * Invoke an AI call with retry and exponential backoff (1s, 2s, ...).
 * Client errors (400/401/403) are never retried. Fix: the original resolved
 * to `undefined` without ever calling `aiModelCall` when `retries <= 0`;
 * we now always make at least one attempt.
 *
 * @param {() => Promise<any>} aiModelCall - zero-arg async call to attempt
 * @param {number} [retries=1] - total number of attempts (clamped to >= 1)
 * @returns {Promise<any>} resolved value of the first successful attempt
 * @throws last error when all attempts fail, or immediately on 400/401/403
 */
async function callAIWithRetry(aiModelCall, retries = 1) {
  const attempts = Math.max(1, retries);
  for (let i = 0; i < attempts; i++) {
    try {
      return await aiModelCall();
    } catch (e) {
      // Non-retryable client errors: bad request / auth / forbidden.
      if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
      if (i < attempts - 1) {
        await wait(1000 * Math.pow(2, i));
        continue;
      }
      throw e;
    }
  }
}
40
+
41
/**
 * Translate a Gemini-style request (config.systemInstruction + contents with
 * role/parts) into an OpenAI chat.completions `messages` array.
 * Text parts become text entries; inline images become data-URI image_url
 * entries; any other inlineData (e.g. audio) is dropped. A message whose
 * parts reduce to a single text entry collapses to a plain string content.
 */
function convertGeminiToOpenAI(baseParams) {
  const messages = [];

  const sys = baseParams.config?.systemInstruction;
  if (sys) messages.push({ role: 'system', content: sys });

  const raw = baseParams.contents;
  const contentList = !raw ? [] : (Array.isArray(raw) ? raw : [raw]);

  for (const entry of contentList) {
    const role = (entry.role === 'model' || entry.role === 'assistant') ? 'assistant' : 'user';

    const converted = [];
    for (const part of entry.parts || []) {
      if (part.text) {
        converted.push({ type: 'text', text: part.text });
      } else if (part.inlineData && part.inlineData.mimeType.startsWith('image/')) {
        converted.push({
          type: 'image_url',
          image_url: { url: `data:${part.inlineData.mimeType};base64,${part.inlineData.data}` }
        });
      }
    }

    if (converted.length === 0) continue; // nothing representable for OpenAI

    const isBareText = converted.length === 1 && converted[0].type === 'text';
    messages.push(isBareText
      ? { role, content: converted[0].text }
      : { role, content: converted });
  }

  return messages;
}
73
+
74
// Provider identifiers and the fallback model list used when no OpenRouter
// models are configured in the DB.
const PROVIDERS = { GEMINI: 'GEMINI', OPENROUTER: 'OPENROUTER', GEMMA: 'GEMMA' };
const DEFAULT_OPENROUTER_MODELS = ['qwen/qwen3-coder:free', 'openai/gpt-oss-120b:free', 'qwen/qwen3-235b-a22b:free', 'tngtech/deepseek-r1t-chimera:free'];

// Runtime override logic
let runtimeProviderOrder = [];

/**
 * Push a failing provider to the back of the runtime priority queue.
 * No-op when the provider is already in last position.
 */
function deprioritizeProvider(providerName) {
  const last = runtimeProviderOrder[runtimeProviderOrder.length - 1];
  if (runtimeProviderOrder.length > 0 && last === providerName) return;

  console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
  const remaining = runtimeProviderOrder.filter(p => p !== providerName);
  runtimeProviderOrder = [...remaining, providerName];
  console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
}
86
+
87
/**
 * Heuristic: does this error look like a quota / rate-limit / overload
 * condition, i.e. worth retrying on another key, model or provider?
 */
function isQuotaError(e) {
  if (e.status === 429 || e.status === 503) return true;
  const msg = (e.message || '').toLowerCase();
  const markers = ['quota', 'overloaded', 'resource_exhausted', 'rate limit', 'credits'];
  return markers.some(marker => msg.includes(marker));
}
91
+
92
+ // Streaming Helpers
93
/**
 * Stream a Gemini response out over SSE.
 * Iterates every available key × model pair; quota-style failures fall
 * through to the next pair, any other error is rethrown immediately.
 * Returns the accumulated response text.
 */
async function streamGemini(baseParams, res) {
  const { GoogleGenAI } = await import("@google/genai");
  const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
  const keys = await getKeyPool('gemini');
  if (keys.length === 0) throw new Error("No Gemini API keys");

  for (const apiKey of keys) {
    const client = new GoogleGenAI({ apiKey });
    for (const modelName of models) {
      try {
        console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
        const stream = await client.models.generateContentStream({ ...baseParams, model: modelName });

        let connected = false;
        let collected = "";
        for await (const chunk of stream) {
          if (!connected) {
            // Log + record usage once, on the first chunk received.
            console.log(`[AI] ✅ Connected to Gemini: ${modelName}`);
            recordUsage(modelName, PROVIDERS.GEMINI);
            connected = true;
          }
          if (!chunk.text) continue;
          collected += chunk.text;
          res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
          if (res.flush) res.flush();
        }
        return collected;
      } catch (e) {
        console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
        if (!isQuotaError(e)) throw e; // non-quota errors abort immediately
        console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
      }
    }
  }
  throw new Error("Gemini streaming failed (All keys/models exhausted)");
}
134
+
135
/**
 * Stream a chat completion through OpenRouter, or through a per-model custom
 * API URL configured in the DB. Tries every key × model combination: a
 * quota/rate-limit error moves on to the next KEY (break), any other error
 * moves on to the next MODEL. Returns the accumulated response text.
 */
async function streamOpenRouter(baseParams, res) {
  const config = await ConfigModel.findOne({ key: 'main' });
  const models = (config && config.openRouterModels?.length)
    ? config.openRouterModels.map(m => m.id)
    : DEFAULT_OPENROUTER_MODELS;
  const messages = convertGeminiToOpenAI(baseParams);
  const keys = await getKeyPool('openrouter');
  if (keys.length === 0) throw new Error("No OpenRouter API keys");

  if (messages.length === 0) {
    throw new Error("Conversion resulted in empty messages array. Check input format.");
  }

  for (const apiKey of keys) {
    for (const modelName of models) {
      // Per-model override: a configured apiUrl redirects to a custom endpoint.
      const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
      const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
      const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';

      const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });

      // Doubao models: enable prefix caching and disable "thinking" to
      // prioritise chat latency over reasoning depth.
      const extraBody = {};
      if (modelName.toLowerCase().includes('doubao')) {
        console.log(`[AI] 💡 Activating Doubao Prefix Caching for ${modelName}`);
        extraBody.caching = { type: "enabled", prefix: true };
        extraBody.thinking = { type: "disabled" };
      }

      try {
        console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);

        const stream = await client.chat.completions.create({
          model: modelName,
          messages,
          stream: true,
          ...extraBody
        });

        console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
        recordUsage(modelName, PROVIDERS.OPENROUTER);

        let collected = '';
        for await (const chunk of stream) {
          const piece = chunk.choices[0]?.delta?.content || '';
          if (!piece) continue;
          collected += piece;
          res.write(`data: ${JSON.stringify({ text: piece })}\n\n`);
          if (res.flush) res.flush();
        }
        return collected;
      } catch (e) {
        console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
        if (isQuotaError(e)) {
          console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
          break; // this key is throttled — rotate to the next key
        }
        // non-quota error: try the next model with the same key
      }
    }
  }
  throw new Error("OpenRouter/Custom stream failed (All models exhausted)");
}
199
+
200
/**
 * Stream a Gemma response over SSE (same contract as streamGemini).
 * Quota-style errors rotate to the next model/key. Fix: other errors were
 * previously swallowed (warn + silent continue), hiding real failures behind
 * the generic "Gemma stream failed" — they are now rethrown, consistent with
 * streamGemini's error handling.
 */
async function streamGemma(baseParams, res) {
  const { GoogleGenAI } = await import("@google/genai");
  const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
  const keys = await getKeyPool('gemini');
  if (keys.length === 0) throw new Error("No keys for Gemma");

  for (const apiKey of keys) {
    const client = new GoogleGenAI({ apiKey });
    for (const modelName of models) {
      try {
        console.log(`[AI] 🚀 Attempting Gemma Model: ${modelName}`);
        const result = await client.models.generateContentStream({ ...baseParams, model: modelName });

        let hasStarted = false;
        let fullText = "";
        for await (const chunk of result) {
          if (!hasStarted) {
            // First chunk: log the connection and record usage once.
            console.log(`[AI] ✅ Connected to Gemma: ${modelName}`);
            recordUsage(modelName, PROVIDERS.GEMMA);
            hasStarted = true;
          }
          if (chunk.text) {
            fullText += chunk.text;
            res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
            if (res.flush) res.flush();
          }
        }
        return fullText;
      } catch (e) {
        console.warn(`[AI] ⚠️ Gemma ${modelName} Error: ${e.message}`);
        if (isQuotaError(e)) continue; // throttled — try the next model/key
        throw e; // fix: non-quota errors are no longer silently swallowed
      }
    }
  }
  throw new Error("Gemma stream failed");
}
236
+
237
/**
 * Route a streaming request to the best available provider.
 * Requests containing audio parts are pinned to Gemini (the only provider
 * that accepts them here). Otherwise providers are tried in the runtime
 * priority order, which is re-seeded from config whenever its membership
 * drifts from the configured set. Quota failures demote the provider;
 * the last error is rethrown if every provider fails.
 */
async function streamContentWithSmartFallback(baseParams, res) {
  const contentList = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
  const hasAudio = contentList.some(c =>
    c?.parts?.some(p => p.inlineData && p.inlineData.mimeType.startsWith('audio/'))
  );

  if (hasAudio) {
    try {
      console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
      return await streamGemini(baseParams, res);
    } catch (e) {
      console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
      deprioritizeProvider(PROVIDERS.GEMINI);
      throw new Error('QUOTA_EXCEEDED_AUDIO');
    }
  }

  const config = await ConfigModel.findOne({ key: 'main' });
  const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
    ? config.aiProviderOrder
    : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];

  // Re-seed the runtime order when it is empty or its membership no longer
  // matches the configured set (the ORDER may legitimately differ — that is
  // exactly what deprioritizeProvider changes).
  const current = new Set(runtimeProviderOrder);
  const sameMembership = runtimeProviderOrder.length === configuredOrder.length
    && configuredOrder.every(p => current.has(p));
  if (runtimeProviderOrder.length === 0 || !sameMembership) {
    runtimeProviderOrder = [...configuredOrder];
  }

  const dispatch = {
    [PROVIDERS.GEMINI]: streamGemini,
    [PROVIDERS.OPENROUTER]: streamOpenRouter,
    [PROVIDERS.GEMMA]: streamGemma,
  };

  let finalError = null;
  for (const provider of runtimeProviderOrder) {
    try {
      console.log(`[AI] 👉 Trying Provider: ${provider}...`);
      const handler = dispatch[provider];
      if (handler) return await handler(baseParams, res);
    } catch (e) {
      console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
      finalError = e;
      if (isQuotaError(e)) {
        console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
        deprioritizeProvider(provider);
      }
      // in all cases, fall through to the next provider
    }
  }
  throw finalError || new Error('All streaming models unavailable.');
}
288
+
289
  const checkAIAccess = async (req, res, next) => {
290
  const username = req.headers['x-user-username'];
291
  const role = req.headers['x-user-role'];
 
325
  });
326
 
327
// POST /reset-pool — clear the runtime provider priority override so the
// next request re-seeds it from the configured order.
router.post('/reset-pool', checkAIAccess, (req, res) => {
  runtimeProviderOrder = [];
  console.log('[AI] 🔄 Provider priority pool reset.');
  res.json({ success: true });
});
332
 
333
+ // --- PERSISTENT CHAT HISTORY HANDLER ---
334
+ // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
 
 
 
 
 
 
 
 
 
 
 
 
335
  router.post('/chat', checkAIAccess, async (req, res) => {
336
+ const { text, audio } = req.body; // Ignore req.body.history for prompt generation
337
+
338
+ // Extract headers for context building
339
  const username = req.headers['x-user-username'];
340
  const userRole = req.headers['x-user-role'];
341
  const schoolId = req.headers['x-school-id'];
342
 
 
343
  res.setHeader('Content-Type', 'text/event-stream');
344
  res.setHeader('Cache-Control', 'no-cache');
345
  res.setHeader('Connection', 'keep-alive');
 
349
  const user = await User.findOne({ username });
350
  if (!user) throw new Error('User not found');
351
 
352
+ // 1. SAVE USER MSG TO DB
353
  const userMsgText = text || (audio ? '(Audio Message)' : '');
354
  if (userMsgText) {
355
  await ChatHistoryModel.create({ userId: user._id, role: 'user', text: userMsgText });
356
  }
357
 
358
+ // 2. FETCH HISTORY FROM DB (Long-term Memory)
359
+ // Retrieve last 30 messages for context
360
+ const dbHistory = await ChatHistoryModel.find({ userId: user._id })
361
+ .sort({ timestamp: -1 })
362
+ .limit(30);
363
+
364
+ // Re-order for API (oldest first)
365
+ const historyContext = dbHistory.reverse().map(msg => ({
366
+ role: msg.role === 'user' ? 'user' : 'model',
367
+ parts: [{ text: msg.text }]
368
+ }));
369
+
370
+ // 3. PREPARE REQUEST
371
+ // The last user message is already in DB and retrieved in historyContext.
372
+ // We need to separate "history" from "current message" for some APIs,
373
+ // but Google/OpenAI handle a list of messages fine.
374
+ // However, standard pattern is: History + Current.
375
+ // Since we fetched ALL (including current), we just pass historyContext as contents.
376
+ // NOTE: If audio is present, we must append it specifically as the "current" part
377
+ // because DB only stores text representation for now.
378
 
379
+ const fullContents = [...historyContext];
 
 
380
 
381
+ // If this request has audio, append it as a new part (since DB load only has text placeholder)
382
+ // We replace the last 'user' text message with the audio payload for the AI model
383
+ if (audio) {
384
+ // Remove the text placeholder we just loaded
385
+ if (fullContents.length > 0 && fullContents[fullContents.length - 1].role === 'user') {
386
+ fullContents.pop();
387
+ }
388
+ fullContents.push({
389
+ role: 'user',
390
+ parts: [{ inlineData: { mimeType: 'audio/webm', data: audio } }]
391
+ });
392
  }
393
 
394
+ // --- NEW: Inject Context ---
395
+ const contextPrompt = await buildUserContext(username, userRole, schoolId);
396
+ const baseSystemInstruction = "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。回复支持 Markdown 格式。";
397
+ const combinedSystemInstruction = `${baseSystemInstruction}\n${contextPrompt}`;
398
+ // ---------------------------
 
 
399
 
400
+ const answerText = await streamContentWithSmartFallback({
401
+ contents: fullContents,
402
+ config: { systemInstruction: combinedSystemInstruction }
403
+ }, res);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
404
 
405
+ // 4. SAVE AI RESPONSE TO DB
406
+ if (answerText) {
407
+ await ChatHistoryModel.create({ userId: user._id, role: 'model', text: answerText });
408
 
409
+ // Signal that text generation is done and TTS is starting
410
+ res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
411
+ try {
412
+ const { GoogleGenAI } = await import("@google/genai");
413
+ const keys = await getKeyPool('gemini');
414
+ let audioBytes = null;
415
+ for (const apiKey of keys) {
416
+ try {
417
+ const client = new GoogleGenAI({ apiKey });
418
+ const ttsResponse = await client.models.generateContent({
419
+ model: "gemini-2.5-flash-preview-tts",
420
+ contents: [{ parts: [{ text: answerText }] }],
421
+ config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
422
+ });
423
+ audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
424
+ if (audioBytes) break;
425
+ } catch(e) { if (isQuotaError(e)) continue; break; }
426
  }
427
+ if (audioBytes) res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
428
+ else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
429
+ } catch (ttsError) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
430
+ }
431
+ res.write('data: [DONE]\n\n'); res.end();
432
+ } catch (e) {
433
+ console.error("[AI Chat Route Error]", e);
434
+ res.write(`data: ${JSON.stringify({ error: true, message: e.message })}\n\n`); res.end();
435
+ }
436
+ });
437
 
438
+ // STREAMING ASSESSMENT ENDPOINT
439
+ router.post('/evaluate', checkAIAccess, async (req, res) => {
440
+ const { question, audio, image, images } = req.body;
441
+ res.setHeader('Content-Type', 'text/event-stream');
442
+ res.setHeader('Cache-Control', 'no-cache');
443
+ res.setHeader('Connection', 'keep-alive');
444
+ res.flushHeaders();
 
 
 
 
 
 
 
 
 
 
445
 
446
+ try {
447
+ res.write(`data: ${JSON.stringify({ status: 'analyzing' })}\n\n`);
 
 
 
 
 
 
 
 
 
 
 
 
448
 
449
+ const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];
450
+ if (audio) {
451
+ evalParts.push({ text: "学生的回答在音频中。" });
452
+ evalParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
453
  }
454
+
455
+ // Support multiple images
456
+ if (images && Array.isArray(images) && images.length > 0) {
457
+ evalParts.push({ text: "学生的回答写���以下图片中,请识别所有图片中的文字内容并进行批改:" });
458
+ images.forEach(img => {
459
+ if(img) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: img } });
460
+ });
461
+ } else if (image) {
462
+ // Legacy single image support
463
+ evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
464
+ evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
465
  }
466
+
467
+ // Force structured markdown output for streaming parsing
468
+ evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。
469
+ 必须严格按照以下格式输出(不要使用Markdown代码块包裹):
470
+
471
+ ## Transcription
472
+ (在此处输出识别到的学生回答内容,如果是图片则为识别的文字)
473
+
474
+ ## Feedback
475
+ (在此处输出简短的鼓励性评语和建议)
476
 
477
+ ## Score
478
+ (在此处仅输出一个0-100的数字)` });
479
+
480
+ // Stream Text
481
+ const fullText = await streamContentWithSmartFallback({
482
+ // CRITICAL FIX: Pass as array of objects for OpenRouter compatibility
483
+ contents: [{ role: 'user', parts: evalParts }],
484
+ // NO JSON MODE to allow progressive text streaming
485
+ }, res);
486
+
487
+ // Extract Feedback for TTS
488
+ const feedbackMatch = fullText.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i);
489
+ const feedbackText = feedbackMatch ? feedbackMatch[1].trim() : "";
490
+
491
+ // Generate TTS if feedback exists
492
+ if (feedbackText) {
493
+ res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
494
+ try {
495
+ const { GoogleGenAI } = await import("@google/genai");
496
+ const keys = await getKeyPool('gemini');
497
+ let feedbackAudio = null;
498
+ for (const apiKey of keys) {
499
+ try {
500
+ const client = new GoogleGenAI({ apiKey });
501
+ const ttsResponse = await client.models.generateContent({
502
+ model: "gemini-2.5-flash-preview-tts",
503
+ contents: [{ parts: [{ text: feedbackText }] }],
504
+ config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
505
+ });
506
+ feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
507
+ if (feedbackAudio) break;
508
+ } catch(e) { if (isQuotaError(e)) continue; break; }
509
+ }
510
+ if (feedbackAudio) res.write(`data: ${JSON.stringify({ audio: feedbackAudio })}\n\n`);
511
+ else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
512
+ } catch (ttsErr) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
513
+ }
514
 
515
+ res.write('data: [DONE]\n\n');
 
516
  res.end();
517
 
518
  } catch (e) {
519
+ console.error("AI Eval Error:", e);
520
+ res.write(`data: ${JSON.stringify({ error: true, message: e.message || "Evaluation failed" })}\n\n`);
521
  res.end();
522
  }
523
  });
524
 
 
 
 
 
 
 
 
 
 
 
525
  module.exports = router;
components/ai/ChatPanel.tsx CHANGED
@@ -1,7 +1,7 @@
1
 
2
  import React, { useState, useRef, useEffect } from 'react';
3
  import { AIChatMessage, User } from '../../types';
4
- import { Bot, Mic, Square, Volume2, Send, Sparkles, Loader2, StopCircle, Trash2, BrainCircuit, ChevronDown, ChevronRight } from 'lucide-react';
5
  import ReactMarkdown from 'react-markdown';
6
  import remarkGfm from 'remark-gfm';
7
  import { blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS } from '../../utils/mediaHelpers';
@@ -34,10 +34,8 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
34
  const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
35
  const [isChatProcessing, setIsChatProcessing] = useState(false);
36
  const [isChatRecording, setIsChatRecording] = useState(false);
 
37
  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
38
-
39
- // State to toggle thoughts visibility per message
40
- const [expandedThoughts, setExpandedThoughts] = useState<Record<string, boolean>>({});
41
 
42
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
43
  const audioChunksRef = useRef<Blob[]>([]);
@@ -70,8 +68,8 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
70
 
71
  // Scroll to bottom
72
  useEffect(() => {
73
- messagesEndRef.current?.scrollIntoView({ behavior: 'smooth', block: 'end' });
74
- }, [messages, isChatProcessing]);
75
 
76
  const stopPlayback = () => {
77
  if (currentSourceRef.current) {
@@ -94,6 +92,30 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
94
  window.speechSynthesis.speak(utterance);
95
  };
96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  const startRecording = async () => {
98
  try {
99
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
@@ -131,6 +153,7 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
131
  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
132
  if (!text && !audioBase64) return;
133
  stopPlayback();
 
134
 
135
  const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
136
 
@@ -141,21 +164,15 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
141
  isAudioMessage: !!audioBase64,
142
  timestamp: Date.now()
143
  };
144
-
145
  const newAiMsgId = (Date.now() + 1).toString();
146
- // Init with empty thoughts array
147
  const newAiMsg: AIChatMessage = {
148
  id: newAiMsgId,
149
  role: 'model',
150
  text: '',
151
- timestamp: Date.now(),
152
- thoughts: []
153
  };
154
 
155
  setMessages(prev => [...prev, newUserMsg, newAiMsg]);
156
- // Auto-expand thoughts for new message
157
- setExpandedThoughts(prev => ({...prev, [newAiMsgId]: true}));
158
-
159
  setTextInput('');
160
  setIsChatProcessing(true);
161
 
@@ -182,7 +199,6 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
182
  while (true) {
183
  const { done, value } = await reader.read();
184
  if (done) break;
185
-
186
  buffer += decoder.decode(value, { stream: true });
187
  const parts = buffer.split('\n\n');
188
  buffer = parts.pop() || '';
@@ -190,44 +206,44 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
190
  for (const line of parts) {
191
  if (line.startsWith('data: ')) {
192
  const jsonStr = line.replace('data: ', '').trim();
 
193
  try {
194
  const data = JSON.parse(jsonStr);
195
 
196
- // REAL STREAMING HANDLING
197
- if (data.type === 'text') {
198
- aiTextAccumulated += data.content;
 
 
 
199
  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated } : m));
200
- }
201
- else if (data.type === 'thought') {
202
- setMessages(prev => prev.map(m => {
203
- if (m.id === newAiMsgId) {
204
- const oldThoughts = m.thoughts || [];
205
- return { ...m, thoughts: [...oldThoughts, data.content] };
206
- }
207
- return m;
208
- }));
209
  }
210
- else if (data.type === 'error') {
211
- setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message}` } : m));
 
 
 
 
 
 
 
212
  }
213
- else if (data.type === 'done') {
214
- break;
 
215
  }
216
  } catch (e) {}
217
  }
218
  }
219
  }
220
  } catch (error: any) {
221
- setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误。' } : m));
222
  } finally {
223
  setIsChatProcessing(false);
 
224
  }
225
  };
226
 
227
- const toggleThoughts = (msgId: string) => {
228
- setExpandedThoughts(prev => ({...prev, [msgId]: !prev[msgId]}));
229
- };
230
-
231
  const clearHistory = () => {
232
  setMessages([{
233
  id: 'welcome',
@@ -253,39 +269,19 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
253
  <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
254
  {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
255
  </div>
256
- <div className={`max-w-[85%] flex flex-col items-start ${msg.role === 'user' ? 'items-end' : ''}`}>
 
 
257
 
258
- {/* Chain of Thought / Tool Logs */}
259
- {msg.thoughts && msg.thoughts.length > 0 && (
260
- <div className="mb-2 w-full max-w-md">
261
- <div
262
- onClick={() => toggleThoughts(msg.id)}
263
- className="flex items-center gap-2 text-xs text-gray-500 bg-gray-50 border border-gray-200 rounded-lg px-3 py-1.5 cursor-pointer hover:bg-gray-100 transition-colors w-fit"
264
- >
265
- <BrainCircuit size={14} className={isChatProcessing && msg.id === messages[messages.length-1].id ? "animate-pulse text-purple-500" : "text-gray-400"}/>
266
- <span>{isChatProcessing && msg.id === messages[messages.length-1].id ? '深度思考 & 工具调用中...' : '思维链 / 系统日志'}</span>
267
- {expandedThoughts[msg.id] ? <ChevronDown size={14}/> : <ChevronRight size={14}/>}
268
- </div>
269
-
270
- {expandedThoughts[msg.id] && (
271
- <div className="mt-1 bg-gray-50 border border-gray-100 rounded-lg p-3 text-xs font-mono text-gray-600 space-y-1 animate-in slide-in-from-top-1">
272
- {msg.thoughts.map((t, idx) => (
273
- <div key={idx} className="flex gap-2 border-l-2 border-gray-200 pl-2">
274
- <span className="text-gray-400 select-none">[{idx+1}]</span>
275
- <span className="whitespace-pre-wrap">{t}</span>
276
- </div>
277
- ))}
278
- </div>
279
- )}
280
  </div>
281
  )}
282
 
283
- {/* Message Bubble */}
284
- <div className={`p-3 rounded-2xl text-sm overflow-hidden shadow-sm ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none'}`}>
285
- <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
286
- {msg.role === 'model' && !msg.text && isChatProcessing && <div className="flex items-center gap-2 text-gray-400 py-1"><Loader2 className="animate-spin" size={14}/><span className="text-xs">组织语言中...</span></div>}
287
- {(msg.role === 'model' && msg.text && !isChatProcessing) && (<button onClick={() => speakWithBrowser(msg.text!)} className="mt-2 flex items-center gap-2 text-xs bg-gray-50 text-gray-600 px-3 py-1.5 rounded-full hover:bg-gray-100 border border-gray-200 transition-colors w-fit"><Volume2 size={14}/> 朗读</button>)}
288
- </div>
289
  </div>
290
  </div>
291
  ))}
 
1
 
2
  import React, { useState, useRef, useEffect } from 'react';
3
  import { AIChatMessage, User } from '../../types';
4
+ import { Bot, Mic, Square, Volume2, Send, Sparkles, Loader2, StopCircle, Trash2 } from 'lucide-react';
5
  import ReactMarkdown from 'react-markdown';
6
  import remarkGfm from 'remark-gfm';
7
  import { blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS } from '../../utils/mediaHelpers';
 
34
  const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
35
  const [isChatProcessing, setIsChatProcessing] = useState(false);
36
  const [isChatRecording, setIsChatRecording] = useState(false);
37
+ const [generatingAudioId, setGeneratingAudioId] = useState<string | null>(null);
38
  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
 
 
 
39
 
40
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
41
  const audioChunksRef = useRef<Blob[]>([]);
 
68
 
69
  // Scroll to bottom
70
  useEffect(() => {
71
+ messagesEndRef.current?.scrollIntoView({ behavior: isChatProcessing ? 'auto' : 'smooth', block: 'end' });
72
+ }, [messages, isChatProcessing, generatingAudioId]);
73
 
74
  const stopPlayback = () => {
75
  if (currentSourceRef.current) {
 
92
  window.speechSynthesis.speak(utterance);
93
  };
94
 
95
+ const playPCMAudio = async (base64Audio: string) => {
96
+ stopPlayback();
97
+ try {
98
+ if (!audioContextRef.current) {
99
+ // @ts-ignore
100
+ const AudioCtor = window.AudioContext || window.webkitAudioContext;
101
+ audioContextRef.current = new AudioCtor();
102
+ }
103
+ if (audioContextRef.current?.state === 'suspended') {
104
+ await audioContextRef.current.resume();
105
+ }
106
+ const bytes = base64ToUint8Array(base64Audio);
107
+ const audioBuffer = decodePCM(bytes, audioContextRef.current!);
108
+ const source = audioContextRef.current!.createBufferSource();
109
+ source.buffer = audioBuffer;
110
+ source.connect(audioContextRef.current!.destination);
111
+ source.start(0);
112
+ currentSourceRef.current = source;
113
+ } catch (e) {
114
+ console.error("Audio playback error", e);
115
+ setToast({ show: true, message: '语音播放失败', type: 'error' });
116
+ }
117
+ };
118
+
119
  const startRecording = async () => {
120
  try {
121
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 
153
  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
154
  if (!text && !audioBase64) return;
155
  stopPlayback();
156
+ setGeneratingAudioId(null);
157
 
158
  const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
159
 
 
164
  isAudioMessage: !!audioBase64,
165
  timestamp: Date.now()
166
  };
 
167
  const newAiMsgId = (Date.now() + 1).toString();
 
168
  const newAiMsg: AIChatMessage = {
169
  id: newAiMsgId,
170
  role: 'model',
171
  text: '',
172
+ timestamp: Date.now()
 
173
  };
174
 
175
  setMessages(prev => [...prev, newUserMsg, newAiMsg]);
 
 
 
176
  setTextInput('');
177
  setIsChatProcessing(true);
178
 
 
199
  while (true) {
200
  const { done, value } = await reader.read();
201
  if (done) break;
 
202
  buffer += decoder.decode(value, { stream: true });
203
  const parts = buffer.split('\n\n');
204
  buffer = parts.pop() || '';
 
206
  for (const line of parts) {
207
  if (line.startsWith('data: ')) {
208
  const jsonStr = line.replace('data: ', '').trim();
209
+ if (jsonStr === '[DONE]') break;
210
  try {
211
  const data = JSON.parse(jsonStr);
212
 
213
+ if (data.status === 'tts') {
214
+ setGeneratingAudioId(newAiMsgId);
215
+ }
216
+
217
+ if (data.text) {
218
+ aiTextAccumulated += data.text;
219
  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated } : m));
 
 
 
 
 
 
 
 
 
220
  }
221
+ if (data.audio) {
222
+ setGeneratingAudioId(null);
223
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, audio: data.audio } : m));
224
+ playPCMAudio(data.audio);
225
+ }
226
+ if (data.ttsSkipped) {
227
+ setGeneratingAudioId(null);
228
+ setToast({ show: true, message: 'AI 语音额度已用尽,已切换至本地语音播报', type: 'error' });
229
+ speakWithBrowser(aiTextAccumulated);
230
  }
231
+ if (data.error) {
232
+ setGeneratingAudioId(null);
233
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message || '未知错误'}` } : m));
234
  }
235
  } catch (e) {}
236
  }
237
  }
238
  }
239
  } catch (error: any) {
240
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误,请重试。' } : m));
241
  } finally {
242
  setIsChatProcessing(false);
243
+ setGeneratingAudioId(null);
244
  }
245
  };
246
 
 
 
 
 
247
  const clearHistory = () => {
248
  setMessages([{
249
  id: 'welcome',
 
269
  <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
270
  {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
271
  </div>
272
+ <div className={`max-w-[80%] p-3 rounded-2xl text-sm overflow-hidden ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none shadow-sm'}`}>
273
+ <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
274
+ {msg.role === 'model' && !msg.text && isChatProcessing && <div className="flex items-center gap-2 text-gray-400 py-1"><Loader2 className="animate-spin" size={14}/><span className="text-xs">思考中...</span></div>}
275
 
276
+ {/* Audio Generating Indicator */}
277
+ {msg.id === generatingAudioId && (
278
+ <div className="flex items-center gap-2 text-purple-600 py-2 animate-pulse mt-1 border-t border-purple-100 pt-2">
279
+ <Loader2 className="animate-spin" size={14}/>
280
+ <span className="text-xs font-bold">正在生成语音回复...</span>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
281
  </div>
282
  )}
283
 
284
+ {msg.audio ? (<button onClick={() => playPCMAudio(msg.audio!)} className="mt-2 flex items-center gap-2 text-xs bg-blue-50 text-blue-600 px-3 py-1.5 rounded-full hover:bg-blue-100 border border-blue-100 transition-colors w-fit"><Volume2 size={14}/> 播放语音 (AI)</button>) : (msg.role === 'model' && msg.text && !isChatProcessing && !generatingAudioId) && (<button onClick={() => speakWithBrowser(msg.text!)} className="mt-2 flex items-center gap-2 text-xs bg-gray-50 text-gray-600 px-3 py-1.5 rounded-full hover:bg-gray-100 border border-gray-200 transition-colors w-fit"><Volume2 size={14}/> 朗读 (本地)</button>)}
 
 
 
 
 
285
  </div>
286
  </div>
287
  ))}
types.ts CHANGED
@@ -388,5 +388,4 @@ export interface AIChatMessage {
388
  audio?: string;
389
  isAudioMessage?: boolean;
390
  timestamp: number;
391
- thoughts?: string[]; // Chain of Thought / Tool execution logs
392
  }
 
388
  audio?: string;
389
  isAudioMessage?: boolean;
390
  timestamp: number;
 
391
  }