dvc890 committed on
Commit
b4aff11
·
verified ·
1 Parent(s): b4714b8

Upload 47 files

Browse files
Files changed (7) hide show
  1. App.tsx +4 -0
  2. components/Sidebar.tsx +2 -1
  3. metadata.json +1 -1
  4. pages/AIAssistant.tsx +338 -0
  5. server.js +121 -2
  6. services/api.ts +8 -1
  7. types.ts +22 -1
App.tsx CHANGED
@@ -17,6 +17,8 @@ const Games = React.lazy(() => import('./pages/Games').then(module => ({ default
17
  const AttendancePage = React.lazy(() => import('./pages/Attendance').then(module => ({ default: module.AttendancePage })));
18
  const Profile = React.lazy(() => import('./pages/Profile').then(module => ({ default: module.Profile })));
19
  const WishesAndFeedback = React.lazy(() => import('./pages/WishesAndFeedback').then(module => ({ default: module.WishesAndFeedback })));
 
 
20
 
21
  import { Login } from './pages/Login';
22
  import { User, UserRole } from './types';
@@ -124,6 +126,7 @@ const AppContent: React.FC = () => {
124
  case 'games': return <Games />;
125
  case 'attendance': return <AttendancePage />;
126
  case 'wishes': return <WishesAndFeedback />;
 
127
  case 'profile': return <Profile />;
128
  default: return <Dashboard onNavigate={(view) => setCurrentView(view)} />;
129
  }
@@ -143,6 +146,7 @@ const AppContent: React.FC = () => {
143
  games: '互动教学中心',
144
  attendance: '考勤管理',
145
  wishes: '心愿与反馈',
 
146
  profile: '个人中心'
147
  };
148
 
 
17
  const AttendancePage = React.lazy(() => import('./pages/Attendance').then(module => ({ default: module.AttendancePage })));
18
  const Profile = React.lazy(() => import('./pages/Profile').then(module => ({ default: module.Profile })));
19
  const WishesAndFeedback = React.lazy(() => import('./pages/WishesAndFeedback').then(module => ({ default: module.WishesAndFeedback })));
20
+ // NEW: AI Assistant Page
21
+ const AIAssistant = React.lazy(() => import('./pages/AIAssistant').then(module => ({ default: module.AIAssistant })));
22
 
23
  import { Login } from './pages/Login';
24
  import { User, UserRole } from './types';
 
126
  case 'games': return <Games />;
127
  case 'attendance': return <AttendancePage />;
128
  case 'wishes': return <WishesAndFeedback />;
129
+ case 'ai-assistant': return <AIAssistant />; // NEW
130
  case 'profile': return <Profile />;
131
  default: return <Dashboard onNavigate={(view) => setCurrentView(view)} />;
132
  }
 
146
  games: '互动教学中心',
147
  attendance: '考勤管理',
148
  wishes: '心愿与反馈',
149
+ 'ai-assistant': 'AI 智能助教',
150
  profile: '个人中心'
151
  };
152
 
components/Sidebar.tsx CHANGED
@@ -1,6 +1,6 @@
1
 
2
  import React from 'react';
3
- import { LayoutDashboard, Users, BookOpen, GraduationCap, Settings, LogOut, FileText, School, UserCog, Palette, X, Building, Gamepad2, CalendarCheck, UserCircle, MessageSquare } from 'lucide-react';
4
  import { UserRole } from '../types';
5
 
6
  interface SidebarProps {
@@ -17,6 +17,7 @@ export const Sidebar: React.FC<SidebarProps> = ({ currentView, onChangeView, use
17
  // PRINCIPAL has access to almost everything ADMIN has, except 'schools' management
18
  const menuItems = [
19
  { id: 'dashboard', label: '工作台', icon: LayoutDashboard, roles: [UserRole.ADMIN, UserRole.PRINCIPAL, UserRole.TEACHER, UserRole.STUDENT] },
 
20
  { id: 'attendance', label: '考勤管理', icon: CalendarCheck, roles: [UserRole.TEACHER, UserRole.PRINCIPAL] },
21
  { id: 'games', label: '互动教学', icon: Gamepad2, roles: [UserRole.TEACHER, UserRole.STUDENT] }, // Removed PRINCIPAL
22
  { id: 'wishes', label: '心愿与反馈', icon: MessageSquare, roles: [UserRole.ADMIN, UserRole.PRINCIPAL, UserRole.TEACHER, UserRole.STUDENT] }, // NEW
 
1
 
2
  import React from 'react';
3
+ import { LayoutDashboard, Users, BookOpen, GraduationCap, Settings, LogOut, FileText, School, UserCog, Palette, X, Building, Gamepad2, CalendarCheck, UserCircle, MessageSquare, Bot } from 'lucide-react';
4
  import { UserRole } from '../types';
5
 
6
  interface SidebarProps {
 
17
  // PRINCIPAL has access to almost everything ADMIN has, except 'schools' management
18
  const menuItems = [
19
  { id: 'dashboard', label: '工作台', icon: LayoutDashboard, roles: [UserRole.ADMIN, UserRole.PRINCIPAL, UserRole.TEACHER, UserRole.STUDENT] },
20
+ { id: 'ai-assistant', label: 'AI 智能助教', icon: Bot, roles: [UserRole.TEACHER, UserRole.STUDENT] }, // NEW
21
  { id: 'attendance', label: '考勤管理', icon: CalendarCheck, roles: [UserRole.TEACHER, UserRole.PRINCIPAL] },
22
  { id: 'games', label: '互动教学', icon: Gamepad2, roles: [UserRole.TEACHER, UserRole.STUDENT] }, // Removed PRINCIPAL
23
  { id: 'wishes', label: '心愿与反馈', icon: MessageSquare, roles: [UserRole.ADMIN, UserRole.PRINCIPAL, UserRole.TEACHER, UserRole.STUDENT] }, // NEW
metadata.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "name": "智慧校园管理系统",
3
  "description": "一个综合性的学生管理系统仪表板,具有基于角色的访问控制、学生档案、课程管理和绩效分析功能。",
4
  "requestFramePermissions": []
5
  }
 
1
  {
2
+ "name": "(AI)智慧校园管理系统",
3
  "description": "一个综合性的学生管理系统仪表板,具有基于角色的访问控制、学生档案、课程管理和绩效分析功能。",
4
  "requestFramePermissions": []
5
  }
pages/AIAssistant.tsx ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import React, { useState, useRef, useEffect } from 'react';
3
+ import { api } from '../services/api';
4
+ import { AIChatMessage, OralAssessment } from '../types';
5
+ import { Bot, Mic, Square, Play, Volume2, Send, CheckCircle, Brain, Sparkles, Loader2, StopCircle } from 'lucide-react';
6
+ import { Emoji } from '../components/Emoji';
7
+
8
+ // Utility to handle Base64 conversion
9
+ const blobToBase64 = (blob: Blob): Promise<string> => {
10
+ return new Promise((resolve, reject) => {
11
+ const reader = new FileReader();
12
+ reader.onloadend = () => {
13
+ const base64String = (reader.result as string).split(',')[1];
14
+ resolve(base64String);
15
+ };
16
+ reader.onerror = reject;
17
+ reader.readAsDataURL(blob);
18
+ });
19
+ };
20
+
21
+ // Utility to decode and play PCM audio (standard Web Audio API)
22
+ const playPCMAudio = async (base64Audio: string, audioContext: AudioContext) => {
23
+ try {
24
+ const binaryString = window.atob(base64Audio);
25
+ const len = binaryString.length;
26
+ const bytes = new Uint8Array(len);
27
+ for (let i = 0; i < len; i++) {
28
+ bytes[i] = binaryString.charCodeAt(i);
29
+ }
30
+
31
+ // Gemini TTS raw output usually needs specific handling.
32
+ // If it's pure PCM, we need to wrap it. However, the Gemini API usually returns standard formats if requested or PCM.
33
+ // Assuming raw PCM 24kHz mono based on documentation typically seen.
34
+ const int16Data = new Int16Array(bytes.buffer);
35
+ const float32Data = new Float32Array(int16Data.length);
36
+ for (let i = 0; i < int16Data.length; i++) {
37
+ float32Data[i] = int16Data[i] / 32768.0;
38
+ }
39
+
40
+ const buffer = audioContext.createBuffer(1, float32Data.length, 24000); // Gemini Flash TTS default
41
+ buffer.getChannelData(0).set(float32Data);
42
+
43
+ const source = audioContext.createBufferSource();
44
+ source.buffer = buffer;
45
+ source.connect(audioContext.destination);
46
+ source.start();
47
+ } catch (e) {
48
+ console.error("Audio playback error:", e);
49
+ }
50
+ };
51
+
52
+ export const AIAssistant: React.FC = () => {
53
+ const [activeTab, setActiveTab] = useState<'chat' | 'assessment'>('chat');
54
+
55
+ // Chat State
56
+ const [messages, setMessages] = useState<AIChatMessage[]>([]);
57
+ const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
58
+ const [textInput, setTextInput] = useState('');
59
+ const [isRecording, setIsRecording] = useState(false);
60
+ const [isProcessing, setIsProcessing] = useState(false);
61
+
62
+ // Assessment State
63
+ const [assessmentTopic, setAssessmentTopic] = useState('请描述你最喜欢的一个季节及其原因。');
64
+ const [assessmentResult, setAssessmentResult] = useState<any>(null);
65
+
66
+ // Audio Refs
67
+ const mediaRecorderRef = useRef<MediaRecorder | null>(null);
68
+ const audioChunksRef = useRef<Blob[]>([]);
69
+ const audioContextRef = useRef<AudioContext | null>(null);
70
+
71
+ useEffect(() => {
72
+ // Init Audio Context
73
+ // @ts-ignore
74
+ const AudioCtor = window.AudioContext || window.webkitAudioContext;
75
+ audioContextRef.current = new AudioCtor();
76
+
77
+ // Initial welcome message
78
+ if (messages.length === 0) {
79
+ setMessages([{
80
+ id: 'welcome',
81
+ role: 'model',
82
+ text: '你好!我是你的 AI 智能助教。你可以问我任何学习上的问题,或者切换到“口语测评”模式来练习口语。',
83
+ timestamp: Date.now()
84
+ }]);
85
+ }
86
+ }, []);
87
+
88
+ const startRecording = async () => {
89
+ try {
90
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
91
+ const mediaRecorder = new MediaRecorder(stream);
92
+ mediaRecorderRef.current = mediaRecorder;
93
+ audioChunksRef.current = [];
94
+
95
+ mediaRecorder.ondataavailable = (event) => {
96
+ if (event.data.size > 0) {
97
+ audioChunksRef.current.push(event.data);
98
+ }
99
+ };
100
+
101
+ mediaRecorder.start();
102
+ setIsRecording(true);
103
+ } catch (error) {
104
+ console.error("Error accessing microphone:", error);
105
+ alert("无法访问麦克风");
106
+ }
107
+ };
108
+
109
+ const stopRecording = () => {
110
+ if (mediaRecorderRef.current && isRecording) {
111
+ mediaRecorderRef.current.stop();
112
+ setIsRecording(false);
113
+
114
+ mediaRecorderRef.current.onstop = async () => {
115
+ const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/wav' });
116
+ const base64Audio = await blobToBase64(audioBlob);
117
+
118
+ if (activeTab === 'chat') {
119
+ handleChatSubmit(undefined, base64Audio);
120
+ } else {
121
+ handleAssessmentSubmit(base64Audio);
122
+ }
123
+
124
+ // Stop all tracks
125
+ mediaRecorderRef.current?.stream.getTracks().forEach(track => track.stop());
126
+ };
127
+ }
128
+ };
129
+
130
+ const handleChatSubmit = async (text?: string, audioBase64?: string) => {
131
+ if (!text && !audioBase64) return;
132
+
133
+ // Optimistic UI Update
134
+ const newUserMsg: AIChatMessage = {
135
+ id: Date.now().toString(),
136
+ role: 'user',
137
+ text: text || '(语音消息)',
138
+ isAudioMessage: !!audioBase64,
139
+ timestamp: Date.now()
140
+ };
141
+ setMessages(prev => [...prev, newUserMsg]);
142
+ setTextInput('');
143
+ setIsProcessing(true);
144
+
145
+ try {
146
+ const response = await api.ai.chat({ text, audio: audioBase64 });
147
+
148
+ const newAiMsg: AIChatMessage = {
149
+ id: (Date.now() + 1).toString(),
150
+ role: 'model',
151
+ text: response.text,
152
+ audio: response.audio,
153
+ timestamp: Date.now()
154
+ };
155
+ setMessages(prev => [...prev, newAiMsg]);
156
+
157
+ // Auto play audio if response contains it
158
+ if (response.audio && audioContextRef.current) {
159
+ playPCMAudio(response.audio, audioContextRef.current);
160
+ }
161
+
162
+ } catch (error) {
163
+ console.error("Chat error:", error);
164
+ setMessages(prev => [...prev, { id: Date.now().toString(), role: 'model', text: '抱歉,我遇到了一点问题,请稍后再试。', timestamp: Date.now() }]);
165
+ } finally {
166
+ setIsProcessing(false);
167
+ }
168
+ };
169
+
170
+ const handleAssessmentSubmit = async (audioBase64: string) => {
171
+ setIsProcessing(true);
172
+ setAssessmentResult(null);
173
+ try {
174
+ const result = await api.ai.evaluate({
175
+ question: assessmentTopic,
176
+ audio: audioBase64
177
+ });
178
+ setAssessmentResult(result);
179
+ } catch (error) {
180
+ console.error("Eval error:", error);
181
+ alert("评分失败,请重试");
182
+ } finally {
183
+ setIsProcessing(false);
184
+ }
185
+ };
186
+
187
+ const playMessageAudio = (b64: string) => {
188
+ if (audioContextRef.current) playPCMAudio(b64, audioContextRef.current);
189
+ };
190
+
191
+ return (
192
+ <div className="h-full flex flex-col bg-slate-50 overflow-hidden">
193
+ {/* Header Tabs */}
194
+ <div className="bg-white border-b border-gray-200 px-6 pt-4 flex gap-6 shrink-0 shadow-sm z-10">
195
+ <button onClick={() => setActiveTab('chat')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'chat' ? 'border-blue-500 text-blue-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
196
+ <Bot size={18} className={activeTab === 'chat' ? 'text-blue-500' : ''}/> AI 助教 (问答)
197
+ </button>
198
+ <button onClick={() => setActiveTab('assessment')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'assessment' ? 'border-purple-500 text-purple-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
199
+ <Mic size={18} className={activeTab === 'assessment' ? 'text-purple-500' : ''}/> 口语/背诵测评
200
+ </button>
201
+ </div>
202
+
203
+ {/* TAB: CHAT */}
204
+ {activeTab === 'chat' && (
205
+ <div className="flex-1 flex flex-col max-w-4xl mx-auto w-full h-full relative">
206
+ <div className="flex-1 overflow-y-auto p-4 space-y-4 pb-24 custom-scrollbar">
207
+ {messages.map(msg => (
208
+ <div key={msg.id} className={`flex gap-3 ${msg.role === 'user' ? 'flex-row-reverse' : ''}`}>
209
+ <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
210
+ {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
211
+ </div>
212
+ <div className={`max-w-[80%] p-3 rounded-2xl text-sm ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none shadow-sm'}`}>
213
+ {msg.text}
214
+ {msg.audio && (
215
+ <button onClick={() => playMessageAudio(msg.audio!)} className="mt-2 flex items-center gap-2 text-xs bg-blue-50 text-blue-600 px-3 py-1.5 rounded-full hover:bg-blue-100 border border-blue-100 transition-colors w-fit">
216
+ <Volume2 size={14}/> 播放语音
217
+ </button>
218
+ )}
219
+ </div>
220
+ </div>
221
+ ))}
222
+ {isProcessing && (
223
+ <div className="flex gap-3">
224
+ <div className="w-10 h-10 rounded-full bg-blue-100 text-blue-600 flex items-center justify-center">
225
+ <Loader2 className="animate-spin" size={20}/>
226
+ </div>
227
+ <div className="text-sm text-gray-400 self-center">AI 正在思考中...</div>
228
+ </div>
229
+ )}
230
+ </div>
231
+
232
+ <div className="p-4 bg-white border-t border-gray-200 shrink-0">
233
+ <div className="flex items-center gap-2 max-w-4xl mx-auto bg-gray-100 p-1.5 rounded-full border border-gray-200">
234
+ <button
235
+ onClick={() => setInputMode(inputMode === 'text' ? 'audio' : 'text')}
236
+ className="p-2 rounded-full hover:bg-white text-gray-500 transition-colors"
237
+ title={inputMode === 'text' ? '切换语音' : '切换文字'}
238
+ >
239
+ {inputMode === 'text' ? <Mic size={20}/> : <Square size={20}/>}
240
+ </button>
241
+
242
+ {inputMode === 'text' ? (
243
+ <input
244
+ className="flex-1 bg-transparent border-none outline-none px-2 text-sm"
245
+ placeholder="输入问题..."
246
+ value={textInput}
247
+ onChange={e => setTextInput(e.target.value)}
248
+ onKeyDown={e => e.key === 'Enter' && handleChatSubmit(textInput)}
249
+ />
250
+ ) : (
251
+ <div className="flex-1 text-center text-sm font-medium text-blue-600 animate-pulse">
252
+ {isRecording ? '正在录音... 点击停止' : '点击麦克风开始说话'}
253
+ </div>
254
+ )}
255
+
256
+ {inputMode === 'text' ? (
257
+ <button onClick={() => handleChatSubmit(textInput)} className="p-2 bg-blue-600 rounded-full text-white hover:bg-blue-700 disabled:opacity-50" disabled={!textInput.trim() || isProcessing}>
258
+ <Send size={18}/>
259
+ </button>
260
+ ) : (
261
+ <button
262
+ onMouseDown={startRecording}
263
+ onMouseUp={stopRecording}
264
+ // Touch support for mobile
265
+ onTouchStart={startRecording}
266
+ onTouchEnd={stopRecording}
267
+ className={`p-3 rounded-full text-white transition-all ${isRecording ? 'bg-red-500 scale-110 shadow-lg ring-4 ring-red-200' : 'bg-blue-600 hover:bg-blue-700'}`}
268
+ >
269
+ {isRecording ? <StopCircle size={20}/> : <Mic size={20}/>}
270
+ </button>
271
+ )}
272
+ </div>
273
+ <p className="text-center text-[10px] text-gray-400 mt-2">Gemini Flash-Lite 模型提供支持</p>
274
+ </div>
275
+ </div>
276
+ )}
277
+
278
+ {/* TAB: ASSESSMENT */}
279
+ {activeTab === 'assessment' && (
280
+ <div className="flex-1 p-6 overflow-y-auto">
281
+ <div className="max-w-3xl mx-auto space-y-6">
282
+ {/* Topic Card */}
283
+ <div className="bg-white p-6 rounded-2xl border border-purple-100 shadow-sm">
284
+ <h3 className="text-lg font-bold text-gray-800 mb-2 flex items-center">
285
+ <Brain className="mr-2 text-purple-600"/> 今日测评题目
286
+ </h3>
287
+ <textarea
288
+ className="w-full bg-purple-50/50 border border-purple-100 rounded-xl p-4 text-gray-700 font-medium text-lg resize-none focus:ring-2 focus:ring-purple-200 outline-none"
289
+ value={assessmentTopic}
290
+ onChange={e => setAssessmentTopic(e.target.value)}
291
+ rows={3}
292
+ />
293
+ <div className="mt-4 flex justify-center">
294
+ <button
295
+ onMouseDown={startRecording}
296
+ onMouseUp={stopRecording}
297
+ onTouchStart={startRecording}
298
+ onTouchEnd={stopRecording}
299
+ disabled={isProcessing}
300
+ className={`px-8 py-4 rounded-full font-bold text-white flex items-center gap-3 shadow-lg transition-all ${isRecording ? 'bg-red-500 scale-105' : 'bg-gradient-to-r from-purple-600 to-indigo-600 hover:shadow-purple-200 hover:scale-105'}`}
301
+ >
302
+ {isProcessing ? <Loader2 className="animate-spin"/> : (isRecording ? <StopCircle/> : <Mic/>)}
303
+ {isProcessing ? 'AI 正在评分...' : (isRecording ? '松开结束录音' : '按住开始回答')}
304
+ </button>
305
+ </div>
306
+ </div>
307
+
308
+ {/* Result Card */}
309
+ {assessmentResult && (
310
+ <div className="bg-white p-6 rounded-2xl border border-gray-200 shadow-lg animate-in slide-in-from-bottom-4">
311
+ <div className="flex items-center justify-between border-b border-gray-100 pb-4 mb-4">
312
+ <h3 className="font-bold text-xl text-gray-800">测评报告</h3>
313
+ <div className={`text-3xl font-black ${assessmentResult.score >= 80 ? 'text-green-500' : assessmentResult.score >= 60 ? 'text-yellow-500' : 'text-red-500'}`}>
314
+ {assessmentResult.score}<span className="text-sm text-gray-400 ml-1">分</span>
315
+ </div>
316
+ </div>
317
+
318
+ <div className="space-y-4">
319
+ <div className="bg-gray-50 p-4 rounded-xl">
320
+ <p className="text-xs font-bold text-gray-500 uppercase mb-1">AI 听到的内容 (识别文本)</p>
321
+ <p className="text-gray-700 leading-relaxed">{assessmentResult.transcription}</p>
322
+ </div>
323
+ <div>
324
+ <p className="text-xs font-bold text-gray-500 uppercase mb-2">AI 点评建议</p>
325
+ <div className="p-4 bg-purple-50 text-purple-900 rounded-xl border border-purple-100 text-sm leading-relaxed">
326
+ <Emoji symbol="👨‍🏫" className="mr-2"/>
327
+ {assessmentResult.feedback}
328
+ </div>
329
+ </div>
330
+ </div>
331
+ </div>
332
+ )}
333
+ </div>
334
+ </div>
335
+ )}
336
+ </div>
337
+ );
338
+ };
server.js CHANGED
@@ -7,6 +7,12 @@ const {
7
  WishModel, FeedbackModel
8
  } = require('./models');
9
 
 
 
 
 
 
 
10
  // ... (existing setup code, middleware, connectDB, helpers)
11
  const express = require('express');
12
  const mongoose = require('mongoose');
@@ -22,7 +28,7 @@ const MONGO_URI = 'mongodb+srv://dv890a:db8822723@chatpro.gw3v0v7.mongodb.net/ch
22
  const app = express();
23
  app.use(compression());
24
  app.use(cors());
25
- app.use(bodyParser.json({ limit: '10mb' }));
26
  app.use(express.static(path.join(__dirname, 'dist'), {
27
  setHeaders: (res, filePath) => {
28
  if (filePath.endsWith('.html')) {
@@ -33,6 +39,7 @@ app.use(express.static(path.join(__dirname, 'dist'), {
33
  }
34
  }));
35
 
 
36
  const InMemoryDB = { schools: [], users: [], isFallback: false };
37
  const connectDB = async () => {
38
  try {
@@ -93,8 +100,120 @@ const generateStudentNo = async () => {
93
  return `${year}${random}`;
94
  };
95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  // ... (Rest of Existing Routes) ...
97
  app.get('/api/classes/:className/teachers', async (req, res) => {
 
98
  const { className } = req.params;
99
  const schoolId = req.headers['x-school-id'];
100
  const normalize = (s) => (s || '').replace(/\s+/g, '');
@@ -451,4 +570,4 @@ app.delete('/api/attendance/calendar/:id', async (req, res) => { await SchoolCal
451
  app.post('/api/batch-delete', async (req, res) => { if(req.body.type==='student') await Student.deleteMany({_id:{$in:req.body.ids}}); if(req.body.type==='score') await Score.deleteMany({_id:{$in:req.body.ids}}); res.json({}); });
452
 
453
  app.get('*', (req, res) => { res.sendFile(path.join(__dirname, 'dist', 'index.html')); });
454
- app.listen(PORT, () => console.log(`🚀 Server running on port ${PORT}`));
 
7
  WishModel, FeedbackModel
8
  } = require('./models');
9
 
10
+ // Initialize Gemini
11
+ const { GoogleGenAI, Type, Modality } = require("@google/genai");
12
+ // Note: GoogleGenAI client is initialized inside requests to use process.env.API_KEY dynamically if needed,
13
+ // but here we follow standard pattern.
14
+ const ai = new GoogleGenAI({ apiKey: process.env.API_KEY });
15
+
16
  // ... (existing setup code, middleware, connectDB, helpers)
17
  const express = require('express');
18
  const mongoose = require('mongoose');
 
28
  const app = express();
29
  app.use(compression());
30
  app.use(cors());
31
+ app.use(bodyParser.json({ limit: '50mb' })); // Increased limit for audio
32
  app.use(express.static(path.join(__dirname, 'dist'), {
33
  setHeaders: (res, filePath) => {
34
  if (filePath.endsWith('.html')) {
 
39
  }
40
  }));
41
 
42
+ // ... (DB Connection and helpers remain the same) ...
43
  const InMemoryDB = { schools: [], users: [], isFallback: false };
44
  const connectDB = async () => {
45
  try {
 
100
  return `${year}${random}`;
101
  };
102
 
103
+ // --- NEW AI ROUTES ---
104
+
105
+ // Scenario 1: AI Chat (Audio In -> AI Think -> Text + Audio Out)
106
+ app.post('/api/ai/chat', async (req, res) => {
107
+ const { text, audio } = req.body; // audio is base64 string
108
+
109
+ try {
110
+ const parts = [];
111
+ if (audio) {
112
+ parts.push({
113
+ inlineData: {
114
+ mimeType: 'audio/wav', // Assuming browser sends WAV or compatible
115
+ data: audio
116
+ }
117
+ });
118
+ }
119
+ if (text) {
120
+ parts.push({ text: text });
121
+ }
122
+
123
+ if (parts.length === 0) return res.status(400).json({ error: 'No input provided' });
124
+
125
+ // Step 1: Thinking (Gemini Flash Lite for low latency)
126
+ const thinkingResponse = await ai.models.generateContent({
127
+ model: 'gemini-2.5-flash-lite',
128
+ contents: { parts: parts },
129
+ config: {
130
+ systemInstruction: "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。如果学生使用语音,你也应当在回答中体现出自然的口语风格。",
131
+ }
132
+ });
133
+
134
+ const answerText = thinkingResponse.text || "抱歉,我没有听清,请再说一遍。";
135
+
136
+ // Step 2: Speaking (Gemini TTS)
137
+ const ttsResponse = await ai.models.generateContent({
138
+ model: "gemini-2.5-flash-preview-tts",
139
+ contents: [{ parts: [{ text: answerText }] }],
140
+ config: {
141
+ responseModalities: [Modality.AUDIO],
142
+ speechConfig: {
143
+ voiceConfig: {
144
+ prebuiltVoiceConfig: { voiceName: 'Kore' }, // 'Puck', 'Charon', 'Kore', 'Fenrir', 'Zephyr'
145
+ },
146
+ },
147
+ },
148
+ });
149
+
150
+ const audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
151
+
152
+ res.json({
153
+ text: answerText,
154
+ audio: audioBytes // Base64 PCM data
155
+ });
156
+
157
+ } catch (e) {
158
+ console.error("AI Chat Error:", e);
159
+ res.status(500).json({ error: e.message });
160
+ }
161
+ });
162
+
163
+ // Scenario 2: Evaluation (Question + Audio Answer -> AI Score)
164
+ app.post('/api/ai/evaluate', async (req, res) => {
165
+ const { question, audio } = req.body;
166
+
167
+ if (!question || !audio) return res.status(400).json({ error: 'Missing question or audio' });
168
+
169
+ try {
170
+ // Evaluate using Multimodal capability
171
+ const response = await ai.models.generateContent({
172
+ model: 'gemini-2.5-flash-lite',
173
+ contents: {
174
+ parts: [
175
+ { text: `请作为一名严谨的口语考官,对学生的回答进行评分。
176
+ 题目是:${question}。
177
+ 学生的回答在音频中。
178
+ 请分析学生的:
179
+ 1. 内容准确性 (是否回答了问题)
180
+ 2. 语言表达 (流畅度、词汇)
181
+ 3. 情感/语调
182
+ 请返回 JSON 格式,包含 score (0-100), feedback (简短评语), transcription (你听到的内容)。` },
183
+ {
184
+ inlineData: {
185
+ mimeType: 'audio/wav',
186
+ data: audio
187
+ }
188
+ }
189
+ ]
190
+ },
191
+ config: {
192
+ responseMimeType: "application/json",
193
+ responseSchema: {
194
+ type: Type.OBJECT,
195
+ properties: {
196
+ score: { type: Type.NUMBER },
197
+ feedback: { type: Type.STRING },
198
+ transcription: { type: Type.STRING }
199
+ },
200
+ required: ["score", "feedback", "transcription"]
201
+ }
202
+ }
203
+ });
204
+
205
+ res.json(JSON.parse(response.text));
206
+
207
+ } catch (e) {
208
+ console.error("AI Eval Error:", e);
209
+ res.status(500).json({ error: e.message });
210
+ }
211
+ });
212
+
213
+
214
  // ... (Rest of Existing Routes) ...
215
  app.get('/api/classes/:className/teachers', async (req, res) => {
216
+ // ... existing code ...
217
  const { className } = req.params;
218
  const schoolId = req.headers['x-school-id'];
219
  const normalize = (s) => (s || '').replace(/\s+/g, '');
 
570
  app.post('/api/batch-delete', async (req, res) => { if(req.body.type==='student') await Student.deleteMany({_id:{$in:req.body.ids}}); if(req.body.type==='score') await Score.deleteMany({_id:{$in:req.body.ids}}); res.json({}); });
571
 
572
  app.get('*', (req, res) => { res.sendFile(path.join(__dirname, 'dist', 'index.html')); });
573
+ app.listen(PORT, () => console.log(`🚀 Server running on port ${PORT}`));
services/api.ts CHANGED
@@ -1,5 +1,6 @@
1
 
2
- import { User, ClassInfo, SystemConfig, Subject, School, Schedule, GameSession, StudentReward, LuckyDrawConfig, Attendance, LeaveRequest, AchievementConfig, SchoolCalendarEntry, TeacherExchangeConfig, Wish, Feedback } from '../types';
 
3
 
4
  const getBaseUrl = () => {
5
  let isProd = false;
@@ -267,5 +268,11 @@ export const api = {
267
  create: (data: Partial<Feedback>) => request('/feedback', { method: 'POST', body: JSON.stringify(data) }),
268
  update: (id: string, data: { status?: string, reply?: string }) => request(`/feedback/${id}`, { method: 'PUT', body: JSON.stringify(data) }),
269
  ignoreAll: (targetId: string) => request('/feedback/ignore-all', { method: 'POST', body: JSON.stringify({ targetId }) }),
 
 
 
 
 
 
270
  }
271
  };
 
1
 
2
+ // ... existing imports
3
+ import { User, ClassInfo, SystemConfig, Subject, School, Schedule, GameSession, StudentReward, LuckyDrawConfig, Attendance, LeaveRequest, AchievementConfig, SchoolCalendarEntry, TeacherExchangeConfig, Wish, Feedback, AIChatMessage } from '../types';
4
 
5
  const getBaseUrl = () => {
6
  let isProd = false;
 
268
  create: (data: Partial<Feedback>) => request('/feedback', { method: 'POST', body: JSON.stringify(data) }),
269
  update: (id: string, data: { status?: string, reply?: string }) => request(`/feedback/${id}`, { method: 'PUT', body: JSON.stringify(data) }),
270
  ignoreAll: (targetId: string) => request('/feedback/ignore-all', { method: 'POST', body: JSON.stringify({ targetId }) }),
271
+ },
272
+
273
+ // NEW: AI Endpoints
274
+ ai: {
275
+ chat: (data: { text?: string, audio?: string }) => request('/ai/chat', { method: 'POST', body: JSON.stringify(data) }),
276
+ evaluate: (data: { question: string, audio: string }) => request('/ai/evaluate', { method: 'POST', body: JSON.stringify(data) }),
277
  }
278
  };
types.ts CHANGED
@@ -3,8 +3,29 @@
3
 
4
  // ... existing types
5
 
6
- // New Types for Wish Tree and Feedback Wall
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  export type WishStatus = 'PENDING' | 'FULFILLED';
9
 
10
  export interface Wish {
 
3
 
4
  // ... existing types
5
 
6
+ // AI Assistant Types
7
 
8
+ export interface AIChatMessage {
9
+ id: string;
10
+ role: 'user' | 'model';
11
+ text?: string;
12
+ audio?: string; // Base64 encoded audio for playback
13
+ isAudioMessage?: boolean; // Was the input/output audio based?
14
+ timestamp: number;
15
+ }
16
+
17
+ export interface OralAssessment {
18
+ id: string;
19
+ topic: string;
20
+ difficulty: string;
21
+ studentAudio?: string;
22
+ score?: number;
23
+ feedback?: string;
24
+ transcription?: string; // What the AI heard
25
+ status: 'IDLE' | 'RECORDING' | 'ANALYZING' | 'COMPLETED';
26
+ }
27
+
28
+ // ... rest of the file
29
  export type WishStatus = 'PENDING' | 'FULFILLED';
30
 
31
  export interface Wish {