Upload 62 files
- ai-routes.js +290 -163
- components/ai/AdminPanel.tsx +63 -39
- components/ai/AssessmentPanel.tsx +82 -123
- components/ai/ChatPanel.tsx +77 -160
- models.js +0 -1
- server.js +8 -5
ai-routes.js
CHANGED
@@ -4,7 +4,19 @@ const router = express.Router();
 const OpenAI = require('openai');
 const { ConfigModel, User, AIUsageModel } = require('./models');
 
-//
+// ... (Key Management, Usage Tracking, Helpers, Provider Management functions remain same as before)
+// Fetch keys from DB + merge with ENV variables
+async function getKeyPool(type) {
+  const config = await ConfigModel.findOne({ key: 'main' });
+  const pool = [];
+  if (config && config.apiKeys && config.apiKeys[type] && Array.isArray(config.apiKeys[type])) {
+    config.apiKeys[type].forEach(k => { if (k && k.trim()) pool.push(k.trim()); });
+  }
+  if (type === 'gemini' && process.env.API_KEY && !pool.includes(process.env.API_KEY)) pool.push(process.env.API_KEY);
+  if (type === 'openrouter' && process.env.OPENROUTER_API_KEY && !pool.includes(process.env.OPENROUTER_API_KEY)) pool.push(process.env.OPENROUTER_API_KEY);
+  return pool;
+}
+
 async function recordUsage(model, provider) {
   try {
     const today = new Date().toISOString().split('T')[0];
@@ -13,72 +25,48 @@ async function recordUsage(model, provider) {
   } catch (e) { console.error("Failed to record AI usage stats:", e); }
 }
 
-async function transcribeAudioWithHF(audioBase64) {
-  try {
-    console.log("[AI] 🎤 Using Hugging Face ASR (Whisper v3)...");
-    const buffer = Buffer.from(audioBase64, 'base64');
-
-    const response = await fetch(
-      "https://api-inference.huggingface.co/models/openai/whisper-large-v3",
-      {
-        headers: {
-          Authorization: `Bearer ${token}`,
-        },
-        method: "POST",
-        body: buffer,
-      }
-    );
-
-    if (!response.ok) {
-      const errText = await response.text();
-      if (response.status === 503) {
-        console.log("[AI] HF Model loading, retrying...");
-        await new Promise(r => setTimeout(r, 3000));
-        return transcribeAudioWithHF(audioBase64);
-      }
-      throw new Error(`HF API Error: ${response.status} ${errText}`);
-    }
-
-    const result = await response.json();
-    console.log("[AI] HF Transcribed:", result.text);
-    return result.text;
-  } catch (e) {
-    console.error("[AI] HF STT Failed:", e.message);
-    return null;
-  }
-}
+const wait = (ms) => new Promise(resolve => setTimeout(resolve, ms));
+async function callAIWithRetry(aiModelCall, retries = 1) {
+  for (let i = 0; i < retries; i++) {
+    try { return await aiModelCall(); }
+    catch (e) {
+      if (e.status === 400 || e.status === 401 || e.status === 403) throw e;
+      if (i < retries - 1) { await wait(1000 * Math.pow(2, i)); continue; }
+      throw e;
+    }
+  }
+}
 
 function convertGeminiToOpenAI(baseParams) {
   const messages = [];
   if (baseParams.config?.systemInstruction) messages.push({ role: 'system', content: baseParams.config.systemInstruction });
+
+  // Normalize contents to array if it's a single object (Gemini allows shorthand, OpenAI/Middleware needs array)
   let contents = baseParams.contents;
-  if (contents && !Array.isArray(contents))
+  if (contents && !Array.isArray(contents)) {
+    contents = [contents];
+  }
 
   if (contents && Array.isArray(contents)) {
     contents.forEach(content => {
+      // Default to user role if not specified (common in short-hand calls)
      let role = (content.role === 'model' || content.role === 'assistant') ? 'assistant' : 'user';
+
+      // Handle simple text shorthand if parts is missing but text exists (rare but possible in some SDK versions)
+      // But standard Gemini is { parts: [...] }
+
      const messageContent = [];
      if (content.parts) {
        content.parts.forEach(p => {
          if (p.text) messageContent.push({ type: 'text', text: p.text });
-          else if (p.inlineData) {
-            messageContent.push({ type: 'image_url', image_url: { url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}` } });
-          } else if (p.inlineData.mimeType.startsWith('audio/')) {
-            messageContent.push({ type: 'audio_base64', data: p.inlineData.data });
-          }
+          else if (p.inlineData && p.inlineData.mimeType.startsWith('image/')) {
+            messageContent.push({ type: 'image_url', image_url: { url: `data:${p.inlineData.mimeType};base64,${p.inlineData.data}` } });
+          }
        });
      }
+
      if (messageContent.length > 0) {
+        // If only one text part, send as string (cleaner for some weaker models)
        if (messageContent.length === 1 && messageContent[0].type === 'text') {
          messages.push({ role: role, content: messageContent[0].text });
        } else {
@@ -93,11 +81,17 @@ function convertGeminiToOpenAI(baseParams) {
 const PROVIDERS = { GEMINI: 'GEMINI', OPENROUTER: 'OPENROUTER', GEMMA: 'GEMMA' };
 const DEFAULT_OPENROUTER_MODELS = ['qwen/qwen3-coder:free', 'openai/gpt-oss-120b:free', 'qwen/qwen3-235b-a22b:free', 'tngtech/deepseek-r1t-chimera:free'];
 
+// Runtime override logic
 let runtimeProviderOrder = [];
 
 function deprioritizeProvider(providerName) {
+  // If the provider is already last, do nothing
   if (runtimeProviderOrder.length > 0 && runtimeProviderOrder[runtimeProviderOrder.length - 1] === providerName) return;
+
+  console.log(`[AI System] ⚠️ Deprioritizing ${providerName} due to errors. Moving to end of queue.`);
+  // Move to end
   runtimeProviderOrder = runtimeProviderOrder.filter(p => p !== providerName).concat(providerName);
+  console.log(`[AI System] 🔄 New Priority Order: ${runtimeProviderOrder.join(' -> ')}`);
 }
 
 function isQuotaError(e) {
@@ -105,121 +99,208 @@ function isQuotaError(e) {
   return e.status === 429 || e.status === 503 || msg.includes('quota') || msg.includes('overloaded') || msg.includes('resource_exhausted') || msg.includes('rate limit') || msg.includes('credits');
 }
 
+// Streaming Helpers
 async function streamGemini(baseParams, res) {
   const { GoogleGenAI } = await import("@google/genai");
-  const
-  if (
-  const
-        return fullText;
-      } catch (e) {
-        throw e;
-      }
+  const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
+  const keys = await getKeyPool('gemini');
+  if (keys.length === 0) throw new Error("No Gemini API keys");
+
+  for (const apiKey of keys) {
+    const client = new GoogleGenAI({ apiKey });
+    for (const modelName of models) {
+      try {
+        console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
+        const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
+
+        // First chunk check usually determines connection success
+        let hasStarted = false;
+        let fullText = "";
+
+        for await (const chunk of result) {
+          if (!hasStarted) {
+            console.log(`[AI] ✅ Connected to Gemini: ${modelName}`);
+            recordUsage(modelName, PROVIDERS.GEMINI);
+            hasStarted = true;
+          }
+          if (chunk.text) {
+            fullText += chunk.text;
+            res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
+            if (res.flush) res.flush();
+          }
+        }
+        return fullText;
+      } catch (e) {
+        console.warn(`[AI] ⚠️ Gemini ${modelName} Error: ${e.message}`);
+        if (isQuotaError(e)) {
+          console.log(`[AI] 🔄 Quota exceeded for ${modelName}, trying next...`);
+          continue; // Try next model or key
+        }
+        throw e; // Non-quota errors bubble up to switch provider
+      }
    }
  }
+  throw new Error("Gemini streaming failed (All keys/models exhausted)");
 }
 
 async function streamOpenRouter(baseParams, res) {
   const config = await ConfigModel.findOne({ key: 'main' });
   const models = (config && config.openRouterModels?.length) ? config.openRouterModels.map(m => m.id) : DEFAULT_OPENROUTER_MODELS;
-  if (!apiKey) throw new Error("OPENROUTER_API_KEY environment variable is not configured.");
-      if (text) {
-      } else {
-        throw new Error("语音转文字失败 (请检查 HF_TOKEN)");
-      }
-      const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
-      const client = new OpenAI({ baseURL, apiKey });
-      try {
-        const stream = await client.chat.completions.create({ model: modelName, messages, stream: true });
-        recordUsage(modelName, PROVIDERS.OPENROUTER);
-        let fullText = '';
-        for await (const chunk of stream) {
-          const text = chunk.choices[0]?.delta?.content || '';
-          if (text) {
-            fullText += text;
-            res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
-          }
-        }
-      } catch (e) { if (isQuotaError(e)) break; }
-  }
-  throw new Error("OpenRouter
+  const messages = convertGeminiToOpenAI(baseParams);
+  const keys = await getKeyPool('openrouter');
+  if (keys.length === 0) throw new Error("No OpenRouter API keys");
+
+  if (messages.length === 0) {
+    throw new Error("Conversion resulted in empty messages array. Check input format.");
+  }
+
+  for (const apiKey of keys) {
+    for (const modelName of models) {
+      // Find specific model config to check for custom URL
+      const modelConfig = config?.openRouterModels?.find(m => m.id === modelName);
+      const baseURL = modelConfig?.apiUrl ? modelConfig.apiUrl : "https://openrouter.ai/api/v1";
+      const providerLabel = modelConfig?.apiUrl ? 'Custom API' : 'OpenRouter';
+
+      const client = new OpenAI({ baseURL, apiKey, defaultHeaders: { "HTTP-Referer": "https://smart.com", "X-Title": "Smart School" } });
+
+      try {
+        console.log(`[AI] 🚀 Attempting ${providerLabel} Model: ${modelName} (URL: ${baseURL})`);
+        // console.log(`[AI] Payload Messages:`, JSON.stringify(messages).substring(0, 200) + "..."); // Debug log
+
+        const stream = await client.chat.completions.create({ model: modelName, messages, stream: true });
+
+        console.log(`[AI] ✅ Connected to ${providerLabel}: ${modelName}`);
+        recordUsage(modelName, PROVIDERS.OPENROUTER);
+
+        let fullText = '';
+        for await (const chunk of stream) {
+          const text = chunk.choices[0]?.delta?.content || '';
+          if (text) {
+            fullText += text;
+            res.write(`data: ${JSON.stringify({ text: text })}\n\n`);
+            if (res.flush) res.flush();
+          }
+        }
+        return fullText;
+      } catch (e) {
+        console.warn(`[AI] ⚠️ ${providerLabel} ${modelName} Error: ${e.message}`);
+        if (isQuotaError(e)) {
+          console.log(`[AI] 🔄 Rate limit/Quota for ${modelName}, switching...`);
+          break; // Switch to next provider/model logic if implemented in loop, here break to next model in loop
+        }
+        // For OpenAI client, some errors might be specific
+      }
+    }
+  }
+  throw new Error("OpenRouter/Custom stream failed (All models exhausted)");
 }
 
 async function streamGemma(baseParams, res) {
   const { GoogleGenAI } = await import("@google/genai");
-  const apiKey = process.env.API_KEY;
-  if (!apiKey) throw new Error("API_KEY environment variable is not configured.");
   const models = ['gemma-3-27b-it', 'gemma-3-12b-it'];
-  const
-      } catch (e) { if (isQuotaError(e)) continue; }
-  throw new Error("Gemma failed");
+  const keys = await getKeyPool('gemini'); // Gemma uses Gemini keys
+  if (keys.length === 0) throw new Error("No keys for Gemma");
+
+  for (const apiKey of keys) {
+    const client = new GoogleGenAI({ apiKey });
+    for (const modelName of models) {
+      try {
+        console.log(`[AI] 🚀 Attempting Gemma Model: ${modelName}`);
+        const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
+
+        let hasStarted = false;
+        let fullText = "";
+        for await (const chunk of result) {
+          if (!hasStarted) {
+            console.log(`[AI] ✅ Connected to Gemma: ${modelName}`);
+            recordUsage(modelName, PROVIDERS.GEMMA);
+            hasStarted = true;
+          }
+          if (chunk.text) {
+            fullText += chunk.text;
+            res.write(`data: ${JSON.stringify({ text: chunk.text })}\n\n`);
+            if (res.flush) res.flush();
+          }
+        }
+        return fullText;
+      } catch (e) {
+        console.warn(`[AI] ⚠️ Gemma ${modelName} Error: ${e.message}`);
+        if (isQuotaError(e)) continue;
+      }
+    }
+  }
+  throw new Error("Gemma stream failed");
 }
 
 async function streamContentWithSmartFallback(baseParams, res) {
+  let hasAudio = false;
+
+  // Check if contents is array or object, handle accordingly
+  const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
+
+  contentsArray.forEach(c => {
+    if (c && c.parts) {
+      c.parts.forEach(p => { if (p.inlineData && p.inlineData.mimeType.startsWith('audio/')) hasAudio = true; });
+    }
+  });
+
+  // Audio input currently forces Gemini
+  if (hasAudio) {
+    try {
+      console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
+      return await streamGemini(baseParams, res);
+    } catch(e) {
+      console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
+      deprioritizeProvider(PROVIDERS.GEMINI);
+      throw new Error('QUOTA_EXCEEDED_AUDIO');
+    }
+  }
+
+  // FETCH CONFIG AND SET PROVIDER ORDER
   const config = await ConfigModel.findOne({ key: 'main' });
-  const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
+  const configuredOrder = config?.aiProviderOrder && config.aiProviderOrder.length > 0
+    ? config.aiProviderOrder
+    : [PROVIDERS.GEMINI, PROVIDERS.OPENROUTER, PROVIDERS.GEMMA];
+
+  // If runtime order is empty or contains different elements (e.g. config changed), reset it
+  const runtimeSet = new Set(runtimeProviderOrder);
+  const configSet = new Set(configuredOrder);
+  if (runtimeProviderOrder.length === 0 || runtimeProviderOrder.length !== configuredOrder.length || !configuredOrder.every(p => runtimeSet.has(p))) {
+    console.log(`[AI] 📋 Initializing Provider Order: ${configuredOrder.join(' -> ')}`);
+    runtimeProviderOrder = [...configuredOrder];
+  } else {
+    console.log(`[AI] 📋 Current Provider Priority: ${runtimeProviderOrder.join(' -> ')}`);
+  }
 
   let finalError = null;
+
+  // Use runtimeProviderOrder which might have been adjusted due to quota errors in previous calls
   for (const provider of runtimeProviderOrder) {
     try {
+      console.log(`[AI] 👉 Trying Provider: ${provider}...`);
+
      if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
      else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
      else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
+
    } catch (e) {
+      console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
      finalError = e;
+
+      if (isQuotaError(e)) {
+        console.log(`[AI] 📉 Quota/Rate Limit detected. Switching provider...`);
+        deprioritizeProvider(provider);
+        continue;
+      }
+      // If it's a hard error (e.g. network), we might also want to switch,
+      // but strict quota error check usually suffices for fallback logic.
+      // For robustness, let's allow fallback on most errors in the loop:
      continue;
    }
  }
+
+  console.error(`[AI] 💀 All providers failed.`);
+  throw finalError || new Error('All streaming models unavailable.');
 }
 
 const checkAIAccess = async (req, res, next) => {
@@ -234,11 +315,14 @@ const checkAIAccess = async (req, res, next) => {
   next();
 };
 
+// NEW: Endpoint to provide a temporary key for Client-Side Live API
 router.get('/live-access', checkAIAccess, async (req, res) => {
   try {
-    const
-    if (
+    const keys = await getKeyPool('gemini');
+    if (keys.length === 0) return res.status(503).json({ error: 'No API keys available' });
+    // Return the first available key. In a real prod environment, you might issue a short-lived proxy token.
+    // For this architecture, we return the key to allow direct WebSocket connection.
+    res.json({ key: keys[0] });
  } catch (e) { res.status(500).json({ error: e.message }); }
 });
 
@@ -260,7 +344,11 @@ router.get('/stats', checkAIAccess, async (req, res) => {
   } catch (e) { res.status(500).json({ error: e.message }); }
 });
 
-router.post('/reset-pool', checkAIAccess, (req, res) => {
+router.post('/reset-pool', checkAIAccess, (req, res) => {
+  runtimeProviderOrder = []; // Will be re-initialized from DB on next call
+  console.log('[AI] 🔄 Provider priority pool reset.');
+  res.json({ success: true });
+});
 
 router.post('/chat', checkAIAccess, async (req, res) => {
   const { text, audio, history } = req.body;
@@ -281,34 +369,38 @@ router.post('/chat', checkAIAccess, async (req, res) => {
 
   const answerText = await streamContentWithSmartFallback({
     contents: fullContents,
-    config: { systemInstruction: "
+    config: { systemInstruction: "你是一位友善、耐心且知识渊博的中小学AI助教。请用简洁、鼓励性的语言回答学生的问题。回复支持 Markdown 格式。" }
   }, res);
 
   if (answerText) {
     try {
       const { GoogleGenAI } = await import("@google/genai");
-      const
+      const keys = await getKeyPool('gemini');
+      let audioBytes = null;
+      for (const apiKey of keys) {
+        try {
+          const client = new GoogleGenAI({ apiKey });
+          const ttsResponse = await client.models.generateContent({
+            model: "gemini-2.5-flash-preview-tts",
+            contents: [{ parts: [{ text: answerText }] }],
+            config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
+          });
+          audioBytes = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
+          if (audioBytes) break;
+        } catch(e) { if (isQuotaError(e)) continue; break; }
      }
+      if (audioBytes) res.write(`data: ${JSON.stringify({ audio: audioBytes })}\n\n`);
+      else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
    } catch (ttsError) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
  }
  res.write('data: [DONE]\n\n'); res.end();
 } catch (e) {
+  console.error("[AI Chat Route Error]", e);
  res.write(`data: ${JSON.stringify({ error: true, message: e.message })}\n\n`); res.end();
 }
 });
 
+// STREAMING ASSESSMENT ENDPOINT
 router.post('/evaluate', checkAIAccess, async (req, res) => {
   const { question, audio, image, images } = req.body;
   res.setHeader('Content-Type', 'text/event-stream');
@@ -318,46 +410,81 @@ router.post('/evaluate', checkAIAccess, async (req, res) => {
 
  try {
    res.write(`data: ${JSON.stringify({ status: 'analyzing' })}\n\n`);
-    const evalParts = [{ text: `请对学生的回答评分。题目:${question}。` }];
-    if (audio) evalParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
-    if (images && Array.isArray(images)) images.forEach(img => { if(img) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: img } }); });
-    else if (image) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
 
-    evalParts
+    const evalParts = [{ text: `请作为一名严谨的老师,对学生的回答进行评分。题目是:${question}。` }];
+    if (audio) {
+      evalParts.push({ text: "学生的回答在音频中。" });
+      evalParts.push({ inlineData: { mimeType: 'audio/webm', data: audio } });
+    }
+
+    // Support multiple images
+    if (images && Array.isArray(images) && images.length > 0) {
+      evalParts.push({ text: "学生的回答写在以下图片中,请识别所有图片中的文字内容并进行批改:" });
+      images.forEach(img => {
+        if(img) evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: img } });
+      });
+    } else if (image) {
+      // Legacy single image support
+      evalParts.push({ text: "学生的回答写在图片中,请识别图片中的文字内容并进行批改。" });
+      evalParts.push({ inlineData: { mimeType: 'image/jpeg', data: image } });
+    }
+
+    // Force structured markdown output for streaming parsing
+    evalParts.push({ text: `请分析:1. 内容准确性 2. 表达/书写规范。
+必须严格按照以下格式输出(不要使用Markdown代码块包裹):
+
 ## Transcription
-(
+(在此处输出识别到的学生回答内容,如果是图片则为识别的文字)
+
 ## Feedback
-(
+(在此处输出简短的鼓励性评语和建议)
+
 ## Score
-(0-100
+(在此处仅输出一个0-100的数字)` });
 
+    // Stream Text
+    const fullText = await streamContentWithSmartFallback({
+      // CRITICAL FIX: Pass as array of objects for OpenRouter compatibility
+      contents: [{ role: 'user', parts: evalParts }],
+      // NO JSON MODE to allow progressive text streaming
+    }, res);
+
+    // Extract Feedback for TTS
    const feedbackMatch = fullText.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i);
    const feedbackText = feedbackMatch ? feedbackMatch[1].trim() : "";
+
+    // Generate TTS if feedback exists
    if (feedbackText) {
      res.write(`data: ${JSON.stringify({ status: 'tts' })}\n\n`);
      try {
        const { GoogleGenAI } = await import("@google/genai");
-        const
+        const keys = await getKeyPool('gemini');
+        let feedbackAudio = null;
+        for (const apiKey of keys) {
+          try {
+            const client = new GoogleGenAI({ apiKey });
+            const ttsResponse = await client.models.generateContent({
+              model: "gemini-2.5-flash-preview-tts",
+              contents: [{ parts: [{ text: feedbackText }] }],
+              config: { responseModalities: ['AUDIO'], speechConfig: { voiceConfig: { prebuiltVoiceConfig: { voiceName: 'Kore' } } } }
+            });
+            feedbackAudio = ttsResponse.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
+            if (feedbackAudio) break;
+          } catch(e) { if (isQuotaError(e)) continue; break; }
        }
+        if (feedbackAudio) res.write(`data: ${JSON.stringify({ audio: feedbackAudio })}\n\n`);
+        else res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`);
      } catch (ttsErr) { res.write(`data: ${JSON.stringify({ ttsSkipped: true })}\n\n`); }
    }
+
+    res.write('data: [DONE]\n\n');
+    res.end();
+
  } catch (e) {
+    console.error("AI Eval Error:", e);
+    res.write(`data: ${JSON.stringify({ error: true, message: e.message || "Evaluation failed" })}\n\n`);
+    res.end();
  }
 });
 
-module.exports = router;
+module.exports = router;
components/ai/AdminPanel.tsx
CHANGED
@@ -28,7 +28,10 @@ export const AdminPanel: React.FC = () => {
 } | null>(null);
 
 // Key Management
+const [geminiKeys, setGeminiKeys] = useState<string[]>([]);
+const [openRouterKeys, setOpenRouterKeys] = useState<string[]>([]);
+const [newGeminiKey, setNewGeminiKey] = useState('');
+const [newOpenRouterKey, setNewOpenRouterKey] = useState('');
 
 // Model Management
 const [orModels, setOrModels] = useState<OpenRouterModelConfig[]>([]);
@@ -47,7 +50,10 @@ export const AdminPanel: React.FC = () => {
   try {
     const cfg = await api.config.get();
     setSystemConfig(cfg);
+    if (cfg.apiKeys) {
+      setGeminiKeys(cfg.apiKeys.gemini || []);
+      setOpenRouterKeys(cfg.apiKeys.openrouter || []);
+    }
     setOrModels(cfg.openRouterModels && cfg.openRouterModels.length > 0 ? cfg.openRouterModels : DEFAULT_OR_MODELS);
 
     if (cfg.aiProviderOrder && cfg.aiProviderOrder.length > 0) {
@@ -73,6 +79,18 @@ export const AdminPanel: React.FC = () => {
   }
 };
 
+const handleAddKey = (type: 'gemini' | 'openrouter') => {
+  const key = type === 'gemini' ? newGeminiKey.trim() : newOpenRouterKey.trim();
+  if (!key) return;
+  if (type === 'gemini') { setGeminiKeys([...geminiKeys, key]); setNewGeminiKey(''); }
+  else { setOpenRouterKeys([...openRouterKeys, key]); setNewOpenRouterKey(''); }
+};
+
+const removeKey = (type: 'gemini' | 'openrouter', index: number) => {
+  if (type === 'gemini') setGeminiKeys(geminiKeys.filter((_, i) => i !== index));
+  else setOpenRouterKeys(openRouterKeys.filter((_, i) => i !== index));
+};
+
 const handleAddModel = () => {
   if (!newModelId.trim()) return;
   setOrModels([...orModels, {
@@ -99,16 +117,17 @@ export const AdminPanel: React.FC = () => {
   setProviderOrder(newArr);
 };
 
-const
+const saveApiKeys = async () => {
   if (!systemConfig) return;
   try {
     await api.config.save({
       ...systemConfig,
+      apiKeys: { gemini: geminiKeys, openrouter: openRouterKeys },
      openRouterModels: orModels,
      aiProviderOrder: providerOrder
    });
    await api.ai.resetPool();
-    setToast({ show: true, message: '
+    setToast({ show: true, message: 'API 配置及模型列表已保存', type: 'success' });
  } catch (e) { setToast({ show: true, message: '保存失败', type: 'error' }); }
 };
@@ -121,7 +140,7 @@ export const AdminPanel: React.FC = () => {
   </div>
   <div>
     <h1 className="text-2xl font-bold text-gray-800">AI 智能助教管理后台</h1>
-    <p className="text-gray-500">监控 AI
+    <p className="text-gray-500">监控 AI 服务状态与用量,管理密钥池。</p>
   </div>
 </div>
@@ -166,46 +185,51 @@ export const AdminPanel: React.FC = () => {
     </div>
   </div>
 </div>
 <div className="bg-white p-6 rounded-xl border border-gray-100 shadow-sm">
-  <div className="flex justify-between items-center mb-6"><h3 className="font-bold text-gray-800 flex items-center"><Key className="mr-2 text-amber-500"/>
-
-  {/* Fixed: Removed API Key Pool configuration section per guidelines */}
-
+  <div className="flex justify-between items-center mb-6"><h3 className="font-bold text-gray-800 flex items-center"><Key className="mr-2 text-amber-500"/> 多线路密钥池配置</h3><button onClick={saveApiKeys} className="bg-blue-600 text-white px-4 py-2 rounded-lg text-sm font-bold flex items-center gap-2 hover:bg-blue-700 shadow-sm"><Save size={16}/> 保存所有配置</button></div>
   <div className="grid grid-cols-1 md:grid-cols-2 gap-8">
-    {/* Provider Order Management */}
     <div>
-      <div className="flex justify-between
-      <
-      <div className="space-y-2">
-        {providerOrder.map((provider, idx) => (
-          <div key={provider} className="flex items-center gap-3 bg-white p-3 rounded-lg border border-gray-200 shadow-sm">
-            <div className="bg-gray-100 text-gray-500 w-6 h-6 flex items-center justify-center rounded-full text-xs font-bold">{idx + 1}</div>
-            <div className="flex-1 font-bold text-gray-700">{provider}</div>
-            <div className="flex gap-1">
-              <button onClick={() => handleMoveProviderOrder(idx, -1)} disabled={idx === 0} className="p-1 hover:bg-gray-100 rounded text-gray-400 hover:text-blue-500 disabled:opacity-30"><ArrowUp size={16}/></button>
-              <button onClick={() => handleMoveProviderOrder(idx, 1)} disabled={idx === providerOrder.length - 1} className="p-1 hover:bg-gray-100 rounded text-gray-400 hover:text-blue-500 disabled:opacity-30"><ArrowDown size={16}/></button>
-            </div>
-          </div>
-        ))}
-      </div>
+      <div className="flex items-center justify-between mb-2"><label className="text-sm font-bold text-gray-700">Google Gemini / Gemma 密钥池</label><span className="text-xs bg-blue-100 text-blue-700 px-2 py-0.5 rounded-full">{geminiKeys.length} 个</span></div>
+      <p className="text-xs text-gray-400 mb-3">当一个 Key 额度耗尽时,系统将自动切换至下一个。</p>
+      <div className="space-y-2 mb-3">{geminiKeys.map((k, idx) => (<div key={idx} className="flex gap-2 items-center bg-gray-50 p-2 rounded border border-gray-200"><div className="flex-1 font-mono text-xs text-gray-600 truncate">{k.substring(0, 8)}...{k.substring(k.length - 6)}</div><button onClick={() => removeKey('gemini', idx)} className="text-gray-400 hover:text-red-500"><Trash2 size={14}/></button></div>))}</div>
+      <div className="flex gap-2"><input className="flex-1 border border-gray-300 rounded px-3 py-1.5 text-sm outline-none focus:ring-2 focus:ring-blue-500" placeholder="输入 Gemini API Key" value={newGeminiKey} onChange={e => setNewGeminiKey(e.target.value)}/><button onClick={() => handleAddKey('gemini')} className="bg-gray-100 hover:bg-gray-200 text-gray-600 px-3 py-1.5 rounded border border-gray-300"><Plus size={16}/></button></div>
     </div>
     <div>
-      <div className="flex justify-between
-      <
+      <div className="flex items-center justify-between mb-2"><label className="text-sm font-bold text-gray-700">OpenRouter (通用) 密钥池</label><span className="text-xs bg-purple-100 text-purple-700 px-2 py-0.5 rounded-full">{openRouterKeys.length} 个</span></div>
+      <p className="text-xs text-gray-400 mb-3">备用线路。所有下方“大模型列表”中的模型都将使用这里的 Key。</p>
+      <div className="space-y-2 mb-3">{openRouterKeys.map((k, idx) => (<div key={idx} className="flex gap-2 items-center bg-gray-50 p-2 rounded border border-gray-200"><div className="flex-1 font-mono text-xs text-gray-600 truncate">{k.substring(0, 8)}...{k.substring(k.length - 6)}</div><button onClick={() => removeKey('openrouter', idx)} className="text-gray-400 hover:text-red-500"><Trash2 size={14}/></button></div>))}</div>
+      <div className="flex gap-2"><input className="flex-1 border border-gray-300 rounded px-3 py-1.5 text-sm outline-none focus:ring-2 focus:ring-purple-500" placeholder="输入 API Key" value={newOpenRouterKey} onChange={e => setNewOpenRouterKey(e.target.value)}/><button onClick={() => handleAddKey('openrouter')} className="bg-gray-100 hover:bg-gray-200 text-gray-600 px-3 py-1.5 rounded border border-gray-300"><Plus size={16}/></button></div>
+    </div>
+  </div>
+
+  {/* Provider Order Management */}
+  <div className="mt-8 border-t border-gray-100 pt-6">
+    <div className="flex justify-between items-center mb-4"><h4 className="font-bold text-gray-700 text-sm flex items-center"><Layers className="mr-2" size={16}/> 大模型调用优先级</h4></div>
+    <div className="bg-amber-50 p-4 rounded-lg border border-amber-100 mb-4">
+      <p className="text-xs text-amber-800">系统将按照以下顺序尝试调用大模型。如果前一个服务商额度耗尽或报错,会自动切换到下一个。</p>
+    </div>
+    <div className="space-y-2 max-w-md">
+      {providerOrder.map((provider, idx) => (
+        <div key={provider} className="flex items-center gap-3 bg-white p-3 rounded-lg border border-gray-200 shadow-sm">
+          <div className="bg-gray-100 text-gray-500 w-6 h-6 flex items-center justify-center rounded-full text-xs font-bold">{idx + 1}</div>
+          <div className="flex-1 font-bold text-gray-700">{provider}</div>
+          <div className="flex gap-1">
+            <button onClick={() => handleMoveProviderOrder(idx, -1)} disabled={idx === 0} className="p-1 hover:bg-gray-100 rounded text-gray-400 hover:text-blue-500 disabled:opacity-30"><ArrowUp size={16}/></button>
+            <button onClick={() => handleMoveProviderOrder(idx, 1)} disabled={idx === providerOrder.length - 1} className="p-1 hover:bg-gray-100 rounded text-gray-400 hover:text-blue-500 disabled:opacity-30"><ArrowDown size={16}/></button>
+          </div>
+        </div>
+      ))}
+    </div>
+  </div>
+
+  <div className="mt-8 border-t border-gray-100 pt-6">
+    <div className="flex justify-between items-center mb-4"><h4 className="font-bold text-gray-700 text-sm">OpenAI 格式大模型列表管理</h4></div>
+    <div className="space-y-2 mb-4 bg-gray-50 p-3 rounded-lg border border-gray-200">{orModels.map((m, idx) => (<div key={idx} className="flex items-center gap-2 bg-white p-2 rounded border border-gray-100 shadow-sm"><div className="flex flex-col gap-0.5 px-1"><button onClick={()=>handleMoveModel(idx, -1)} className="text-gray-400 hover:text-blue-500 disabled:opacity-30" disabled={idx===0}><ArrowUp size={12}/></button><button onClick={()=>handleMoveModel(idx, 1)} className="text-gray-400 hover:text-blue-500 disabled:opacity-30" disabled={idx===orModels.length-1}><ArrowDown size={12}/></button></div><div className="flex-1 min-w-0"><div className="text-sm font-bold text-gray-800">{m.name || m.id}</div><div className="text-xs text-gray-400 font-mono truncate" title={m.id}>ID: {m.id}</div>{m.apiUrl && <div className="text-[10px] text-blue-500 truncate" title={m.apiUrl}>API: {m.apiUrl}</div>}</div><div className="flex items-center gap-2">{m.isCustom ? (<span className="text-[10px] bg-blue-50 text-blue-600 px-2 py-0.5 rounded">自定义</span>) : (<span className="text-[10px] bg-gray-100 text-gray-500 px-2 py-0.5 rounded">内置</span>)}<button onClick={() => handleRemoveModel(idx)} className={`p-1.5 rounded transition-colors ${m.isCustom ? 'text-gray-400 hover:text-red-500 hover:bg-red-50' : 'text-gray-200 cursor-not-allowed'}`} disabled={!m.isCustom}><Trash2 size={16}/></button></div></div>))}</div>
+    <div className="flex flex-col md:flex-row gap-2 items-end bg-gray-50 p-3 rounded-lg border border-gray-200">
+      <div className="flex-1 w-full"><label className="text-xs text-gray-500 mb-1 block">模型 ID *</label><input className="w-full border border-gray-300 rounded px-2 py-1.5 text-sm" value={newModelId} onChange={e=>setNewModelId(e.target.value)} placeholder="如: gpt-4o"/></div>
+      <div className="flex-1 w-full"><label className="text-xs text-gray-500 mb-1 block">显示名称</label><input className="w-full border border-gray-300 rounded px-2 py-1.5 text-sm" value={newModelName} onChange={e=>setNewModelName(e.target.value)} placeholder="如: GPT-4o"/></div>
+      <div className="flex-[1.5] w-full"><label className="text-xs text-gray-500 mb-1 block">API URL (选填, 默认 OpenRouter)</label><input className="w-full border border-gray-300 rounded px-2 py-1.5 text-sm" value={newModelApiUrl} onChange={e=>setNewModelApiUrl(e.target.value)} placeholder="https://api.openai.com/v1"/></div>
+      <button onClick={handleAddModel} className="bg-indigo-600 text-white px-4 py-1.5 rounded text-sm hover:bg-indigo-700 h-9 w-full md:w-auto">添加</button>
    </div>
  </div>
 </div>
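
The panel above and getKeyPool in ai-routes.js read and write the same 'main' config document. A hypothetical TypeScript shape for that document, inferred only from how this diff uses it; the authoritative schema lives in models.js, which this view does not show.

// Inferred sketch, not the models.js schema. OpenRouterModelConfig is the type name
// AdminPanel.tsx already uses; its fields below are read off the JSX and ai-routes.js.
interface OpenRouterModelConfig {
  id: string;         // model identifier sent to the API, e.g. 'qwen/qwen3-coder:free'
  name?: string;      // display name shown in the admin list
  apiUrl?: string;    // optional OpenAI-compatible base URL; defaults to https://openrouter.ai/api/v1
  isCustom?: boolean; // built-in entries cannot be removed in the UI
}

interface MainConfig {
  key: 'main';
  apiKeys?: {
    gemini?: string[];      // rotated by getKeyPool('gemini'); also used for Gemma and the TTS calls
    openrouter?: string[];  // rotated by getKeyPool('openrouter')
  };
  openRouterModels?: OpenRouterModelConfig[];
  aiProviderOrder?: ('GEMINI' | 'OPENROUTER' | 'GEMMA')[];
}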
components/ai/AssessmentPanel.tsx
CHANGED
|
@@ -14,28 +14,30 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })
|
|
| 14 |
const [assessmentTopic, setAssessmentTopic] = useState('请背诵《静夜思》并解释其含义。');
|
| 15 |
const [selectedImages, setSelectedImages] = useState<File[]>([]);
|
| 16 |
const [isAssessmentRecording, setIsAssessmentRecording] = useState(false);
|
| 17 |
-
const [isWebSpeechListening, setIsWebSpeechListening] = useState(false);
|
| 18 |
const [assessmentStatus, setAssessmentStatus] = useState<'IDLE' | 'UPLOADING' | 'ANALYZING' | 'TTS'>('IDLE');
|
| 19 |
-
|
| 20 |
-
const [streamedAssessment, setStreamedAssessment] = useState<{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 21 |
const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
|
| 22 |
|
| 23 |
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
|
| 24 |
const audioChunksRef = useRef<Blob[]>([]);
|
| 25 |
const audioContextRef = useRef<AudioContext | null>(null);
|
| 26 |
const currentSourceRef = useRef<AudioBufferSourceNode | null>(null);
|
| 27 |
-
const recognitionRef = useRef<any>(null);
|
| 28 |
-
|
| 29 |
-
// Store recognized text in a ref for faster/reliable access during stop callback
|
| 30 |
-
const textRef = useRef('');
|
| 31 |
|
|
|
|
| 32 |
useEffect(() => {
|
| 33 |
// @ts-ignore
|
| 34 |
const AudioCtor = window.AudioContext || window.webkitAudioContext;
|
| 35 |
audioContextRef.current = new AudioCtor();
|
| 36 |
return () => {
|
| 37 |
stopPlayback();
|
| 38 |
-
|
| 39 |
};
|
| 40 |
}, []);
|
| 41 |
|
|
@@ -50,8 +52,13 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })
|
|
| 50 |
const speakWithBrowser = (text: string) => {
|
| 51 |
if (!text) return;
|
| 52 |
stopPlayback();
|
| 53 |
-
const
|
|
|
|
| 54 |
utterance.lang = 'zh-CN';
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
window.speechSynthesis.speak(utterance);
|
| 56 |
};
|
| 57 |
|
|
@@ -63,6 +70,9 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })
|
|
| 63 |
const AudioCtor = window.AudioContext || window.webkitAudioContext;
|
| 64 |
audioContextRef.current = new AudioCtor();
|
| 65 |
}
|
|
|
|
|
|
|
|
|
|
| 66 |
const bytes = base64ToUint8Array(base64Audio);
|
| 67 |
const audioBuffer = decodePCM(bytes, audioContextRef.current!);
|
| 68 |
const source = audioContextRef.current!.createBufferSource();
|
|
@@ -70,127 +80,52 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })
|
|
| 70 |
source.connect(audioContextRef.current!.destination);
|
| 71 |
source.start(0);
|
| 72 |
currentSourceRef.current = source;
|
| 73 |
-
} catch (e) {
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
const startRecording = async (e?: React.MouseEvent | React.TouchEvent) => {
|
| 77 |
-
if (e) { e.preventDefault(); e.stopPropagation(); }
|
| 78 |
-
console.log("[Assessment] Starting Recording...");
|
| 79 |
-
textRef.current = '';
|
| 80 |
-
setRecognizedText('');
|
| 81 |
-
|
| 82 |
-
// @ts-ignore
|
| 83 |
-
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
|
| 84 |
-
if (SpeechRecognition) {
|
| 85 |
-
try {
|
| 86 |
-
if (recognitionRef.current) recognitionRef.current.abort();
|
| 87 |
-
const recognition = new SpeechRecognition();
|
| 88 |
-
recognition.lang = 'zh-CN';
|
| 89 |
-
recognition.interimResults = true;
|
| 90 |
-
recognition.continuous = true;
|
| 91 |
-
|
| 92 |
-
recognition.onstart = () => {
|
| 93 |
-
setIsWebSpeechListening(true);
|
| 94 |
-
setIsAssessmentRecording(true);
|
| 95 |
-
};
|
| 96 |
-
|
| 97 |
-
recognition.onresult = (event: any) => {
|
| 98 |
-
let full = '';
|
| 99 |
-
for (let i = 0; i < event.results.length; ++i) {
|
| 100 |
-
full += event.results[i][0].transcript;
|
| 101 |
-
}
|
| 102 |
-
textRef.current = full;
|
| 103 |
-
setRecognizedText(full);
|
| 104 |
-
};
|
| 105 |
-
|
| 106 |
-
recognition.onerror = (e: any) => {
|
| 107 |
-
console.warn("[Assessment] Web Speech Error:", e.error);
|
| 108 |
-
if (e.error !== 'aborted') {
|
| 109 |
-
startAudioRecordingFallback();
|
| 110 |
-
} else {
|
| 111 |
-
setIsAssessmentRecording(false);
|
| 112 |
-
setIsWebSpeechListening(false);
|
| 113 |
-
}
|
| 114 |
-
};
|
| 115 |
-
|
| 116 |
-
recognition.onend = () => {
|
| 117 |
-
setIsWebSpeechListening(false);
|
| 118 |
-
};
|
| 119 |
-
|
| 120 |
-
recognitionRef.current = recognition;
|
| 121 |
-
recognition.start();
|
| 122 |
-
return;
|
| 123 |
-
} catch (e) {
|
| 124 |
-
console.error("[Assessment] Web Speech Failed", e);
|
| 125 |
-
startAudioRecordingFallback();
|
| 126 |
-
}
|
| 127 |
-
} else {
|
| 128 |
-
startAudioRecordingFallback();
|
| 129 |
}
|
| 130 |
};
|
| 131 |
|
| 132 |
-
const
|
| 133 |
-
console.log("[Assessment] Falling back to MediaRecorder");
|
| 134 |
try {
|
| 135 |
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
| 136 |
const mediaRecorder = new MediaRecorder(stream);
|
| 137 |
mediaRecorderRef.current = mediaRecorder;
|
| 138 |
audioChunksRef.current = [];
|
| 139 |
-
|
| 140 |
mediaRecorder.ondataavailable = (event) => {
|
| 141 |
-
if (event.data.size > 0)
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
mediaRecorder.onstop = async () => {
|
| 145 |
-
const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
|
| 146 |
-
const base64 = await blobToBase64(audioBlob);
|
| 147 |
-
handleAssessmentStreamingSubmit({ audio: base64 });
|
| 148 |
-
stream.getTracks().forEach(track => track.stop());
|
| 149 |
};
|
| 150 |
-
|
| 151 |
151        mediaRecorder.start();
152        setIsAssessmentRecording(true);
153 -      setIsWebSpeechListening(false);
154      } catch (e) {
155        setToast({ show: true, message: '无法访问麦克风', type: 'error' });
156 -      setIsAssessmentRecording(false);
157      }
158    };
159
160    const stopRecording = () => {
161 -
162 -    const wasWebSpeech = isWebSpeechListening;
163 -
164 -    if (wasWebSpeech && recognitionRef.current) {
165 -      recognitionRef.current.stop();
166 -      // Use current textRef because state might be stale
167 -      const finalSpeechText = textRef.current;
168 -      setTimeout(() => {
169 -        if (finalSpeechText.trim()) {
170 -          handleAssessmentStreamingSubmit({ text: finalSpeechText });
171 -        } else {
172 -          // Try fallback if no text captured but button was held
173 -          setToast({ show: true, message: '未检测到有效语音内容', type: 'error' });
174 -        }
175 -      }, 300);
176 -    } else if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') {
177        mediaRecorderRef.current.stop();
178 -      …
179      }
180 -
181 -    setIsAssessmentRecording(false);
182 -    setIsWebSpeechListening(false);
183    };
184
185 -  const handleAssessmentStreamingSubmit = async ({ audio, images, text }…
186      setAssessmentStatus('UPLOADING');
187      setStreamedAssessment({ transcription: '', feedback: '', score: null, audio: undefined });
188      stopPlayback();
189
190      try {
191 -      let finalQuestion = assessmentTopic;
192 -      if (text) finalQuestion += `\n\n学生口述回答内容:${text}\n(请基于此文本进行评分)`;
193 -
194        const response = await fetch('/api/ai/evaluate', {
195          method: 'POST',
196          headers: {

@@ -199,13 +134,15 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

199            'x-user-role': currentUser?.role || '',
200            'x-school-id': currentUser?.schoolId || ''
201          },
202 -        body: JSON.stringify({ question: finalQuestion, audio, images })
203        });
204
205        if (!response.ok) throw new Error(response.statusText);
206        setAssessmentStatus('ANALYZING');
207
208 -      const reader = response.body…
209        const decoder = new TextDecoder();
210        let accumulatedRaw = '';
211        let buffer = '';

@@ -213,6 +150,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

213        while (true) {
214          const { done, value } = await reader.read();
215          if (done) break;
216          buffer += decoder.decode(value, { stream: true });
217          const parts = buffer.split('\n\n');
218          buffer = parts.pop() || '';

@@ -221,37 +159,52 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

221          if (line.startsWith('data: ')) {
222            const jsonStr = line.replace('data: ', '').trim();
223            if (jsonStr === '[DONE]') break;
224            try {
225              const data = JSON.parse(jsonStr);
226 -
227              if (data.text) {
228                accumulatedRaw += data.text;
229 -              const …
230 -              const …
231 -              const …
232                setStreamedAssessment(prev => ({
233                  ...prev,
234 -                transcription: …
235 -                feedback: …
236 -                score: …
237                }));
238              }
239              if (data.audio) {
240                setStreamedAssessment(prev => ({ ...prev, audio: data.audio }));
241                playPCMAudio(data.audio);
242              }
243              if (data.ttsSkipped) {
244                const fb = streamedAssessment.feedback || accumulatedRaw.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i)?.[1] || '';
245                if (fb) speakWithBrowser(fb);
246              }
247 -
248            } catch (e) {}
249          }
250        }
251      }
252      setAssessmentStatus('IDLE');
253    } catch (error: any) {
254 -    …
255      setAssessmentStatus('IDLE');
256    }
257  };

@@ -266,8 +219,9 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

266      if (selectedImages.length === 0) return;
267      setAssessmentStatus('UPLOADING');
268      try {
269 -      const …
270 -      …
271      } catch(e) {
272        setAssessmentStatus('IDLE');
273        setToast({ show: true, message: '图片压缩上传失败', type: 'error' });

@@ -280,6 +234,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

280        <button onClick={stopPlayback} className="absolute top-4 right-4 z-50 bg-white/80 backdrop-blur p-2 rounded-full shadow-md text-red-500 hover:bg-white border border-gray-200" title="停止播放"><StopIcon size={20}/></button>
281
282        <div className="max-w-3xl mx-auto space-y-6">
283          <div className="bg-white p-6 rounded-2xl border border-purple-100 shadow-sm">
284            <h3 className="text-lg font-bold text-gray-800 mb-2 flex items-center justify-between">
285              <span className="flex items-center"><Brain className="mr-2 text-purple-600"/> 今日测评题目</span>

@@ -298,7 +253,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

298              className={`px-8 py-4 rounded-full font-bold text-white flex items-center gap-3 shadow-lg transition-all ${isAssessmentRecording ? 'bg-red-500 scale-105' : 'bg-gradient-to-r from-purple-600 to-indigo-600 hover:shadow-purple-200 hover:scale-105 disabled:opacity-50'}`}
299            >
300              {assessmentStatus !== 'IDLE' ? <Loader2 className="animate-spin"/> : (isAssessmentRecording ? <StopCircle/> : <Mic/>)}
301 -            {assessmentStatus === 'UPLOADING' ? '上传中...' : assessmentStatus === 'ANALYZING' ? 'AI 正在分析...' : assessmentStatus === 'TTS' ? '生成语音...' : isAssessmentRecording ? …
302            </button>
303          ) : (
304            <div className="w-full">

@@ -315,6 +270,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

315                <>
316                  <ImageIcon className="mx-auto text-purple-300 mb-2" size={40}/>
317                  <p className="text-purple-600 font-bold">点击上传作业图片</p>
318                </>
319              ) : (
320                <div className="z-0 w-full pointer-events-none opacity-50 flex items-center justify-center">

@@ -347,7 +303,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

347              className="mt-6 w-full px-8 py-3 bg-purple-600 text-white rounded-lg font-bold hover:bg-purple-700 flex items-center justify-center gap-2 shadow-md transition-all"
348            >
349              {assessmentStatus !== 'IDLE' ? <Loader2 className="animate-spin" size={18}/> : <CheckCircle size={18}/>}
350 -            {assessmentStatus === 'UPLOADING' ? '压缩上传中...' : assessmentStatus === 'ANALYZING' ? 'AI …
351            </button>
352          )}
353        </div>

@@ -355,6 +311,7 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

355      </div>
356    </div>
357
358    {(streamedAssessment.transcription || streamedAssessment.feedback || streamedAssessment.score !== null) && (
359      <div className="bg-white p-6 rounded-2xl border border-gray-200 shadow-lg animate-in slide-in-from-bottom-4">
360        <div className="flex items-center justify-between border-b border-gray-100 pb-4 mb-4">

@@ -363,32 +320,34 @@ export const AssessmentPanel: React.FC<AssessmentPanelProps> = ({ currentUser })

363          {assessmentStatus !== 'IDLE' && (
364            <div className="flex items-center gap-1 text-xs px-2 py-1 bg-purple-50 text-purple-600 rounded-full animate-pulse">
365              <Zap size={12}/>
366 -            …
367            </div>
368          )}
369        </div>
370        <div className="flex items-center gap-4">
371          {streamedAssessment.audio && (
372 -          <button onClick={() => playPCMAudio(streamedAssessment.audio!)} className="flex items-center gap-1 text-sm bg-purple-100 text-purple-700 px-3 py-1 rounded-full hover:bg-purple-200">
373              <Volume2 size={16}/> 听AI点评
374            </button>
375          )}
376 -        {streamedAssessment.score !== null && (
377            <div className={`text-3xl font-black ${streamedAssessment.score >= 80 ? 'text-green-500' : streamedAssessment.score >= 60 ? 'text-yellow-500' : 'text-red-500'}`}>
378              {streamedAssessment.score}<span className="text-sm text-gray-400 ml-1">分</span>
379            </div>
380          )}
381        </div>
382      </div>
383      <div className="space-y-4">
384        <div className="bg-gray-50 p-4 rounded-xl">
385          <p className="text-xs font-bold text-gray-500 uppercase mb-1">AI 识别内容</p>
386 -        <p className="text-gray-700 leading-relaxed text-sm whitespace-pre-wrap">{streamedAssessment.transcription || …
387        </div>
388        <div>
389          <p className="text-xs font-bold text-gray-500 uppercase mb-2">AI 点评建议</p>
390          <div className="p-4 bg-purple-50 text-purple-900 rounded-xl border border-purple-100 text-sm leading-relaxed whitespace-pre-wrap">
391 -          {streamedAssessment.feedback || …
392        </div>
393      </div>
394    </div>
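The removed code above is the previous version of this part of components/ai/AssessmentPanel.tsx: press-to-talk could go through the browser Web Speech API and submit the recognized text directly, with MediaRecorder only as a fallback. The rewrite below drops the Web Speech path entirely and always uploads recorded audio, leaving transcription to the server. A minimal sketch of that simplified press-to-talk flow, assuming a base64 upload; recordOnce and onAudioReady are hypothetical names, not part of this repo:

// Record while the button is held; hand base64 audio to a callback on stop.
async function recordOnce(onAudioReady: (base64: string) => void): Promise<() => void> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  const recorder = new MediaRecorder(stream);
  const chunks: Blob[] = [];
  recorder.ondataavailable = (e) => { if (e.data.size > 0) chunks.push(e.data); };
  recorder.onstop = async () => {
    const blob = new Blob(chunks, { type: 'audio/webm' });
    const base64 = await new Promise<string>((resolve) => {
      const fr = new FileReader();
      // result looks like "data:audio/webm;base64,...."; strip the data-URL prefix
      fr.onloadend = () => resolve((fr.result as string).split(',')[1]);
      fr.readAsDataURL(blob);
    });
    onAudioReady(base64);
    stream.getTracks().forEach(t => t.stop()); // release the microphone
  };
  recorder.start();
  return () => recorder.stop(); // the caller invokes this on pointer-up
}

The updated file, with the same flow inlined into startRecording/stopRecording: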
 14    const [assessmentTopic, setAssessmentTopic] = useState('请背诵《静夜思》并解释其含义。');
 15    const [selectedImages, setSelectedImages] = useState<File[]>([]);
 16    const [isAssessmentRecording, setIsAssessmentRecording] = useState(false);
 17    const [assessmentStatus, setAssessmentStatus] = useState<'IDLE' | 'UPLOADING' | 'ANALYZING' | 'TTS'>('IDLE');
 18 +
 19 +  const [streamedAssessment, setStreamedAssessment] = useState<{
 20 +    transcription: string;
 21 +    feedback: string;
 22 +    score: number | null;
 23 +    audio?: string;
 24 +  }>({ transcription: '', feedback: '', score: null });
 25 +
 26    const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
 27
 28    const mediaRecorderRef = useRef<MediaRecorder | null>(null);
 29    const audioChunksRef = useRef<Blob[]>([]);
 30    const audioContextRef = useRef<AudioContext | null>(null);
 31    const currentSourceRef = useRef<AudioBufferSourceNode | null>(null);
 32
 33 +  // Initialize AudioContext
 34    useEffect(() => {
 35      // @ts-ignore
 36      const AudioCtor = window.AudioContext || window.webkitAudioContext;
 37      audioContextRef.current = new AudioCtor();
 38      return () => {
 39        stopPlayback();
 40 +      window.speechSynthesis.cancel();
 41      };
 42    }, []);
 43

 52    const speakWithBrowser = (text: string) => {
 53      if (!text) return;
 54      stopPlayback();
 55 +    const cleanText = cleanTextForTTS(text);
 56 +    const utterance = new SpeechSynthesisUtterance(cleanText);
 57      utterance.lang = 'zh-CN';
 58 +    utterance.rate = 1.0;
 59 +    const voices = window.speechSynthesis.getVoices();
 60 +    const zhVoice = voices.find(v => v.lang === 'zh-CN' && !v.name.includes('Hong Kong') && !v.name.includes('Taiwan'));
 61 +    if (zhVoice) utterance.voice = zhVoice;
 62      window.speechSynthesis.speak(utterance);
 63    };
 64

 70        const AudioCtor = window.AudioContext || window.webkitAudioContext;
 71        audioContextRef.current = new AudioCtor();
 72      }
 73 +    if (audioContextRef.current?.state === 'suspended') {
 74 +      await audioContextRef.current.resume();
 75 +    }
 76      const bytes = base64ToUint8Array(base64Audio);
 77      const audioBuffer = decodePCM(bytes, audioContextRef.current!);
 78      const source = audioContextRef.current!.createBufferSource();

 80      source.connect(audioContextRef.current!.destination);
 81      source.start(0);
 82      currentSourceRef.current = source;
 83 +  } catch (e) {
 84 +    console.error("Audio playback error", e);
 85 +    setToast({ show: true, message: '语音播放失败', type: 'error' });
 86    }
 87  };
 88
 89 +  const startRecording = async () => {
 90      try {
 91        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 92        const mediaRecorder = new MediaRecorder(stream);
 93        mediaRecorderRef.current = mediaRecorder;
 94        audioChunksRef.current = [];
 95 +
 96        mediaRecorder.ondataavailable = (event) => {
 97 +        if (event.data.size > 0) {
 98 +          audioChunksRef.current.push(event.data);
 99 +        }
100        };
101 +
102        mediaRecorder.start();
103        setIsAssessmentRecording(true);
104      } catch (e) {
105        setToast({ show: true, message: '无法访问麦克风', type: 'error' });
106      }
107    };
108
109    const stopRecording = () => {
110 +    if (mediaRecorderRef.current && isAssessmentRecording) {
111        mediaRecorderRef.current.stop();
112 +      setIsAssessmentRecording(false);
113 +
114 +      mediaRecorderRef.current.onstop = async () => {
115 +        const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
116 +        const base64 = await blobToBase64(audioBlob);
117 +        handleAssessmentStreamingSubmit({ audio: base64 });
118 +        mediaRecorderRef.current?.stream.getTracks().forEach(track => track.stop());
119 +      };
120      }
121    };
122
123 +  const handleAssessmentStreamingSubmit = async ({ audio, images }: { audio?: string, images?: string[] }) => {
124      setAssessmentStatus('UPLOADING');
125      setStreamedAssessment({ transcription: '', feedback: '', score: null, audio: undefined });
126      stopPlayback();
127
128      try {
129        const response = await fetch('/api/ai/evaluate', {
130          method: 'POST',
131          headers: {

134            'x-user-role': currentUser?.role || '',
135            'x-school-id': currentUser?.schoolId || ''
136          },
137 +        body: JSON.stringify({ question: assessmentTopic, audio, images })
138        });
139
140        if (!response.ok) throw new Error(response.statusText);
141 +      if (!response.body) throw new Error('No response body');
142 +
143        setAssessmentStatus('ANALYZING');
144
145 +      const reader = response.body.getReader();
146        const decoder = new TextDecoder();
147        let accumulatedRaw = '';
148        let buffer = '';

150        while (true) {
151          const { done, value } = await reader.read();
152          if (done) break;
153 +
154          buffer += decoder.decode(value, { stream: true });
155          const parts = buffer.split('\n\n');
156          buffer = parts.pop() || '';

159          if (line.startsWith('data: ')) {
160            const jsonStr = line.replace('data: ', '').trim();
161            if (jsonStr === '[DONE]') break;
162 +
163            try {
164              const data = JSON.parse(jsonStr);
165 +
166 +            if (data.status) {
167 +              if (data.status === 'analyzing') setAssessmentStatus('ANALYZING');
168 +              if (data.status === 'tts') setAssessmentStatus('TTS');
169 +            }
170 +
171              if (data.text) {
172                accumulatedRaw += data.text;
173 +              const transcriptionMatch = accumulatedRaw.match(/## Transcription\s+([\s\S]*?)(?=## Feedback|$)/i);
174 +              const feedbackMatch = accumulatedRaw.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i);
175 +              const scoreMatch = accumulatedRaw.match(/## Score\s+(\d+)/i);
176 +
177                setStreamedAssessment(prev => ({
178                  ...prev,
179 +                transcription: transcriptionMatch ? transcriptionMatch[1].trim() : prev.transcription,
180 +                feedback: feedbackMatch ? feedbackMatch[1].trim() : prev.feedback,
181 +                score: scoreMatch ? parseInt(scoreMatch[1]) : prev.score
182                }));
183              }
184 +
185              if (data.audio) {
186                setStreamedAssessment(prev => ({ ...prev, audio: data.audio }));
187                playPCMAudio(data.audio);
188              }
189 +
190              if (data.ttsSkipped) {
191                const fb = streamedAssessment.feedback || accumulatedRaw.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i)?.[1] || '';
192                if (fb) speakWithBrowser(fb);
193              }
194 +
195 +            if (data.error) {
196 +              setToast({ show: true, message: data.message || '评分出错', type: 'error' });
197 +            }
198 +
199            } catch (e) {}
200          }
201        }
202      }
203      setAssessmentStatus('IDLE');
204 +
205    } catch (error: any) {
206 +    console.error("Assessment error", error);
207 +    setToast({ show: true, message: '评分失败: ' + error.message, type: 'error' });
208      setAssessmentStatus('IDLE');
209    }
210  };
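The submit handler above re-runs three regular expressions over the whole accumulated answer on every SSE chunk, so each section of the model's markdown reply ('## Transcription', '## Feedback', '## Score') becomes visible as soon as its header has streamed past. The same parser as a standalone sketch (parseAssessment is a hypothetical name):

interface ParsedAssessment { transcription?: string; feedback?: string; score?: number; }

// Re-run on every chunk; a section stays undefined until its bounding header arrives.
function parseAssessment(raw: string): ParsedAssessment {
  const transcription = raw.match(/## Transcription\s+([\s\S]*?)(?=## Feedback|$)/i)?.[1]?.trim();
  const feedback = raw.match(/## Feedback\s+([\s\S]*?)(?=## Score|$)/i)?.[1]?.trim();
  const scoreStr = raw.match(/## Score\s+(\d+)/i)?.[1];
  return { transcription, feedback, score: scoreStr ? parseInt(scoreStr, 10) : undefined };
}

// parseAssessment('## Transcription\n床前明月光\n## Feedback\n背诵准确。\n## Score\n92')
//   -> { transcription: '床前明月光', feedback: '背诵准确。', score: 92 }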
219      if (selectedImages.length === 0) return;
220      setAssessmentStatus('UPLOADING');
221      try {
222 +      const base64Promises = selectedImages.map(file => compressImage(file));
223 +      const base64Images = await Promise.all(base64Promises);
224 +      handleAssessmentStreamingSubmit({ images: base64Images });
225      } catch(e) {
226        setAssessmentStatus('IDLE');
227        setToast({ show: true, message: '图片压缩上传失败', type: 'error' });

234        <button onClick={stopPlayback} className="absolute top-4 right-4 z-50 bg-white/80 backdrop-blur p-2 rounded-full shadow-md text-red-500 hover:bg-white border border-gray-200" title="停止播放"><StopIcon size={20}/></button>
235
236        <div className="max-w-3xl mx-auto space-y-6">
237 +        {/* Topic Card */}
238          <div className="bg-white p-6 rounded-2xl border border-purple-100 shadow-sm">
239            <h3 className="text-lg font-bold text-gray-800 mb-2 flex items-center justify-between">
240              <span className="flex items-center"><Brain className="mr-2 text-purple-600"/> 今日测评题目</span>

253              className={`px-8 py-4 rounded-full font-bold text-white flex items-center gap-3 shadow-lg transition-all ${isAssessmentRecording ? 'bg-red-500 scale-105' : 'bg-gradient-to-r from-purple-600 to-indigo-600 hover:shadow-purple-200 hover:scale-105 disabled:opacity-50'}`}
254            >
255              {assessmentStatus !== 'IDLE' ? <Loader2 className="animate-spin"/> : (isAssessmentRecording ? <StopCircle/> : <Mic/>)}
256 +            {assessmentStatus === 'UPLOADING' ? '上传中...' : assessmentStatus === 'ANALYZING' ? 'AI 正在分析...' : assessmentStatus === 'TTS' ? '生成语音...' : isAssessmentRecording ? '松开结束录音' : '按住开始回答'}
257            </button>
258          ) : (
259            <div className="w-full">

270                <>
271                  <ImageIcon className="mx-auto text-purple-300 mb-2" size={40}/>
272                  <p className="text-purple-600 font-bold">点击上传作业图片</p>
273 +                <p className="text-xs text-gray-400">支持批量上传 • 自动压缩处理</p>
274                </>
275              ) : (
276                <div className="z-0 w-full pointer-events-none opacity-50 flex items-center justify-center">

303              className="mt-6 w-full px-8 py-3 bg-purple-600 text-white rounded-lg font-bold hover:bg-purple-700 flex items-center justify-center gap-2 shadow-md transition-all"
304            >
305              {assessmentStatus !== 'IDLE' ? <Loader2 className="animate-spin" size={18}/> : <CheckCircle size={18}/>}
306 +            {assessmentStatus === 'UPLOADING' ? '压缩上传中...' : assessmentStatus === 'ANALYZING' ? 'AI 正在分析...' : assessmentStatus === 'TTS' ? '生成语音...' : `开始批改 (${selectedImages.length}张)`}
307            </button>
308          )}
309        </div>

311      </div>
312    </div>
313
314 +  {/* Streamed Result Card */}
315    {(streamedAssessment.transcription || streamedAssessment.feedback || streamedAssessment.score !== null) && (
316      <div className="bg-white p-6 rounded-2xl border border-gray-200 shadow-lg animate-in slide-in-from-bottom-4">
317        <div className="flex items-center justify-between border-b border-gray-100 pb-4 mb-4">

320          {assessmentStatus !== 'IDLE' && (
321            <div className="flex items-center gap-1 text-xs px-2 py-1 bg-purple-50 text-purple-600 rounded-full animate-pulse">
322              <Zap size={12}/>
323 +            {assessmentStatus === 'ANALYZING' ? '正在智能分析内容...' : assessmentStatus === 'TTS' ? '正在生成语音点评...' : '处理中...'}
324            </div>
325          )}
326        </div>
327        <div className="flex items-center gap-4">
328          {streamedAssessment.audio && (
329 +          <button onClick={() => playPCMAudio(streamedAssessment.audio!)} className="flex items-center gap-1 text-sm bg-purple-100 text-purple-700 px-3 py-1 rounded-full hover:bg-purple-200 animate-in fade-in">
330              <Volume2 size={16}/> 听AI点评
331            </button>
332          )}
333 +        {streamedAssessment.score !== null ? (
334            <div className={`text-3xl font-black ${streamedAssessment.score >= 80 ? 'text-green-500' : streamedAssessment.score >= 60 ? 'text-yellow-500' : 'text-red-500'}`}>
335              {streamedAssessment.score}<span className="text-sm text-gray-400 ml-1">分</span>
336            </div>
337 +        ) : (
338 +          <div className="text-sm text-gray-400 italic">评分中...</div>
339          )}
340        </div>
341      </div>
342      <div className="space-y-4">
343        <div className="bg-gray-50 p-4 rounded-xl">
344          <p className="text-xs font-bold text-gray-500 uppercase mb-1">AI 识别内容</p>
345 +        <p className="text-gray-700 leading-relaxed text-sm whitespace-pre-wrap">{streamedAssessment.transcription || <span className="text-gray-400">正在识别...</span>}</p>
346        </div>
347        <div>
348          <p className="text-xs font-bold text-gray-500 uppercase mb-2">AI 点评建议</p>
349          <div className="p-4 bg-purple-50 text-purple-900 rounded-xl border border-purple-100 text-sm leading-relaxed whitespace-pre-wrap">
350 +          {streamedAssessment.feedback || <span className="text-purple-300">AI 正在思考评语...</span>}
351        </div>
352      </div>
353    </div>
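Both panels consume their streaming endpoints with the same hand-rolled SSE-over-fetch loop: frames are separated by a blank line, a partial trailing frame is carried over in a buffer, and each 'data: ' payload is parsed as JSON. A minimal sketch of the pattern as a reusable helper, under the same framing assumptions (readSSE is a hypothetical name):

async function readSSE(response: Response, onEvent: (data: any) => void): Promise<void> {
  if (!response.body) throw new Error('No response body');
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const frames = buffer.split('\n\n');
    buffer = frames.pop() || ''; // incomplete frame: wait for more bytes
    for (const frame of frames) {
      for (const line of frame.split('\n')) {
        if (!line.startsWith('data: ')) continue;
        const json = line.slice('data: '.length).trim();
        if (json === '[DONE]') return;
        try { onEvent(JSON.parse(json)); } catch { /* ignore malformed frames */ }
      }
    }
  }
}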
components/ai/ChatPanel.tsx
CHANGED

@@ -1,7 +1,7 @@

  1
  2  import React, { useState, useRef, useEffect } from 'react';
  3  import { AIChatMessage, User } from '../../types';
  4 -import { Bot, Mic, Volume2, Send, Sparkles, Loader2, StopCircle, Trash2 } from 'lucide-react';
  5  import ReactMarkdown from 'react-markdown';
  6  import remarkGfm from 'remark-gfm';
  7  import { blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS } from '../../utils/mediaHelpers';

@@ -22,14 +22,18 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

 22        timestamp: Date.now()
 23      }];
 24    } catch (e) {
 25 -    return [{ …
 26    }
 27  });
 28 -
 29  const [textInput, setTextInput] = useState('');
 30  const [isChatProcessing, setIsChatProcessing] = useState(false);
 31  const [isChatRecording, setIsChatRecording] = useState(false);
 32 -const [isWebSpeechListening, setIsWebSpeechListening] = useState(false);
 33  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
 34
 35  const mediaRecorderRef = useRef<MediaRecorder | null>(null);

@@ -37,23 +41,31 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

 37  const audioContextRef = useRef<AudioContext | null>(null);
 38  const currentSourceRef = useRef<AudioBufferSourceNode | null>(null);
 39  const messagesEndRef = useRef<HTMLDivElement>(null);
 40 -const recognitionRef = useRef<any>(null);
 41 -// Fixed: Added missing inputRef to fix the error in line 324
 42 -const inputRef = useRef<HTMLInputElement>(null);
 43 -
 44 -// Track the text that was already in the box when we started speaking
 45 -const baseTextRef = useRef('');
 46
 47  useEffect(() => {
 48    // @ts-ignore
 49    const AudioCtor = window.AudioContext || window.webkitAudioContext;
 50    audioContextRef.current = new AudioCtor();
 51    return () => {
 52      stopPlayback();
 53 -
 54    };
 55  }, []);
 56
 57  useEffect(() => {
 58    messagesEndRef.current?.scrollIntoView({ behavior: isChatProcessing ? 'auto' : 'smooth', block: 'end' });
 59  }, [messages, isChatProcessing]);

@@ -69,8 +81,13 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

 69  const speakWithBrowser = (text: string) => {
 70    if (!text) return;
 71    stopPlayback();
 72 -  const utterance = new SpeechSynthesisUtterance(text);
 73    utterance.lang = 'zh-CN';
 74    window.speechSynthesis.speak(utterance);
 75  };
 76

@@ -94,82 +111,11 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

 94    currentSourceRef.current = source;
 95  } catch (e) {
 96    console.error("Audio playback error", e);
 97  }
 98  };
 99
100 -const startRecording = async (e…
101 -  if (e) { e.preventDefault(); e.stopPropagation(); }
102 -
103 -  // Save what's already in the input so we can append to it
104 -  baseTextRef.current = textInput;
105 -
106 -  console.log("[Voice] Start listening...");
107 -  // @ts-ignore
108 -  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
109 -
110 -  if (SpeechRecognition) {
111 -    try {
112 -      if (recognitionRef.current) recognitionRef.current.abort();
113 -
114 -      const recognition = new SpeechRecognition();
115 -      recognition.lang = 'zh-CN';
116 -      recognition.interimResults = true;
117 -      recognition.continuous = true;
118 -
119 -      recognition.onstart = () => {
120 -        setIsWebSpeechListening(true);
121 -        setIsChatRecording(true);
122 -      };
123 -
124 -      recognition.onresult = (event: any) => {
125 -        let interimTranscript = '';
126 -        let finalTranscript = '';
127 -
128 -        for (let i = event.resultIndex; i < event.results.length; ++i) {
129 -          const transcript = event.results[i][0].transcript;
130 -          if (event.results[i].isFinal) {
131 -            finalTranscript += transcript;
132 -          } else {
133 -            interimTranscript += transcript;
134 -          }
135 -        }
136 -
137 -        // Always append newly finalized text to the base
138 -        if (finalTranscript) {
139 -          baseTextRef.current += finalTranscript;
140 -        }
141 -
142 -        // Update input box with: Old text + currently recognized final chunks + currently recognized interim chunks
143 -        setTextInput(baseTextRef.current + interimTranscript);
144 -      };
145 -
146 -      recognition.onerror = (e: any) => {
147 -        console.warn("[Voice] Web Speech Error:", e.error);
148 -        if (e.error === 'not-allowed') {
149 -          setToast({ show: true, message: '请允许麦克风访问', type: 'error' });
150 -        }
151 -        stopRecording();
152 -      };
153 -
154 -      recognition.onend = () => {
155 -        setIsWebSpeechListening(false);
156 -        setIsChatRecording(false);
157 -      };
158 -
159 -      recognitionRef.current = recognition;
160 -      recognition.start();
161 -      return;
162 -    } catch (e) {
163 -      console.error("[Voice] Web Speech Init Exception", e);
164 -      startAudioRecordingFallback();
165 -    }
166 -  } else {
167 -    startAudioRecordingFallback();
168 -  }
169 -};
170 -
171 -const startAudioRecordingFallback = async () => {
172 -  console.log("[Voice] Using MediaRecorder Fallback");
173    try {
174      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
175      const mediaRecorder = new MediaRecorder(stream);

@@ -177,51 +123,51 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

177      audioChunksRef.current = [];
178
179      mediaRecorder.ondataavailable = (event) => {
180 -      if (event.data.size > 0) audioChunksRef.current.push(event.data);
181 -
182 -
183 -    mediaRecorder.onstop = async () => {
184 -      const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
185 -      const base64 = await blobToBase64(audioBlob);
186 -      handleChatSubmit(undefined, base64);
187 -      stream.getTracks().forEach(track => track.stop());
188      };
189
190      mediaRecorder.start();
191      setIsChatRecording(true);
192 -    setIsWebSpeechListening(false);
193    } catch (e) {
194 -    setToast({ show: true, message: '…
195    }
196  };
197
198  const stopRecording = () => {
199 -
200 -  if (isWebSpeechListening && recognitionRef.current) {
201 -    recognitionRef.current.stop();
202 -  } else if (mediaRecorderRef.current && isChatRecording) {
203      mediaRecorderRef.current.stop();
204    }
205 -  setIsChatRecording(false);
206 -  setIsWebSpeechListening(false);
207  };
208
209  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
210 -  …
211 -  if (!finalContent && !audioBase64) return;
212 -
213    stopPlayback();
214    const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
215
216    const newUserMsg: AIChatMessage = {
217      id: Date.now().toString(),
218      role: 'user',
219 -    text: …
220      isAudioMessage: !!audioBase64,
221      timestamp: Date.now()
222    };
223    const newAiMsgId = (Date.now() + 1).toString();
224 -  const newAiMsg: AIChatMessage = { …
225
226    setMessages(prev => [...prev, newUserMsg, newAiMsg]);
227    setTextInput('');

@@ -236,11 +182,13 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

236        'x-user-role': currentUser?.role || '',
237        'x-school-id': currentUser?.schoolId || ''
238      },
239 -    body: JSON.stringify({ text…
240    });
241
242    if (!response.ok) throw new Error(response.statusText);
243 -  …
244    const decoder = new TextDecoder();
245    let aiTextAccumulated = '';
246    let buffer = '';

@@ -267,23 +215,36 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

267            playPCMAudio(data.audio);
268          }
269          if (data.ttsSkipped) {
270            speakWithBrowser(aiTextAccumulated);
271          }
272        } catch (e) {}
273      }
274    }
275  }
276  } catch (error: any) {
277 -  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '…
278  } finally { setIsChatProcessing(false); }
279  };
280

281  return (
282    <div className="flex-1 flex flex-col max-w-4xl mx-auto w-full min-h-0 relative overflow-hidden h-full">
283      {toast.show && <Toast message={toast.message} type={toast.type} onClose={()=>setToast({...toast, show: false})}/>}
284
285      <div className="absolute top-2 right-4 z-10">
286 -      <button onClick={…
287        <Trash2 size={14}/> 清除记录
288      </button>
289    </div>

@@ -296,64 +257,20 @@ export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {

296      </div>
297      <div className={`max-w-[80%] p-3 rounded-2xl text-sm overflow-hidden ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none shadow-sm'}`}>
298        <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
299 -      {msg.…
300      </div>
301    </div>
302  ))}
303 -{isChatProcessing && (
304 -  <div className="flex gap-3">
305 -    <div className="w-10 h-10 rounded-full bg-blue-100 text-blue-600 flex items-center justify-center shrink-0">
306 -      <Loader2 className="animate-spin" size={20}/>
307 -    </div>
308 -    <div className="bg-white border border-gray-100 p-3 rounded-2xl rounded-tl-none shadow-sm flex items-center gap-2 text-gray-400 text-xs">
309 -      思考中...
310 -    </div>
311 -  </div>
312 -)}
313  <div ref={messagesEndRef} />
314  </div>
315
316  <div className="p-4 bg-white border-t border-gray-200 shrink-0 z-20">
317 -  <div className="flex items-center gap-…
318 -  {…
319 -    …
320 -    …
321 -    {isWebSpeechListening ? '正在识别' : '正在录制'}
322 -  </div>
323 -)}
324 -
325 -<input
326 -  ref={inputRef}
327 -  className="flex-1 bg-transparent border-none outline-none px-3 text-sm py-2"
328 -  placeholder={isChatRecording ? "正在倾听..." : "输入问题..."}
329 -  value={textInput}
330 -  onChange={e => setTextInput(e.target.value)}
331 -  onKeyDown={e => e.key === 'Enter' && !isChatProcessing && handleChatSubmit(textInput)}
332 -  disabled={isChatProcessing}
333 -/>
334 -
335 -<div className="flex items-center gap-2 shrink-0">
336 -  <button
337 -    onMouseDown={startRecording}
338 -    onMouseUp={stopRecording}
339 -    onTouchStart={startRecording}
340 -    onTouchEnd={stopRecording}
341 -    className={`p-3 rounded-xl transition-all ${isChatRecording ? 'bg-red-500 scale-110 shadow-lg text-white ring-4 ring-red-100' : 'bg-gray-100 text-gray-500 hover:bg-gray-200'}`}
342 -    title="按住说话"
343 -  >
344 -    {isChatRecording ? <StopCircle size={22}/> : <Mic size={22}/>}
345 -  </button>
346 -
347 -  <button
348 -    onClick={() => handleChatSubmit(textInput)}
349 -    className={`p-3 rounded-xl transition-all ${!textInput.trim() || isChatProcessing ? 'bg-gray-100 text-gray-300' : 'bg-blue-600 text-white hover:bg-blue-700 shadow-md'}`}
350 -    disabled={!textInput.trim() || isChatProcessing}
351 -  >
352 -    <Send size={22}/>
353 -  </button>
354 -</div>
355  </div>
356 -<div className="text-[10px] text-gray-400 text-center mt-2">支持文字输入或按住麦克风图标进行语音提问</div>
357  </div>
358  </div>
359  );
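For reference, the bookkeeping the removed startRecording above had to do: SpeechRecognition fires onresult repeatedly, finalized chunks must be appended to a ref (React state would be stale inside the callback), and the visible input is always the base text plus the current interim transcript. Just the accumulation step, as a condensed sketch (collectTranscripts is a hypothetical name; the event shape is typed structurally so the snippet stands alone):

type RecognitionResultEvent = {
  resultIndex: number;
  results: ArrayLike<{ 0: { transcript: string }; isFinal: boolean }>;
};

function collectTranscripts(event: RecognitionResultEvent): { final: string; interim: string } {
  let final = '', interim = '';
  for (let i = event.resultIndex; i < event.results.length; i++) {
    const t = event.results[i][0].transcript;
    if (event.results[i].isFinal) final += t;
    else interim += t;
  }
  return { final, interim };
}

The rewritten file below deletes all of this in favor of the MediaRecorder-plus-server-transcription flow: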
  1
  2  import React, { useState, useRef, useEffect } from 'react';
  3  import { AIChatMessage, User } from '../../types';
  4 +import { Bot, Mic, Square, Volume2, Send, Sparkles, Loader2, StopCircle, Trash2 } from 'lucide-react';
  5  import ReactMarkdown from 'react-markdown';
  6  import remarkGfm from 'remark-gfm';
  7  import { blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS } from '../../utils/mediaHelpers';

 22        timestamp: Date.now()
 23      }];
 24    } catch (e) {
 25 +    return [{
 26 +      id: 'welcome',
 27 +      role: 'model',
 28 +      text: '你好!我是你的 AI 智能助教。有什么可以帮你的吗?',
 29 +      timestamp: Date.now()
 30 +    }];
 31    }
 32  });
 33  const [textInput, setTextInput] = useState('');
 34 +const [inputMode, setInputMode] = useState<'text' | 'audio'>('text');
 35  const [isChatProcessing, setIsChatProcessing] = useState(false);
 36  const [isChatRecording, setIsChatRecording] = useState(false);
 37  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
 38
 39  const mediaRecorderRef = useRef<MediaRecorder | null>(null);

 41  const audioContextRef = useRef<AudioContext | null>(null);
 42  const currentSourceRef = useRef<AudioBufferSourceNode | null>(null);
 43  const messagesEndRef = useRef<HTMLDivElement>(null);
 44
 45 +// Initialize AudioContext
 46  useEffect(() => {
 47    // @ts-ignore
 48    const AudioCtor = window.AudioContext || window.webkitAudioContext;
 49    audioContextRef.current = new AudioCtor();
 50    return () => {
 51      stopPlayback();
 52 +    window.speechSynthesis.cancel();
 53    };
 54  }, []);
 55
 56 +// Persist messages
 57 +useEffect(() => {
 58 +  try {
 59 +    const MAX_COUNT = 50;
 60 +    const welcome = messages.find(m => m.id === 'welcome');
 61 +    const others = messages.filter(m => m.id !== 'welcome');
 62 +    const recent = others.slice(-MAX_COUNT);
 63 +    const messagesToSave = (welcome ? [welcome] : []).concat(recent);
 64 +    localStorage.setItem('ai_chat_history', JSON.stringify(messagesToSave));
 65 +  } catch (e) {}
 66 +}, [messages]);
 67 +
 68 +// Scroll to bottom
 69  useEffect(() => {
 70    messagesEndRef.current?.scrollIntoView({ behavior: isChatProcessing ? 'auto' : 'smooth', block: 'end' });
 71  }, [messages, isChatProcessing]);

 81  const speakWithBrowser = (text: string) => {
 82    if (!text) return;
 83    stopPlayback();
 84 +  const cleanText = cleanTextForTTS(text);
 85 +  const utterance = new SpeechSynthesisUtterance(cleanText);
 86    utterance.lang = 'zh-CN';
 87 +  utterance.rate = 1.0;
 88 +  const voices = window.speechSynthesis.getVoices();
 89 +  const zhVoice = voices.find(v => v.lang === 'zh-CN' && !v.name.includes('Hong Kong') && !v.name.includes('Taiwan'));
 90 +  if (zhVoice) utterance.voice = zhVoice;
 91    window.speechSynthesis.speak(utterance);
 92  };
 93

111    currentSourceRef.current = source;
112  } catch (e) {
113    console.error("Audio playback error", e);
114 +  setToast({ show: true, message: '语音播放失败', type: 'error' });
115  }
116  };
117
118 +const startRecording = async () => {
119    try {
120      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
121      const mediaRecorder = new MediaRecorder(stream);

123      audioChunksRef.current = [];
124
125      mediaRecorder.ondataavailable = (event) => {
126 +      if (event.data.size > 0) {
127 +        audioChunksRef.current.push(event.data);
128 +      }
129      };
130
131      mediaRecorder.start();
132      setIsChatRecording(true);
133    } catch (e) {
134 +    setToast({ show: true, message: '无法访问麦克风', type: 'error' });
135    }
136  };
137
138  const stopRecording = () => {
139 +  if (mediaRecorderRef.current && isChatRecording) {
140      mediaRecorderRef.current.stop();
141 +    setIsChatRecording(false);
142 +
143 +    mediaRecorderRef.current.onstop = async () => {
144 +      const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
145 +      const base64 = await blobToBase64(audioBlob);
146 +      handleChatSubmit(undefined, base64);
147 +      mediaRecorderRef.current?.stream.getTracks().forEach(track => track.stop());
148 +    };
149    }
150  };
151
152  const handleChatSubmit = async (text?: string, audioBase64?: string) => {
153 +  if (!text && !audioBase64) return;
154    stopPlayback();
155    const historyPayload = messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text }));
156
157    const newUserMsg: AIChatMessage = {
158      id: Date.now().toString(),
159      role: 'user',
160 +    text: text || '(语音消息)',
161      isAudioMessage: !!audioBase64,
162      timestamp: Date.now()
163    };
164    const newAiMsgId = (Date.now() + 1).toString();
165 +  const newAiMsg: AIChatMessage = {
166 +    id: newAiMsgId,
167 +    role: 'model',
168 +    text: '',
169 +    timestamp: Date.now()
170 +  };
171
172    setMessages(prev => [...prev, newUserMsg, newAiMsg]);
173    setTextInput('');

182        'x-user-role': currentUser?.role || '',
183        'x-school-id': currentUser?.schoolId || ''
184      },
185 +    body: JSON.stringify({ text, audio: audioBase64, history: historyPayload })
186    });
187
188    if (!response.ok) throw new Error(response.statusText);
189 +  if (!response.body) throw new Error('No response body');
190 +
191 +  const reader = response.body.getReader();
192    const decoder = new TextDecoder();
193    let aiTextAccumulated = '';
194    let buffer = '';

215            playPCMAudio(data.audio);
216          }
217          if (data.ttsSkipped) {
218 +          setToast({ show: true, message: 'AI 语音额度已用尽,已切换至本地语音播报', type: 'error' });
219            speakWithBrowser(aiTextAccumulated);
220          }
221 +        if (data.error) {
222 +          setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message || '未知错误'}` } : m));
223 +        }
224        } catch (e) {}
225      }
226    }
227  }
228  } catch (error: any) {
229 +  setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误,请重试。' } : m));
230  } finally { setIsChatProcessing(false); }
231  };
232
233 +const clearHistory = () => {
234 +  setMessages([{
235 +    id: 'welcome',
236 +    role: 'model',
237 +    text: '你好!我是你的 AI 智能助教。有什么可以帮你的吗?',
238 +    timestamp: Date.now()
239 +  }]);
240 +};
241 +
242  return (
243    <div className="flex-1 flex flex-col max-w-4xl mx-auto w-full min-h-0 relative overflow-hidden h-full">
244      {toast.show && <Toast message={toast.message} type={toast.type} onClose={()=>setToast({...toast, show: false})}/>}
245
246      <div className="absolute top-2 right-4 z-10">
247 +      <button onClick={clearHistory} className="text-xs text-gray-400 hover:text-red-500 flex items-center gap-1 bg-white/80 p-1.5 rounded-lg border border-transparent hover:border-red-100 transition-all shadow-sm backdrop-blur">
248        <Trash2 size={14}/> 清除记录
249      </button>
250    </div>

257      </div>
258      <div className={`max-w-[80%] p-3 rounded-2xl text-sm overflow-hidden ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none shadow-sm'}`}>
259        <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
260 +      {msg.role === 'model' && !msg.text && isChatProcessing && <div className="flex items-center gap-2 text-gray-400 py-1"><Loader2 className="animate-spin" size={14}/><span className="text-xs">思考中...</span></div>}
261 +      {msg.audio ? (<button onClick={() => playPCMAudio(msg.audio!)} className="mt-2 flex items-center gap-2 text-xs bg-blue-50 text-blue-600 px-3 py-1.5 rounded-full hover:bg-blue-100 border border-blue-100 transition-colors w-fit"><Volume2 size={14}/> 播放语音 (AI)</button>) : (msg.role === 'model' && msg.text && !isChatProcessing) && (<button onClick={() => speakWithBrowser(msg.text!)} className="mt-2 flex items-center gap-2 text-xs bg-gray-50 text-gray-600 px-3 py-1.5 rounded-full hover:bg-gray-100 border border-gray-200 transition-colors w-fit"><Volume2 size={14}/> 朗读 (本地)</button>)}
262      </div>
263    </div>
264  ))}
265  <div ref={messagesEndRef} />
266  </div>
267
268  <div className="p-4 bg-white border-t border-gray-200 shrink-0 z-20">
269 +  <div className="flex items-center gap-2 max-w-4xl mx-auto bg-gray-100 p-1.5 rounded-full border border-gray-200">
270 +    <button onClick={() => setInputMode(inputMode === 'text' ? 'audio' : 'text')} className="p-2 rounded-full hover:bg-white text-gray-500 transition-colors">{inputMode === 'text' ? <Mic size={20}/> : <Square size={20}/>}</button>
271 +    {inputMode === 'text' ? (<input className="flex-1 bg-transparent border-none outline-none px-2 text-sm" placeholder="输入问题..." value={textInput} onChange={e => setTextInput(e.target.value)} onKeyDown={e => e.key === 'Enter' && handleChatSubmit(textInput)}/>) : (<div className="flex-1 text-center text-sm font-medium text-blue-600 animate-pulse">{isChatRecording ? '正在录音... 点击停止' : '点击麦克风开始说话'}</div>)}
272 +    {inputMode === 'text' ? (<button onClick={() => handleChatSubmit(textInput)} className="p-2 bg-blue-600 rounded-full text-white hover:bg-blue-700 disabled:opacity-50" disabled={!textInput.trim() || isChatProcessing}><Send size={18}/></button>) : (<button onMouseDown={startRecording} onMouseUp={stopRecording} onTouchStart={startRecording} onTouchEnd={stopRecording} className={`p-3 rounded-full text-white transition-all ${isChatRecording ? 'bg-red-500 scale-110 shadow-lg ring-4 ring-red-200' : 'bg-blue-600 hover:bg-blue-700'}`}>{isChatRecording ? <StopCircle size={20}/> : <Mic size={20}/>}</button>)}
273    </div>
274  </div>
275  </div>
276  );
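The mediaHelpers utilities both panels import (blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS) are not included in this commit. A plausible sketch of the two playback helpers, assuming the TTS audio arrives as base64-encoded 16-bit mono PCM; the 24 kHz sample rate is an assumption, not something this diff confirms:

function base64ToUint8Array(base64: string): Uint8Array {
  const binary = atob(base64);
  const bytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; i++) bytes[i] = binary.charCodeAt(i);
  return bytes;
}

function decodePCM(bytes: Uint8Array, ctx: AudioContext, sampleRate = 24000): AudioBuffer {
  // Reinterpret the raw bytes as little-endian signed 16-bit samples.
  const samples = new Int16Array(bytes.buffer, bytes.byteOffset, bytes.byteLength / 2);
  const buffer = ctx.createBuffer(1, samples.length, sampleRate);
  const channel = buffer.getChannelData(0);
  for (let i = 0; i < samples.length; i++) channel[i] = samples[i] / 32768; // scale to [-1, 1)
  return buffer;
}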
models.js
CHANGED

@@ -123,7 +123,6 @@ const ConfigSchema = new mongoose.Schema({

123    aiTotalCalls: { type: Number, default: 0 },
124    aiProviderOrder: { type: [String], default: ['GEMINI', 'OPENROUTER', 'GEMMA'] }, // NEW
125    periodConfig: [{ period: Number, name: String, startTime: String, endTime: String }],
126 -  hfToken: String, // NEW: Hugging Face Token for Fallback STT
127    apiKeys: {
128      gemini: [String],
129      openrouter: [String]

123    aiTotalCalls: { type: Number, default: 0 },
124    aiProviderOrder: { type: [String], default: ['GEMINI', 'OPENROUTER', 'GEMMA'] }, // NEW
125    periodConfig: [{ period: Number, name: String, startTime: String, endTime: String }],
126    apiKeys: {
127      gemini: [String],
128      openrouter: [String]
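Dropping hfToken here matches the removal of the Hugging Face Whisper fallback from ai-routes.js. With Mongoose's default strict mode, a path that is no longer declared in the schema is ignored on writes, so existing config documents keep a stale hfToken key until they are next rewritten, and nothing needs migrating for reads. A minimal illustration (Demo is a hypothetical model):

import mongoose from 'mongoose';

// Strict mode (the default) silently drops undeclared paths.
const Demo = mongoose.model('Demo', new mongoose.Schema({ keep: String }));
const doc = new Demo({ keep: 'yes', hfToken: 'stale' } as any);
// doc.toObject() -> { _id: ..., keep: 'yes' }  (no hfToken)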
server.js
CHANGED

@@ -73,19 +73,22 @@ wss.on('connection', async (ws, req) => {

 73    let isGeminiConnected = false;
 74
 75    try {
 76 -    // 1. …
 77 -    const …
 78
 79      if (!apiKey) {
 80 -      …
 81 -      ws.send(JSON.stringify({ type: 'error', message: 'Server API Key is missing in environment' }));
 82        ws.close();
 83        return;
 84      }
 85
 86      // 2. Initialize Gemini SDK
 87      const { GoogleGenAI } = await import("@google/genai");
 88 -    const client = new GoogleGenAI({ apiKey…
 89
 90      // 3. Connect to Gemini (Isolated Session per Connection)
 91      geminiSession = await client.live.connect({

 73    let isGeminiConnected = false;
 74
 75    try {
 76 +    // 1. Get API Key (Server-side Config)
 77 +    const config = await ConfigModel.findOne({ key: 'main' });
 78 +    let apiKey = process.env.API_KEY;
 79 +    if (config && config.apiKeys && config.apiKeys.gemini && config.apiKeys.gemini.length > 0) {
 80 +      apiKey = config.apiKeys.gemini[0];
 81 +    }
 82
 83      if (!apiKey) {
 84 +      ws.send(JSON.stringify({ type: 'error', message: 'No Server API Key Configured' }));
 85        ws.close();
 86        return;
 87      }
 88
 89      // 2. Initialize Gemini SDK
 90      const { GoogleGenAI } = await import("@google/genai");
 91 +    const client = new GoogleGenAI({ apiKey });
 92
 93      // 3. Connect to Gemini (Isolated Session per Connection)
 94      geminiSession = await client.live.connect({
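The new lookup above prefers a Gemini key stored in the Mongo config document and falls back to the API_KEY environment variable only when none is configured. The same precedence expressed as a small helper, for clarity (resolveGeminiKey is a hypothetical name; ConfigModel is the model exported by models.js):

async function resolveGeminiKey(): Promise<string | undefined> {
  const config = await ConfigModel.findOne({ key: 'main' });
  const fromDb = config?.apiKeys?.gemini?.[0];
  return fromDb || process.env.API_KEY; // a DB-configured key wins over the env fallback
}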