dvc890 committed on
Commit
019d28a
·
verified ·
1 Parent(s): 86f8388

Upload 66 files

Browse files
ai-routes.js CHANGED
@@ -97,7 +97,7 @@ function isQuotaError(e) {
97
  }
98
 
99
  // Streaming Helpers
100
- async function streamGemini(baseParams, res) {
101
  const { GoogleGenAI } = await import("@google/genai");
102
  const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
103
  const keys = await getKeyPool('gemini');
@@ -108,7 +108,16 @@ async function streamGemini(baseParams, res) {
108
  for (const modelName of models) {
109
  try {
110
  console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
111
- const result = await client.models.generateContentStream({ ...baseParams, model: modelName });
 
 
 
 
 
 
 
 
 
112
 
113
  let hasStarted = false;
114
  let fullText = "";
@@ -119,9 +128,13 @@ async function streamGemini(baseParams, res) {
119
  recordUsage(modelName, PROVIDERS.GEMINI);
120
  hasStarted = true;
121
  }
 
 
 
 
 
122
  if (chunk.text) {
123
  fullText += chunk.text;
124
- // FIX: Use { type: 'text', content: ... }
125
  res.write(`data: ${JSON.stringify({ type: 'text', content: chunk.text })}\n\n`);
126
  if (res.flush) res.flush();
127
  }
@@ -142,7 +155,7 @@ async function streamGemini(baseParams, res) {
142
 
143
  // --- DOUBAO DIRECT STREAMING (Axios) ---
144
 
145
- async function streamDoubao(baseParams, res, userId, mode = 'chat', config) {
146
  const keys = await getKeyPool('doubao');
147
  if (keys.length === 0) throw new Error("No Doubao API keys configured");
148
 
@@ -160,14 +173,14 @@ async function streamDoubao(baseParams, res, userId, mode = 'chat', config) {
160
 
161
  for (const apiKey of keys) {
162
  try {
163
- console.log(`[AI] 🚀 Calling Doubao API (Axios): ${endpointId}`);
164
 
165
  // Payload matching user's CURL example
166
  const payload = {
167
  model: endpointId,
168
  messages: messages,
169
  stream: true,
170
- thinking: { type: "disabled" } // Explicitly disable thinking
171
  };
172
 
173
  const response = await axios.post(
@@ -187,6 +200,7 @@ async function streamDoubao(baseParams, res, userId, mode = 'chat', config) {
187
  recordUsage(modelId, PROVIDERS.DOUBAO);
188
 
189
  let fullText = "";
 
190
  let hasStarted = false;
191
  let buffer = "";
192
 
@@ -208,11 +222,20 @@ async function streamDoubao(baseParams, res, userId, mode = 'chat', config) {
208
  try {
209
  const jsonStr = trimmed.substring(6);
210
  const json = JSON.parse(jsonStr);
211
- const content = json.choices?.[0]?.delta?.content || '';
 
 
 
 
 
 
 
 
 
 
212
 
213
  if (content) {
214
  fullText += content;
215
- // FIX: Ensure format is { type: 'text', content: ... }
216
  res.write(`data: ${JSON.stringify({ type: 'text', content: content })}\n\n`);
217
  if (res.flush) res.flush();
218
  }
@@ -260,7 +283,8 @@ async function streamOpenRouter(baseParams, res) {
260
  // If user uses Doubao via OpenRouter/Custom Proxy, also try to apply cache/thinking params
261
  if (modelName.toLowerCase().includes('doubao')) {
262
  extraBody.caching = { type: "enabled", prefix: true };
263
- extraBody.thinking = { type: "disabled" };
 
264
  }
265
 
266
  try {
@@ -337,7 +361,7 @@ async function streamGemma(baseParams, res) {
337
  throw new Error("Gemma stream failed");
338
  }
339
 
340
- async function streamContentWithSmartFallback(baseParams, res, userId, mode = 'chat') {
341
  let hasAudio = false;
342
  const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
343
 
@@ -350,7 +374,7 @@ async function streamContentWithSmartFallback(baseParams, res, userId, mode = 'c
350
  if (hasAudio) {
351
  try {
352
  console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
353
- return await streamGemini(baseParams, res);
354
  } catch(e) {
355
  console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
356
  deprioritizeProvider(PROVIDERS.GEMINI);
@@ -371,10 +395,10 @@ async function streamContentWithSmartFallback(baseParams, res, userId, mode = 'c
371
  let finalError = null;
372
  for (const provider of runtimeProviderOrder) {
373
  try {
374
- console.log(`[AI] 👉 Trying Provider: ${provider}... Mode: ${mode}`);
375
- if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res);
376
  else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
377
- else if (provider === PROVIDERS.DOUBAO) return await streamDoubao(baseParams, res, userId, mode, config);
378
  else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
379
  } catch (e) {
380
  console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
@@ -436,13 +460,14 @@ router.post('/reset-pool', checkAIAccess, (req, res) => {
436
  // --- PERSISTENT CHAT HISTORY HANDLER ---
437
  // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
438
  router.post('/chat', async (req, res) => {
439
- const { text, audio, images, history } = req.body; // Added images
440
  const userRole = req.headers['x-user-role'];
441
  const username = req.headers['x-user-username'];
442
  const schoolId = req.headers['x-school-id'];
443
 
444
  // ... (Keep Context building logic same) ...
445
- const systemInstruction = await buildUserContext(username, userRole, schoolId);
 
446
 
447
  // Build History
448
  // Filter out messages with empty text to prevent API errors
@@ -509,9 +534,11 @@ router.post('/chat', async (req, res) => {
509
  config: {
510
  systemInstruction: systemInstruction
511
  }
512
- }, res);
513
 
514
  // 2. Save User Message to DB
 
 
515
  await ChatHistoryModel.create({
516
  userId: req.headers['x-user-username'],
517
  role: 'user',
@@ -520,8 +547,9 @@ router.post('/chat', async (req, res) => {
520
  });
521
 
522
  // 3. Generate TTS (Independent of Text Model)
523
- // Only generate audio if text is sufficient length and not just punctuation
524
- if (fullText && fullText.length > 2) {
 
525
  res.write(`data: ${JSON.stringify({ type: 'status', status: 'tts' })}\n\n`);
526
  try {
527
  const { GoogleGenAI } = await import("@google/genai");
 
97
  }
98
 
99
  // Streaming Helpers
100
+ async function streamGemini(baseParams, res, enableThinking = false) {
101
  const { GoogleGenAI } = await import("@google/genai");
102
  const models = ['gemini-2.5-flash', 'gemini-2.5-flash-lite'];
103
  const keys = await getKeyPool('gemini');
 
108
  for (const modelName of models) {
109
  try {
110
  console.log(`[AI] 🚀 Attempting Gemini Model: ${modelName} (Key ends with ...${apiKey.slice(-4)})`);
111
+
112
+ // Add thinking config if requested
113
+ const requestParams = { ...baseParams, model: modelName };
114
+ if (enableThinking) {
115
+ requestParams.config = requestParams.config || {};
116
+ // Gemini 2.5 Flash supports thinking config (set budget)
117
+ requestParams.config.thinkingConfig = { thinkingBudget: 1024 };
118
+ }
119
+
120
+ const result = await client.models.generateContentStream(requestParams);
121
 
122
  let hasStarted = false;
123
  let fullText = "";
 
128
  recordUsage(modelName, PROVIDERS.GEMINI);
129
  hasStarted = true;
130
  }
131
+
132
+ // Check for thought content (provider specific, Gemini SDK usually keeps it in candidates)
133
+ // Note: Current Google GenAI Node SDK might not separate thought perfectly in stream without checking complex response structure.
134
+ // For now, we assume Gemini just streams text. If Gemini adds explicit thought parts in future SDKs, we parse here.
135
+
136
  if (chunk.text) {
137
  fullText += chunk.text;
 
138
  res.write(`data: ${JSON.stringify({ type: 'text', content: chunk.text })}\n\n`);
139
  if (res.flush) res.flush();
140
  }
 
155
 
156
  // --- DOUBAO DIRECT STREAMING (Axios) ---
157
 
158
+ async function streamDoubao(baseParams, res, userId, mode = 'chat', config, enableThinking = false) {
159
  const keys = await getKeyPool('doubao');
160
  if (keys.length === 0) throw new Error("No Doubao API keys configured");
161
 
 
173
 
174
  for (const apiKey of keys) {
175
  try {
176
+ console.log(`[AI] 🚀 Calling Doubao API (Axios): ${endpointId}, Thinking: ${enableThinking}`);
177
 
178
  // Payload matching user's CURL example
179
  const payload = {
180
  model: endpointId,
181
  messages: messages,
182
  stream: true,
183
+ thinking: { type: enableThinking ? "enabled" : "disabled" }
184
  };
185
 
186
  const response = await axios.post(
 
200
  recordUsage(modelId, PROVIDERS.DOUBAO);
201
 
202
  let fullText = "";
203
+ let fullThought = "";
204
  let hasStarted = false;
205
  let buffer = "";
206
 
 
222
  try {
223
  const jsonStr = trimmed.substring(6);
224
  const json = JSON.parse(jsonStr);
225
+ const delta = json.choices?.[0]?.delta;
226
+
227
+ // Handle Thinking Content (DeepSeek/Doubao format usually puts it in reasoning_content)
228
+ const reasoning = delta?.reasoning_content;
229
+ const content = delta?.content;
230
+
231
+ if (reasoning) {
232
+ fullThought += reasoning;
233
+ res.write(`data: ${JSON.stringify({ type: 'thinking', content: reasoning })}\n\n`);
234
+ if (res.flush) res.flush();
235
+ }
236
 
237
  if (content) {
238
  fullText += content;
 
239
  res.write(`data: ${JSON.stringify({ type: 'text', content: content })}\n\n`);
240
  if (res.flush) res.flush();
241
  }
 
283
  // If user uses Doubao via OpenRouter/Custom Proxy, also try to apply cache/thinking params
284
  if (modelName.toLowerCase().includes('doubao')) {
285
  extraBody.caching = { type: "enabled", prefix: true };
286
+ // We rely on the request passing thinking enabled, but OpenRouter implementation might vary
287
+ // For now, standard OpenRouter doesn't support 'thinking' param standardly, mostly handled via model choice (e.g. R1)
288
  }
289
 
290
  try {
 
361
  throw new Error("Gemma stream failed");
362
  }
363
 
364
+ async function streamContentWithSmartFallback(baseParams, res, userId, mode = 'chat', enableThinking = false) {
365
  let hasAudio = false;
366
  const contentsArray = Array.isArray(baseParams.contents) ? baseParams.contents : [baseParams.contents];
367
 
 
374
  if (hasAudio) {
375
  try {
376
  console.log(`[AI] 🎤 Audio detected, forcing Gemini provider.`);
377
+ return await streamGemini(baseParams, res, enableThinking);
378
  } catch(e) {
379
  console.error(`[AI] ❌ Audio Processing Failed: ${e.message}`);
380
  deprioritizeProvider(PROVIDERS.GEMINI);
 
395
  let finalError = null;
396
  for (const provider of runtimeProviderOrder) {
397
  try {
398
+ console.log(`[AI] 👉 Trying Provider: ${provider}... Mode: ${mode}, Thinking: ${enableThinking}`);
399
+ if (provider === PROVIDERS.GEMINI) return await streamGemini(baseParams, res, enableThinking);
400
  else if (provider === PROVIDERS.OPENROUTER) return await streamOpenRouter(baseParams, res);
401
+ else if (provider === PROVIDERS.DOUBAO) return await streamDoubao(baseParams, res, userId, mode, config, enableThinking);
402
  else if (provider === PROVIDERS.GEMMA) return await streamGemma(baseParams, res);
403
  } catch (e) {
404
  console.error(`[AI] ❌ Provider ${provider} Failed: ${e.message}`);
 
460
  // --- PERSISTENT CHAT HISTORY HANDLER ---
461
  // Instead of relying on client-side 'history', we use MongoDB to ensure cross-device memory.
462
  router.post('/chat', async (req, res) => {
463
+ const { text, audio, images, history, enableThinking, overrideSystemPrompt } = req.body; // Added images, enableThinking, overrideSystemPrompt
464
  const userRole = req.headers['x-user-role'];
465
  const username = req.headers['x-user-username'];
466
  const schoolId = req.headers['x-school-id'];
467
 
468
  // ... (Keep Context building logic same) ...
469
+ // If overrideSystemPrompt is provided (Work Assistant), use it. Otherwise build standard context.
470
+ const systemInstruction = overrideSystemPrompt || await buildUserContext(username, userRole, schoolId);
471
 
472
  // Build History
473
  // Filter out messages with empty text to prevent API errors
 
534
  config: {
535
  systemInstruction: systemInstruction
536
  }
537
+ }, res, req.headers['x-user-username'], 'chat', enableThinking);
538
 
539
  // 2. Save User Message to DB
540
+ // For Work Assistant, we might want to segregate history, but for simplicity we save all to same collection
541
+ // Client-side can filter or use distinct states
542
  await ChatHistoryModel.create({
543
  userId: req.headers['x-user-username'],
544
  role: 'user',
 
547
  });
548
 
549
  // 3. Generate TTS (Independent of Text Model)
550
+ // Only generate audio if text is sufficient length, not just punctuation, AND NO Thinking logic (Work Assistant disables audio usually)
551
+ // Check req.body.disableAudio which might be passed by Work Assistant
552
+ if (fullText && fullText.length > 2 && !req.body.disableAudio) {
553
  res.write(`data: ${JSON.stringify({ type: 'status', status: 'tts' })}\n\n`);
554
  try {
555
  const { GoogleGenAI } = await import("@google/genai");
components/ai/WorkAssistantPanel.tsx ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import React, { useState, useRef, useEffect } from 'react';
3
+ import { AIChatMessage, User } from '../../types';
4
+ import { Bot, Send, Sparkles, Loader2, Image as ImageIcon, X, Trash2, Brain, ChevronDown, ChevronRight, Copy, Check } from 'lucide-react';
5
+ import ReactMarkdown from 'react-markdown';
6
+ import remarkGfm from 'remark-gfm';
7
+ import { compressImage } from '../../utils/mediaHelpers';
8
+ import { Toast, ToastState } from '../Toast';
9
+
10
+ interface WorkAssistantPanelProps {
11
+ currentUser: User | null;
12
+ }
13
+
14
+ // Pre-defined Roles
15
+ const ROLES = [
16
+ {
17
+ id: 'editor',
18
+ name: '公众号小编',
19
+ icon: '📝',
20
+ prompt: `你是一位专业的微信公众号小编。
21
+ 请根据用户提供的图片和描述撰写一篇图文并茂的文章。
22
+ 1. 风格:生动活泼、有感染力,适当使用 Emoji。
23
+ 2. 标题:提供3个吸引人的标题供选择。
24
+ 3. 图片排版:在文章中合适的位置插入图片占位符。格式必须为:**[图片N]** (例如:**[图片1]**, **[图片2]**)。请根据图片内容逻辑安排顺序,并标注是哪张图片。
25
+ 4. 结构:包含开头引导、正文内容、结尾互动。`
26
+ },
27
+ {
28
+ id: 'host',
29
+ name: '活动主持/策划',
30
+ icon: '🎤',
31
+ prompt: `你是一位经验丰富的学校活动策划和主持人。
32
+ 协助老师撰写活动流程、主持稿或致辞。
33
+ 1. 风格:庄重、大气或热情(根据活动性质调整)。
34
+ 2. 内容:逻辑清晰,环节紧凑。
35
+ 3. 格式:标明【环节名称】、【时长预估】、【具体话术】。`
36
+ },
37
+ {
38
+ id: 'writer',
39
+ name: '文案润色专家',
40
+ icon: '✍️',
41
+ prompt: `你是一位资深的文字编辑。
42
+ 请帮助老师优化、润色草稿或口语化的文字。
43
+ 1. 目标:使文字更加通顺、优雅、符合书面语规范。
44
+ 2. 修正:纠正错别字和语病。
45
+ 3. 提升:优化修辞,增强表现力,但保持原意不变。`
46
+ },
47
+ {
48
+ id: 'promoter',
49
+ name: '宣传文案/朋友圈',
50
+ icon: '📢',
51
+ prompt: `你是一位擅长社交媒体传播的文案。
52
+ 请为学校活动撰写短小精悍的宣传语,适用于朋友圈、班级群或海报。
53
+ 1. 长度:200字以内。
54
+ 2. 重点:突出亮点,号召行动。
55
+ 3. 格式:分行排版,便于手机阅读。`
56
+ }
57
+ ];
58
+
59
+ export const WorkAssistantPanel: React.FC<WorkAssistantPanelProps> = ({ currentUser }) => {
60
+ const [selectedRole, setSelectedRole] = useState(ROLES[0]);
61
+ const [enableThinking, setEnableThinking] = useState(false);
62
+
63
+ // Chat State
64
+ const [messages, setMessages] = useState<AIChatMessage[]>([]);
65
+ const [textInput, setTextInput] = useState('');
66
+ const [selectedImages, setSelectedImages] = useState<File[]>([]);
67
+ const [isProcessing, setIsProcessing] = useState(false);
68
+
69
+ // UI State
70
+ const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
71
+ const [isThinkingExpanded, setIsThinkingExpanded] = useState<Record<string, boolean>>({});
72
+
73
+ const messagesEndRef = useRef<HTMLDivElement>(null);
74
+ const fileInputRef = useRef<HTMLInputElement>(null);
75
+
76
+ useEffect(() => {
77
+ messagesEndRef.current?.scrollIntoView({ behavior: 'smooth', block: 'end' });
78
+ }, [messages, isProcessing, isThinkingExpanded]);
79
+
80
+ const handleImageSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
81
+ if (e.target.files) {
82
+ setSelectedImages(prev => [...prev, ...Array.from(e.target.files!)]);
83
+ }
84
+ };
85
+
86
+ const handleCopy = (text: string) => {
87
+ navigator.clipboard.writeText(text);
88
+ setToast({ show: true, message: '内容已复制', type: 'success' });
89
+ };
90
+
91
+ const handleSubmit = async () => {
92
+ if ((!textInput.trim() && selectedImages.length === 0) || isProcessing) return;
93
+
94
+ setIsProcessing(true);
95
+ const currentText = textInput;
96
+ const currentImages = [...selectedImages];
97
+
98
+ // Reset Inputs
99
+ setTextInput('');
100
+ setSelectedImages([]);
101
+
102
+ // ID generation
103
+ const newAiMsgId = Date.now().toString();
104
+
105
+ try {
106
+ // Process Images to Base64
107
+ const base64Images = await Promise.all(currentImages.map(f => compressImage(f)));
108
+
109
+ // User Message
110
+ const newUserMsg: AIChatMessage = {
111
+ id: (Date.now() - 1).toString(),
112
+ role: 'user',
113
+ text: currentText,
114
+ images: base64Images,
115
+ timestamp: Date.now()
116
+ };
117
+
118
+ // AI Placeholder
119
+ const newAiMsg: AIChatMessage = {
120
+ id: newAiMsgId,
121
+ role: 'model',
122
+ text: '',
123
+ thought: '',
124
+ timestamp: Date.now(),
125
+ images: base64Images // Pass images to AI message for rendering placeholders locally if needed, but actually we use user's images
126
+ };
127
+
128
+ setMessages(prev => [...prev, newUserMsg, newAiMsg]);
129
+
130
+ // Default expand thinking
131
+ if (enableThinking) {
132
+ setIsThinkingExpanded(prev => ({ ...prev, [newAiMsgId]: true }));
133
+ }
134
+
135
+ // Build history context (Simplified: just role context + recent messages)
136
+ const historyContext = messages.slice(-5).map(m => ({ role: m.role, text: m.text }));
137
+
138
+ // Custom prompt logic for image indexing
139
+ let finalPrompt = currentText;
140
+ if (base64Images.length > 0) {
141
+ finalPrompt += `\n\n(附带了 ${base64Images.length} 张图片。在文章中插入时请使用 **[图片1]**, **[图片2]** 等占位符来对应第1、第2张图)`;
142
+ }
143
+
144
+ const response = await fetch('/api/ai/chat', {
145
+ method: 'POST',
146
+ headers: {
147
+ 'Content-Type': 'application/json',
148
+ 'x-user-username': currentUser?.username || '',
149
+ 'x-user-role': currentUser?.role || '',
150
+ 'x-school-id': currentUser?.schoolId || ''
151
+ },
152
+ body: JSON.stringify({
153
+ text: finalPrompt,
154
+ images: base64Images,
155
+ history: historyContext,
156
+ enableThinking,
157
+ overrideSystemPrompt: selectedRole.prompt, // Inject specific role prompt
158
+ disableAudio: true // No TTS for work assistant
159
+ })
160
+ });
161
+
162
+ if (!response.ok) throw new Error(response.statusText);
163
+ if (!response.body) throw new Error('No response body');
164
+
165
+ const reader = response.body.getReader();
166
+ const decoder = new TextDecoder();
167
+ let aiTextAccumulated = '';
168
+ let aiThoughtAccumulated = '';
169
+ let buffer = '';
170
+
171
+ while (true) {
172
+ const { done, value } = await reader.read();
173
+ if (done) break;
174
+
175
+ buffer += decoder.decode(value, { stream: true });
176
+ const parts = buffer.split('\n\n');
177
+ buffer = parts.pop() || '';
178
+
179
+ for (const line of parts) {
180
+ if (line.startsWith('data: ')) {
181
+ const jsonStr = line.replace('data: ', '').trim();
182
+ if (jsonStr === '[DONE]') break;
183
+
184
+ try {
185
+ const data = JSON.parse(jsonStr);
186
+
187
+ if (data.type === 'thinking') {
188
+ aiThoughtAccumulated += data.content;
189
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, thought: aiThoughtAccumulated } : m));
190
+ } else if (data.type === 'text') {
191
+ // Once text starts, collapse thinking
192
+ if (aiTextAccumulated === '' && aiThoughtAccumulated !== '') {
193
+ setIsThinkingExpanded(prev => ({ ...prev, [newAiMsgId]: false }));
194
+ }
195
+ aiTextAccumulated += data.content;
196
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated } : m));
197
+ } else if (data.type === 'error') {
198
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message}` } : m));
199
+ }
200
+ } catch (e) {}
201
+ }
202
+ }
203
+ }
204
+
205
+ } catch (error: any) {
206
+ setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `抱歉,处理失败: ${error.message}` } : m));
207
+ } finally {
208
+ setIsProcessing(false);
209
+ }
210
+ };
211
+
212
+ // Custom Renderer for [图片N]
213
+ const renderContent = (text: string, sourceImages: string[] | undefined) => {
214
+ // Split by image placeholders: **[图片N]** or [图片N]
215
+ const parts = text.split(/(\*\*\[图片\d+\]\*\*|\[图片\d+\])/g);
216
+
217
+ return (
218
+ <div>
219
+ {parts.map((part, idx) => {
220
+ const match = part.match(/\d+/);
221
+ if ((part.startsWith('[图片') || part.startsWith('**[图片')) && match && sourceImages) {
222
+ const imgIndex = parseInt(match[0]) - 1;
223
+ if (sourceImages[imgIndex]) {
224
+ return (
225
+ <div key={idx} className="my-4">
226
+ <img
227
+ src={`data:image/jpeg;base64,${sourceImages[imgIndex]}`}
228
+ className="max-w-full md:max-w-md rounded-lg shadow-sm border border-gray-200"
229
+ alt={`Image ${imgIndex + 1}`}
230
+ />
231
+ <div className="text-center text-xs text-gray-400 mt-1">图 {imgIndex + 1}</div>
232
+ </div>
233
+ );
234
+ }
235
+ }
236
+ return <ReactMarkdown key={idx} remarkPlugins={[remarkGfm]} components={{p: ({node, ...props}) => <p className="mb-2 last:mb-0" {...props}/>}}>{part}</ReactMarkdown>;
237
+ })}
238
+ </div>
239
+ );
240
+ };
241
+
242
+ return (
243
+ <div className="flex flex-col h-full bg-slate-50 relative">
244
+ {toast.show && <Toast message={toast.message} type={toast.type} onClose={()=>setToast({...toast, show: false})}/>}
245
+
246
+ {/* Toolbar */}
247
+ <div className="bg-white px-6 py-3 border-b border-gray-200 flex flex-wrap gap-4 items-center justify-between shadow-sm shrink-0 z-20">
248
+ <div className="flex items-center gap-2">
249
+ <span className="text-sm font-bold text-gray-700">工作场景:</span>
250
+ <div className="relative group">
251
+ <button className="flex items-center gap-2 px-3 py-1.5 bg-indigo-50 text-indigo-700 rounded-lg text-sm font-bold border border-indigo-100 hover:bg-indigo-100 transition-colors">
252
+ <span>{selectedRole.icon} {selectedRole.name}</span>
253
+ <ChevronDown size={14}/>
254
+ </button>
255
+ <div className="absolute top-full left-0 mt-1 w-56 bg-white border border-gray-200 rounded-xl shadow-xl overflow-hidden hidden group-hover:block z-50">
256
+ {ROLES.map(role => (
257
+ <button
258
+ key={role.id}
259
+ onClick={() => setSelectedRole(role)}
260
+ className={`w-full text-left px-4 py-3 text-sm hover:bg-indigo-50 flex items-center gap-2 ${selectedRole.id === role.id ? 'bg-indigo-50 text-indigo-700 font-bold' : 'text-gray-700'}`}
261
+ >
262
+ <span>{role.icon}</span>
263
+ <span>{role.name}</span>
264
+ </button>
265
+ ))}
266
+ </div>
267
+ </div>
268
+ </div>
269
+
270
+ <div className="flex items-center gap-4">
271
+ <div className="flex items-center gap-2" title="仅支持部分高级模型 (如 Doubao Pro, Gemini Flash)">
272
+ <span className="text-sm text-gray-600 font-medium flex items-center gap-1">
273
+ <Brain size={16} className={enableThinking ? "text-purple-600" : "text-gray-400"}/> 深度思考
274
+ </span>
275
+ <label className="relative inline-flex items-center cursor-pointer">
276
+ <input type="checkbox" checked={enableThinking} onChange={e => setEnableThinking(e.target.checked)} className="sr-only peer"/>
277
+ <div className="w-9 h-5 bg-gray-200 peer-focus:outline-none rounded-full peer peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-4 after:w-4 after:transition-all peer-checked:bg-purple-600"></div>
278
+ </label>
279
+ </div>
280
+ <button onClick={() => setMessages([])} className="text-gray-400 hover:text-red-500 p-2 rounded-full hover:bg-red-50 transition-colors">
281
+ <Trash2 size={18}/>
282
+ </button>
283
+ </div>
284
+ </div>
285
+
286
+ {/* Chat Area */}
287
+ <div className="flex-1 overflow-y-auto p-6 space-y-6 custom-scrollbar">
288
+ {messages.length === 0 && (
289
+ <div className="flex flex-col items-center justify-center h-full text-gray-400 opacity-60">
290
+ <Bot size={48} className="mb-4"/>
291
+ <p className="text-lg font-bold">我是你的{selectedRole.name}</p>
292
+ <p className="text-sm">上传图片或输入要求,开始工作吧</p>
293
+ </div>
294
+ )}
295
+
296
+ {messages.map((msg, index) => {
297
+ // Find the user message immediately preceding this AI message to get source images for placeholders
298
+ let sourceImages: string[] = [];
299
+ if (msg.role === 'model') {
300
+ // Look back for the connected user message (usually index - 1)
301
+ const prevMsg = messages[index - 1];
302
+ if (prevMsg && prevMsg.role === 'user' && prevMsg.images) {
303
+ sourceImages = prevMsg.images;
304
+ }
305
+ } else if (msg.role === 'user' && msg.images) {
306
+ sourceImages = msg.images;
307
+ }
308
+
309
+ return (
310
+ <div key={msg.id} className={`flex gap-4 ${msg.role === 'user' ? 'flex-row-reverse' : ''} max-w-4xl mx-auto w-full`}>
311
+ <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 shadow-sm ${msg.role === 'model' ? 'bg-white border border-indigo-100 text-indigo-600' : 'bg-blue-600 text-white'}`}>
312
+ {msg.role === 'model' ? <Sparkles size={20}/> : <span className="font-bold text-xs">ME</span>}
313
+ </div>
314
+
315
+ <div className={`flex flex-col gap-2 max-w-[85%] ${msg.role === 'user' ? 'items-end' : 'items-start'}`}>
316
+ {msg.role === 'model' && msg.thought && (
317
+ <div className="w-full bg-purple-50 rounded-xl border border-purple-100 overflow-hidden mb-2">
318
+ <button
319
+ onClick={() => setIsThinkingExpanded(prev => ({ ...prev, [msg.id]: !prev[msg.id] }))}
320
+ className="w-full px-4 py-2 flex items-center justify-between text-xs font-bold text-purple-700 bg-purple-100/50 hover:bg-purple-100 transition-colors"
321
+ >
322
+ <span className="flex items-center gap-2"><Brain size={14}/> 深度思考过程</span>
323
+ {isThinkingExpanded[msg.id] ? <ChevronDown size={14}/> : <ChevronRight size={14}/>}
324
+ </button>
325
+ {isThinkingExpanded[msg.id] && (
326
+ <div className="p-4 text-xs text-purple-800 whitespace-pre-wrap leading-relaxed border-t border-purple-100 font-mono bg-white/50">
327
+ {msg.thought}
328
+ </div>
329
+ )}
330
+ </div>
331
+ )}
332
+
333
+ <div className={`p-4 rounded-2xl shadow-sm text-sm overflow-hidden relative group ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white text-gray-800 border border-gray-100 rounded-tl-none'}`}>
334
+ {/* User Image Preview Grid */}
335
+ {msg.role === 'user' && sourceImages.length > 0 && (
336
+ <div className="grid grid-cols-3 gap-2 mb-3">
337
+ {sourceImages.map((img, i) => (
338
+ <div key={i} className="relative aspect-square">
339
+ <img src={`data:image/jpeg;base64,${img}`} className="w-full h-full object-cover rounded-lg border border-white/20" />
340
+ <div className="absolute bottom-0 right-0 bg-black/50 text-white text-[10px] px-1.5 rounded-tl-lg">图{i+1}</div>
341
+ </div>
342
+ ))}
343
+ </div>
344
+ )}
345
+
346
+ {/* Content Rendering */}
347
+ <div className={`markdown-body ${msg.role === 'user' ? 'text-white' : ''}`}>
348
+ {msg.role === 'model' ? renderContent(msg.text || '', sourceImages) : <p className="whitespace-pre-wrap">{msg.text}</p>}
349
+ </div>
350
+
351
+ {/* Copy Button for Model */}
352
+ {msg.role === 'model' && !isProcessing && (
353
+ <button
354
+ onClick={() => handleCopy(msg.text || '')}
355
+ className="absolute top-2 right-2 p-1.5 text-gray-400 hover:text-blue-600 bg-white/80 backdrop-blur rounded-lg opacity-0 group-hover:opacity-100 transition-all shadow-sm border border-gray-100"
356
+ title="复制内容"
357
+ >
358
+ <Copy size={14}/>
359
+ </button>
360
+ )}
361
+ </div>
362
+ </div>
363
+ </div>
364
+ );
365
+ })}
366
+ <div ref={messagesEndRef} />
367
+ </div>
368
+
369
+ {/* Input Area */}
370
+ <div className="bg-white border-t border-gray-200 p-4 z-20">
371
+ <div className="max-w-4xl mx-auto flex flex-col gap-3">
372
+ {/* Image Preview */}
373
+ {selectedImages.length > 0 && (
374
+ <div className="flex gap-2 overflow-x-auto pb-2 px-1">
375
+ {selectedImages.map((file, idx) => (
376
+ <div key={idx} className="relative w-16 h-16 shrink-0 group rounded-lg overflow-hidden border border-gray-200 shadow-sm">
377
+ <img src={URL.createObjectURL(file)} className="w-full h-full object-cover" />
378
+ <div className="absolute inset-0 bg-black/40 opacity-0 group-hover:opacity-100 transition-opacity flex items-center justify-center">
379
+ <button onClick={() => setSelectedImages(prev => prev.filter((_, i) => i !== idx))} className="text-white hover:text-red-400"><X size={16}/></button>
380
+ </div>
381
+ <div className="absolute bottom-0 right-0 bg-blue-600 text-white text-[9px] px-1 rounded-tl">图{idx+1}</div>
382
+ </div>
383
+ ))}
384
+ </div>
385
+ )}
386
+
387
+ <div className="flex gap-2 items-end bg-gray-50 p-2 rounded-2xl border border-gray-200 focus-within:ring-2 focus-within:ring-indigo-100 focus-within:border-indigo-300 transition-all">
388
+ <button onClick={() => fileInputRef.current?.click()} className="p-3 text-gray-500 hover:bg-white hover:text-indigo-600 rounded-xl transition-colors shrink-0" title="上传图片">
389
+ <ImageIcon size={22}/>
390
+ </button>
391
+ <input type="file" multiple accept="image/*" ref={fileInputRef} className="hidden" onChange={handleImageSelect} onClick={(e) => (e.currentTarget.value = '')} />
392
+
393
+ <textarea
394
+ className="flex-1 bg-transparent border-none outline-none text-sm resize-none max-h-32 py-3 px-2"
395
+ placeholder={`给${selectedRole.name}下达指令... (支持 Shift+Enter 换行)`}
396
+ rows={1}
397
+ value={textInput}
398
+ onChange={e => setTextInput(e.target.value)}
399
+ onKeyDown={e => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleSubmit(); } }}
400
+ />
401
+
402
+ <button
403
+ onClick={handleSubmit}
404
+ disabled={(!textInput.trim() && selectedImages.length === 0) || isProcessing}
405
+ className={`p-3 rounded-xl transition-all shrink-0 shadow-sm ${(!textInput.trim() && selectedImages.length === 0) || isProcessing ? 'bg-gray-200 text-gray-400 cursor-not-allowed' : 'bg-indigo-600 text-white hover:bg-indigo-700 hover:scale-105'}`}
406
+ >
407
+ {isProcessing ? <Loader2 className="animate-spin" size={20}/> : <Send size={20}/>}
408
+ </button>
409
+ </div>
410
+ <div className="text-center text-xs text-gray-400">
411
+ * AI 生成内容仅供参考,请人工审核后使用
412
+ </div>
413
+ </div>
414
+ </div>
415
+ </div>
416
+ );
417
+ };
pages/AIAssistant.tsx CHANGED
@@ -2,10 +2,11 @@
2
  import React, { useState, useEffect } from 'react';
3
  import { api } from '../services/api';
4
  import { UserRole } from '../types';
5
- import { Bot, Mic, Loader2 } from 'lucide-react';
6
  import { AdminPanel } from '../components/ai/AdminPanel';
7
  import { ChatPanel } from '../components/ai/ChatPanel';
8
  import { AssessmentPanel } from '../components/ai/AssessmentPanel';
 
9
 
10
  export const AIAssistant: React.FC = () => {
11
  const currentUser = api.auth.getCurrentUser();
@@ -13,7 +14,7 @@ export const AIAssistant: React.FC = () => {
13
 
14
  const [loading, setLoading] = useState(true);
15
  const [isEnabled, setIsEnabled] = useState(false);
16
- const [activeTab, setActiveTab] = useState<'chat' | 'assessment'>('chat');
17
 
18
  useEffect(() => {
19
  checkStatus();
@@ -50,21 +51,26 @@ export const AIAssistant: React.FC = () => {
50
 
51
  return (
52
  <div className="h-full flex flex-col bg-slate-50 overflow-hidden relative">
53
- <div className="bg-white border-b border-gray-200 px-6 pt-4 flex justify-between shrink-0 shadow-sm z-10">
54
- <div className="flex gap-6">
55
  <button onClick={() => setActiveTab('chat')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'chat' ? 'border-blue-500 text-blue-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
56
  <Bot size={18} className={activeTab === 'chat' ? 'text-blue-500' : ''}/> AI 助教 (问答)
57
  </button>
58
  <button onClick={() => setActiveTab('assessment')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'assessment' ? 'border-purple-500 text-purple-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
59
  <Mic size={18} className={activeTab === 'assessment' ? 'text-purple-500' : ''}/> 口语/背诵测评
60
  </button>
 
 
 
61
  </div>
62
  </div>
63
 
64
  {activeTab === 'chat' ? (
65
  <ChatPanel currentUser={currentUser} />
66
- ) : (
67
  <AssessmentPanel currentUser={currentUser} />
 
 
68
  )}
69
  </div>
70
  );
 
2
  import React, { useState, useEffect } from 'react';
3
  import { api } from '../services/api';
4
  import { UserRole } from '../types';
5
+ import { Bot, Mic, Loader2, Briefcase } from 'lucide-react';
6
  import { AdminPanel } from '../components/ai/AdminPanel';
7
  import { ChatPanel } from '../components/ai/ChatPanel';
8
  import { AssessmentPanel } from '../components/ai/AssessmentPanel';
9
+ import { WorkAssistantPanel } from '../components/ai/WorkAssistantPanel';
10
 
11
  export const AIAssistant: React.FC = () => {
12
  const currentUser = api.auth.getCurrentUser();
 
14
 
15
  const [loading, setLoading] = useState(true);
16
  const [isEnabled, setIsEnabled] = useState(false);
17
+ const [activeTab, setActiveTab] = useState<'chat' | 'assessment' | 'work'>('chat');
18
 
19
  useEffect(() => {
20
  checkStatus();
 
51
 
52
  return (
53
  <div className="h-full flex flex-col bg-slate-50 overflow-hidden relative">
54
+ <div className="bg-white border-b border-gray-200 px-6 pt-4 flex justify-between shrink-0 shadow-sm z-10 overflow-x-auto">
55
+ <div className="flex gap-6 min-w-max">
56
  <button onClick={() => setActiveTab('chat')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'chat' ? 'border-blue-500 text-blue-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
57
  <Bot size={18} className={activeTab === 'chat' ? 'text-blue-500' : ''}/> AI 助教 (问答)
58
  </button>
59
  <button onClick={() => setActiveTab('assessment')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'assessment' ? 'border-purple-500 text-purple-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
60
  <Mic size={18} className={activeTab === 'assessment' ? 'text-purple-500' : ''}/> 口语/背诵测评
61
  </button>
62
+ <button onClick={() => setActiveTab('work')} className={`pb-3 text-sm font-bold border-b-2 transition-colors flex items-center gap-2 ${activeTab === 'work' ? 'border-indigo-500 text-indigo-600' : 'border-transparent text-gray-500 hover:text-gray-700'}`}>
63
+ <Briefcase size={18} className={activeTab === 'work' ? 'text-indigo-500' : ''}/> 工作助理 (文案/策划)
64
+ </button>
65
  </div>
66
  </div>
67
 
68
  {activeTab === 'chat' ? (
69
  <ChatPanel currentUser={currentUser} />
70
+ ) : activeTab === 'assessment' ? (
71
  <AssessmentPanel currentUser={currentUser} />
72
+ ) : (
73
+ <WorkAssistantPanel currentUser={currentUser} />
74
  )}
75
  </div>
76
  );
services/api.ts CHANGED
@@ -271,7 +271,7 @@ export const api = {
271
  },
272
 
273
  ai: {
274
- chat: (data: { text?: string, audio?: string, history?: { role: string, text?: string }[] }) => request('/ai/chat', { method: 'POST', body: JSON.stringify(data) }),
275
  evaluate: (data: { question: string, audio?: string, image?: string }) => request('/ai/evaluate', { method: 'POST', body: JSON.stringify(data) }),
276
  resetPool: () => request('/ai/reset-pool', { method: 'POST' }),
277
  getStats: () => request('/ai/stats'), // NEW Detailed Stats
@@ -283,4 +283,4 @@ export const api = {
283
  update: (id: string, data: Partial<Todo>) => request(`/todos/${id}`, { method: 'PUT', body: JSON.stringify(data) }),
284
  delete: (id: string) => request(`/todos/${id}`, { method: 'DELETE' }),
285
  }
286
- };
 
271
  },
272
 
273
  ai: {
274
+ chat: (data: { text?: string, audio?: string, history?: { role: string, text?: string }[], enableThinking?: boolean, overrideSystemPrompt?: string, disableAudio?: boolean }) => request('/ai/chat', { method: 'POST', body: JSON.stringify(data) }),
275
  evaluate: (data: { question: string, audio?: string, image?: string }) => request('/ai/evaluate', { method: 'POST', body: JSON.stringify(data) }),
276
  resetPool: () => request('/ai/reset-pool', { method: 'POST' }),
277
  getStats: () => request('/ai/stats'), // NEW Detailed Stats
 
283
  update: (id: string, data: Partial<Todo>) => request(`/todos/${id}`, { method: 'PUT', body: JSON.stringify(data) }),
284
  delete: (id: string) => request(`/todos/${id}`, { method: 'DELETE' }),
285
  }
286
+ };
types.ts CHANGED
@@ -400,6 +400,7 @@ export interface AIChatMessage {
400
  id: string;
401
  role: 'user' | 'model';
402
  text?: string;
 
403
  audio?: string;
404
  images?: string[];
405
  isAudioMessage?: boolean;
 
400
  id: string;
401
  role: 'user' | 'model';
402
  text?: string;
403
+ thought?: string; // New: For Deep Thinking Chain of Thought
404
  audio?: string;
405
  images?: string[];
406
  isAudioMessage?: boolean;