// stud-manager/components/ai/ChatPanel.tsx
import React, { useState, useRef, useEffect } from 'react';
import { AIChatMessage, User } from '../../types';
import { Bot, Mic, Square, Volume2, Send, Sparkles, Loader2, Image as ImageIcon, Trash2, X, StopCircle, Globe, Brain, Search, Copy, ChevronDown, ChevronRight, Camera } from 'lucide-react';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { blobToBase64, base64ToUint8Array, decodePCM, cleanTextForTTS, compressImage } from '../../utils/mediaHelpers';
import { Toast, ToastState } from '../Toast';
// Props accepted by ChatPanel.
interface ChatPanelProps {
  // Signed-in user; identity headers sent to /api/ai/chat are derived from it. Null when logged out.
  currentUser: User | null;
}
/**
 * AI chat panel: streaming text chat with optional image / voice attachments,
 * optional "deep thinking" and web-search modes, TTS playback (PCM stream with
 * browser speech-synthesis fallback), and localStorage history persistence.
 *
 * Server contract (POST /api/ai/chat): responds with an SSE-style byte stream of
 * `data: {json}\n\n` events whose `type` is one of:
 * text | thinking | search | status | audio | error.
 */
export const ChatPanel: React.FC<ChatPanelProps> = ({ currentUser }) => {
  // Chat history, lazily restored from localStorage; falls back to a welcome message.
  const [messages, setMessages] = useState<AIChatMessage[]>(() => {
    try {
      const saved = localStorage.getItem('ai_chat_history');
      return saved ? JSON.parse(saved) : [{
        id: 'welcome',
        role: 'model',
        text: '你好!我是你的 AI 智能助教。',
        timestamp: Date.now()
      }];
    } catch (e) {
      return [{ id: 'welcome', role: 'model', text: '你好!', timestamp: Date.now() }];
    }
  });

  // Input States
  const [textInput, setTextInput] = useState('');
  const [isRecording, setIsRecording] = useState(false);
  const [isMobile, setIsMobile] = useState(false);

  // Config States (toggles sent with each request)
  const [enableThinking, setEnableThinking] = useState(false);
  const [enableSearch, setEnableSearch] = useState(false);

  // Attachments
  const [selectedImages, setSelectedImages] = useState<File[]>([]);
  const [audioAttachment, setAudioAttachment] = useState<string | null>(null); // Base64

  const [isChatProcessing, setIsChatProcessing] = useState(false);
  const [playingMessageId, setPlayingMessageId] = useState<string | null>(null);
  const [toast, setToast] = useState<ToastState>({ show: false, message: '', type: 'success' });
  // Per-message expansion state for the "deep thinking" panel, keyed by message id.
  const [isThinkingExpanded, setIsThinkingExpanded] = useState<Record<string, boolean>>({});

  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const audioChunksRef = useRef<Blob[]>([]);
  const audioContextRef = useRef<AudioContext | null>(null);
  const currentSourceRef = useRef<AudioBufferSourceNode | null>(null);
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const scrollContainerRef = useRef<HTMLDivElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);
  const cameraInputRef = useRef<HTMLInputElement>(null);
  const abortControllerRef = useRef<AbortController | null>(null);

  // Initialize AudioContext & Check Mobile (once on mount); stop any playback on unmount.
  useEffect(() => {
    // @ts-ignore — webkitAudioContext is the legacy Safari constructor.
    const AudioCtor = window.AudioContext || window.webkitAudioContext;
    audioContextRef.current = new AudioCtor();
    const checkMobile = () => {
      const userAgent = navigator.userAgent || navigator.vendor || (window as any).opera;
      const mobile = /android|webos|iphone|ipad|ipod|blackberry|iemobile|opera mini/i.test(userAgent.toLowerCase());
      setIsMobile(mobile);
    };
    checkMobile();
    return () => {
      stopPlayback();
      window.speechSynthesis.cancel();
    };
  }, []);

  // Persist messages: keep the welcome message plus at most the 50 most recent others.
  useEffect(() => {
    try {
      const MAX_COUNT = 50;
      const welcome = messages.find(m => m.id === 'welcome');
      const others = messages.filter(m => m.id !== 'welcome');
      const recent = others.slice(-MAX_COUNT);
      const messagesToSave = (welcome ? [welcome] : []).concat(recent);
      localStorage.setItem('ai_chat_history', JSON.stringify(messagesToSave));
    } catch (e) {
      // Best-effort persistence; quota/serialization failures are non-fatal.
    }
  }, [messages]);

  // SMART SCROLL LOGIC: auto-scroll only when the user is already near the bottom,
  // or right after the user sends a message, so reading older messages isn't disturbed.
  useEffect(() => {
    if (!scrollContainerRef.current || !messagesEndRef.current) return;
    const container = scrollContainerRef.current;
    const { scrollTop, scrollHeight, clientHeight } = container;
    const isNearBottom = scrollHeight - scrollTop - clientHeight < 150;
    const lastMsg = messages[messages.length - 1];
    const isUserMsg = lastMsg?.role === 'user';
    if (isNearBottom || (isUserMsg && !isChatProcessing)) {
      messagesEndRef.current.scrollIntoView({ behavior: 'smooth', block: 'end' });
    }
  }, [messages, isChatProcessing, isThinkingExpanded]);

  // Stop both PCM (AudioBufferSource) and browser-TTS playback.
  const stopPlayback = () => {
    if (currentSourceRef.current) {
      try { currentSourceRef.current.stop(); } catch (e) {}
      currentSourceRef.current = null;
    }
    window.speechSynthesis.cancel();
    setPlayingMessageId(null);
  };

  // Abort the in-flight streaming request, if any.
  const handleStopGeneration = () => {
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
      abortControllerRef.current = null;
      setIsChatProcessing(false);
      setToast({ show: true, message: '已停止生成', type: 'error' });
    }
  };

  // Play a message: prefer the server-provided PCM audio; fall back to browser TTS on the text.
  const playAudio = async (msg: AIChatMessage) => {
    stopPlayback();
    if (msg.audio) {
      try {
        if (!audioContextRef.current) {
          // @ts-ignore — webkitAudioContext is the legacy Safari constructor.
          const AudioCtor = window.AudioContext || window.webkitAudioContext;
          audioContextRef.current = new AudioCtor();
        }
        // Autoplay policies suspend the context until a user gesture; resume here.
        if (audioContextRef.current?.state === 'suspended') {
          await audioContextRef.current.resume();
        }
        const bytes = base64ToUint8Array(msg.audio);
        const audioBuffer = decodePCM(bytes, audioContextRef.current!);
        const source = audioContextRef.current!.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(audioContextRef.current!.destination);
        source.onended = () => setPlayingMessageId(null);
        source.start(0);
        currentSourceRef.current = source;
        setPlayingMessageId(msg.id);
        return;
      } catch (e) {
        console.warn("PCM Playback failed, falling back to Browser TTS", e);
      }
    }
    if (!msg.text) return;
    const cleanText = cleanTextForTTS(msg.text);
    const utterance = new SpeechSynthesisUtterance(cleanText);
    utterance.lang = 'zh-CN';
    utterance.onstart = () => setPlayingMessageId(msg.id);
    utterance.onend = () => setPlayingMessageId(null);
    utterance.onerror = () => setPlayingMessageId(null);
    window.speechSynthesis.speak(utterance);
  };

  // Begin microphone capture. No-op while a recorded clip is already attached.
  const startRecording = async () => {
    if (audioAttachment) return;
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      audioChunksRef.current = [];
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) audioChunksRef.current.push(event.data);
      };
      mediaRecorder.start();
      setIsRecording(true);
    } catch (e) {
      setToast({ show: true, message: '无法访问麦克风', type: 'error' });
    }
  };

  // Finish capture: collect chunks into a webm blob, base64-encode it, release the mic.
  const stopRecording = () => {
    if (mediaRecorderRef.current && isRecording) {
      // FIX: register onstop BEFORE calling stop() so the handler is guaranteed
      // to be attached when the (asynchronous) stop event is dispatched.
      mediaRecorderRef.current.onstop = async () => {
        const audioBlob = new Blob(audioChunksRef.current, { type: 'audio/webm' });
        const base64 = await blobToBase64(audioBlob);
        setAudioAttachment(base64);
        mediaRecorderRef.current?.stream.getTracks().forEach(track => track.stop());
      };
      mediaRecorderRef.current.stop();
      setIsRecording(false);
    }
  };

  // Append newly picked files to the pending image attachments.
  const handleImageSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
    // FIX: snapshot the FileList synchronously. The functional updater runs
    // after this handler returns, when e.target may have been cleared/recycled.
    const files = e.target.files;
    if (files && files.length > 0) {
      const picked = Array.from(files);
      setSelectedImages(prev => [...prev, ...picked]);
    }
  };

  // Copy message text to the clipboard and report the actual outcome.
  const handleCopy = (text: string) => {
    // FIX: writeText returns a Promise that can reject (permissions/insecure
    // context); don't show a success toast unconditionally.
    navigator.clipboard.writeText(text)
      .then(() => setToast({ show: true, message: '已复制', type: 'success' }))
      .catch(() => setToast({ show: true, message: '复制失败', type: 'error' }));
  };

  // Send the current text/audio/images to the backend and stream the reply into a new model message.
  const handleSubmit = async () => {
    if ((!textInput.trim() && !audioAttachment && selectedImages.length === 0) || isChatProcessing) return;
    stopPlayback();

    // Capture and clear the composer state up-front so the UI resets immediately.
    const currentText = textInput;
    const currentAudio = audioAttachment;
    const currentImages = [...selectedImages];
    setTextInput('');
    setAudioAttachment(null);
    setSelectedImages([]);

    // Resume the AudioContext inside this user gesture so later TTS can autoplay.
    if (audioContextRef.current?.state === 'suspended') {
      try { await audioContextRef.current.resume(); } catch (e) {}
    }

    setIsChatProcessing(true);
    abortControllerRef.current = new AbortController();
    const newAiMsgId = crypto.randomUUID();
    const newUserMsgId = crypto.randomUUID();
    try {
      const base64Images = await Promise.all(currentImages.map(f => compressImage(f)));
      const newUserMsg: AIChatMessage = {
        id: newUserMsgId,
        role: 'user',
        // FIX: the old `currentText || (currentImages.length ? '' : '')` was a
        // dead expression — both branches were '' — and reduces to currentText.
        text: currentAudio ? '(语音消息)' : currentText,
        isAudioMessage: !!currentAudio,
        images: base64Images,
        timestamp: Date.now()
      };
      // Placeholder model message that the stream below fills in incrementally.
      const newAiMsg: AIChatMessage = { id: newAiMsgId, role: 'model', text: '', thought: '', timestamp: Date.now(), isSearching: enableSearch };
      setMessages(prev => [...prev, newUserMsg, newAiMsg]);
      setTimeout(() => {
        messagesEndRef.current?.scrollIntoView({ behavior: 'smooth', block: 'end' });
      }, 100);
      if (enableThinking) setIsThinkingExpanded(prev => ({ ...prev, [newAiMsgId]: true }));

      const response = await fetch('/api/ai/chat', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'x-user-username': currentUser?.username || '',
          'x-user-role': currentUser?.role || '',
          'x-school-id': currentUser?.schoolId || ''
        },
        body: JSON.stringify({
          text: currentText,
          audio: currentAudio,
          images: base64Images,
          // Note: `messages` here is the pre-send history (the closure predates
          // the setMessages above), which is exactly what the server expects.
          history: messages.filter(m => m.id !== 'welcome').map(m => ({ role: m.role, text: m.text })),
          enableThinking,
          enableSearch
        }),
        signal: abortControllerRef.current.signal
      });
      if (!response.ok) throw new Error(response.statusText);
      if (!response.body) throw new Error('No response body');

      // Parse the SSE-style stream: events are separated by blank lines ("\n\n");
      // the trailing partial event is kept in `buffer` across reads.
      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let aiTextAccumulated = '';
      let aiThoughtAccumulated = '';
      let buffer = '';
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const parts = buffer.split('\n\n');
        buffer = parts.pop() || '';
        for (const line of parts) {
          if (line.startsWith('data: ')) {
            const jsonStr = line.replace('data: ', '').trim();
            try {
              const data = JSON.parse(jsonStr);
              if (data.type === 'text') {
                // First answer token after thinking: auto-collapse the thought panel.
                if (aiTextAccumulated === '' && aiThoughtAccumulated !== '') {
                  setIsThinkingExpanded(prev => ({ ...prev, [newAiMsgId]: false }));
                }
                aiTextAccumulated += data.content;
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: aiTextAccumulated, isSearching: false } : m));
              }
              else if (data.type === 'thinking') {
                aiThoughtAccumulated += data.content;
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, thought: aiThoughtAccumulated } : m));
              }
              else if (data.type === 'search') {
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, isSearching: true } : m));
              }
              else if (data.type === 'status' && data.status === 'tts') {
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, isGeneratingAudio: true } : m));
              }
              else if (data.type === 'audio') {
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, audio: data.audio, isGeneratingAudio: false } : m));
                // Auto-play the synthesized audio for this reply.
                const tempMsg = { ...newAiMsg, text: aiTextAccumulated, audio: data.audio };
                playAudio(tempMsg);
              }
              else if (data.type === 'status' && data.ttsSkipped) {
                // Server skipped TTS; fall back to browser speech on the text.
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, isGeneratingAudio: false } : m));
                if (aiTextAccumulated) {
                  const tempMsg = { ...newAiMsg, text: aiTextAccumulated };
                  playAudio(tempMsg);
                }
              }
              else if (data.type === 'error') {
                setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: `⚠️ 错误: ${data.message}`, isGeneratingAudio: false, isSearching: false } : m));
              }
            } catch (e) {
              // Ignore malformed/partial JSON events; the stream continues.
            }
          }
        }
      }
    } catch (error: any) {
      // AbortError means the user pressed "stop" — not a failure.
      if (error.name !== 'AbortError') {
        setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, text: '抱歉,连接断开或发生错误。', isSearching: false } : m));
      }
    } finally {
      // Clear transient flags regardless of how the stream ended.
      setMessages(prev => prev.map(m => m.id === newAiMsgId ? { ...m, isSearching: false, isGeneratingAudio: false } : m));
      setIsChatProcessing(false);
      abortControllerRef.current = null;
    }
  };

  // Reset the conversation to just the welcome message (persistence effect will save it).
  const clearHistory = () => {
    setMessages([{ id: 'welcome', role: 'model', text: '你好!我是你的 AI 智能助教。', timestamp: Date.now() }]);
  };

  return (
    <div className="flex-1 flex flex-col max-w-4xl mx-auto w-full min-h-0 relative overflow-hidden h-full">
      {toast.show && <Toast message={toast.message} type={toast.type} onClose={()=>setToast({...toast, show: false})}/>}

      {/* Clear-history button, floated over the scroll area */}
      <div className="absolute top-2 right-4 z-10 flex gap-2">
        <button onClick={clearHistory} className="text-xs text-gray-400 hover:text-red-500 flex items-center gap-1 bg-white/80 p-1.5 rounded-lg border border-transparent hover:border-red-100 transition-all shadow-sm backdrop-blur">
          <Trash2 size={14}/> 清除
        </button>
      </div>

      {/* Message list */}
      <div ref={scrollContainerRef} className="flex-1 overflow-y-auto p-4 space-y-4 pb-4 custom-scrollbar">
        {messages.map(msg => (
          <div key={msg.id} className={`flex gap-3 ${msg.role === 'user' ? 'flex-row-reverse' : ''}`}>
            <div className={`w-10 h-10 rounded-full flex items-center justify-center shrink-0 ${msg.role === 'model' ? 'bg-blue-100 text-blue-600' : 'bg-gray-200 text-gray-600'}`}>
              {msg.role === 'model' ? <Sparkles size={20}/> : <Bot size={20}/>}
            </div>
            <div className={`max-w-[85%] flex flex-col items-start ${msg.role === 'user' ? 'items-end' : ''}`}>
              {/* Collapsible "deep thinking" transcript */}
              {msg.role === 'model' && msg.thought && (
                <div className="w-full bg-purple-50 rounded-xl border border-purple-100 overflow-hidden mb-2 max-w-full">
                  <button
                    onClick={() => setIsThinkingExpanded(prev => ({ ...prev, [msg.id]: !prev[msg.id] }))}
                    className="w-full px-4 py-2 flex items-center gap-2 text-xs font-bold text-purple-700 bg-purple-100/50 hover:bg-purple-100 transition-colors"
                  >
                    <Brain size={14}/>
                    <span>深度思考过程</span>
                    <div className="ml-auto text-xs opacity-50">{isThinkingExpanded[msg.id] ? '收起' : '展开'}</div>
                  </button>
                  {isThinkingExpanded[msg.id] && (
                    <div className="p-4 text-xs text-purple-800 whitespace-pre-wrap leading-relaxed border-t border-purple-100 font-mono bg-white/50">
                      {msg.thought}
                    </div>
                  )}
                </div>
              )}
              {/* Web-search-in-progress indicator */}
              {msg.role === 'model' && msg.isSearching && (
                <div className="flex items-center gap-2 bg-blue-50 text-blue-600 px-3 py-2 rounded-xl mb-2 text-xs border border-blue-100 animate-pulse w-fit">
                  <Globe size={14} className="animate-spin"/>
                  <span>正在联网搜索相关信息...</span>
                </div>
              )}
              {/* Message bubble */}
              <div className={`p-3 rounded-2xl text-sm overflow-hidden shadow-sm ${msg.role === 'user' ? 'bg-blue-600 text-white rounded-tr-none' : 'bg-white border border-gray-200 text-gray-800 rounded-tl-none'}`}>
                {msg.images && msg.images.length > 0 && (
                  <div className="flex gap-2 mb-2 flex-wrap">
                    {msg.images.map((img, i) => (
                      <img key={i} src={`data:image/jpeg;base64,${img}`} className="max-w-[150px] max-h-[150px] rounded-lg object-cover border border-white/20" alt="sent" />
                    ))}
                  </div>
                )}
                <div className="markdown-body"><ReactMarkdown remarkPlugins={[remarkGfm]}>{msg.text || ''}</ReactMarkdown></div>
                {/* "Thinking..." spinner while the reply is still empty */}
                {msg.role === 'model' && !msg.text && !msg.isSearching && isChatProcessing && (
                  <div className="flex items-center gap-2 text-gray-400 py-1">
                    <Loader2 className="animate-spin" size={14}/><span className="text-xs">思考中...</span>
                  </div>
                )}
                {msg.isGeneratingAudio && (
                  <div className="flex items-center gap-2 text-blue-600 py-1 mt-1 bg-blue-50 px-2 rounded-lg w-fit animate-pulse">
                    <Loader2 className="animate-spin" size={12}/>
                    <span className="text-[10px] font-bold">正在合成语音...</span>
                  </div>
                )}
                {/* Read-aloud / copy actions on finished model messages */}
                {(msg.role === 'model' && (msg.text || msg.audio) && !isChatProcessing && !msg.isGeneratingAudio) && (
                  <div className="flex gap-2 mt-2">
                    <button
                      onClick={() => playingMessageId === msg.id ? stopPlayback() : playAudio(msg)}
                      className={`flex items-center gap-2 text-xs px-3 py-1.5 rounded-full border transition-colors w-fit ${
                        playingMessageId === msg.id
                          ? 'bg-blue-100 text-blue-600 border-blue-200 animate-pulse'
                          : 'bg-gray-50 text-gray-600 hover:bg-gray-100 border border-gray-200'
                      }`}
                    >
                      {playingMessageId === msg.id ? <Square size={14} fill="currentColor"/> : <Volume2 size={14}/>}
                      {playingMessageId === msg.id ? '朗读中...' : '朗读'}
                    </button>
                    <button
                      onClick={() => handleCopy(msg.text || '')}
                      className="flex items-center gap-2 text-xs px-3 py-1.5 rounded-full border bg-gray-50 text-gray-600 hover:bg-gray-100 border-gray-200 transition-colors w-fit"
                      title="复制"
                    >
                      <Copy size={14}/>
                    </button>
                  </div>
                )}
              </div>
            </div>
          </div>
        ))}
        <div ref={messagesEndRef} />
      </div>

      {/* Composer */}
      <div className="p-4 bg-white border-t border-gray-200 shrink-0 z-20">
        <div className="max-w-4xl mx-auto flex flex-col gap-2">
          {/* Mode toggles */}
          <div className="flex justify-between items-center px-1">
            <div className="flex gap-3">
              <button
                onClick={() => setEnableThinking(!enableThinking)}
                className={`flex items-center gap-1 text-xs px-2 py-1 rounded-md border transition-all ${enableThinking ? 'bg-purple-50 text-purple-600 border-purple-200' : 'bg-gray-50 text-gray-500 border-transparent hover:bg-gray-100'}`}
              >
                <Brain size={14} className={enableThinking ? "fill-current" : ""}/> 深度思考
              </button>
              <button
                onClick={() => setEnableSearch(!enableSearch)}
                className={`flex items-center gap-1 text-xs px-2 py-1 rounded-md border transition-all ${enableSearch ? 'bg-blue-50 text-blue-600 border-blue-200' : 'bg-gray-50 text-gray-500 border-transparent hover:bg-gray-100'}`}
              >
                <Globe size={14}/> 联网搜索
              </button>
            </div>
          </div>

          {/* Pending attachments preview */}
          {(selectedImages.length > 0 || audioAttachment) && (
            <div className="flex gap-2 overflow-x-auto pb-2">
              {selectedImages.map((file, idx) => (
                <div key={idx} className="relative w-16 h-16 shrink-0 group rounded-lg overflow-hidden border border-gray-200">
                  {/* NOTE(review): createObjectURL in render leaks a URL per render;
                      consider memoizing per file and revoking on removal/unmount. */}
                  <img src={URL.createObjectURL(file)} className="w-full h-full object-cover" />
                  <button onClick={() => setSelectedImages(prev => prev.filter((_, i) => i !== idx))} className="absolute top-0.5 right-0.5 bg-black/50 text-white rounded-full p-0.5 hover:bg-red-500"><X size={12}/></button>
                </div>
              ))}
              {audioAttachment && (
                <div className="relative h-16 px-3 bg-blue-50 border border-blue-200 rounded-lg flex items-center justify-center shrink-0 min-w-[80px]">
                  <div className="text-blue-600 flex flex-col items-center">
                    <Volume2 size={20}/>
                    <span className="text-[10px] font-bold">语音已录制</span>
                  </div>
                  <button onClick={() => setAudioAttachment(null)} className="absolute top-0.5 right-0.5 bg-gray-300 text-white rounded-full p-0.5 hover:bg-red-500"><X size={12}/></button>
                </div>
              )}
            </div>
          )}

          {/* Input row */}
          <div className="flex items-end gap-2 bg-gray-100 p-2 rounded-2xl border border-gray-200">
            <div className="flex items-center">
              <button onClick={() => fileInputRef.current?.click()} className="p-2 text-gray-500 hover:bg-white hover:text-blue-600 rounded-full transition-colors shrink-0" title="相册/文件">
                <ImageIcon size={22}/>
              </button>
              {isMobile && (
                <button onClick={() => cameraInputRef.current?.click()} className="p-2 text-gray-500 hover:bg-white hover:text-blue-600 rounded-full transition-colors shrink-0" title="拍照">
                  <Camera size={22}/>
                </button>
              )}
            </div>
            {/* Hidden pickers; value is cleared on click so re-selecting the same file fires onChange. */}
            <input
              type="file"
              multiple
              accept="image/*"
              ref={fileInputRef}
              className="hidden"
              onChange={handleImageSelect}
              onClick={(e) => (e.currentTarget.value = '')}
            />
            {isMobile && (
              <input
                type="file"
                accept="image/*"
                capture="environment"
                ref={cameraInputRef}
                className="hidden"
                onChange={handleImageSelect}
                onClick={(e) => (e.currentTarget.value = '')}
              />
            )}
            <div className="flex-1 min-h-[40px] flex items-center">
              {audioAttachment ? (
                <div className="w-full text-center text-sm text-gray-400 italic bg-transparent">
                  语音消息已就绪 (文字输入已禁用)
                </div>
              ) : (
                <textarea
                  className="w-full bg-transparent border-none outline-none text-sm resize-none max-h-24 py-2"
                  placeholder="输入文字..."
                  rows={1}
                  value={textInput}
                  onChange={e => setTextInput(e.target.value)}
                  onKeyDown={e => {
                    // Enter sends; Shift+Enter inserts a newline.
                    if (e.key === 'Enter' && !e.shiftKey) {
                      e.preventDefault();
                      handleSubmit();
                    }
                  }}
                />
              )}
            </div>
            {/* Right-hand action: stop / send / push-to-talk, depending on state */}
            {isChatProcessing ? (
              <button onClick={handleStopGeneration} className="p-2 bg-red-500 text-white rounded-full hover:bg-red-600 transition-all shrink-0 shadow-sm animate-pulse" title="停止生成">
                <Square size={20} fill="currentColor"/>
              </button>
            ) : ((textInput.trim() || audioAttachment || selectedImages.length > 0) ? (
              <button
                onClick={handleSubmit}
                // FIX: Add onMouseDown/onTouchEnd to prevent focus loss issues on mobile
                onMouseDown={(e) => e.preventDefault()}
                onTouchEnd={(e) => { e.preventDefault(); handleSubmit(); }}
                type="button"
                className="p-2 bg-blue-600 text-white rounded-full hover:bg-blue-700 transition-all shrink-0 shadow-sm disabled:opacity-50"
              >
                <Send size={20}/>
              </button>
            ) : (
              <button
                onMouseDown={startRecording}
                onMouseUp={stopRecording}
                onTouchStart={startRecording}
                onTouchEnd={stopRecording}
                onContextMenu={(e) => { e.preventDefault(); e.stopPropagation(); }}
                className={`p-2 rounded-full transition-all shrink-0 select-none ${isRecording ? 'bg-red-500 text-white scale-110 shadow-lg ring-4 ring-red-200' : 'bg-gray-200 text-gray-600 hover:bg-gray-300'}`}
              >
                {isRecording ? <StopCircle size={22}/> : <Mic size={22}/>}
              </button>
            ))}
          </div>
          {isRecording && <div className="text-center text-xs text-red-500 font-bold animate-pulse">正在录音... 松开发送到输入框</div>}
        </div>
      </div>
    </div>
  );
};