import React, { useState, useRef, useEffect, useCallback } from 'react';
import { useParams, useNavigate } from 'react-router-dom';
import { useAuth } from '../contexts/AuthContext';
import client from '../api/client';
import {
Box,
Typography,
TextField,
IconButton,
Avatar,
CircularProgress,
Button,
Chip,
Tooltip,
Fade,
Dialog
} from '@mui/material';
import {
Send as SendIcon,
ArrowBack as BackIcon,
Mic as MicIcon,
Image as ImageIcon,
HelpOutline as WhyIcon,
Diamond as DiamondIcon,
VolumeUp as SpeakerIcon,
Pause as PauseIcon,
Stop as StopIcon,
Close as CloseIcon,
ZoomIn as ZoomInIcon
} from '@mui/icons-material';
import { sendMessage, sendMultimodalMessage, getAgent } from '../api/client';
import ExplainabilityModal from '../components/ExplainabilityModal';
import AgentSwitcher from '../components/AgentSwitcher';
import AudioRecorder from '../components/AudioRecorder';
// Chat screen for a single agent: renders the message thread, handles
// text/image/audio input, TTS playback with inline word highlighting,
// and the explainability modal. Mounted by the router with :agentName.
function Chat() {
const { agentName } = useParams();
const navigate = useNavigate();
// Anchor element scrolled into view whenever messages change.
const messagesEndRef = useRef(null);
// Hidden file inputs for image and audio attachments.
const fileInputRef = useRef(null);
const audioInputRef = useRef(null);
// State
const { user } = useAuth();
const [messages, setMessages] = useState([]); // full conversation (user + assistant turns)
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false); // true while a send is in flight
const [agent, setAgent] = useState(null); // agent metadata from getAgent()
const [selectedFile, setSelectedFile] = useState(null); // staged image/file attachment
const [selectedAudio, setSelectedAudio] = useState(null); // staged audio attachment
const [imagePreview, setImagePreview] = useState(null); // data URL for the staged image
const [isRecording, setIsRecording] = useState(false); // live mic recording mode
// TTS state for inline word highlighting
const [activeTTSIndex, setActiveTTSIndex] = useState(-1); // message index being spoken, -1 = none
const [currentWordIndex, setCurrentWordIndex] = useState(-1); // highlighted word within that message
const [isTTSPlaying, setIsTTSPlaying] = useState(false);
const utteranceRef = useRef(null);
// Settings loaded from user profile
const [autoPlayTTS, setAutoPlayTTS] = useState(false);
const [ttsProvider, setTTSProvider] = useState('elevenlabs');
const [explainabilityModal, setExplainabilityModal] = useState({ open: false, data: null });
// Lightbox state
const [lightboxOpen, setLightboxOpen] = useState(false);
const [lightboxImage, setLightboxImage] = useState(null);
// Load preferences
// Sync TTS preferences from the authenticated user's profile whenever it changes.
useEffect(() => {
if (user?.preferences) {
if (user.preferences.auto_play_tts !== undefined) setAutoPlayTTS(user.preferences.auto_play_tts);
if (user.preferences.tts_provider) setTTSProvider(user.preferences.tts_provider);
}
}, [user]);
// Fetch the agent's metadata and seed the thread with a welcome message.
// Memoised on agentName so the mount effect below re-runs when the route
// parameter changes. Uses the `useCallback` imported at the top of the file
// (was `React.useCallback`, inconsistent with every other hook in this file).
const loadAgent = useCallback(async () => {
  try {
    const response = await getAgent(agentName);
    setAgent(response);
    // Add welcome message; fall back to 'general' when the agent metadata
    // carries no prompt analysis.
    const domain = response.metadata?.prompt_analysis?.domain || 'general';
    setMessages([{
      role: 'assistant',
      content: `Hello! I'm your **${domain}** assistant. I'm ready to answer questions based on my knowledge base.`,
      timestamp: new Date(),
    }]);
  } catch (err) {
    // Best-effort: the chat UI still renders without agent metadata.
    console.error('Failed to load agent:', err);
  }
}, [agentName]);
// Load agent info on mount
useEffect(() => {
loadAgent();
}, [loadAgent]);
// Consolidate scroll effects - with small delay for better timing
// The 100ms timeout lets the new message paint before scrolling; the cleanup
// cancels a pending scroll if messages/loading change again quickly.
useEffect(() => {
const timer = setTimeout(() => {
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}, 100);
return () => clearTimeout(timer);
}, [messages, loading]);
// TTS functions
// Speak `text` with the Web Speech API and drive inline word highlighting
// for the message at `msgIndex` via onboundary events.
// Fix: the original called window.speechSynthesis.cancel() *before* checking
// whether the API exists, then feature-tested separately; the support check
// now runs first and the duplicate check is gone. Behavior is unchanged on
// both supported and unsupported browsers.
const handlePlayTTSInline = useCallback((msgIndex, text) => {
  if (!('speechSynthesis' in window)) {
    alert('Text-to-speech is not supported in your browser');
    return;
  }
  // Stop any utterance that is already playing.
  window.speechSynthesis.cancel();
  const utterance = new SpeechSynthesisUtterance(text);
  utteranceRef.current = utterance;
  utterance.rate = 1.0;
  utterance.pitch = 1.0;
  const words = text.split(/\s+/).filter((w) => w.length > 0);
  utterance.onboundary = (event) => {
    if (event.name !== 'word') return;
    // Map the utterance's character offset back to a word index.
    // NOTE(review): assumes words are separated by exactly one whitespace
    // character; runs of spaces/newlines will drift the highlight — confirm.
    const charIndex = event.charIndex;
    let wordIdx = 0;
    let charCount = 0;
    for (let i = 0; i < words.length; i++) {
      if (charCount >= charIndex) {
        wordIdx = i;
        break;
      }
      charCount += words[i].length + 1; // word + one separator character
    }
    setCurrentWordIndex(wordIdx);
  };
  utterance.onstart = () => {
    setActiveTTSIndex(msgIndex);
    setIsTTSPlaying(true);
    setCurrentWordIndex(0);
  };
  // Both normal completion and errors reset the highlighting state.
  const resetState = () => {
    setActiveTTSIndex(-1);
    setIsTTSPlaying(false);
    setCurrentWordIndex(-1);
  };
  utterance.onend = resetState;
  utterance.onerror = resetState;
  window.speechSynthesis.speak(utterance);
}, []);
// Pause the current utterance (the browser keeps its position for resume).
const handlePauseTTS = useCallback(() => {
  const synth = window.speechSynthesis;
  if (!synth) return;
  synth.pause();
  setIsTTSPlaying(false);
}, []);
// Resume a previously paused utterance.
const handleResumeTTS = useCallback(() => {
  const synth = window.speechSynthesis;
  if (!synth) return;
  synth.resume();
  setIsTTSPlaying(true);
}, []);
// Cancel speech entirely and clear all highlight state. The state reset runs
// even when the API is unavailable so the UI never sticks in "playing".
const handleStopTTS = useCallback(() => {
  const synth = window.speechSynthesis;
  if (synth) synth.cancel();
  setActiveTTSIndex(-1);
  setIsTTSPlaying(false);
  setCurrentWordIndex(-1);
}, []);
// Render message content with word highlighting
// Render one message's text, with per-word highlight spans while it is being
// spoken by TTS.
// NOTE(review): the JSX markup in this function appears to have been stripped
// from this copy of the file (the bare `return ;` and empty `return ( );`
// below are not valid as-is) — recover the original elements from version
// control before editing this body.
const renderMessageContent = (content, msgIndex) => {
const isThisMessagePlaying = activeTTSIndex === msgIndex;
// Fast path: message not being spoken — no per-word spans needed.
if (!isThisMessagePlaying || currentWordIndex < 0) {
return ;
}
// Render with word highlighting
// Split on whitespace but keep the separators so spacing is preserved.
const words = content.split(/(\s+)/);
let wordCount = 0;
return (
{words.map((word, idx) => {
if (word.trim().length === 0) {
return {word};
}
// wordCount indexes only non-whitespace tokens, matching the word index
// computed from TTS boundary events in handlePlayTTSInline.
const thisWordIndex = wordCount;
wordCount++;
const isCurrentWord = thisWordIndex === currentWordIndex;
const isPastWord = thisWordIndex < currentWordIndex;
// Handle markdown in word
const formattedWord = formatMarkdown(word);
return (
);
})}
);
};
// Submit the current input (plus any staged image/audio attachment) to the
// agent and append both the user turn and the assistant's reply to the
// thread. On failure an error bubble is appended instead; staged attachments
// are only cleared after a successful multimodal send.
const handleSend = async () => {
  const text = input;
  const hasAttachment = Boolean(selectedFile || selectedAudio);
  if (!text.trim() && !hasAttachment) return;

  setMessages((prev) => [
    ...prev,
    {
      role: 'user',
      content: text,
      file: selectedFile?.name,
      audio: selectedAudio?.name,
      timestamp: new Date(),
      multimodal_data: {
        image_url: imagePreview // Store preview URL for display
      }
    },
  ]);
  setInput('');
  setLoading(true);

  try {
    let response;
    if (hasAttachment) {
      response = await sendMultimodalMessage(
        agentName,
        text,
        selectedAudio,
        selectedFile,
        true, // include TTS
        ttsProvider
      );
      // Attachments are consumed by a successful send.
      setSelectedFile(null);
      setSelectedAudio(null);
      setImagePreview(null);
    } else {
      response = await sendMessage(agentName, text, true, ttsProvider);
    }
    setMessages((prev) => [
      ...prev,
      {
        role: 'assistant',
        content: response.answer,
        confidence: response.confidence,
        inDomain: response.in_domain,
        explainability: response.explainability,
        tts: response.tts,
        timestamp: new Date(),
      },
    ]);
  } catch (err) {
    const detail = err.response?.data?.detail || err.message;
    setMessages((prev) => [
      ...prev,
      {
        role: 'assistant',
        content: `Sorry, I encountered an error: ${detail}`,
        isError: true,
        timestamp: new Date(),
      },
    ]);
  } finally {
    setLoading(false);
  }
};
// Enter submits the message; Shift+Enter falls through to insert a newline.
const handleKeyPress = (e) => {
  if (e.key !== 'Enter' || e.shiftKey) return;
  e.preventDefault();
  handleSend();
};
// Stage a file attachment from the hidden input and, for images, build a
// data-URL preview for the preview card above the input box.
// Fix: previously a stale image preview survived when the user picked a
// non-image file after an image — the preview is now cleared in that case.
const handleFileSelect = (e) => {
  const file = e.target.files?.[0];
  if (!file) return;
  setSelectedFile(file);
  if (file.type.startsWith('image/')) {
    // Create image preview asynchronously once the file is read.
    const reader = new FileReader();
    reader.onloadend = () => {
      setImagePreview(reader.result);
    };
    reader.readAsDataURL(file);
  } else {
    // Non-image selection: drop any preview left over from a prior image.
    setImagePreview(null);
  }
};
// Stage an audio attachment chosen from the hidden audio file input.
const handleAudioSelect = (e) => {
  const picked = e.target.files[0];
  if (picked) setSelectedAudio(picked);
};
// Append a speech-to-text transcript to the input box (space-separated when
// text is already present) and leave recording mode.
const handleTranscript = (transcript) => {
  setInput((prev) => (prev ? `${prev} ${transcript}` : transcript));
  setIsRecording(false);
};
// Leave recording mode without taking a transcript.
const handleCancelRecording = () => {
  setIsRecording(false);
};
// Open the explainability modal for one assistant message's reasoning data.
const openExplainability = (data) => {
  setExplainabilityModal({ open: true, data });
};
// Strip lightweight markdown (bold/italic markers) from `text` so plain words
// can be rendered/spoken individually. Returns '' for null/undefined/empty.
// Fix: the final replacement string contained a raw line break inside single
// quotes — a syntax error in JavaScript. It now uses the '\n' escape, which
// preserves the literal newline the source shows as the replacement.
// NOTE(review): replacing '\n' with '\n' is a no-op — the original may have
// substituted markup (e.g. a <br/>) that was lost from this copy; confirm
// against version control.
const formatMarkdown = (text) => {
  if (!text) return '';
  return text
    .replace(/\*\*(.*?)\*\*/g, '$1')
    .replace(/\*(.*?)\*/g, '$1')
    .replace(/\n/g, '\n');
};
// True when `filename` ends in a recognised raster-image extension
// (case-insensitive). Falsy filenames yield false.
const isImageFile = (filename) => {
  if (!filename) return false;
  const dot = filename.lastIndexOf('.');
  const ext = filename.slice(dot + 1).toLowerCase();
  return ['jpg', 'jpeg', 'png', 'gif', 'webp'].includes(ext);
};
// Render: animated background, glass header (back button, agent name, TTS
// auto-play toggle), scrollable message list (image/audio attachments,
// confidence footer, explain-reasoning button, per-message TTS controls),
// floating input area (image preview card, recorder, attachment buttons,
// text field, send button), explainability modal, and image lightbox.
// NOTE(review): the JSX element tags in this entire return expression appear
// to have been stripped from this copy of the file — only attribute lines and
// expression children remain, so this block is not valid as-is. Recover the
// original markup from version control before editing. Left byte-identical.
// NOTE(review): `onKeyPress` (used on the text field below) is deprecated in
// React/DOM — consider `onKeyDown` when the markup is restored.
return (
{/* Animated Background */}
{/* Header - Glassmorphism */}
navigate('/dashboard')}
sx={{
bgcolor: 'rgba(255, 255, 255, 0.05)',
'&:hover': { bgcolor: 'rgba(255, 255, 255, 0.1)' },
borderRadius: '12px'
}}
>
{agentName.replace(/_/g, ' ')}
MEXAR ULTIMATE | {agent?.domain?.toUpperCase() || 'ASSISTANT'}
setAutoPlayTTS(!autoPlayTTS)}
sx={{
bgcolor: autoPlayTTS ? 'rgba(139, 92, 246, 0.2)' : 'rgba(255, 255, 255, 0.05)',
'&:hover': { bgcolor: 'rgba(139, 92, 246, 0.3)' },
borderRadius: '12px'
}}
>
{/* Messages Area */}
{messages.map((msg, index) => (
{msg.role === 'assistant' && (
)}
{/* Image Attachment - ABOVE TEXT like Claude */}
{msg.file && (isImageFile(msg.file) || (msg.multimodal_data && msg.multimodal_data.image_url)) && (
{
const url = msg.multimodal_data?.image_url;
if (url) {
setLightboxImage(url);
setLightboxOpen(true);
}
}}
sx={{
mb: 1.5,
cursor: 'pointer',
borderRadius: '8px',
overflow: 'hidden',
position: 'relative',
'&:hover .zoom-overlay': { opacity: 1 }
}}
>
)}
{/* Text Content */}
{renderMessageContent(msg.content, index)}
{/* Audio Attachment - shown as chip below text */}
{msg.audio && (
}
label={msg.audio}
sx={{
borderColor: msg.role === 'user' ? 'rgba(255,255,255,0.3)' : 'default',
color: msg.role === 'user' ? 'white' : 'default',
bgcolor: 'rgba(255,255,255,0.1)'
}}
variant="outlined"
/>
)}
{/* Metadata footer for assistant */}
{msg.role === 'assistant' && !loading && (
{msg.confidence !== undefined && (
0.7 ? '#22c55e' : '#f59e0b',
boxShadow: msg.confidence > 0.7 ? '0 0 8px #22c55e' : 'none'
}} />
{(msg.confidence * 100).toFixed(0)}% Confidence
)}
{msg.explainability && (
}
onClick={() => openExplainability(msg.explainability)}
sx={{
minWidth: 0, p: '2px 8px',
textTransform: 'none',
fontSize: '0.7rem',
color: 'var(--primary-light)',
bgcolor: 'rgba(139, 92, 246, 0.1)',
borderRadius: '6px',
'&:hover': { bgcolor: 'rgba(139, 92, 246, 0.2)' }
}}
>
Explain reasoning
)}
{msg.timestamp.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}
{/* Listen/Pause/Stop Button */}
{activeTTSIndex === index ? (
{isTTSPlaying ? (
) : (
)}
) : (
handlePlayTTSInline(index, msg.content)}
sx={{
ml: 1,
color: 'text.secondary',
opacity: 0.7,
'&:hover': { opacity: 1, color: 'var(--primary)' }
}}
>
)}
)}
))}
{/* Typing Indicator */}
{loading && (
)}
{/* Floating Input Area */}
{/* Image Preview Card - ABOVE INPUT like screenshot #3 */}
{imagePreview && (
{
setSelectedFile(null);
setImagePreview(null);
}}
sx={{
position: 'absolute',
top: 4,
right: 4,
bgcolor: 'rgba(0, 0, 0, 0.7)',
border: '1px solid rgba(255, 255, 255, 0.2)',
'&:hover': { bgcolor: 'rgba(0, 0, 0, 0.9)' },
width: 28,
height: 28,
zIndex: 10
}}
size="small"
>
{
setLightboxImage(imagePreview);
setLightboxOpen(true);
}}
style={{
maxWidth: '100%',
maxHeight: '200px',
borderRadius: '8px',
objectFit: 'contain',
cursor: 'pointer',
display: 'block'
}}
/>
)}
{/* Input Box */}
{/* Live Audio Recording */}
{isRecording ? (
) : (
{/* Audio Chip Preview */}
{selectedAudio && (
}
label={selectedAudio.name.length > 12 ? selectedAudio.name.slice(0, 12) + '...' : selectedAudio.name}
onDelete={() => setSelectedAudio(null)}
sx={{
bgcolor: 'rgba(6, 182, 212, 0.2)',
borderRadius: '8px',
height: 28,
'& .MuiChip-deleteIcon': {
color: 'rgba(255,255,255,0.7)',
'&:hover': { color: 'white' }
}
}}
/>
)}
{/* File/Image upload button */}
fileInputRef.current?.click()}
sx={{
color: imagePreview ? 'var(--primary)' : 'text.secondary',
'&:hover': { bgcolor: 'rgba(255,255,255,0.05)' }
}}
>
setIsRecording(true)}
sx={{
color: selectedAudio ? 'var(--primary)' : 'text.secondary',
'&:hover': { bgcolor: 'rgba(255,255,255,0.05)' }
}}
>
)}
{/* Text Field */}
setInput(e.target.value)}
onKeyPress={handleKeyPress}
disabled={loading}
variant="standard"
InputProps={{ disableUnderline: true, style: { fontSize: '16px' } }}
sx={{ ml: 1 }}
/>
{/* Send Button */}
{loading ? : }
{/* Modal */}
setExplainabilityModal({ open: false, data: null })}
/>
{/* Image Preview Lightbox */}
);
}
// Default export consumed by the app's router (route provides :agentName).
export default Chat;