// components/ChatInterface.tsx
import React, { useState, useRef, useEffect } from 'react';
import { Send, Image as ImageIcon, Loader2, Bot, User, X, Leaf, Globe, Search, AlertTriangle, Camera } from 'lucide-react';
import { Message, LoadingState } from '../types';
import { geminiService } from '../services/geminiService';
/**
 * ChatInterface — full-screen chat UI for "Budtender AI".
 *
 * Renders a message history, a text/image input row, and an optional
 * camera-capture modal. Messages (with an optional attached image) are sent
 * to `geminiService.sendMessage`; responses may carry grounding metadata
 * (web sources) which is rendered as links under the reply bubble.
 *
 * No props; all state is local to the component.
 */
const ChatInterface: React.FC = () => {
  // Conversation history, seeded with a static welcome message.
  const [messages, setMessages] = useState<Message[]>([
    {
      id: 'welcome',
      role: 'model',
      text: "Welcome to Budtender AI. I can help you find strain information or analyze your cannabis products. Upload a photo or use your camera to show me a strain!"
    }
  ]);
  const [inputValue, setInputValue] = useState('');
  // The image is held both as a File (selectedImage) and as a data URL
  // (imagePreview); the data URL is what is actually sent to the API.
  const [selectedImage, setSelectedImage] = useState<File | null>(null);
  const [imagePreview, setImagePreview] = useState<string | null>(null);
  const [loadingState, setLoadingState] = useState<LoadingState>(LoadingState.IDLE);

  // Camera state
  const [isCameraOpen, setIsCameraOpen] = useState(false);
  const videoRef = useRef<HTMLVideoElement>(null);
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const streamRef = useRef<MediaStream | null>(null);

  const messagesEndRef = useRef<HTMLDivElement>(null);
  const fileInputRef = useRef<HTMLInputElement>(null);

  // Unique-enough message ids: a bare Date.now() for user and Date.now()+1
  // for model can collide across rapid sends (duplicate React keys), so we
  // suffix with the role.
  const makeMessageId = (suffix: string): string => `${Date.now()}-${suffix}`;

  // Keep the newest message in view whenever history or loading changes.
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [messages, loadingState]);

  // Release the camera stream when the component unmounts.
  useEffect(() => {
    return () => {
      stopCamera();
    };
  }, []);

  /**
   * Open the camera modal and attach a MediaStream to the <video> element.
   * Prefers the back camera on mobile. Falls back to an alert on permission
   * denial or when the API is unavailable (e.g. insecure context).
   */
  const startCamera = async () => {
    // navigator.mediaDevices is undefined on insecure (non-HTTPS) origins;
    // guard so we alert instead of throwing a TypeError.
    if (!navigator.mediaDevices?.getUserMedia) {
      alert("Could not access camera. Please allow permissions.");
      return;
    }
    // Open the modal first so the <video> element mounts before the stream
    // resolves (the await yields, letting React render the modal).
    setIsCameraOpen(true);
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        video: { facingMode: 'environment' } // Prefer back camera on mobile
      });
      streamRef.current = stream;
      if (videoRef.current) {
        videoRef.current.srcObject = stream;
      }
    } catch (err: unknown) {
      console.error("Error accessing camera:", err);
      setIsCameraOpen(false);
      alert("Could not access camera. Please allow permissions.");
    }
  };

  /** Stop all camera tracks and close the modal. Safe to call repeatedly. */
  const stopCamera = () => {
    if (streamRef.current) {
      streamRef.current.getTracks().forEach(track => track.stop());
      streamRef.current = null;
    }
    setIsCameraOpen(false);
  };

  /**
   * Grab the current video frame onto the hidden canvas, store it as the
   * pending image (preview data URL + File), then close the camera.
   */
  const capturePhoto = () => {
    const video = videoRef.current;
    const canvas = canvasRef.current;
    if (!video || !canvas) return;
    // Before the first frame arrives videoWidth/Height are 0; capturing
    // then would produce an empty canvas, so bail out.
    if (video.videoWidth === 0 || video.videoHeight === 0) return;

    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    const context = canvas.getContext('2d');
    if (!context) return;

    context.drawImage(video, 0, 0, canvas.width, canvas.height);
    const dataUrl = canvas.toDataURL('image/jpeg');
    setImagePreview(dataUrl);
    // toBlob gives us the Blob directly — no need for the extra
    // fetch(dataUrl) round-trip the previous version used.
    canvas.toBlob(blob => {
      if (blob) {
        setSelectedImage(new File([blob], "camera-capture.jpg", { type: "image/jpeg" }));
      }
    }, 'image/jpeg');
    stopCamera();
  };

  /** Handle a file chosen via the hidden <input type="file">. */
  const handleImageSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
    if (e.target.files && e.target.files[0]) {
      const file = e.target.files[0];
      setSelectedImage(file);
      const reader = new FileReader();
      reader.onloadend = () => {
        setImagePreview(reader.result as string);
      };
      reader.readAsDataURL(file);
    }
  };

  /** Discard the pending image and reset the file input so the same file
   *  can be re-selected. */
  const clearImage = () => {
    setSelectedImage(null);
    setImagePreview(null);
    if (fileInputRef.current) fileInputRef.current.value = '';
  };

  /**
   * Send the current text and/or image to the Gemini service, appending
   * the user message immediately and the model reply (or an error bubble)
   * when the call settles.
   */
  const handleSendMessage = async () => {
    if ((!inputValue.trim() && !selectedImage) || loadingState !== LoadingState.IDLE) return;
    const userText = inputValue.trim();
    const currentImage = imagePreview; // Snapshot for the message bubble
    // Clear inputs immediately so the UI feels responsive.
    setInputValue('');
    clearImage();
    setLoadingState(LoadingState.SEARCHING); // Default to searching/thinking since we use Google Search

    // Add user message to UI
    const newMessage: Message = {
      id: makeMessageId('user'),
      role: 'user',
      text: userText,
      image: currentImage || undefined
    };
    setMessages(prev => [...prev, newMessage]);

    try {
      // Prepare image for API (strip the "data:image/...;base64," header).
      let base64Data: string | undefined = undefined;
      if (currentImage) {
        base64Data = currentImage.split(',')[1];
      }
      // Call Gemini
      const response = await geminiService.sendMessage(
        userText || (base64Data ? "Analyze this image and tell me about it." : "Hello"),
        base64Data
      );
      setMessages(prev => [
        ...prev,
        {
          id: makeMessageId('model'),
          role: 'model',
          text: response.text,
          groundingMetadata: response.groundingMetadata
        }
      ]);
    } catch (error: unknown) {
      console.error("Chat Error:", error);
      // Narrow before touching .message — catch variables are unknown
      // under strict TypeScript.
      const detail = error instanceof Error ? error.message : undefined;
      let errorMessage = "I'm sorry, I encountered an error connecting to the knowledge base.";
      if (detail?.includes('API key')) {
        errorMessage = "Error: API Key is missing or invalid. Please check your configuration.";
      } else if (detail) {
        errorMessage = `Error: ${detail}`;
      }
      setMessages(prev => [
        ...prev,
        {
          id: makeMessageId('model'),
          role: 'model',
          text: errorMessage,
          isError: true
        }
      ]);
    } finally {
      setLoadingState(LoadingState.IDLE);
    }
  };

  /** Enter sends; Shift+Enter is left for future multi-line input. */
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === 'Enter' && !e.shiftKey) {
      e.preventDefault();
      handleSendMessage();
    }
  };

  /** Human-readable label for the loading indicator. */
  const getLoadingText = (state: LoadingState) => {
    switch (state) {
      case LoadingState.SEARCHING: return "Searching the Web...";
      case LoadingState.THINKING: return "Thinking...";
      default: return "Processing...";
    }
  };

  return (
    <div className="flex flex-col h-full bg-gray-900 relative">
      {/* Header */}
      <header className="bg-gray-800/50 backdrop-blur-sm border-b border-gray-700 p-4 flex items-center justify-between sticky top-0 z-10">
        <div className="flex items-center gap-3">
          <div className="w-10 h-10 rounded-full bg-emerald-600 flex items-center justify-center shadow-lg shadow-emerald-900/20">
            <Leaf className="w-6 h-6 text-emerald-100" />
          </div>
          <div>
            <h1 className="font-bold text-white text-lg">Budtender AI</h1>
            <p className="text-xs text-gray-400">Powered by Gemini &amp; Google Search</p>
          </div>
        </div>
      </header>

      {/* Camera Modal */}
      {isCameraOpen && (
        <div className="fixed inset-0 z-50 bg-black/90 flex flex-col items-center justify-center p-4">
          <div className="relative w-full max-w-lg bg-gray-900 rounded-2xl overflow-hidden shadow-2xl border border-gray-800">
            <video
              ref={videoRef}
              autoPlay
              playsInline
              className="w-full h-[60vh] object-cover bg-black"
            />
            <canvas ref={canvasRef} className="hidden" />
            <div className="absolute bottom-0 left-0 right-0 p-6 bg-gradient-to-t from-black/80 to-transparent flex items-center justify-between">
              <button
                onClick={stopCamera}
                className="p-3 bg-gray-800/80 text-white rounded-full hover:bg-gray-700 transition-colors"
              >
                <X size={24} />
              </button>
              <button
                onClick={capturePhoto}
                className="w-16 h-16 rounded-full border-4 border-white flex items-center justify-center bg-transparent hover:bg-white/20 transition-all active:scale-95"
              >
                <div className="w-12 h-12 bg-emerald-500 rounded-full"></div>
              </button>
              <div className="w-12"></div> {/* Spacer for alignment */}
            </div>
          </div>
          <p className="text-white mt-4 text-sm font-medium">Position plant or product in frame</p>
        </div>
      )}

      {/* Messages Area */}
      <div className="flex-1 overflow-y-auto p-4 space-y-6 scrollbar-thin scrollbar-thumb-gray-700 scrollbar-track-transparent">
        {messages.map((msg) => (
          <div
            key={msg.id}
            className={`flex items-start gap-3 ${msg.role === 'user' ? 'flex-row-reverse' : 'animate-in fade-in slide-in-from-bottom-2 duration-300'}`}
          >
            {/* Avatar */}
            <div className={`w-8 h-8 rounded-full flex-shrink-0 flex items-center justify-center shadow-lg ${
              msg.role === 'model'
                ? (msg.isError ? 'bg-red-900/50 border border-red-500/30' : 'bg-gradient-to-br from-emerald-600 to-emerald-800')
                : 'bg-gray-700'
            }`}>
              {msg.role === 'model' ? (
                msg.isError ? <AlertTriangle size={16} className="text-red-400" /> : <Bot size={16} className="text-white" />
              ) : (
                <User size={16} className="text-gray-300" />
              )}
            </div>
            {/* Bubble */}
            <div className={`max-w-[85%] rounded-2xl p-4 shadow-md ${
              msg.role === 'user'
                ? 'bg-emerald-600 text-white rounded-tr-none'
                : (msg.isError
                    ? 'bg-red-950/30 border border-red-500/20 text-red-200 rounded-tl-none'
                    : 'bg-gray-800 text-gray-100 border border-gray-700 rounded-tl-none')
            }`}>
              {msg.image && (
                <div className="mb-3 rounded-lg overflow-hidden border border-white/10 shadow-sm">
                  <img src={msg.image} alt="User upload" className="max-w-full h-auto max-h-64 object-cover" />
                </div>
              )}
              <div className="whitespace-pre-wrap leading-relaxed">
                {msg.text}
              </div>
              {/* Grounding Sources */}
              {msg.groundingMetadata?.groundingChunks && msg.groundingMetadata.groundingChunks.length > 0 && (
                <div className="mt-4 pt-3 border-t border-gray-700/50 text-xs">
                  <p className="font-semibold mb-2 text-emerald-400 flex items-center gap-1.5 uppercase tracking-wider text-[10px]">
                    <Globe size={12} /> Sources
                  </p>
                  <div className="flex flex-wrap gap-2">
                    {msg.groundingMetadata.groundingChunks.map((chunk: any, i: number) => {
                      if (chunk.web?.uri) {
                        return (
                          <a
                            key={i}
                            href={chunk.web.uri}
                            target="_blank"
                            rel="noopener noreferrer"
                            className="bg-gray-900/50 hover:bg-emerald-900/30 border border-gray-700/50 hover:border-emerald-500/30 px-2 py-1.5 rounded-md text-emerald-300/80 hover:text-emerald-300 transition-colors truncate max-w-[200px] flex items-center gap-1.5"
                          >
                            <Search size={10} />
                            {chunk.web.title || new URL(chunk.web.uri).hostname}
                          </a>
                        );
                      }
                      return null;
                    })}
                  </div>
                </div>
              )}
            </div>
          </div>
        ))}

        {/* Loading Indicator */}
        {loadingState !== LoadingState.IDLE && (
          <div className="flex items-center gap-3 animate-pulse">
            <div className="w-8 h-8 rounded-full bg-emerald-900/50 border border-emerald-500/30 flex items-center justify-center">
              <Globe size={16} className="text-emerald-400 animate-pulse" />
            </div>
            <div className="text-sm text-gray-400 flex items-center gap-2 bg-gray-800/80 px-4 py-2 rounded-full border border-gray-700/50 shadow-sm">
              <Loader2 className="w-3.5 h-3.5 animate-spin text-emerald-500" />
              <span className="font-medium tracking-wide text-xs uppercase">
                {getLoadingText(loadingState)}
              </span>
            </div>
          </div>
        )}
        <div ref={messagesEndRef} />
      </div>

      {/* Input Area */}
      <div className="p-4 bg-gray-800/80 backdrop-blur-md border-t border-gray-700">
        <div className="max-w-4xl mx-auto">
          {imagePreview && (
            <div className="mb-3 relative inline-block group">
              <div className="absolute inset-0 bg-emerald-500/20 rounded-lg blur-sm group-hover:bg-emerald-500/30 transition-all"></div>
              <img src={imagePreview} alt="Preview" className="h-20 w-20 object-cover rounded-lg border border-emerald-500/50 relative z-10" />
              <button
                onClick={clearImage}
                className="absolute -top-2 -right-2 z-20 bg-gray-700 text-white rounded-full p-1 hover:bg-red-500 transition-colors shadow-lg border border-gray-600"
              >
                <X size={12} />
              </button>
            </div>
          )}
          <div className="flex gap-3">
            <button
              onClick={startCamera}
              className="p-3 text-gray-400 hover:text-emerald-400 hover:bg-gray-700/50 rounded-xl transition-colors active:scale-95 transform duration-100"
              title="Use Camera"
            >
              <Camera size={24} />
            </button>
            <button
              onClick={() => fileInputRef.current?.click()}
              className="p-3 text-gray-400 hover:text-emerald-400 hover:bg-gray-700/50 rounded-xl transition-colors active:scale-95 transform duration-100"
              title="Upload image"
            >
              <ImageIcon size={24} />
            </button>
            <input
              type="file"
              ref={fileInputRef}
              className="hidden"
              accept="image/*"
              onChange={handleImageSelect}
            />
            <div className="flex-1 relative group">
              <div className="absolute -inset-0.5 bg-gradient-to-r from-emerald-600 to-teal-600 rounded-xl opacity-20 group-focus-within:opacity-100 transition duration-500 blur-sm"></div>
              <input
                type="text"
                value={inputValue}
                onChange={(e) => setInputValue(e.target.value)}
                onKeyDown={handleKeyDown}
                placeholder="Ask about a strain..."
                className="relative w-full bg-gray-900 text-white border border-gray-700 rounded-xl py-3 pl-4 pr-12 focus:outline-none focus:border-transparent placeholder-gray-500 transition-all shadow-inner"
              />
              <button
                onClick={handleSendMessage}
                disabled={(!inputValue.trim() && !selectedImage) || loadingState !== LoadingState.IDLE}
                className="absolute right-2 top-1/2 -translate-y-1/2 p-2 bg-emerald-600 hover:bg-emerald-500 text-white rounded-lg disabled:opacity-50 disabled:hover:bg-emerald-600 transition-all shadow-lg shadow-emerald-900/50 active:scale-90"
              >
                <Send size={16} />
              </button>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
};

export default ChatInterface;