Yassine Mhirsi committed on
Commit
21d3506
·
1 Parent(s): ee3eb53
src/app/components/chat/AudioPlayer.tsx ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useRef, useEffect } from 'react';
2
+ import { Play, Pause } from 'lucide-react';
3
+
4
+ type AudioPlayerProps = {
5
+ src: string;
6
+ className?: string;
7
+ };
8
+
9
+ const AudioPlayer: React.FC<AudioPlayerProps> = ({ src, className = '' }) => {
10
+ const [isPlaying, setIsPlaying] = useState(false);
11
+ const [duration, setDuration] = useState(0);
12
+ const [currentTime, setCurrentTime] = useState(0);
13
+ const audioRef = useRef<HTMLAudioElement>(null);
14
+ const progressRef = useRef<HTMLDivElement>(null);
15
+
16
+ useEffect(() => {
17
+ const audio = audioRef.current;
18
+ if (!audio) return;
19
+
20
+ const setAudioData = () => {
21
+ setDuration(audio.duration);
22
+ setCurrentTime(audio.currentTime);
23
+ };
24
+
25
+ const setAudioTime = () => setCurrentTime(audio.currentTime);
26
+
27
+ if (audio.readyState >= 1) {
28
+ // Metadata is loaded
29
+ setAudioData();
30
+ }
31
+
32
+ audio.addEventListener('loadeddata', setAudioData);
33
+ audio.addEventListener('timeupdate', setAudioTime);
34
+
35
+ return () => {
36
+ audio.removeEventListener('loadeddata', setAudioData);
37
+ audio.removeEventListener('timeupdate', setAudioTime);
38
+ };
39
+ }, [src]);
40
+
41
+ const togglePlayPause = () => {
42
+ const audio = audioRef.current;
43
+ if (!audio) return;
44
+
45
+ if (isPlaying) {
46
+ audio.pause();
47
+ } else {
48
+ audio.play();
49
+ }
50
+ setIsPlaying(!isPlaying);
51
+ };
52
+
53
+ const handleProgressClick = (e: React.MouseEvent<HTMLDivElement>) => {
54
+ const audio = audioRef.current;
55
+ if (!audio || !progressRef.current) return;
56
+
57
+ const progressBar = progressRef.current;
58
+ const clickX = e.nativeEvent.offsetX;
59
+ const width = progressBar.clientWidth;
60
+ const newTime = (clickX / width) * duration;
61
+
62
+ audio.currentTime = newTime;
63
+ setCurrentTime(newTime);
64
+ };
65
+
66
+ const formatTime = (time: number) => {
67
+ if (isNaN(time)) return '0:00';
68
+
69
+ const minutes = Math.floor(time / 60);
70
+ const seconds = Math.floor(time % 60);
71
+ return `${minutes}:${seconds < 10 ? '0' : ''}${seconds}`;
72
+ };
73
+
74
+ const progress = duration ? (currentTime / duration) * 100 : 0;
75
+
76
+ return (
77
+ <div className={`bg-black/10 dark:bg-gray-800/50 backdrop-blur-sm rounded-xl p-3 ${className}`}>
78
+ <audio
79
+ ref={audioRef}
80
+ src={src}
81
+ onEnded={() => setIsPlaying(false)}
82
+ onPlay={() => setIsPlaying(true)}
83
+ onPause={() => setIsPlaying(false)}
84
+ />
85
+
86
+ <div className="flex items-center gap-3">
87
+ <button
88
+ onClick={togglePlayPause}
89
+ className="flex items-center justify-center w-8 h-8 rounded-full bg-teal-500 hover:bg-teal-600 text-white transition-colors"
90
+ >
91
+ {isPlaying ? (
92
+ <Pause className="w-4 h-4" />
93
+ ) : (
94
+ <Play className="w-4 h-4 ml-0.5" />
95
+ )}
96
+ </button>
97
+
98
+ <div className="flex-1">
99
+ <div
100
+ ref={progressRef}
101
+ onClick={handleProgressClick}
102
+ className="h-1.5 bg-gray-300 dark:bg-gray-700 rounded-full cursor-pointer overflow-hidden"
103
+ >
104
+ <div
105
+ className="h-full bg-teal-500 rounded-full"
106
+ style={{ width: `${progress}%` }}
107
+ />
108
+ </div>
109
+ <div className="flex justify-between text-xs text-gray-500 dark:text-white mt-1">
110
+ <span>{formatTime(currentTime)}</span>
111
+ <span>{formatTime(duration)}</span>
112
+ </div>
113
+ </div>
114
+ </div>
115
+ </div>
116
+ );
117
+ };
118
+
119
+ export default AudioPlayer;
src/app/components/chat/ChatInput.tsx CHANGED
@@ -1,10 +1,11 @@
1
  import React, { useState, useRef, useEffect } from 'react';
2
- import { Plus, ArrowUp, Settings2, Mic, X, Check, Loader2, Search, Sparkles } from 'lucide-react';
3
  import { useMCPTools } from '../../hooks/useMCPTools.ts';
4
  import type { MCPTool } from '../../types/index.ts';
5
 
6
  type ChatInputProps = {
7
  onSubmit?: (message: string, selectedTool?: string | null) => void;
 
8
  placeholder?: string;
9
  };
10
 
@@ -13,7 +14,7 @@ const isMCPTool = (value: any): value is MCPTool => {
13
  return value && typeof value === 'object' && typeof value.name === 'string';
14
  };
15
 
16
- const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputProps) => {
17
  const [input, setInput] = useState('');
18
  const [isRecording, setIsRecording] = useState(false);
19
  const [showToolsDropdown, setShowToolsDropdown] = useState(false);
@@ -22,9 +23,17 @@ const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputPr
22
  const [focusedIndex, setFocusedIndex] = useState(-1);
23
  const [dropdownPosition, setDropdownPosition] = useState<'below' | 'above'>('below');
24
  const [dropdownMaxHeight, setDropdownMaxHeight] = useState(320);
 
 
 
 
25
  const dropdownRef = useRef<HTMLDivElement>(null);
26
  const dropdownContentRef = useRef<HTMLDivElement>(null);
27
  const searchInputRef = useRef<HTMLInputElement>(null);
 
 
 
 
28
  const { tools, loading, error, refetch } = useMCPTools();
29
 
30
  const handleSubmit = (e: any) => {
@@ -38,23 +47,234 @@ const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputPr
38
  }
39
  };
40
 
41
- const handleMicClick = () => {
42
- setIsRecording(true);
43
- setTimeout(() => {
44
- setIsRecording(false);
45
- setInput('speech to text feature');
46
- }, 5000);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  };
48
 
49
  const handleCancelRecording = () => {
 
 
 
 
 
 
 
 
 
 
 
50
  setIsRecording(false);
 
 
 
 
 
 
 
 
51
  };
52
 
53
  const handleConfirmRecording = () => {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  setIsRecording(false);
55
- setInput('speech to text feature');
56
  };
57
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  const WaveAnimation = () => {
59
  const [animationKey, setAnimationKey] = useState(0);
60
 
@@ -230,7 +450,12 @@ const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputPr
230
  >
231
  {isRecording ? (
232
  <div className="flex items-center justify-between h-12 animate-fade-in w-full">
233
- <WaveAnimation />
 
 
 
 
 
234
  <div className="flex items-center gap-2 ml-4">
235
  <button
236
  type="button"
@@ -248,6 +473,41 @@ const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputPr
248
  </button>
249
  </div>
250
  </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  ) : (
252
  <div className="animate-fade-in">
253
  <textarea
@@ -453,23 +713,22 @@ const ChatInput = ({ onSubmit, placeholder = 'Ask a follow-up...' }: ChatInputPr
453
 
454
  <button
455
  type="button"
456
- onClick={() => {
457
- if (input.trim()) {
458
- if (onSubmit) {
459
- onSubmit(input, selectedTool);
460
- }
461
- setInput('');
462
- }
463
- }}
464
- disabled={!input.trim()}
465
  className="h-8 px-3 rounded-lg text-sm font-medium hover:opacity-90 transition-all duration-200 hover:scale-105 flex items-center justify-center bg-teal-900 dark:bg-[#032827] text-teal-300 dark:text-[#2DD4BF] disabled:opacity-50 disabled:cursor-not-allowed"
466
  >
467
- Send
468
  </button>
469
  </div>
470
 
471
  <button
472
  type="submit"
 
 
 
 
 
 
 
 
473
  disabled={!input.trim()}
474
  className="h-8 w-8 p-0 bg-zinc-300 dark:bg-zinc-700 hover:bg-zinc-400 dark:hover:bg-zinc-600 disabled:bg-zinc-200 dark:disabled:bg-zinc-800 disabled:text-zinc-400 dark:disabled:text-zinc-500 text-zinc-800 dark:text-white rounded-lg transition-all duration-200 hover:scale-110 disabled:hover:scale-100 flex items-center justify-center disabled:cursor-not-allowed"
475
  >
 
1
  import React, { useState, useRef, useEffect } from 'react';
2
+ import { Plus, ArrowUp, Settings2, Mic, X, Check, Loader2, Search, Sparkles, Play, Pause } from 'lucide-react';
3
  import { useMCPTools } from '../../hooks/useMCPTools.ts';
4
  import type { MCPTool } from '../../types/index.ts';
5
 
6
  type ChatInputProps = {
7
  onSubmit?: (message: string, selectedTool?: string | null) => void;
8
+ onAudioSubmit?: (audioBlob: Blob, selectedTool?: string | null) => void;
9
  placeholder?: string;
10
  };
11
 
 
14
  return value && typeof value === 'object' && typeof value.name === 'string';
15
  };
16
 
17
+ const ChatInput = ({ onSubmit, onAudioSubmit, placeholder = 'Ask a follow-up...' }: ChatInputProps) => {
18
  const [input, setInput] = useState('');
19
  const [isRecording, setIsRecording] = useState(false);
20
  const [showToolsDropdown, setShowToolsDropdown] = useState(false);
 
23
  const [focusedIndex, setFocusedIndex] = useState(-1);
24
  const [dropdownPosition, setDropdownPosition] = useState<'below' | 'above'>('below');
25
  const [dropdownMaxHeight, setDropdownMaxHeight] = useState(320);
26
+ const [audioBlob, setAudioBlob] = useState<Blob | null>(null);
27
+ const [audioUrl, setAudioUrl] = useState<string | null>(null);
28
+ const [isPlaying, setIsPlaying] = useState(false);
29
+ const [recordingTime, setRecordingTime] = useState(0);
30
  const dropdownRef = useRef<HTMLDivElement>(null);
31
  const dropdownContentRef = useRef<HTMLDivElement>(null);
32
  const searchInputRef = useRef<HTMLInputElement>(null);
33
+ const mediaRecorderRef = useRef<MediaRecorder | null>(null);
34
+ const audioChunksRef = useRef<Blob[]>([]);
35
+ const audioRef = useRef<HTMLAudioElement | null>(null);
36
+ const recordingTimerRef = useRef<NodeJS.Timeout | null>(null);
37
  const { tools, loading, error, refetch } = useMCPTools();
38
 
39
  const handleSubmit = (e: any) => {
 
47
  }
48
  };
49
 
50
+ const handleMicClick = async () => {
51
+ try {
52
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
53
+
54
+ // Try to use a supported mime type
55
+ let options: MediaRecorderOptions = {};
56
+ if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
57
+ options = { mimeType: 'audio/webm;codecs=opus' };
58
+ } else if (MediaRecorder.isTypeSupported('audio/webm')) {
59
+ options = { mimeType: 'audio/webm' };
60
+ } else if (MediaRecorder.isTypeSupported('audio/mp4')) {
61
+ options = { mimeType: 'audio/mp4' };
62
+ }
63
+
64
+ const mediaRecorder = new MediaRecorder(stream, options);
65
+ mediaRecorderRef.current = mediaRecorder;
66
+ audioChunksRef.current = [];
67
+
68
+ mediaRecorder.ondataavailable = (event) => {
69
+ if (event.data.size > 0) {
70
+ audioChunksRef.current.push(event.data);
71
+ }
72
+ };
73
+
74
+ mediaRecorder.onstop = () => {
75
+ const mimeType = mediaRecorder.mimeType || 'audio/webm';
76
+ const blob = new Blob(audioChunksRef.current, { type: mimeType });
77
+ setAudioBlob(blob);
78
+ const url = URL.createObjectURL(blob);
79
+ setAudioUrl(url);
80
+ // Clean up old audio element
81
+ if (audioRef.current) {
82
+ audioRef.current.pause();
83
+ audioRef.current = null;
84
+ }
85
+ // Stop all tracks to release microphone
86
+ stream.getTracks().forEach(track => track.stop());
87
+ };
88
+
89
+ mediaRecorder.start();
90
+ setIsRecording(true);
91
+ setRecordingTime(0);
92
+
93
+ // Start recording timer
94
+ recordingTimerRef.current = setInterval(() => {
95
+ setRecordingTime(prev => prev + 1);
96
+ }, 1000);
97
+ } catch (error) {
98
+ console.error('Error accessing microphone:', error);
99
+ alert('Could not access microphone. Please check your permissions.');
100
+ }
101
  };
102
 
103
  const handleCancelRecording = () => {
104
+ if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') {
105
+ mediaRecorderRef.current.stop();
106
+ }
107
+ if (recordingTimerRef.current) {
108
+ clearInterval(recordingTimerRef.current);
109
+ recordingTimerRef.current = null;
110
+ }
111
+ if (audioRef.current) {
112
+ audioRef.current.pause();
113
+ audioRef.current = null;
114
+ }
115
  setIsRecording(false);
116
+ setIsPlaying(false);
117
+ setRecordingTime(0);
118
+ setAudioBlob(null);
119
+ if (audioUrl) {
120
+ URL.revokeObjectURL(audioUrl);
121
+ setAudioUrl(null);
122
+ }
123
+ audioChunksRef.current = [];
124
  };
125
 
126
  const handleConfirmRecording = () => {
127
+ if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') {
128
+ // Set up a temporary onstop handler specifically for submission
129
+ const originalOnStop = mediaRecorderRef.current.onstop;
130
+ mediaRecorderRef.current.onstop = () => {
131
+ // Call the original onStop to ensure all state is properly set
132
+ if (originalOnStop) {
133
+ originalOnStop.call(mediaRecorderRef.current, new Event('stop'));
134
+ }
135
+
136
+ // Create the blob directly from audioChunksRef to ensure we have the data
137
+ const mimeType = mediaRecorderRef.current?.mimeType || 'audio/webm';
138
+ const blob = new Blob(audioChunksRef.current, { type: mimeType });
139
+
140
+ // Submit the audio immediately
141
+ if (blob && onAudioSubmit) {
142
+ onAudioSubmit(blob, selectedTool);
143
+ } else {
144
+ console.error('Audio blob not available for submission', { blob: blob.size > 0, onAudioSubmit });
145
+ }
146
+ };
147
+
148
+ mediaRecorderRef.current.stop();
149
+ }
150
+ if (recordingTimerRef.current) {
151
+ clearInterval(recordingTimerRef.current);
152
+ recordingTimerRef.current = null;
153
+ }
154
+
155
  setIsRecording(false);
 
156
  };
157
 
158
+ const handlePlayPause = async () => {
159
+ if (!audioUrl) {
160
+ console.error('Audio URL not available');
161
+ return;
162
+ }
163
+
164
+ // Ensure audio element exists
165
+ if (!audioRef.current) {
166
+ console.error('Audio element not initialized');
167
+ return;
168
+ }
169
+
170
+ try {
171
+ if (isPlaying) {
172
+ audioRef.current.pause();
173
+ setIsPlaying(false);
174
+ } else {
175
+ // Reset to beginning if needed
176
+ if (audioRef.current.ended) {
177
+ audioRef.current.currentTime = 0;
178
+ }
179
+
180
+ // Play the audio
181
+ const playPromise = audioRef.current.play();
182
+ if (playPromise !== undefined) {
183
+ await playPromise;
184
+ setIsPlaying(true);
185
+ } else {
186
+ setIsPlaying(true);
187
+ }
188
+ }
189
+ } catch (error) {
190
+ console.error('Error playing audio:', error);
191
+ setIsPlaying(false);
192
+ // Check if it's an autoplay policy issue
193
+ if (error instanceof Error && error.name === 'NotAllowedError') {
194
+ alert('Please interact with the page first, then try playing again.');
195
+ } else {
196
+ alert('Could not play audio. Please try again.');
197
+ }
198
+ }
199
+ };
200
+
201
+ const handleSendRecording = () => {
202
+ // Stop audio playback if playing
203
+ if (audioRef.current && isPlaying) {
204
+ audioRef.current.pause();
205
+ setIsPlaying(false);
206
+ }
207
+
208
+ // Submit the recorded audio if available
209
+ if (audioBlob && onAudioSubmit) {
210
+ onAudioSubmit(audioBlob, selectedTool);
211
+ }
212
+
213
+ // Clean up and return to normal chat mode
214
+ handleCancelRecording();
215
+
216
+ // Focus back to text input
217
+ setTimeout(() => {
218
+ const textarea = document.querySelector('textarea');
219
+ if (textarea) textarea.focus();
220
+ }, 100);
221
+ };
222
+
223
+ // Initialize audio element when audioUrl is available
224
+ useEffect(() => {
225
+ if (audioUrl) {
226
+ // Clean up old audio element if URL changed
227
+ if (audioRef.current && audioRef.current.src !== audioUrl) {
228
+ audioRef.current.pause();
229
+ audioRef.current = null;
230
+ }
231
+
232
+ // Create new audio element if it doesn't exist
233
+ if (!audioRef.current) {
234
+ const audio = new Audio(audioUrl);
235
+ audioRef.current = audio;
236
+ audio.volume = 1.0;
237
+
238
+ audio.onended = () => {
239
+ setIsPlaying(false);
240
+ };
241
+
242
+ audio.onerror = (error) => {
243
+ console.error('Audio initialization error:', error);
244
+ setIsPlaying(false);
245
+ };
246
+
247
+ audio.onloadeddata = () => {
248
+ console.log('Audio loaded and ready');
249
+ };
250
+
251
+ audio.oncanplay = () => {
252
+ console.log('Audio can play');
253
+ };
254
+ }
255
+ }
256
+
257
+ return () => {
258
+ // Don't clean up audio element here - let it persist for playback
259
+ };
260
+ }, [audioUrl]);
261
+
262
+ // Cleanup on unmount
263
+ useEffect(() => {
264
+ return () => {
265
+ if (recordingTimerRef.current) {
266
+ clearInterval(recordingTimerRef.current);
267
+ }
268
+ if (audioRef.current) {
269
+ audioRef.current.pause();
270
+ audioRef.current = null;
271
+ }
272
+ if (audioUrl) {
273
+ URL.revokeObjectURL(audioUrl);
274
+ }
275
+ };
276
+ }, [audioUrl]);
277
+
278
  const WaveAnimation = () => {
279
  const [animationKey, setAnimationKey] = useState(0);
280
 
 
450
  >
451
  {isRecording ? (
452
  <div className="flex items-center justify-between h-12 animate-fade-in w-full">
453
+ <div className="flex items-center gap-3 flex-1">
454
+ <WaveAnimation />
455
+ <span className="text-sm text-zinc-600 dark:text-zinc-400 whitespace-nowrap">
456
+ {Math.floor(recordingTime / 60)}:{(recordingTime % 60).toString().padStart(2, '0')}
457
+ </span>
458
+ </div>
459
  <div className="flex items-center gap-2 ml-4">
460
  <button
461
  type="button"
 
473
  </button>
474
  </div>
475
  </div>
476
+ ) : audioBlob && audioUrl ? (
477
+ <div className="flex items-center justify-between h-12 animate-fade-in w-full">
478
+ <div className="flex items-center gap-3 flex-1">
479
+ <button
480
+ type="button"
481
+ onClick={handlePlayPause}
482
+ className="h-8 w-8 p-0 text-zinc-800 dark:text-white hover:text-zinc-900 dark:hover:text-white hover:bg-zinc-200 dark:hover:bg-zinc-700 rounded-lg transition-all duration-200 hover:scale-110 flex items-center justify-center"
483
+ >
484
+ {isPlaying ? (
485
+ <Pause className="h-5 w-5" />
486
+ ) : (
487
+ <Play className="h-5 w-5" />
488
+ )}
489
+ </button>
490
+ <span className="text-sm text-zinc-600 dark:text-zinc-400">
491
+ {isPlaying ? 'Playing...' : 'Tap to replay'}
492
+ </span>
493
+ </div>
494
+ <div className="flex items-center gap-2 ml-4">
495
+ <button
496
+ type="button"
497
+ onClick={handleCancelRecording}
498
+ className="h-8 w-8 p-0 text-zinc-800 dark:text-white hover:text-zinc-900 dark:hover:text-white hover:bg-zinc-200 dark:hover:bg-zinc-700 rounded-lg transition-all duration-200 hover:scale-110 flex items-center justify-center"
499
+ >
500
+ <X className="h-5 w-5" />
501
+ </button>
502
+ <button
503
+ type="button"
504
+ onClick={handleSendRecording}
505
+ className="h-8 w-8 p-0 rounded-lg transition-all duration-200 hover:scale-110 flex items-center justify-center bg-teal-400 dark:bg-[#2DD4BF] text-teal-900 dark:text-[#032827]"
506
+ >
507
+ <ArrowUp className="h-5 w-5" />
508
+ </button>
509
+ </div>
510
+ </div>
511
  ) : (
512
  <div className="animate-fade-in">
513
  <textarea
 
713
 
714
  <button
715
  type="button"
 
 
 
 
 
 
 
 
 
716
  className="h-8 px-3 rounded-lg text-sm font-medium hover:opacity-90 transition-all duration-200 hover:scale-105 flex items-center justify-center bg-teal-900 dark:bg-[#032827] text-teal-300 dark:text-[#2DD4BF] disabled:opacity-50 disabled:cursor-not-allowed"
717
  >
718
+ LLama 4
719
  </button>
720
  </div>
721
 
722
  <button
723
  type="submit"
724
+ onClick={() => {
725
+ if (input.trim()) {
726
+ if (onSubmit) {
727
+ onSubmit(input, selectedTool);
728
+ }
729
+ setInput('');
730
+ }
731
+ }}
732
  disabled={!input.trim()}
733
  className="h-8 w-8 p-0 bg-zinc-300 dark:bg-zinc-700 hover:bg-zinc-400 dark:hover:bg-zinc-600 disabled:bg-zinc-200 dark:disabled:bg-zinc-800 disabled:text-zinc-400 dark:disabled:text-zinc-500 text-zinc-800 dark:text-white rounded-lg transition-all duration-200 hover:scale-110 disabled:hover:scale-100 flex items-center justify-center disabled:cursor-not-allowed"
734
  >
src/app/components/chat/MessageList.tsx CHANGED
@@ -1,6 +1,7 @@
1
  import React, { useEffect, useRef } from 'react';
2
  import type { ChatMessage } from '../../types/chat.types.ts';
3
  import { Loader2, AlertCircle, RotateCcw } from 'lucide-react';
 
4
 
5
  type MessageListProps = {
6
  messages: ChatMessage[];
@@ -59,7 +60,19 @@ const MessageList = ({ messages, isLoading = false, error = null, onRetry }: Mes
59
  : 'bg-gray-100 dark:bg-gray-800 text-gray-900 dark:text-gray-100 mr-12'
60
  }`}
61
  >
62
- <div className="whitespace-pre-wrap break-words">{message.content}</div>
 
 
 
 
 
 
 
 
 
 
 
 
63
  <div
64
  className={`text-xs mt-1 ${
65
  message.role === 'user'
 
1
  import React, { useEffect, useRef } from 'react';
2
  import type { ChatMessage } from '../../types/chat.types.ts';
3
  import { Loader2, AlertCircle, RotateCcw } from 'lucide-react';
4
+ import AudioPlayer from './AudioPlayer.tsx';
5
 
6
  type MessageListProps = {
7
  messages: ChatMessage[];
 
60
  : 'bg-gray-100 dark:bg-gray-800 text-gray-900 dark:text-gray-100 mr-12'
61
  }`}
62
  >
63
+ {message.audioUrl ? (
64
+ // Audio message display - only the player itself
65
+ <div>
66
+ <AudioPlayer
67
+ src={message.audioUrl}
68
+ />
69
+ </div>
70
+ ) : (
71
+ // Regular text message
72
+ <div>
73
+ <div className="whitespace-pre-wrap break-words">{message.content}</div>
74
+ </div>
75
+ )}
76
  <div
77
  className={`text-xs mt-1 ${
78
  message.role === 'user'
src/app/hooks/useChat.ts CHANGED
@@ -1,6 +1,7 @@
1
  import { useState, useCallback } from 'react';
2
  import { sendChatMessageStream } from '../services/groq.service.ts';
3
  import { generateEnhancedArgument } from '../services/argument.service.ts';
 
4
  import type { ChatMessage, ChatState } from '../types/chat.types.ts';
5
 
6
  const generateId = () => Math.random().toString(36).substring(2, 15);
@@ -50,6 +51,76 @@ Example output:
50
  }
51
  }, []);
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  const sendMessage = useCallback(async (content: string, selectedTool?: string | null) => {
54
  // Add user message
55
  const userMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
@@ -62,12 +133,9 @@ Example output:
62
  setState(prev => ({ ...prev, isLoading: true, error: null }));
63
 
64
  try {
65
- // Get all messages including the new user message
66
- const updatedMessages = [...state.messages, { ...userMessage, id: generateId(), timestamp: new Date() }];
67
-
68
  // Check if generate argument tool is selected (check for multiple possible names)
69
  const isGenerateArgumentTool = selectedTool && (
70
- selectedTool.toLowerCase().includes('generate') &&
71
  selectedTool.toLowerCase().includes('argument')
72
  );
73
 
@@ -75,10 +143,10 @@ Example output:
75
  console.log('Generate argument tool detected:', selectedTool);
76
  // Extract topic and position using LLM
77
  const { topic, position } = await extractTopicAndPosition(content);
78
-
79
  // Call the external API
80
  const argumentResponse = await generateEnhancedArgument({ topic, position });
81
-
82
  // Add assistant response with the enhanced argument
83
  const assistantMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
84
  role: 'assistant',
@@ -86,30 +154,61 @@ Example output:
86
  };
87
  addMessage(assistantMessage);
88
  } else {
89
- // Regular chat with streaming
90
- // Create an empty assistant message that will be updated with streaming content
91
- const assistantId = generateId();
92
- const assistantMessage: ChatMessage = {
93
- id: assistantId,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  role: 'assistant',
95
- content: '',
96
- timestamp: new Date(),
97
  };
98
-
99
- setState(prev => ({
100
- ...prev,
101
- messages: [...prev.messages, assistantMessage],
102
- }));
103
-
104
- // Send to Groq API with streaming
105
- await sendChatMessageStream(updatedMessages, (chunk) => {
106
- setState(prev => ({
107
- ...prev,
108
- messages: prev.messages.map(msg =>
109
- msg.id === assistantId ? { ...msg, content: msg.content + chunk } : msg
110
- ),
111
- }));
112
- });
113
  }
114
 
115
  setState(prev => ({ ...prev, isLoading: false }));
@@ -117,7 +216,7 @@ Example output:
117
  const errorMessage = error instanceof Error ? error.message : 'Failed to send message';
118
  setState(prev => ({ ...prev, isLoading: false, error: errorMessage }));
119
  }
120
- }, [state.messages, addMessage, extractTopicAndPosition]);
121
 
122
  const clearMessages = useCallback(() => {
123
  setState(initialState);
@@ -154,6 +253,7 @@ Example output:
154
  isLoading: state.isLoading,
155
  error: state.error,
156
  sendMessage,
 
157
  clearMessages,
158
  retryLastMessage,
159
  };
 
1
  import { useState, useCallback } from 'react';
2
  import { sendChatMessageStream } from '../services/groq.service.ts';
3
  import { generateEnhancedArgument } from '../services/argument.service.ts';
4
+ import { getUserId } from '../utils/index.ts';
5
  import type { ChatMessage, ChatState } from '../types/chat.types.ts';
6
 
7
  const generateId = () => Math.random().toString(36).substring(2, 15);
 
51
  }
52
  }, []);
53
 
54
+ const sendAudioMessage = useCallback(async (audioBlob: Blob, selectedTool?: string | null) => {
55
+ console.log('sendAudioMessage called with:', { size: audioBlob.size, type: audioBlob.type });
56
+
57
+ // Add user audio message
58
+ const userMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
59
+ role: 'user',
60
+ content: 'Audio message sent', // Placeholder text for audio message
61
+ audioUrl: URL.createObjectURL(audioBlob), // Store the audio URL for playback in UI
62
+ };
63
+ addMessage(userMessage);
64
+
65
+ // Set loading state
66
+ setState(prev => ({ ...prev, isLoading: true, error: null }));
67
+
68
+ try {
69
+ // Get user ID
70
+ const userId = getUserId();
71
+ if (!userId) {
72
+ throw new Error('User ID not found. Please register or log in.');
73
+ }
74
+
75
+ // Get webhook URL
76
+ const webhookUrl = process.env.REACT_APP_N8N_WEBHOOK_URL;
77
+ if (!webhookUrl) {
78
+ throw new Error('REACT_APP_N8N_WEBHOOK_URL environment variable is not set');
79
+ }
80
+
81
+ console.log('Sending audio to webhook:', { userId, webhookUrl });
82
+
83
+ // Create form data for audio file upload
84
+ const formData = new FormData();
85
+ const file = new File([audioBlob], 'recording.webm', { type: audioBlob.type });
86
+ formData.append('file', file);
87
+ formData.append('user_id', userId);
88
+
89
+ console.log('FormData created with', formData.get('file') ? 'file' : 'no file');
90
+
91
+ // Call the debate-assistant endpoint with audio file
92
+ const response = await fetch(`${webhookUrl}/debate-assistant`, {
93
+ method: 'POST',
94
+ body: formData,
95
+ });
96
+
97
+ console.log('Response received:', response.status);
98
+
99
+ if (!response.ok) {
100
+ throw new Error(`HTTP error! status: ${response.status}`);
101
+ }
102
+
103
+ const data = await response.json();
104
+ console.log('Response data:', data);
105
+
106
+ // Extract tts_text from response (based on your curl example)
107
+ const ttsText = data?.tts_text || data?.final_argument || data?.result?.result?.[0]?.text || 'No response received';
108
+
109
+ // Add assistant response with the TTS text
110
+ const assistantMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
111
+ role: 'assistant',
112
+ content: ttsText,
113
+ };
114
+ addMessage(assistantMessage);
115
+
116
+ setState(prev => ({ ...prev, isLoading: false }));
117
+ } catch (error) {
118
+ console.error('Error in sendAudioMessage:', error);
119
+ const errorMessage = error instanceof Error ? error.message : 'Failed to send audio message';
120
+ setState(prev => ({ ...prev, isLoading: false, error: errorMessage }));
121
+ }
122
+ }, [addMessage]);
123
+
124
  const sendMessage = useCallback(async (content: string, selectedTool?: string | null) => {
125
  // Add user message
126
  const userMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
 
133
  setState(prev => ({ ...prev, isLoading: true, error: null }));
134
 
135
  try {
 
 
 
136
  // Check if generate argument tool is selected (check for multiple possible names)
137
  const isGenerateArgumentTool = selectedTool && (
138
+ selectedTool.toLowerCase().includes('generate') &&
139
  selectedTool.toLowerCase().includes('argument')
140
  );
141
 
 
143
  console.log('Generate argument tool detected:', selectedTool);
144
  // Extract topic and position using LLM
145
  const { topic, position } = await extractTopicAndPosition(content);
146
+
147
  // Call the external API
148
  const argumentResponse = await generateEnhancedArgument({ topic, position });
149
+
150
  // Add assistant response with the enhanced argument
151
  const assistantMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
152
  role: 'assistant',
 
154
  };
155
  addMessage(assistantMessage);
156
  } else {
157
+ // Regular chat - use n8n webhook /debate-assistant endpoint
158
+ // Extract topic and position from user input
159
+ const { position } = await extractTopicAndPosition(content);
160
+
161
+ // Get user ID
162
+ const userId = getUserId();
163
+ if (!userId) {
164
+ throw new Error('User ID not found. Please register or log in.');
165
+ }
166
+
167
+ // Get webhook URL
168
+ const webhookUrl = process.env.REACT_APP_N8N_WEBHOOK_URL;
169
+ if (!webhookUrl) {
170
+ throw new Error('REACT_APP_N8N_WEBHOOK_URL environment variable is not set');
171
+ }
172
+
173
+ // Call the debate-assistant endpoint
174
+ const response = await fetch(`${webhookUrl}/debate-assistant`, {
175
+ method: 'POST',
176
+ headers: {
177
+ 'Content-Type': 'application/json',
178
+ },
179
+ body: JSON.stringify({
180
+ text: content,
181
+ position: position,
182
+ user_id: userId,
183
+ }),
184
+ });
185
+
186
+ if (!response.ok) {
187
+ throw new Error(`HTTP error! status: ${response.status}`);
188
+ }
189
+
190
+ const data = await response.json();
191
+
192
+ // Extract final_argument from response
193
+ const finalArgument = data?.final_argument || data?.result?.result?.[0]?.text || 'No response received';
194
+
195
+ // Parse if final_argument is a JSON string
196
+ let finalContent = finalArgument;
197
+ try {
198
+ const parsed = JSON.parse(finalArgument);
199
+ if (parsed.argument) {
200
+ finalContent = parsed.argument;
201
+ }
202
+ } catch {
203
+ // Not JSON, use as is
204
+ }
205
+
206
+ // Add assistant response with the final argument
207
+ const assistantMessage: Omit<ChatMessage, 'id' | 'timestamp'> = {
208
  role: 'assistant',
209
+ content: finalContent,
 
210
  };
211
+ addMessage(assistantMessage);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  }
213
 
214
  setState(prev => ({ ...prev, isLoading: false }));
 
216
  const errorMessage = error instanceof Error ? error.message : 'Failed to send message';
217
  setState(prev => ({ ...prev, isLoading: false, error: errorMessage }));
218
  }
219
+ }, [addMessage, extractTopicAndPosition]);
220
 
221
  const clearMessages = useCallback(() => {
222
  setState(initialState);
 
253
  isLoading: state.isLoading,
254
  error: state.error,
255
  sendMessage,
256
+ sendAudioMessage,
257
  clearMessages,
258
  retryLastMessage,
259
  };
src/app/pages/ChatPage.tsx CHANGED
@@ -4,12 +4,16 @@ import MessageList from '../components/chat/MessageList.tsx';
4
  import { useChat } from '../hooks/useChat.ts';
5
 
6
  const ChatPage = () => {
7
- const { messages, isLoading, error, sendMessage, retryLastMessage } = useChat();
8
 
9
  const handleMessageSubmit = (message: string, selectedTool?: string | null) => {
10
  sendMessage(message, selectedTool);
11
  };
12
 
 
 
 
 
13
  const hasConversation = messages.length > 0;
14
 
15
  return (
@@ -29,7 +33,7 @@ const ChatPage = () => {
29
  {/* Input */}
30
  <div className="px-4 py-4">
31
  <div className="max-w-4xl mx-auto">
32
- <ChatInput onSubmit={handleMessageSubmit} placeholder="Type your message..." />
33
  </div>
34
  </div>
35
  </>
@@ -37,7 +41,7 @@ const ChatPage = () => {
37
  /* Centered input when no conversation */
38
  <div className="flex items-center justify-center px-4 pt-20 pb-10 w-full">
39
  <div className="w-full max-w-4xl">
40
- <ChatInput onSubmit={handleMessageSubmit} placeholder="Ask me anything..." />
41
  </div>
42
  </div>
43
  )}
 
4
  import { useChat } from '../hooks/useChat.ts';
5
 
6
  const ChatPage = () => {
7
+ const { messages, isLoading, error, sendMessage, sendAudioMessage, retryLastMessage } = useChat();
8
 
9
  const handleMessageSubmit = (message: string, selectedTool?: string | null) => {
10
  sendMessage(message, selectedTool);
11
  };
12
 
13
+ const handleAudioSubmit = (audioBlob: Blob, selectedTool?: string | null) => {
14
+ sendAudioMessage(audioBlob, selectedTool);
15
+ };
16
+
17
  const hasConversation = messages.length > 0;
18
 
19
  return (
 
33
  {/* Input */}
34
  <div className="px-4 py-4">
35
  <div className="max-w-4xl mx-auto">
36
+ <ChatInput onSubmit={handleMessageSubmit} onAudioSubmit={handleAudioSubmit} placeholder="Type your message..." />
37
  </div>
38
  </div>
39
  </>
 
41
  /* Centered input when no conversation */
42
  <div className="flex items-center justify-center px-4 pt-20 pb-10 w-full">
43
  <div className="w-full max-w-4xl">
44
+ <ChatInput onSubmit={handleMessageSubmit} onAudioSubmit={handleAudioSubmit} placeholder="Ask me anything..." />
45
  </div>
46
  </div>
47
  )}
src/app/types/chat.types.ts CHANGED
@@ -4,6 +4,7 @@ export type ChatMessage = {
4
  id: string;
5
  role: MessageRole;
6
  content: string;
 
7
  timestamp: Date;
8
  };
9
 
 
4
  id: string;
5
  role: MessageRole;
6
  content: string;
7
+ audioUrl?: string; // URL for audio messages
8
  timestamp: Date;
9
  };
10