Back Commit
Browse files
Main2.py
CHANGED
|
@@ -237,77 +237,50 @@ def create_interface():
|
|
| 237 |
refresh_characters_btn.click(fn=lambda: gr.update(value=get_existing_characters()), outputs=[character_list])
|
| 238 |
|
| 239 |
with gr.Tab("Chat with Character"):
|
| 240 |
-
# Top row for "Choose Character" and "Current Chat ID"
|
| 241 |
with gr.Row():
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
| 276 |
-
|
| 277 |
-
chat_messages
|
| 278 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 279 |
|
| 280 |
-
|
| 281 |
-
audio_file_path = extract_audio_from_video(video_file)
|
| 282 |
-
if audio_file_path:
|
| 283 |
-
video_text = speech_to_text(audio_file_path)
|
| 284 |
-
if video_text:
|
| 285 |
-
final_input += f" {video_text}"
|
| 286 |
-
chat_messages.append({"role": "user", "content": "Video uploaded"})
|
| 287 |
-
else:
|
| 288 |
-
chat_messages.append({"role": "assistant", "content": "Failed to extract audio from video."})
|
| 289 |
-
return chat_messages, current_chat_id, None
|
| 290 |
-
|
| 291 |
-
if not final_input.strip():
|
| 292 |
-
return chat_messages, current_chat_id, "Please provide a message, audio, or video!"
|
| 293 |
-
|
| 294 |
-
response, new_chat_id = chat_with_character(character_name, final_input, user_id, current_chat_id)
|
| 295 |
-
chat_messages.append({"role": "user", "content": final_input})
|
| 296 |
-
chat_messages.append({"role": "assistant", "content": response})
|
| 297 |
-
return chat_messages, new_chat_id, new_chat_id
|
| 298 |
-
|
| 299 |
-
# Handle audio and video button clicks to trigger file uploads
|
| 300 |
-
def upload_audio():
|
| 301 |
-
return gr.update(value=None, visible=True), gr.update(value=None, visible=False)
|
| 302 |
-
|
| 303 |
-
def upload_video():
|
| 304 |
-
return gr.update(value=None, visible=False), gr.update(value=None, visible=True)
|
| 305 |
-
|
| 306 |
-
audio_button.click(fn=upload_audio, outputs=[audio_input, video_input])
|
| 307 |
-
video_button.click(fn=upload_video, outputs=[audio_input, video_input])
|
| 308 |
-
|
| 309 |
-
# Connect the Send button to handle the chat
|
| 310 |
-
chat_btn.click(fn=handle_chat, inputs=[character_dropdown, user_input, audio_input, video_input, user_id, chat_messages, current_chat_id], outputs=[chat_response, current_chat_id, chat_id_display])
|
| 311 |
|
| 312 |
with gr.Tab("Chat History"):
|
| 313 |
with gr.Row():
|
|
|
|
| 237 |
refresh_characters_btn.click(fn=lambda: gr.update(value=get_existing_characters()), outputs=[character_list])
|
| 238 |
|
| 239 |
with gr.Tab("Chat with Character"):
|
|
|
|
| 240 |
with gr.Row():
|
| 241 |
+
character_dropdown = gr.Dropdown(label="Choose Character", choices=[char[0] for char in get_existing_characters()], elem_id="character_dropdown")
|
| 242 |
+
chat_id_display = gr.Textbox(label="Current Chat ID", interactive=False, elem_id="chat_id_display")
|
| 243 |
+
user_input = gr.Textbox(label="Your Message", placeholder="Type your message or use audio/video input", elem_id="user_input", lines=2)
|
| 244 |
+
audio_input = gr.Audio(label="Audio Input", type="filepath", elem_id="audio_input")
|
| 245 |
+
video_input = gr.Video(label="Video Input", elem_id="video_input")
|
| 246 |
+
chat_btn = gr.Button("Send", variant="primary")
|
| 247 |
+
chat_response = gr.Chatbot(label="Chat Responses", elem_id="chat_response", height=500, type="messages") # Increased height here
|
| 248 |
+
|
| 249 |
+
def handle_chat(character_name, user_input, audio_file, video_file, user_id, chat_messages, current_chat_id):
    """Gradio callback for the Send button: assemble the user's message and reply.

    The outgoing message is built from the typed text plus transcriptions of any
    uploaded audio or video, then forwarded to ``chat_with_character``.

    Args (mirroring the components wired as ``inputs``):
        character_name: selected character name; falsy when none chosen.
        user_input: typed message text (str or None).
        audio_file: uploaded audio file path, or None.
        video_file: uploaded video file path, or None.
        user_id: signed-in user id; must be truthy to proceed.
        chat_messages: running history in Chatbot "messages" format
            (list of ``{"role": ..., "content": ...}`` dicts); mutated in place.
        current_chat_id: id of the ongoing chat, or None for a new chat.

    Returns:
        3-tuple ``(messages, chat_id, status)`` matching the outputs
        ``[chat_response, current_chat_id, chat_id_display]``. Guard failures
        put a human-readable error string in the status slot; transcription
        failures put None there; success echoes the (possibly new) chat id.
    """
    # Guard clauses: require a signed-in user and a selected character.
    if not user_id:
        return chat_messages, current_chat_id, "Please sign in with a numeric User ID first!"
    if not character_name:
        return chat_messages, current_chat_id, "Please select a character!"

    final_input = user_input or ""

    # Fold an audio transcription into the outgoing message, if provided.
    if audio_file:
        audio_text = speech_to_text(audio_file)
        if audio_text:
            final_input += f" {audio_text}"
        else:
            chat_messages.append({"role": "assistant", "content": "Could not understand audio."})
            return chat_messages, current_chat_id, None

    # Same for video: extract its audio track first, then transcribe it.
    if video_file:
        audio_file_path = extract_audio_from_video(video_file)
        if audio_file_path:
            video_text = speech_to_text(audio_file_path)
            if video_text:
                final_input += f" {video_text}"
                # Marker entry so the history shows a video was part of this turn.
                chat_messages.append({"role": "user", "content": "Video uploaded"})
        else:
            chat_messages.append({"role": "assistant", "content": "Failed to extract audio from video."})
            return chat_messages, current_chat_id, None

    # Fix: the `" " + transcription` concatenations above can leave a stray
    # leading space on the message; strip it before validating and sending.
    final_input = final_input.strip()
    if not final_input:
        return chat_messages, current_chat_id, "Please provide a message, audio, or video!"

    response, new_chat_id = chat_with_character(character_name, final_input, user_id, current_chat_id)
    chat_messages.append({"role": "user", "content": final_input})
    chat_messages.append({"role": "assistant", "content": response})
    return chat_messages, new_chat_id, new_chat_id
|
| 282 |
|
| 283 |
+
chat_btn.click(fn=handle_chat, inputs=[character_dropdown, user_input, audio_input, video_input, user_id, chat_messages, current_chat_id], outputs=[chat_response, current_chat_id, chat_id_display])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 284 |
|
| 285 |
with gr.Tab("Chat History"):
|
| 286 |
with gr.Row():
|