dragonhearted commited on
Commit
a79c4d0
·
verified ·
1 Parent(s): 604a2a7
Files changed (1) hide show
  1. app.py +120 -0
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import speech_recognition as sr
3
+
4
def speech_to_text(audio):
    """Transcribe an audio file to text via Google's free speech API.

    Parameters
    ----------
    audio : str or None
        Filesystem path to an audio file (gr.Microphone with
        type="filepath" yields a path, or None when nothing was recorded).

    Returns
    -------
    str
        The recognized text, or a human-readable error message when the
        audio is missing, unreadable, or the recognition service fails.
    """
    # Gradio passes None when the button is clicked with no recording.
    if audio is None:
        return "Could not process the audio, please try to record one more time"
    recognizer = sr.Recognizer()
    try:
        with sr.AudioFile(audio) as source:
            audio_data = recognizer.record(source)
        try:
            text = recognizer.recognize_google(audio_data)
        except sr.UnknownValueError:
            text = "Google Speech Recognition could not understand the audio"
        except sr.RequestError as e:
            text = f"Could not request results from Google Speech Recognition service; {e}"
    except Exception:
        # BUG FIX: the original wrote this as an f-string with no
        # placeholder and bound an unused `e`; a plain literal is correct.
        # Covers unreadable/corrupt audio files.
        text = "Could not process the audio, please try to record one more time"
    return text
18
+
19
def chatbot_response(user_input):
    """Return a canned reply for a handful of known prompts.

    Parameters
    ----------
    user_input : str
        The raw message typed (or transcribed) by the user.

    Returns
    -------
    str
        The matching canned response, or a fallback prompt when the
        message is not recognized.
    """
    # Lookup table keyed on the lowercased message — normalizes once
    # instead of recomputing user_input.lower() in every branch.
    canned = {
        "hello": "Hi there! How can I help you?",
        "how are you?": "I'm just a bot, but I'm functioning well! How about you?",
        "what can you do?": "I can help answer questions, provide information, and assist with various tasks.",
    }
    fallback = "I'm not sure how to respond to that. Can you please ask something else?"
    return canned.get(user_input.lower(), fallback)
28
+
29
def llm_ui():
    """Build the text-only chat tab.

    Returns
    -------
    gr.Blocks
        A Blocks app with a chatbot, model selector, text/voice input,
        and clear/load controls.
    """
    # BUG FIX: custom CSS must be supplied to the Blocks constructor;
    # the original assigned demo.css after the context was built, which
    # is not the supported API and is not reliably applied.
    send_button_css = """
    #send-button {
        background-color: orange;
    }
    """
    with gr.Blocks(css=send_button_css) as demo:
        gr.Markdown("# AI Chatbot")

        def respond(message, chat_history):
            # Append the (user, bot) pair and clear the textbox.
            bot_message = chatbot_response(message)
            chat_history.append((message, bot_message))
            return "", chat_history

        def clear_chat(chat_history):
            # Reset the conversation; the incoming history is discarded.
            return []

        def load_previous_conversation():
            # TODO: logic to load a previously saved conversation.
            return []

        def transcribe_audio(audio):
            # Route the recorded clip through speech_to_text into the textbox.
            return speech_to_text(audio)

        with gr.Row():
            chatbot = gr.Chatbot()
        with gr.Row():
            model_selection = gr.Dropdown(["Model 1", "Model 2", "Model 3"], label="Select Model", scale=3)
            clear_button = gr.Button("Clear Chat", scale=2)
            load_button = gr.Button("Load Previous Conversation", scale=2)
        with gr.Row():
            user_input = gr.Textbox(placeholder="Type your message here...", label="User Input", show_label=False, scale=8)
            send_button = gr.Button("Send", scale=2, elem_id="send-button")
        with gr.Row():
            voice_input = gr.Microphone(type="filepath", label="Voice Input", scale=7)
            voice_button = gr.Button("Use Audio as User Input", scale=3)
        voice_button.click(transcribe_audio, inputs=voice_input, outputs=user_input)

        send_button.click(respond, [user_input, chatbot], [user_input, chatbot])
        # Pressing Enter in the textbox behaves like clicking Send.
        user_input.submit(respond, [user_input, chatbot], [user_input, chatbot])
        clear_button.click(clear_chat, [chatbot], [chatbot])
        load_button.click(load_previous_conversation, [], [chatbot])
    return demo
73
+
74
def multimodal_llm_ui():
    """Build the image + text chat tab.

    Returns
    -------
    gr.Blocks
        A Blocks app with a chatbot, model selector, image upload,
        text/voice input, and clear/load controls.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# AI Chatbot")

        def on_send(msg, history):
            # Record the (user, bot) exchange and blank the textbox.
            history.append((msg, chatbot_response(msg)))
            return "", history

        def on_clear(history):
            # Wipe the conversation regardless of current contents.
            return []

        def on_load():
            # Placeholder: logic to load a previous conversation.
            return []

        def on_voice(path):
            # Convert a recorded clip into text for the input box.
            return speech_to_text(path)

        with gr.Row():
            chatbot = gr.Chatbot(height=550)
        with gr.Column():
            with gr.Row():
                model_selection = gr.Dropdown(["Model 1", "Model 2", "Model 3"], label="Select Model", scale=3)
                clear_button = gr.Button("Clear Chat", scale=2)
                load_button = gr.Button("Load Previous Conversation", scale=2)

            # NOTE(review): image_input is rendered but not wired to any
            # event handler — the chosen image is currently unused; confirm
            # whether it should feed into the send handler.
            image_input = gr.Image(type="filepath", label="Input your Image Here....")
            with gr.Row():
                user_input = gr.Textbox(placeholder="Type your message here...", label="User Input", show_label=False, scale=8)
                send_button = gr.Button("Send", scale=2, elem_id="send-button")

            with gr.Row():
                voice_input = gr.Microphone(type="filepath", label="Voice Input", scale=7)
                voice_button = gr.Button("Use Audio as User Input", scale=3)
            voice_button.click(on_voice, inputs=voice_input, outputs=user_input)

        send_button.click(on_send, [user_input, chatbot], [user_input, chatbot])
        clear_button.click(on_clear, [chatbot], [chatbot])
        load_button.click(on_load, [], [chatbot])
    return demo
116
+
117
# Combine both UIs into a tabbed app; each tab is an independent Blocks.
demo = gr.TabbedInterface([llm_ui(), multimodal_llm_ui()], ["LLM", "Image + LLM"],
                          theme='snehilsanyal/scikit-learn')

if __name__ == "__main__":
    # BUG FIX: guard the launch so importing this module (e.g. from tests
    # or another app) does not start a web server as a side effect.
    demo.launch()