AIencoder committed on
Commit
5f5c6c7
·
verified ·
1 Parent(s): d626b78

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -2
app.py CHANGED
@@ -3,6 +3,7 @@ import requests
3
  import json
4
 
5
  OLLAMA_URL = "http://localhost:11434"
 
6
 
7
  MODELS = {
8
  "Qwen2.5-Coder 1.5B (Fastest)": "qwen2.5-coder:1.5b",
@@ -17,6 +18,24 @@ def check_ollama():
17
  except:
18
  return False
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  def chat_stream(message, history, model_name, temperature, max_tokens):
21
  if not check_ollama():
22
  yield "⏳ Ollama starting... wait 30 seconds and try again."
@@ -131,9 +150,12 @@ with gr.Blocks(title="Axon v5.1", theme=gr.themes.Soft(primary_hue="purple")) as
131
  with gr.TabItem("💬 Chat"):
132
  chatbot = gr.Chatbot(height=400)
133
  with gr.Row():
134
- msg = gr.Textbox(placeholder="Ask about coding...", show_label=False, scale=9)
 
135
  send = gr.Button("Send", variant="primary", scale=1)
136
- clear = gr.Button("Clear")
 
 
137
  gr.Examples(["Write a Python quicksort function", "Explain async/await in JavaScript"], inputs=msg)
138
 
139
  with gr.TabItem("⚡ Generate"):
@@ -166,6 +188,7 @@ with gr.Blocks(title="Axon v5.1", theme=gr.themes.Soft(primary_hue="purple")) as
166
  msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
167
  send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
168
  clear.click(lambda: [], None, chatbot)
 
169
  gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, max_tokens], gen_output)
170
  explain_btn.click(explain_code, [explain_input, model_dropdown, max_tokens], explain_output)
171
  fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
 
3
  import json
4
 
5
  OLLAMA_URL = "http://localhost:11434"
6
+ HF_API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
7
 
8
  MODELS = {
9
  "Qwen2.5-Coder 1.5B (Fastest)": "qwen2.5-coder:1.5b",
 
18
  except:
19
  return False
20
 
21
def transcribe_audio(audio):
    """Transcribe a recorded audio clip via the Hugging Face Inference API.

    Sends the raw audio bytes to ``HF_API_URL`` (a Whisper endpoint) and
    returns the recognized text.

    Parameters:
        audio: Path to the recorded audio file, or ``None`` when the
            microphone widget produced no recording.

    Returns:
        str: The transcription, an empty string for no input, or a
        bracketed ``[STT Error: ...]`` message on any failure.
    """
    if audio is None:
        return ""

    try:
        # Read the whole clip into memory; the API expects raw bytes.
        with open(audio, "rb") as audio_file:
            payload = audio_file.read()

        resp = requests.post(HF_API_URL, data=payload, timeout=60)

        if resp.status_code != 200:
            return f"[STT Error: {resp.status_code}]"

        # NOTE(review): assumes a JSON object with a "text" key — the
        # usual Whisper response shape; other shapes fall through to
        # the error path below.
        return resp.json().get("text", "")
    except Exception as e:
        # Best-effort STT: surface any failure as text rather than raising,
        # so the chat UI keeps working.
        return f"[STT Error: {e}]"
38
+
39
  def chat_stream(message, history, model_name, temperature, max_tokens):
40
  if not check_ollama():
41
  yield "⏳ Ollama starting... wait 30 seconds and try again."
 
150
  with gr.TabItem("💬 Chat"):
151
  chatbot = gr.Chatbot(height=400)
152
  with gr.Row():
153
+ msg = gr.Textbox(placeholder="Ask about coding...", show_label=False, scale=7)
154
+ audio_input = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2)
155
  send = gr.Button("Send", variant="primary", scale=1)
156
+ with gr.Row():
157
+ clear = gr.Button("Clear")
158
+ transcribe_btn = gr.Button("🎤 Transcribe", variant="secondary")
159
  gr.Examples(["Write a Python quicksort function", "Explain async/await in JavaScript"], inputs=msg)
160
 
161
  with gr.TabItem("⚡ Generate"):
 
188
  msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
189
  send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
190
  clear.click(lambda: [], None, chatbot)
191
+ transcribe_btn.click(transcribe_audio, audio_input, msg)
192
  gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, max_tokens], gen_output)
193
  explain_btn.click(explain_code, [explain_input, model_dropdown, max_tokens], explain_output)
194
  fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)