bscs-27-005 committed on
Commit
32031b6
·
verified ·
1 Parent(s): 257e465

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -21,17 +21,15 @@ DATA:
21
  - CONTACT: projects@softstream.tech.
22
  """
23
 
24
- # --- 2. SETUP (SECURE VERSION) ---
25
  hf_token = os.getenv("HF_TOKEN")
26
 
27
  if not hf_token:
28
- # Fallback explanation if token is missing
29
  raise ValueError("Error: HF_TOKEN is missing. Go to Settings > Secrets and add it!")
30
 
31
  client = InferenceClient(api_key=hf_token)
32
 
33
  device = "cuda" if torch.cuda.is_available() else "cpu"
34
- # Using "tiny" is safest for free CPU tiers
35
  whisper_model = whisper.load_model("tiny", device=device)
36
 
37
  def voice_chat(audio, history):
@@ -48,9 +46,9 @@ def voice_chat(audio, history):
48
  # B. Think
49
  history.append({"role": "user", "content": transcription + " (Answer in 1 sentence max)"})
50
 
51
- # --- FIX: SWITCHED MODEL TO PHI-3.5 ---
52
  response = client.chat.completions.create(
53
- model="microsoft/Phi-3.5-mini-instruct",
54
  messages=history,
55
  max_tokens=50,
56
  temperature=0.3
@@ -67,7 +65,6 @@ def voice_chat(audio, history):
67
  return audio_path, ai_text, history
68
 
69
  except Exception as e:
70
- # If Phi-3 also fails, we return the error to the text box to debug
71
  return None, f"Error: {str(e)}", history
72
 
73
  # --- 3. UI ---
@@ -80,7 +77,7 @@ with gr.Blocks(theme="soft") as demo:
80
  input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Ask Question")
81
 
82
  with gr.Row():
83
- output_audio = gr.Audio(label="AI Answer (<10s)", autoplay=True)
84
  output_text = gr.Textbox(label="Transcript")
85
 
86
  clear_btn = gr.Button("Reset")
 
21
  - CONTACT: projects@softstream.tech.
22
  """
23
 
24
+ # --- 2. SETUP ---
25
  hf_token = os.getenv("HF_TOKEN")
26
 
27
  if not hf_token:
 
28
  raise ValueError("Error: HF_TOKEN is missing. Go to Settings > Secrets and add it!")
29
 
30
  client = InferenceClient(api_key=hf_token)
31
 
32
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
33
  whisper_model = whisper.load_model("tiny", device=device)
34
 
35
  def voice_chat(audio, history):
 
46
  # B. Think
47
  history.append({"role": "user", "content": transcription + " (Answer in 1 sentence max)"})
48
 
49
+ # --- FIX: Using Qwen 2.5 (Most reliable free model right now) ---
50
  response = client.chat.completions.create(
51
+ model="Qwen/Qwen2.5-7B-Instruct",
52
  messages=history,
53
  max_tokens=50,
54
  temperature=0.3
 
65
  return audio_path, ai_text, history
66
 
67
  except Exception as e:
 
68
  return None, f"Error: {str(e)}", history
69
 
70
  # --- 3. UI ---
 
77
  input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Ask Question")
78
 
79
  with gr.Row():
80
+ output_audio = gr.Audio(label="AI Answer", autoplay=True)
81
  output_text = gr.Textbox(label="Transcript")
82
 
83
  clear_btn = gr.Button("Reset")