UmerSajid committed on
Commit
be4cc6c
·
verified ·
1 Parent(s): a921529

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -0
app.py CHANGED
@@ -31,11 +31,29 @@ def process_audio_realtime(audio_file):
31
  transcription = whisper_model.transcribe(audio_file)["text"]
32
 
33
  # Step 2: Process transcription using Llama model via Groq API
 
34
  llama_response = client.chat.completions.create(
35
  messages=[{"role": "user", "content": transcription}],
36
  model="llama3-8b-8192", # Replace with your actual Llama model name
37
  stream=False
38
  ).choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  # Step 3: Convert Llama response to audio using gTTS
41
  tts = gTTS(text=llama_response, lang="en")
 
31
  transcription = whisper_model.transcribe(audio_file)["text"]
32
 
33
  # Step 2: Process transcription using Llama model via Groq API
34
+ """
35
  llama_response = client.chat.completions.create(
36
  messages=[{"role": "user", "content": transcription}],
37
  model="llama3-8b-8192", # Replace with your actual Llama model name
38
  stream=False
39
  ).choices[0].message.content
40
+ """
41
+
42
+ # Step 2: Process transcription using Llama model via Groq API
43
+ llama_response = client.chat.completions.create(
44
+ messages=[
45
+ {"role": "system", "content": "You are a helpful assistant. Please provide a concise and accurate response."},
46
+ {"role": "user", "content": transcription}
47
+ ],
48
+ model="llama3-8b-8192", # Replace with your actual Llama model name
49
+ max_tokens=70,
50
+ stream=False
51
+ ).choices[0].message.content
52
+
53
+
54
+
55
+
56
+
57
 
58
  # Step 3: Convert Llama response to audio using gTTS
59
  tts = gTTS(text=llama_response, lang="en")