Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,7 +1,10 @@
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
import re
|
| 4 |
import random
|
|
|
|
|
|
|
| 5 |
|
| 6 |
# uploading and cleaning the knowledge txt file
|
| 7 |
def load_questions(file_path):
|
|
@@ -33,17 +36,33 @@ questions_by_type = {
|
|
| 33 |
"testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"])]
|
| 34 |
}
|
| 35 |
|
|
|
|
| 36 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
def set_type(choice, user_profile):
|
| 40 |
user_profile["interview_type"] = choice
|
| 41 |
return "Great! Whatβs your background and what field/role are you aiming for?", user_profile
|
| 42 |
|
|
|
|
| 43 |
def save_background(info, user_profile):
|
| 44 |
user_profile["field"] = info
|
| 45 |
return "Awesome! Type 'start' below to begin your interview.", user_profile
|
| 46 |
|
|
|
|
| 47 |
def respond(message, chat_history, user_profile):
|
| 48 |
message_lower = message.strip().lower()
|
| 49 |
|
|
@@ -122,6 +141,14 @@ def generate_feedback(user_profile):
|
|
| 122 |
feedback.append(fb)
|
| 123 |
return "\n".join(feedback)
|
| 124 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 125 |
# creating the visual elements
|
| 126 |
with gr.Blocks() as demo:
|
| 127 |
user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
|
|
@@ -152,9 +179,15 @@ with gr.Blocks() as demo:
|
|
| 152 |
gr.Markdown("### Step 3: Start Interview")
|
| 153 |
chatbot = gr.Chatbot(label="Interview Bot")
|
| 154 |
msg = gr.Textbox(label="Your message")
|
| 155 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
|
| 157 |
send_btn.click(respond, inputs=[msg, chat_history, user_profile], outputs=[chatbot], queue=False)
|
| 158 |
send_btn.click(lambda: "", None, msg, queue=False)
|
|
|
|
|
|
|
| 159 |
|
| 160 |
demo.launch()
|
|
|
|
| 1 |
+
# imports
|
| 2 |
import gradio as gr
|
| 3 |
from huggingface_hub import InferenceClient
|
| 4 |
import re
|
| 5 |
import random
|
| 6 |
+
import whisper
|
| 7 |
+
from pydub import AudioSegment
|
| 8 |
|
| 9 |
# uploading and cleaning the knowledge txt file
|
| 10 |
def load_questions(file_path):
|
|
|
|
| 36 |
"testing", "financial", "automation", "analysis", "regression", "business", "stakeholder"])]
|
| 37 |
}
|
| 38 |
|
| 39 |
+
# models
|
| 40 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
| 41 |
+
whisper_model = whisper.load_model("base")
|
| 42 |
+
|
| 43 |
+
# whisper audio-to-text function
def transcribe_audio(file_path):
    """Transcribe an audio file to text with the Whisper model.

    The upload is first normalized to WAV via pydub (so any container/codec
    the browser records is accepted), then run through ``whisper_model``.

    Returns the transcribed text, or a string starting with "β ERROR:" on
    failure — callers detect errors via that exact prefix, so it must not
    change.
    """
    try:
        print(f"π Processing audio: {file_path}")
        audio = AudioSegment.from_file(file_path)
        # Use a unique temporary file instead of a fixed "converted.wav" in
        # the CWD, so concurrent requests don't overwrite each other's
        # conversion; clean it up once transcription is done.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
            converted_path = tmp.name
        try:
            audio.export(converted_path, format="wav")
            # fp16=False: run in full precision (safe on CPU-only Spaces).
            result = whisper_model.transcribe(converted_path, fp16=False)
        finally:
            os.remove(converted_path)
        return result["text"]
    except Exception as e:
        # Surface the failure to the chat UI rather than crashing the app.
        return f"β ERROR: {str(e)}"
|
| 54 |
+
|
| 55 |
+
# setting up the users profile (step 1)
def set_type(choice, user_profile):
    """Step 1: remember which interview type the user picked.

    Mutates *user_profile* in place and returns the next prompt together
    with the (same) profile dict, as the Gradio callback expects.
    """
    next_prompt = "Great! Whatβs your background and what field/role are you aiming for?"
    user_profile["interview_type"] = choice
    return next_prompt, user_profile
|
| 59 |
|
| 60 |
+
# step 2
def save_background(info, user_profile):
    """Step 2: store the user's background / target field in the profile.

    Returns the instruction for starting the interview plus the updated
    profile dict (Gradio state round-trip).
    """
    confirmation = "Awesome! Type 'start' below to begin your interview."
    user_profile["field"] = info
    return confirmation, user_profile
|
| 64 |
|
| 65 |
+
# step 3
|
| 66 |
def respond(message, chat_history, user_profile):
|
| 67 |
message_lower = message.strip().lower()
|
| 68 |
|
|
|
|
| 141 |
feedback.append(fb)
|
| 142 |
return "\n".join(feedback)
|
| 143 |
|
| 144 |
+
# handle audio input
def handle_audio(audio_file, chat_history, user_profile):
    """Transcribe a recorded/uploaded answer and feed it to the chatbot.

    Returns the updated chat history. A missing recording or a transcription
    failure is shown as a chat message instead of raising.
    """
    # Gradio's Audio component passes None when "Send Audio" is clicked
    # without any recording/upload — guard before touching the file.
    if not audio_file:
        chat_history.append(("Audio input", "β ERROR: no audio provided"))
        return chat_history
    transcribed = transcribe_audio(audio_file)
    if transcribed.startswith("β"):
        # Error sentinel from transcribe_audio: display it, skip the bot turn.
        chat_history.append(("Audio input", transcribed))
        return chat_history
    return respond(transcribed, chat_history, user_profile)
|
| 151 |
+
|
| 152 |
# creating the visual elements
|
| 153 |
with gr.Blocks() as demo:
|
| 154 |
user_profile = gr.State({"interview_type": "", "field": "", "interview_in_progress": False})
|
|
|
|
| 179 |
gr.Markdown("### Step 3: Start Interview")
|
| 180 |
chatbot = gr.Chatbot(label="Interview Bot")
|
| 181 |
msg = gr.Textbox(label="Your message")
|
| 182 |
+
audio_input = gr.Audio(type="filepath", label="ποΈ Upload or Record your answer")
|
| 183 |
+
|
| 184 |
+
send_btn = gr.Button("Send Text")
|
| 185 |
+
audio_btn = gr.Button("Send Audio")
|
| 186 |
+
|
| 187 |
|
| 188 |
send_btn.click(respond, inputs=[msg, chat_history, user_profile], outputs=[chatbot], queue=False)
|
| 189 |
send_btn.click(lambda: "", None, msg, queue=False)
|
| 190 |
+
audio_btn.click(handle_audio, inputs=[audio_input, chat_history, user_profile], outputs=[chatbot], queue=False)
|
| 191 |
+
|
| 192 |
|
| 193 |
demo.launch()
|