# app.py — AI Interview Coach (lightweight Gradio demo)
import gradio as gr
import whisper
import torch
from transformers import pipeline
# Load Whisper (base) for speech-to-text transcription of the recorded answer.
asr_model = whisper.load_model("base")
# Load a lightweight causal LM for generating interview feedback.
# NOTE(review): earlier comment said "Zephyr", but the model actually loaded
# here is Falcon-RW-1B.
feedback_model = pipeline(
"text-generation",
model="tiiuae/falcon-rw-1b",
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,  # bf16 only when a GPU is present
device=0 if torch.cuda.is_available() else -1  # 0 = first CUDA device, -1 = CPU
)
# Pool of interview questions, served to the user in round-robin order.
questions = [
    "Why do you want this internship?",
    "What are your strengths and weaknesses?",
    "Tell me about a time you solved a difficult problem.",
    "Where do you see yourself in 5 years?",
    "tell me about yourself",
]
# Mutable holder for the rotation cursor (module-level state shared by the UI callback).
current_question = {"index": 0}


def get_question():
    """Return the next question in the rotation, wrapping around at the end."""
    idx = current_question["index"]
    current_question["index"] = (idx + 1) % len(questions)
    return questions[idx]
def process_audio(audio_file, question):
    """Transcribe a recorded answer and generate AI feedback for it.

    Args:
        audio_file: Filesystem path to the recording (Gradio ``type="filepath"``),
            or ``None`` when nothing was recorded.
        question: The interview question the candidate was answering.

    Returns:
        A ``(feedback, transcript)`` tuple of strings. On failure the first
        element carries the error message and the transcript is empty.
    """
    if audio_file is None:
        return "Please record an answer.", ""
    try:
        # Transcribe the audio with Whisper.
        result = asr_model.transcribe(audio_file)
        transcript = result["text"]
        # Build the evaluation prompt around the question actually asked.
        prompt = (
            f"Interview Question: {question}\n"
            f"Candidate's Answer: {transcript}\n\n"
            "You are an experienced interview evaluator. Review the candidate's response and rate it using the following:\n"
            "- Overall Rating: Weak, Average, Good, or Excellent\n"
            "- Score: Out of 100\n"
            "- Suggestions for improvement (if any)\n\n"
            "Please respond in this format:\n"
            "Overall Rating: <your rating>\n"
            "Score: <your score>\n"
            "Suggestions: <your suggestions>\n"
        )
        # BUG FIX: max_length counts the prompt tokens too, so with this long
        # prompt the original max_length=200 could leave no room for a reply.
        # max_new_tokens bounds only the generated continuation.
        generated = feedback_model(prompt, max_new_tokens=200)[0]["generated_text"]
        # text-generation pipelines echo the prompt in generated_text; strip it
        # so the UI shows only the model's feedback.
        if generated.startswith(prompt):
            generated = generated[len(prompt):]
        return generated.strip(), transcript
    except Exception as e:
        # Surface transcription/generation failures in the UI instead of crashing.
        return f"Error: {str(e)}", ""
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## AI Interview Coach (Lightweight Version)")
    question_text = gr.Textbox(label="Interview Question", interactive=False)
    get_q_btn = gr.Button("Get Interview Question")
    with gr.Row():
        audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Your Answer")
    submit_btn = gr.Button("Submit")
    feedback = gr.Textbox(label="AI Feedback")
    transcript = gr.Textbox(label="Transcript of Your Answer")
    get_q_btn.click(get_question, outputs=question_text)
    # BUG FIX: process_audio takes (audio_file, question); the original wiring
    # passed only the audio, so every submit raised a missing-argument error.
    submit_btn.click(
        process_audio,
        inputs=[audio_input, question_text],
        outputs=[feedback, transcript],
    )

if __name__ == "__main__":
    demo.launch()