charantejapolavarapu committed on
Commit
3f76e26
·
verified ·
1 Parent(s): c6314c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +113 -60
app.py CHANGED
@@ -1,94 +1,147 @@
1
  import gradio as gr
2
  from transformers import pipeline
 
 
3
  import random
4
 
5
- # -------------------------------
6
- # Load Models
7
- # -------------------------------
 
 
 
 
 
 
 
 
 
 
 
8
 
9
- # Speech to Text Model (Whisper)
10
  asr = pipeline(
11
  "automatic-speech-recognition",
12
  model="openai/whisper-base"
13
  )
14
 
15
- # Text Generation Model (LLM)
16
- generator = pipeline(
17
- "text-generation",
18
- model="mistralai/Mistral-7B-Instruct-v0.1",
19
- device_map="auto"
20
- )
21
-
22
- # -------------------------------
23
  # Question Bank
24
- # -------------------------------
25
-
26
- questions = [
27
- "Explain overfitting in machine learning.",
28
- "What is the difference between supervised and unsupervised learning?",
29
- "Explain gradient descent in simple terms.",
30
- "What is the role of activation functions in neural networks?",
31
- "What is the difference between CNN and RNN?"
32
- ]
33
-
34
- # -------------------------------
35
- # Interview Logic
36
- # -------------------------------
37
-
38
- def start_interview():
39
- question = random.choice(questions)
40
- return question
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  def evaluate_answer(audio, question):
43
 
44
- # Convert Speech to Text
 
 
 
45
  result = asr(audio)
46
  user_answer = result["text"]
47
 
48
- # Create evaluation prompt
49
  prompt = f"""
50
- You are a technical interviewer.
51
-
52
- Question: {question}
53
- Candidate Answer: {user_answer}
54
-
55
- Evaluate the answer and give:
56
- 1. Technical Accuracy Score (out of 10)
57
- 2. Clarity Score (out of 10)
58
- 3. Overall Score (out of 10)
59
- 4. Improvement Suggestions
60
-
61
- Keep feedback concise.
62
- """
63
-
64
- # Generate Evaluation
65
- output = generator(
66
- prompt,
67
- max_new_tokens=300,
68
- do_sample=True,
69
- temperature=0.7
70
- )
71
 
72
- feedback = output[0]["generated_text"]
 
73
 
74
- return f"πŸ“ Transcribed Answer:\n{user_answer}\n\nπŸ“Š Evaluation:\n{feedback}"
75
 
 
 
 
76
 
77
- # -------------------------------
78
- # Gradio UI
79
- # -------------------------------
 
 
 
 
80
 
81
  with gr.Blocks() as demo:
82
 
83
  gr.Markdown("# 🎀 Smart Interview Simulator (AI Voice Bot)")
84
- gr.Markdown("Answer the question using your voice.")
 
 
 
 
 
 
85
 
86
  question_output = gr.Textbox(label="Interview Question")
87
 
88
  start_button = gr.Button("Start Interview")
89
- start_button.click(start_interview, outputs=question_output)
90
 
91
- audio_input = gr.Audio(source="microphone", type="filepath")
 
 
 
92
 
93
  submit_button = gr.Button("Submit Answer")
94
 
 
import gradio as gr
from transformers import pipeline
import requests
import os
import random

# ==============================
# CONFIG
# ==============================

# Hugging Face Inference API settings: the access token is read from the
# environment (never hard-coded), and the endpoint is pinned to
# Mistral-7B-Instruct v0.2.
HF_TOKEN = os.getenv("HF_TOKEN")
API_URL = (
    "https://api-inference.huggingface.co/models/"
    "mistralai/Mistral-7B-Instruct-v0.2"
)
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# ==============================
# Load Whisper (Lightweight)
# ==============================

# Speech-to-text runs locally; whisper-base keeps the memory footprint small.
asr = pipeline(
    task="automatic-speech-recognition",
    model="openai/whisper-base",
)
26
 
27
# ==============================
# Question Bank
# ==============================

# One pool of interview questions per difficulty tier. Keys must match the
# choices offered by the difficulty dropdown in the UI.
questions = dict(
    Easy=[
        "What is Machine Learning?",
        "Explain supervised learning.",
        "What is overfitting?",
    ],
    Medium=[
        "Explain bias vs variance tradeoff.",
        "What is gradient descent?",
        "Difference between CNN and RNN?",
    ],
    Hard=[
        "Explain backpropagation mathematically.",
        "What is attention mechanism?",
        "Explain transformers architecture.",
    ],
)

# ==============================
# Generate Question
# ==============================

def start_interview(level):
    """Return a randomly chosen question from the pool for *level*."""
    pool = questions[level]
    return random.choice(pool)
55
+
56
# ==============================
# Call LLM via API
# ==============================

def query_llm(prompt):
    """Send *prompt* to the hosted Mistral model and return its completion.

    Returns the generated text on success, or a short error string on any
    HTTP or network failure (the UI simply displays whatever string this
    function returns).
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 300,
            "temperature": 0.7,
            # Without this flag the HF Inference API echoes the prompt back
            # at the start of generated_text, leaking the grading rubric
            # into the feedback shown to the candidate.
            "return_full_text": False,
        },
    }

    try:
        # A timeout prevents the Gradio worker from hanging forever if the
        # inference endpoint stalls or is unreachable.
        response = requests.post(
            API_URL, headers=headers, json=payload, timeout=60
        )
    except requests.RequestException:
        # Connection errors previously raised and crashed the handler;
        # degrade to the same error string the caller already expects.
        return "Error contacting LLM API."

    if response.status_code == 200:
        return response.json()[0]["generated_text"]
    return "Error contacting LLM API."
76
+
77
# ==============================
# Evaluate Answer
# ==============================

def evaluate_answer(audio, question):
    """Transcribe *audio* and have the LLM grade it against *question*.

    Parameters
    ----------
    audio : str | None
        Filepath to the recorded answer (Gradio Audio with type="filepath"),
        or None when nothing was recorded.
    question : str
        The interview question shown to the candidate.

    Returns a single display string: the transcription followed by the
    model's structured evaluation, or a short instruction message when the
    recording is missing or produced no recognizable speech.
    """
    if audio is None:
        return "Please record your answer."

    # Speech to Text (local Whisper pipeline).
    result = asr(audio)
    user_answer = result["text"]

    # Guard against silent / unintelligible recordings so we don't ask the
    # LLM to grade an empty answer.
    if not user_answer.strip():
        return "Could not transcribe any speech — please try recording again."

    # Prompt Engineering: a fixed rubric keeps the feedback structured.
    prompt = f"""
You are a strict technical interviewer.

Question:
{question}

Candidate Answer:
{user_answer}

Evaluate and give:

1. Technical Accuracy Score (0-10)
2. Clarity Score (0-10)
3. Depth Score (0-10)
4. Overall Score (0-10)
5. Improvement Suggestions (short and clear)

Be concise and structured.
"""

    feedback = query_llm(prompt)

    return f"""
📝 Transcribed Answer:
{user_answer}

📊 Evaluation:
{feedback}
"""
120
+
121
+ # ==============================
122
+ # UI
123
+ # ==============================
124
 
125
  with gr.Blocks() as demo:
126
 
127
  gr.Markdown("# 🎀 Smart Interview Simulator (AI Voice Bot)")
128
+ gr.Markdown("Select difficulty β†’ Answer using voice β†’ Get AI feedback")
129
+
130
+ level_dropdown = gr.Dropdown(
131
+ ["Easy", "Medium", "Hard"],
132
+ value="Medium",
133
+ label="Select Difficulty"
134
+ )
135
 
136
  question_output = gr.Textbox(label="Interview Question")
137
 
138
  start_button = gr.Button("Start Interview")
139
+ start_button.click(start_interview, inputs=level_dropdown, outputs=question_output)
140
 
141
+ audio_input = gr.Audio(
142
+ type="filepath",
143
+ label="Record Your Answer"
144
+ )
145
 
146
  submit_button = gr.Button("Submit Answer")
147