AnnaMathews committed on
Commit
f33df00
·
verified ·
1 Parent(s): 6962000

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -68
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import gradio as gr
2
  import os
3
- import re
4
  from langchain_community.document_loaders import PyPDFLoader
5
  from langchain.text_splitter import RecursiveCharacterTextSplitter
6
  from langchain_community.vectorstores import FAISS
@@ -9,55 +8,56 @@ from groq import Groq
9
  from dotenv import load_dotenv
10
  from faster_whisper import WhisperModel
11
  from elevenlabs.client import ElevenLabs
12
- from gtts import gTTS
13
  import tempfile
14
 
15
- # Load environment variables from .env
16
  load_dotenv()
17
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
18
- ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
19
 
20
- # Check API keys
21
- if not GROQ_API_KEY or not ELEVENLABS_API_KEY:
22
- raise EnvironmentError("Missing API keys. Please create a .env file with GROQ_API_KEY and ELEVENLABS_API_KEY.")
23
 
24
  # Initialize clients
25
  groq_client = Groq(api_key=GROQ_API_KEY)
26
  elevenlabs_client = ElevenLabs(api_key=ELEVENLABS_API_KEY)
27
- whisper_model = WhisperModel("small", device="cpu", compute_type="int8")
28
 
29
- def clean_markdown(text):
30
- return re.sub(r'[*_#`]+', '', text)
31
 
32
  def summarize_resume(resume_text):
 
33
  prompt = f"""Create a concise summary of this resume highlighting:
34
  1. Professional title/role
35
  2. Years of experience
36
  3. Core skills/competencies
37
  4. Education background
38
  5. Notable achievements
39
-
40
  Resume:
41
  {resume_text[:3000]}... [truncated]"""
 
42
  response = groq_client.chat.completions.create(
43
  messages=[{"role": "user", "content": prompt}],
44
  model="llama3-70b-8192",
45
  temperature=0.3,
46
  )
47
- return clean_markdown(response.choices[0].message.content)
48
 
49
  def calculate_ats_score(resume_text):
 
50
  prompt = f"""Analyze this resume and calculate an ATS score (0-100) considering:
51
  1. Keyword optimization (20 pts)
52
  2. Section organization (20 pts)
53
  3. Experience quality (20 pts)
54
  4. Education completeness (20 pts)
55
  5. Readability (20 pts)
56
-
57
  Return ONLY the numerical score and nothing else.
58
-
59
  Resume:
60
  {resume_text[:3000]}... [truncated]"""
 
61
  response = groq_client.chat.completions.create(
62
  messages=[{"role": "user", "content": prompt}],
63
  model="llama3-70b-8192",
@@ -66,125 +66,159 @@ def calculate_ats_score(resume_text):
66
  try:
67
  return int(response.choices[0].message.content.strip())
68
  except:
69
- return 50
70
 
71
  def process_resume(file):
 
72
  try:
 
73
  loader = PyPDFLoader(file.name)
74
  docs = RecursiveCharacterTextSplitter(
75
  chunk_size=1000,
76
  chunk_overlap=200,
77
  separators=["\n\n", "\n", " ", ""]
78
  ).split_documents(loader.load())
 
 
79
  embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
80
  FAISS.from_documents(docs, embeddings).save_local("resume_index")
 
 
81
  full_text = "\n".join([doc.page_content for doc in docs])
82
  gr.Info("✅ Resume processed successfully!")
83
  return summarize_resume(full_text), f"ATS Score: {calculate_ats_score(full_text)}/100"
 
84
  except Exception as e:
85
- gr.Warning(f"❌ Error: {e}")
86
- return f"Error: {e}", "ATS Score: N/A"
87
 
88
  def transcribe_audio(audio_path):
89
- if not audio_path:
90
- return "No audio recorded"
91
  segments, _ = whisper_model.transcribe(audio_path)
92
  return " ".join([segment.text for segment in segments])
93
 
94
  def generate_question(resume_text):
 
95
  prompt = f"""Generate one general interview question focusing on:
96
  - Teamwork experiences
97
  - Challenges overcome
98
  - Learning experiences
99
  - Career motivations
100
  - Problem-solving examples
101
-
102
  Make it conversational and open-ended.
103
-
104
  Resume Excerpt:
105
  {resume_text[:2000]}... [truncated]"""
 
106
  response = groq_client.chat.completions.create(
107
  messages=[{"role": "user", "content": prompt}],
108
  model="llama3-70b-8192",
109
  temperature=0.7,
110
  )
111
- return clean_markdown(response.choices[0].message.content)
112
 
113
  def evaluate_response(question, response_text):
 
114
  prompt = f"""Evaluate this interview response on:
115
  1. Clarity (1-5)
116
  2. Confidence (1-5)
117
  3. Relevance (1-5)
118
  4. Suggested improvements
119
-
120
  Question: {question}
121
  Response: {response_text}"""
 
122
  evaluation = groq_client.chat.completions.create(
123
  messages=[{"role": "user", "content": prompt}],
124
  model="llama3-70b-8192",
125
  temperature=0.2,
126
  )
127
- return clean_markdown(evaluation.choices[0].message.content)
128
 
129
- def gtts_speak(text):
 
130
  try:
131
  if not text.strip():
132
- raise ValueError("Empty text")
133
- tts = gTTS(text, lang="en", tld="com")
 
 
 
 
 
 
 
134
  with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
135
- tts.save(tmp.name)
136
- return tmp.name
 
 
 
 
137
  except Exception as e:
138
- gr.Warning(f"gTTS Error: {e}")
139
  return None
140
 
 
141
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
142
- gr.Markdown("<h1 style='font-size: 3em; text-align: center;'>🚀 Ready Set Hire</h1>")
143
-
 
144
  with gr.Tab("📄 Resume Analysis"):
145
  with gr.Row():
146
  with gr.Column():
147
- resume_upload = gr.File(label="📄 Upload Resume (PDF)", file_types=[".pdf"])
148
- process_btn = gr.Button("🔍 Analyze Resume", variant="primary")
 
 
 
 
149
  with gr.Column():
150
- resume_summary = gr.Textbox(label="📝 Resume Summary", lines=10)
151
- hear_summary_btn = gr.Button("🔊 Hear Summary")
152
- summary_audio = gr.Audio(visible=True)
153
- ats_score = gr.Textbox(label="📊 ATS Compatibility Score", interactive=False)
154
- process_btn.click(fn=process_resume, inputs=resume_upload, outputs=[resume_summary, ats_score])
155
- hear_summary_btn.click(fn=gtts_speak, inputs=resume_summary, outputs=summary_audio)
156
-
 
 
 
 
 
157
  with gr.Tab("🎤 Mock Interview"):
158
  with gr.Row():
159
  with gr.Column():
160
- audio_input = gr.Audio(
161
- label="🎤 Record Your Response",
162
- sources=["microphone"],
163
- type="filepath",
164
- interactive=True
165
- )
166
- transcribe_btn = gr.Button("📝 Transcribe Response")
167
- question_box = gr.Textbox(label="❓ Current Question")
168
- generate_btn = gr.Button("🤖 Generate New Question")
169
- gtts_question_btn = gr.Button("🔊 Hear Question")
170
- question_audio = gr.Audio(visible=True)
171
  with gr.Column():
172
- transcription = gr.Textbox(label="💬 Your Response")
173
- evaluation = gr.Textbox(label="📝 Feedback", lines=8)
174
- gtts_feedback_btn = gr.Button("🔊 Hear Feedback")
175
- feedback_audio = gr.Audio(visible=True)
176
-
177
- transcribe_btn.click(fn=transcribe_audio, inputs=audio_input, outputs=transcription)
178
- generate_btn.click(fn=generate_question, inputs=resume_summary, outputs=question_box)
179
- transcription.change(fn=evaluate_response, inputs=[question_box, transcription], outputs=evaluation)
180
- gtts_question_btn.click(fn=gtts_speak, inputs=question_box, outputs=question_audio)
181
- gtts_feedback_btn.click(fn=gtts_speak, inputs=evaluation, outputs=feedback_audio)
 
 
 
 
 
 
 
 
 
 
 
 
 
182
 
183
- gr.Markdown("""
184
- <div style='text-align:center; margin-top:2em; color:gray'>
185
- 🚀 Built by Cognify.AI
186
- </div>
187
- """)
188
 
189
  if __name__ == "__main__":
190
  demo.launch()
 
import gradio as gr
import os
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from groq import Groq
from dotenv import load_dotenv
from faster_whisper import WhisperModel
from elevenlabs.client import ElevenLabs
from elevenlabs import play
import tempfile

# Load environment variables from a local .env file
load_dotenv()

# SECURITY FIX: never hard-code API keys in source — the previous revision
# committed live GROQ/ElevenLabs secrets. Read them from the environment
# instead (the leaked keys must also be revoked/rotated upstream).
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")

# Fail fast with an actionable message if either key is missing.
if not GROQ_API_KEY or not ELEVENLABS_API_KEY:
    raise EnvironmentError(
        "Missing API keys. Please create a .env file with GROQ_API_KEY and ELEVENLABS_API_KEY."
    )

# Initialize API clients
groq_client = Groq(api_key=GROQ_API_KEY)
elevenlabs_client = ElevenLabs(api_key=ELEVENLABS_API_KEY)

# Initialize the local Whisper speech-to-text model (CPU, int8-quantized "small")
whisper_model = WhisperModel("small", device="cpu", compute_type="int8")
28
def summarize_resume(resume_text):
    """Generate a concise summary of key resume points via the Groq LLM.

    Args:
        resume_text: Plain text extracted from the uploaded resume PDF.

    Returns:
        str: Model-generated summary covering role, experience, skills,
        education and notable achievements.
    """
    # Only the first 3000 characters are sent, keeping the prompt within
    # a safe token budget for the model.
    prompt = f"""Create a concise summary of this resume highlighting:
1. Professional title/role
2. Years of experience
3. Core skills/competencies
4. Education background
5. Notable achievements

Resume:
{resume_text[:3000]}... [truncated]"""

    # Low temperature: summaries should be stable and factual.
    response = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama3-70b-8192",
        temperature=0.3,
    )
    return response.choices[0].message.content
46
 
47
  def calculate_ats_score(resume_text):
48
+ """Calculate ATS score based on resume content"""
49
  prompt = f"""Analyze this resume and calculate an ATS score (0-100) considering:
50
  1. Keyword optimization (20 pts)
51
  2. Section organization (20 pts)
52
  3. Experience quality (20 pts)
53
  4. Education completeness (20 pts)
54
  5. Readability (20 pts)
55
+
56
  Return ONLY the numerical score and nothing else.
57
+
58
  Resume:
59
  {resume_text[:3000]}... [truncated]"""
60
+
61
  response = groq_client.chat.completions.create(
62
  messages=[{"role": "user", "content": prompt}],
63
  model="llama3-70b-8192",
 
66
  try:
67
  return int(response.choices[0].message.content.strip())
68
  except:
69
+ return 50 # Default if parsing fails
70
 
71
def process_resume(file):
    """Load an uploaded PDF resume, index it, and return (summary, ATS label)."""
    try:
        # Extract the PDF pages and split them into overlapping chunks.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            separators=["\n\n", "\n", " ", ""],
        )
        pages = PyPDFLoader(file.name).load()
        chunks = splitter.split_documents(pages)

        # Build and persist a FAISS vector index over the chunks.
        embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vector_store = FAISS.from_documents(chunks, embedder)
        vector_store.save_local("resume_index")

        # Recombine the chunk text and produce the two UI outputs.
        full_text = "\n".join(chunk.page_content for chunk in chunks)
        gr.Info("✅ Resume processed successfully!")
        summary = summarize_resume(full_text)
        score_label = f"ATS Score: {calculate_ats_score(full_text)}/100"
        return summary, score_label

    except Exception as e:
        gr.Warning(f"❌ Error: {str(e)}")
        return f"Error: {str(e)}", "ATS Score: N/A"
94
 
95
def transcribe_audio(audio_path):
    """Convert recorded speech to text using the local Whisper model.

    Args:
        audio_path: Filesystem path to the recorded clip, or a falsy value
            (None/"") when the user has not recorded anything.

    Returns:
        str: The transcribed text, or a friendly notice when no audio exists.
    """
    # FIX: restore the missing-audio guard (dropped in this revision) —
    # without it, whisper_model.transcribe(None) raises when the user
    # clicks "Transcribe" before recording.
    if not audio_path:
        return "No audio recorded"
    segments, _ = whisper_model.transcribe(audio_path)
    return " ".join(segment.text for segment in segments)
99
 
100
def generate_question(resume_text):
    """Generate one general, open-ended interview question from the resume.

    Args:
        resume_text: Resume text (the UI wires the generated summary here).

    Returns:
        str: A single conversational interview question.
    """
    # Only the first 2000 characters are sent to keep the prompt compact.
    prompt = f"""Generate one general interview question focusing on:
- Teamwork experiences
- Challenges overcome
- Learning experiences
- Career motivations
- Problem-solving examples

Make it conversational and open-ended.

Resume Excerpt:
{resume_text[:2000]}... [truncated]"""

    # Higher temperature so repeated clicks yield varied questions.
    response = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama3-70b-8192",
        temperature=0.7,
    )
    return response.choices[0].message.content
120
 
121
def evaluate_response(question, response_text):
    """Score an interview answer and suggest improvements.

    Args:
        question: The interview question that was asked.
        response_text: The candidate's transcribed answer.

    Returns:
        str: LLM feedback with 1-5 ratings (clarity, confidence, relevance)
        plus suggested improvements.
    """
    prompt = f"""Evaluate this interview response on:
1. Clarity (1-5)
2. Confidence (1-5)
3. Relevance (1-5)
4. Suggested improvements

Question: {question}
Response: {response_text}"""

    # Low temperature so the feedback is consistent rather than creative.
    evaluation = groq_client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama3-70b-8192",
        temperature=0.2,
    )
    return evaluation.choices[0].message.content
138
 
139
def speak_feedback(text):
    """Synthesize spoken audio for the given feedback text.

    Returns:
        str | None: Path to a temporary .mp3 file with the generated audio,
        or None when the text is empty or synthesis fails.
    """
    try:
        if not text.strip():
            raise ValueError("Empty feedback text")

        stream = elevenlabs_client.generate(
            text=text,
            voice="Rachel",
            model="eleven_monolingual_v2"
        )

        # Stream the audio chunks into a temp file the gr.Audio
        # component can play back.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as out:
            for piece in stream:
                if piece:
                    out.write(piece)
            result_path = out.name

        return result_path
    except Exception as e:
        gr.Warning(f"TTS Error: {str(e)}")
        return None
162
 
163
# Gradio Interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## Ready Set Hire")
    gr.Markdown("Upload your resume and practice general interview questions with AI feedback")

    with gr.Tab("📄 Resume Analysis"):
        with gr.Row():
            with gr.Column():
                resume_upload = gr.File(
                    label="Upload Resume (PDF)",
                    file_types=[".pdf"],
                    elem_id="resume-upload"
                )
                process_btn = gr.Button("Analyze Resume", variant="primary")
            with gr.Column():
                resume_summary = gr.Textbox(label="Resume Summary", lines=10)
                ats_score = gr.Textbox(
                    label="ATS Compatibility Score",
                    interactive=False,
                    elem_classes=["ats-score"]
                )
        process_btn.click(
            fn=process_resume,
            inputs=resume_upload,
            outputs=[resume_summary, ats_score]
        )

    with gr.Tab("🎤 Mock Interview"):
        with gr.Row():
            with gr.Column():
                audio_input = gr.Audio(sources=["microphone"], type="filepath")
                transcribe_btn = gr.Button("Transcribe Response", variant="primary")
                question_box = gr.Textbox(label="Current Question")
                generate_btn = gr.Button("Generate New Question")
            with gr.Column():
                transcription = gr.Textbox(label="Your Response")
                evaluation = gr.Textbox(label="Feedback", lines=8)
                # Visible so the synthesized feedback can actually be played.
                feedback_audio = gr.Audio(label="Feedback Audio", visible=True)

        # Event handlers
        transcribe_btn.click(
            fn=transcribe_audio,
            inputs=audio_input,
            outputs=transcription
        )

        generate_btn.click(
            fn=generate_question,
            inputs=resume_summary,
            outputs=question_box
        )

        gr.on(
            triggers=[transcription.change],
            fn=evaluate_response,
            inputs=[question_box, transcription],
            outputs=evaluation
        )

        # FIX: feedback_audio was created but never populated and
        # speak_feedback was dead code — synthesize spoken feedback
        # whenever the written evaluation updates.
        evaluation.change(
            fn=speak_feedback,
            inputs=evaluation,
            outputs=feedback_audio
        )
222
 
223
  if __name__ == "__main__":
224
  demo.launch()