Amal Nimmy Lal committed on
Commit
782bef2
·
2 Parent(s): b1f94b3 53157e1
Files changed (2) hide show
  1. .env.example +1 -0
  2. core/input_comp_gen.py +26 -25
.env.example CHANGED
@@ -1 +1,2 @@
1
  GEMINI_API_KEY=your_api_key_here
 
 
1
  GEMINI_API_KEY=your_api_key_here
2
+ GROQ_API_KEY=your_api_key_here
core/input_comp_gen.py CHANGED
@@ -4,13 +4,7 @@ import os
4
  import json # Added import for JSON parsing
5
  from dotenv import load_dotenv
6
  from openai import OpenAI
7
-
8
- load_dotenv()
9
- api_key = os.getenv("GEMINI_API_KEY")
10
- client = OpenAI(
11
- api_key=api_key,
12
- base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
13
- )
14
 
15
  def read_resume(file):
16
  reader = PyPDF2.PdfReader(file)
@@ -32,30 +26,37 @@ def read_job_description_txt(file):
32
  return file.read().decode("utf-8").strip()
33
 
34
  def generate_question(resume_text, job_desc_text, job_role):
35
- prompt = f"""
36
- You are an interview coach. Based on this resume and job description, generate exactly 5 interview questions for the role "{job_role}".
37
-
38
- Resume:
 
 
 
 
 
 
 
 
 
39
  {resume_text}
40
 
41
- Job Description:
42
  {job_desc_text}
43
 
44
- Generate 5 relevant interview questions. Include follow-up prompts in parentheses if needed.
45
- You must respond with ONLY a valid JSON array of strings. No explanations, no markdown, just the JSON array.
 
 
 
46
 
47
- Example format: ["Question 1 here", "Question 2 here", "Question 3 here", "Question 4 here", "Question 5 here"]
48
  """
49
- response = client.chat.completions.create(
50
- model="gemini-2.0-flash-lite",
51
- messages=[
52
- {"role": "system", "content": "You are a JSON generator. You only respond with valid JSON arrays. Never include explanations or markdown formatting."},
53
- {"role": "user", "content": prompt}
54
- ],
55
- temperature=0.7,
56
-
57
- )
58
- text = response.choices[0].message.content.strip()
59
  #st.write("Raw API Response:", text) # Debug line
60
 
61
  try:
 
4
  import json # Added import for JSON parsing
5
  from dotenv import load_dotenv
6
  from openai import OpenAI
7
+ from model import generate_response
 
 
 
 
 
 
8
 
9
  def read_resume(file):
10
  reader = PyPDF2.PdfReader(file)
 
26
  return file.read().decode("utf-8").strip()
27
 
28
  def generate_question(resume_text, job_desc_text, job_role):
29
+ system_prompt = "You are an experienced technical interviewer and JSON generator. You create professional interview questions and respond only with valid JSON arrays. Never include explanations, markdown formatting, or any text outside the JSON array."
30
+
31
+ user_prompt = f"""
32
+ You are conducting a technical interview for the "{job_role}" position.
33
+
34
+ Based on the candidate's resume and job requirements, generate exactly 5 technical interview questions that:
35
+ 1. Test relevant technical skills mentioned in the job description
36
+ 2. Assess the candidate's experience from their resume
37
+ 3. Include appropriate follow-up questions to dive deeper
38
+ 4. Progress from foundational to advanced concepts
39
+ 5. Are specific to the role and industry
40
+
41
+ Resume Background:
42
  {resume_text}
43
 
44
+ Job Requirements:
45
  {job_desc_text}
46
 
47
+ Generate 5 technical interview questions with follow-up probes. Format each question to include the main question and potential follow-up in parentheses.
48
+
49
+ Example format for a software role:
50
+ - "Explain how you implemented [specific technology from resume]. (Follow-up: What challenges did you face and how did you optimize performance?)"
51
+ - "The job requires [specific requirement]. Walk me through how you would approach this. (Follow-up: How would you handle scalability concerns?)"
52
 
53
+ You must respond with ONLY a valid JSON array of strings. No explanations, no markdown, just the JSON array.
54
  """
55
+ # Use generate_response instead of direct client call
56
+ response = generate_response(system_prompt, user_prompt, temp=0.7)
57
+
58
+
59
+ text = response.strip()
 
 
 
 
 
60
  #st.write("Raw API Response:", text) # Debug line
61
 
62
  try: