LovnishVerma committed on
Commit
1f85315
·
verified ·
1 Parent(s): ecceb5d

Update parser_logic.py

Browse files
Files changed (1) hide show
  1. parser_logic.py +54 -27
parser_logic.py CHANGED
@@ -28,17 +28,51 @@ def extract_text_from_stream(file_bytes: bytes) -> str:
28
  raise ValueError("Failed to extract text from PDF.")
29
  return text
30
 
31
- def analyze_resume(resume_text: str, job_description: str = None) -> dict:
32
- """
33
- Analyzes resume. If JD is provided, performs matching.
34
  """
35
-
36
- # Base prompt (Extraction only)
37
- base_instructions = """
38
- Extract structured data from the resume.
39
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
- # Extended prompt (Matching)
 
 
 
 
 
42
  if job_description:
43
  prompt = f"""
44
  Act as a strict AI Recruiter. Compare the Resume against the Job Description.
@@ -68,7 +102,6 @@ def analyze_resume(resume_text: str, job_description: str = None) -> dict:
68
  {resume_text[:10000]}
69
  """
70
  else:
71
- # Fallback to simple extraction if no JD
72
  prompt = f"""
73
  Extract structured data from the resume. Return JSON:
74
  {{
@@ -85,21 +118,15 @@ def analyze_resume(resume_text: str, job_description: str = None) -> dict:
85
  {resume_text[:10000]}
86
  """
87
 
88
- # Model Strategy: Try Flash first, fallback to Pro
89
- models = ['gemini-1.5-flash', 'gemini-pro']
90
-
91
- for model_name in models:
92
- try:
93
- model = genai.GenerativeModel(model_name)
94
- response = model.generate_content(prompt)
95
-
96
- # Clean JSON
97
- raw = response.text.strip()
98
- clean_json = re.sub(r'```json\s*|```', '', raw, flags=re.MULTILINE).strip()
99
- return json.loads(clean_json)
100
-
101
- except Exception as e:
102
- logger.warning(f"Model {model_name} failed: {e}")
103
- if model_name == models[-1]:
104
- return {"error": f"Analysis failed. Detail: {str(e)}"}
105
- continue
 
28
  raise ValueError("Failed to extract text from PDF.")
29
  return text
30
 
31
def get_available_model_name():
    """
    Dynamically find a usable generative model for the current API key.

    Queries ``genai.list_models()`` and keeps only models that support the
    ``generateContent`` method. A known list of preferred models is checked
    first; if none of them is available, the first usable model is returned
    as a fallback.

    Returns:
        str | None: The fully-qualified model name (e.g.
        ``"models/gemini-1.5-flash"``), or ``None`` if listing fails or no
        model supports content generation.
    """
    try:
        # Only models that support generateContent are usable for analysis.
        available_models = [
            m.name
            for m in genai.list_models()
            if 'generateContent' in m.supported_generation_methods
        ]

        if not available_models:
            logger.error("No models found.")
            return None

        # Priority list: Try to find these specific powerful models first
        preferred_order = [
            "models/gemini-1.5-flash",
            "models/gemini-1.5-pro",
            "models/gemini-pro",
            "models/gemini-1.0-pro",
        ]

        # 1. Check if any preferred model is in the available list.
        #    Use a set for O(1) membership instead of repeated list scans.
        available = set(available_models)
        for preferred in preferred_order:
            if preferred in available:
                logger.info(f"Selected Preferred Model: {preferred}")
                return preferred

        # 2. If none of the preferred ones exist, take the first available one
        fallback = available_models[0]
        logger.warning(f"Preferred models missing. Falling back to: {fallback}")
        return fallback

    except Exception as e:
        # list_models can fail on auth/network errors; callers treat None
        # as "no model available" rather than crashing.
        logger.error(f"Error listing models: {e}")
        return None
68
+ def analyze_resume(resume_text: str, job_description: str = None) -> dict:
69
 
70
+ # 1. FIND A WORKING MODEL (The Critical Fix)
71
+ model_name = get_available_model_name()
72
+ if not model_name:
73
+ return {"error": "CRITICAL: No available AI models found for this API Key."}
74
+
75
+ # 2. CONSTRUCT PROMPT
76
  if job_description:
77
  prompt = f"""
78
  Act as a strict AI Recruiter. Compare the Resume against the Job Description.
 
102
  {resume_text[:10000]}
103
  """
104
  else:
 
105
  prompt = f"""
106
  Extract structured data from the resume. Return JSON:
107
  {{
 
118
  {resume_text[:10000]}
119
  """
120
 
121
+ # 3. GENERATE CONTENT
122
+ try:
123
+ model = genai.GenerativeModel(model_name)
124
+ response = model.generate_content(prompt)
125
+
126
+ raw = response.text.strip()
127
+ clean_json = re.sub(r'```json\s*|```', '', raw, flags=re.MULTILINE).strip()
128
+ return json.loads(clean_json)
129
+
130
+ except Exception as e:
131
+ logger.error(f"Analysis failed with model {model_name}: {e}")
132
+ return {"error": f"Analysis failed using {model_name}. Detail: {str(e)}"}