mkshari committed on
Commit
1d56429
·
verified Β·
1 Parent(s): 30b9fd5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -24
app.py CHANGED
@@ -12,35 +12,42 @@ import torch
12
  from transformers import pipeline
13
 
14
  # Initialize Models once at startup
15
- print("🚀 Initializing SETHU AI Intelligence Engine...")
16
  try:
17
- nlp = spacy.load("en_core_web_sm")
18
- except:
19
- print("📥 Downloading spaCy model...")
20
- os.system(f"{sys.executable} -m spacy download en_core_web_sm")
21
- nlp = spacy.load("en_core_web_sm")
22
-
23
- # 1. Semantic Engine (Embeddings)
24
- print("🧠 Loading Semantic Matcher (all-MiniLM-L6-v2)...")
25
- model = SentenceTransformer('all-MiniLM-L6-v2')
26
 
27
- # 2. Strategic Reasoner (Analysis)
28
- print("🤖 Loading Strategic Reasoner (FLAN-T5-Base)...")
29
  try:
30
- # Upgraded from 'small' to 'base' for better logic
31
- llm_reasoner = pipeline("text2text-generation", model="google/flan-t5-base", device_map="auto")
32
  except Exception as e:
33
  print(f"⚠️ Reasoning LLM failed: {e}")
34
  llm_reasoner = None
35
 
36
- # 3. Interview Coach (Conversational)
37
- print("🎀 Loading Interview Coach (LaMini-GPT-124M)...")
38
  try:
39
- llm_coach = pipeline("text-generation", model="MBZUAI/LaMini-GPT-124M", device_map="auto")
40
  except Exception as e:
41
  print(f"⚠️ Coach LLM failed: {e}")
42
  llm_coach = None
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  TECH_SKILLS = [
45
  "python", "javascript", "react", "fastapi", "aws", "docker", "kubernetes", "sql",
46
  "git", "ml", "nlp", "tensorflow", "pytorch", "java", "golang", "postgresql",
@@ -194,8 +201,12 @@ def main_process(resume_file, jd_text, progress=gr.Progress()):
194
  gap_skills = sorted(list(j_skills - r_skills))
195
  print(f" - Identified {len(match_skills)} matches and {len(gap_skills)} gaps.")
196
 
197
- progress(0.4, desc="📏 Printing Semantic Distance...")
198
  print("Stage 3: Embedding Computation...")
 
 
 
 
199
  emb1 = model.encode(resume_text, convert_to_tensor=True)
200
  emb2 = model.encode(jd_text, convert_to_tensor=True)
201
  score = round(util.pytorch_cos_sim(emb1, emb2).item() * 100, 1)
@@ -203,28 +214,37 @@ def main_process(resume_file, jd_text, progress=gr.Progress()):
203
 
204
  progress(0.7, desc="🤖 Syncing Neural Consensus...")
205
  print("Stage 4: LLM Ensemble Reasoning...")
206
- ensemble_insight = ""
 
207
  # Model 1: Reasoner (FLAN-T5)
208
  if llm_reasoner:
209
  try:
210
  print(" - Querying Reasoner (FLAN-T5)...")
211
  prompt_t5 = f"Analyze resume relevance. Score: {score}%. Gaps: {', '.join(gap_skills)}. Summarize fit."
212
  t5_out = llm_reasoner(prompt_t5, max_length=100)[0]['generated_text']
213
- ensemble_insight += f"**Reasoner**: {t5_out}\n\n"
214
  except Exception as e:
215
  print(f" - T5 Inference Error: {e}")
 
 
 
216
 
217
  # Model 2: Coach (LaMini)
218
  if llm_coach:
219
  try:
220
  print(" - Querying Coach (LaMini)...")
221
- prompt_lamini = f"As a career coach, give advice for this profile matching a role at {score}% match."
222
- lamini_out = llm_coach(prompt_lamini, max_length=100)[0]['generated_text']
223
- ensemble_insight += f"**Coach**: {lamini_out}"
 
 
224
  except Exception as e:
225
  print(f" - LaMini Inference Error: {e}")
 
 
 
226
 
227
- ai_analysis = ensemble_insight if ensemble_insight else f"Neural consensus reached alignment at {score}%."
228
 
229
  # Heuristic metrics
230
  print("Stage 5: Calculating Dashboard Metrics...")
 
12
  from transformers import pipeline
13
 
14
  # Initialize Models once at startup
15
+ print("🚀 [1/4] Initializing Semantic Matcher (all-MiniLM-L6-v2)...")
16
  try:
17
+ model = SentenceTransformer('all-MiniLM-L6-v2', device='cpu')
18
+ except Exception as e:
19
+ print(f"❌ Semantic Matcher failed: {e}")
20
+ model = None
 
 
 
 
 
21
 
22
+ print("🚀 [2/4] Initializing Reasoning LLM (FLAN-T5-Base)...")
 
23
  try:
24
+ # Use CPU by default for stability on Windows unless explicitly requested
25
+ llm_reasoner = pipeline("text2text-generation", model="google/flan-t5-base", device=-1)
26
  except Exception as e:
27
  print(f"⚠️ Reasoning LLM failed: {e}")
28
  llm_reasoner = None
29
 
30
+ print("🚀 [3/4] Initializing Interview Coach (LaMini-GPT)...")
 
31
  try:
32
+ llm_coach = pipeline("text-generation", model="MBZUAI/LaMini-GPT-124M", device=-1)
33
  except Exception as e:
34
  print(f"⚠️ Coach LLM failed: {e}")
35
  llm_coach = None
36
 
37
+ print("🚀 [4/4] Loading NLP Entities (spaCy)...")
38
+ try:
39
+ nlp = spacy.load("en_core_web_sm")
40
+ except:
41
+ print("📥 Downloading spaCy model...")
42
+ os.system(f"{sys.executable} -m spacy download en_core_web_sm")
43
+ try:
44
+ nlp = spacy.load("en_core_web_sm")
45
+ except:
46
+ nlp = None
47
+ print("❌ NLP Load failed completely.")
48
+
49
+ print("✨ Intelligence Engine Ready.")
50
+
51
  TECH_SKILLS = [
52
  "python", "javascript", "react", "fastapi", "aws", "docker", "kubernetes", "sql",
53
  "git", "ml", "nlp", "tensorflow", "pytorch", "java", "golang", "postgresql",
 
201
  gap_skills = sorted(list(j_skills - r_skills))
202
  print(f" - Identified {len(match_skills)} matches and {len(gap_skills)} gaps.")
203
 
204
+ progress(0.4, desc="📏 Computing Semantic Distance...")
205
  print("Stage 3: Embedding Computation...")
206
+ if not model:
207
+ print("❌ Error: Semantic model not loaded.")
208
+ return ["Model Load Error", "N/A", None, None, "The semantic matching engine failed to initialize. Try restarting.", "N/A", [], gr.update(visible=False)]
209
+
210
  emb1 = model.encode(resume_text, convert_to_tensor=True)
211
  emb2 = model.encode(jd_text, convert_to_tensor=True)
212
  score = round(util.pytorch_cos_sim(emb1, emb2).item() * 100, 1)
 
214
 
215
  progress(0.7, desc="🤖 Syncing Neural Consensus...")
216
  print("Stage 4: LLM Ensemble Reasoning...")
217
+ ensemble_insight = []
218
+
219
  # Model 1: Reasoner (FLAN-T5)
220
  if llm_reasoner:
221
  try:
222
  print(" - Querying Reasoner (FLAN-T5)...")
223
  prompt_t5 = f"Analyze resume relevance. Score: {score}%. Gaps: {', '.join(gap_skills)}. Summarize fit."
224
  t5_out = llm_reasoner(prompt_t5, max_length=100)[0]['generated_text']
225
+ ensemble_insight.append(f"**Reasoner**: {t5_out}")
226
  except Exception as e:
227
  print(f" - T5 Inference Error: {e}")
228
+ else:
229
+ print(" - Reasoner LLM not available. Skipping.")
230
+ ensemble_insight.append("**Reasoner**: Neural reasoning offline. Using heuristic fallback.")
231
 
232
  # Model 2: Coach (LaMini)
233
  if llm_coach:
234
  try:
235
  print(" - Querying Coach (LaMini)...")
236
+ prompt_lamini = f"Career coach advice for {score}% match."
237
+ lamini_out = llm_coach(prompt_lamini, max_length=100, truncation=True)[0]['generated_text']
238
+ # Clean up if output contains prompt
239
+ clean_lamini = lamini_out.split("match.")[-1].strip() if "match." in lamini_out else lamini_out
240
+ ensemble_insight.append(f"**Coach**: {clean_lamini}")
241
  except Exception as e:
242
  print(f" - LaMini Inference Error: {e}")
243
+ else:
244
+ print(" - Coach LLM not available. Skipping.")
245
+ ensemble_insight.append("**Coach**: Coaching strategy offline.")
246
 
247
+ ai_analysis = "\n\n".join(ensemble_insight) if ensemble_insight else f"Neural consensus reached alignment at {score}%."
248
 
249
  # Heuristic metrics
250
  print("Stage 5: Calculating Dashboard Metrics...")