rohit8 commited on
Commit
07c2bee
·
verified ·
1 Parent(s): 1d7561a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -70
app.py CHANGED
@@ -1,73 +1,83 @@
1
  # app.py - Fixed version with proper adapter loading
2
  import gradio as gr
3
  import torch
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
  from peft import PeftModel, PeftConfig
6
  import os
7
 
8
  print("🚀 ATS Resume Optimizer - Starting...")
9
 
10
- # Load model
11
- print("\n📥 Loading tokenizer...")
12
- tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
13
- tokenizer.pad_token = tokenizer.eos_token
14
 
15
- print("📥 Loading base model (this takes 2-3 minutes)...")
16
- base_model = AutoModelForCausalLM.from_pretrained(
17
- "mistralai/Mistral-7B-Instruct-v0.2",
18
- torch_dtype=torch.float16,
19
- device_map="auto",
20
- low_cpu_mem_usage=True,
21
- )
22
 
23
- print("📥 Loading fine-tuned adapters...")
24
  try:
25
- # Try loading with PeftConfig first
26
  peft_config = PeftConfig.from_pretrained(".")
27
- print(f"✅ Adapter config loaded: {peft_config.peft_type}")
28
 
29
- # Load the model with adapters
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  model = PeftModel.from_pretrained(
31
- base_model,
32
  ".",
33
- is_trainable=False,
34
- config=peft_config
35
  )
36
-
37
- # Merge adapters with base model for faster inference
38
- print("🔄 Merging adapters with base model...")
39
- model = model.merge_and_unload()
40
-
41
  model.eval()
 
42
  print("✅ Fine-tuned model loaded successfully!")
43
  MODEL_LOADED = True
44
 
45
  except Exception as e:
46
- print(f"⚠️ Could not load fine-tuned adapters: {e}")
47
- print("📝 This means the demo will use the base Mistral model")
48
- print(" (not your fine-tuned version)")
49
- model = base_model
50
- model.eval()
 
 
 
 
 
 
 
 
 
51
  MODEL_LOADED = False
52
 
53
  def analyze_resume(resume_text, job_description):
54
  """Generate ATS analysis"""
55
 
56
  if not MODEL_LOADED:
57
- return """⚠️ **Fine-Tuned Model Not Available**
58
 
59
- The adapter files are not loading correctly. The demo is currently using
60
- the base Mistral-7B model (not fine-tuned for ATS analysis).
61
 
62
- **For the developer:**
63
- Check that adapter files are uploaded correctly:
64
- - adapter_config.json
65
- - adapter_model.safetensors (or .bin)
66
 
67
- The adapters were saved from your training but aren't loading properly
68
- due to a path mismatch in the model structure.
69
 
70
- **Temporary workaround:** Use the Colab Gradio demo instead, which works perfectly.
71
  """
72
 
73
  if not resume_text or len(resume_text.strip()) < 50:
@@ -92,7 +102,10 @@ JOB DESCRIPTION:
92
 
93
  try:
94
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
95
- inputs = {k: v.to(model.device) for k, v in inputs.items()}
 
 
 
96
 
97
  with torch.no_grad():
98
  outputs = model.generate(
@@ -102,17 +115,19 @@ JOB DESCRIPTION:
102
  top_p=0.9,
103
  do_sample=True,
104
  pad_token_id=tokenizer.eos_token_id,
 
105
  )
106
 
107
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
108
 
 
109
  if "[/INST]" in response:
110
  response = response.split("[/INST]")[1].strip()
111
 
112
  return response
113
 
114
  except Exception as e:
115
- return f"❌ Error: {str(e)}\n\nTry with a shorter resume or job description."
116
 
117
  # Sample data
118
  SAMPLE_RESUME = """Sarah Johnson
@@ -122,17 +137,27 @@ PROFESSIONAL SUMMARY
122
  Software Engineer with 3+ years of experience in full-stack development.
123
 
124
  TECHNICAL SKILLS
125
- Python, JavaScript, TypeScript, React, Node.js, Git, Docker
 
 
 
 
126
 
127
- PROFESSIONAL EXPERIENCE
128
 
129
- Software Engineer | TechCorp Inc. | 2021 - Present
130
  • Built web applications serving 100K+ users
131
  • Improved performance by 40%
132
- • Worked in Agile teams
 
 
 
 
 
 
133
 
134
  EDUCATION
135
- BS in Computer Science | State University | 2020
136
  """
137
 
138
  SAMPLE_JOB = """Position: Senior Full Stack Developer
@@ -142,11 +167,16 @@ Required Skills:
142
  • Node.js, Express
143
  • MongoDB or PostgreSQL
144
  • REST API design
145
- • Git, Docker
146
- • AWS or cloud platforms
147
  • Agile methodologies
148
 
149
  Experience: 3-5 years
 
 
 
 
 
 
150
  """
151
 
152
  # Gradio interface
@@ -155,31 +185,27 @@ with gr.Blocks(title="ATS Resume Optimizer", theme=gr.themes.Soft()) as demo:
155
  gr.Markdown("""
156
  # 🎯 ATS Resume Optimizer
157
 
158
- ### Fine-Tuned Mistral-7B for Resume Analysis
159
 
 
 
 
 
160
  """)
161
 
162
  if not MODEL_LOADED:
163
  gr.Markdown("""
164
- ⚠️ **Note:** The fine-tuned adapter is not loading properly.
165
- This demo is using the base model. For the full fine-tuned experience,
166
- please use the [Colab demo](#).
167
  """)
168
 
169
- gr.Markdown("""
170
- Get instant feedback on your resume:
171
- - ✅ **ATS Compatibility Score**
172
- - 🔍 **Missing Keywords**
173
- - 💡 **Optimization Suggestions**
174
-
175
- ---
176
- """)
177
 
178
  with gr.Row():
179
  with gr.Column():
180
- gr.Markdown("### 📄 Resume")
181
  resume_input = gr.Textbox(
182
- label="Paste Your Resume",
183
  placeholder="Copy and paste your resume...",
184
  lines=12,
185
  value=SAMPLE_RESUME
@@ -205,26 +231,32 @@ with gr.Blocks(title="ATS Resume Optimizer", theme=gr.themes.Soft()) as demo:
205
 
206
  gr.Markdown("""
207
  ---
208
- ### 💡 About This Project
 
 
 
 
 
209
 
210
- This tool was built by fine-tuning Mistral-7B using QLoRA on 2,000 resume-job pairs.
211
 
212
- **Technical Details:**
213
- - Model: Mistral-7B-Instruct-v0.2 + QLoRA adapters
214
- - Training: 250 steps, final loss 0.254
215
- - Framework: Transformers, PEFT, Gradio
216
 
217
- **Note:** First analysis may take 2-3 minutes as the model loads into memory.
218
 
219
  ---
220
 
221
- **Created by:** [Your Name] | [GitHub](#) | [LinkedIn](#)
 
222
  """)
223
 
 
224
  analyze_btn.click(
225
  fn=analyze_resume,
226
  inputs=[resume_input, job_input],
227
  outputs=output
228
  )
229
 
 
230
  demo.launch()
 
1
  # app.py - Fixed version with proper adapter loading
2
  import gradio as gr
3
  import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
5
  from peft import PeftModel, PeftConfig
6
  import os
7
 
8
  print("🚀 ATS Resume Optimizer - Starting...")
9
 
10
+ # Check adapter files
11
+ print("\n📋 Files in current directory:")
12
+ for f in os.listdir("."):
13
+ print(f" - {f}")
14
 
15
+ # Load model with proper config
16
+ print("\n📥 Loading model configuration...")
 
 
 
 
 
17
 
 
18
  try:
19
+ # Load PEFT config first to understand the adapter structure
20
  peft_config = PeftConfig.from_pretrained(".")
21
+ print("✅ Adapter config loaded")
22
 
23
+ # Load tokenizer
24
+ print("\n📥 Loading tokenizer...")
25
+ tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
26
+ tokenizer.pad_token = tokenizer.eos_token
27
+ print("✅ Tokenizer loaded")
28
+
29
+ # Load base model
30
+ print("\n📥 Loading base model (this takes 2-3 minutes)...")
31
+ model = AutoModelForCausalLM.from_pretrained(
32
+ peft_config.base_model_name_or_path,
33
+ torch_dtype=torch.float16,
34
+ device_map="auto",
35
+ low_cpu_mem_usage=True,
36
+ )
37
+ print("✅ Base model loaded")
38
+
39
+ # Load adapters with proper config
40
+ print("\n📥 Loading your fine-tuned adapters...")
41
  model = PeftModel.from_pretrained(
42
+ model,
43
  ".",
44
+ config=peft_config,
 
45
  )
 
 
 
 
 
46
  model.eval()
47
+
48
  print("✅ Fine-tuned model loaded successfully!")
49
  MODEL_LOADED = True
50
 
51
  except Exception as e:
52
+ print(f"❌ Error loading adapters: {e}")
53
+ print("\n⚠️ Falling back to base model only")
54
+
55
+ # Fallback to base model
56
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
57
+ tokenizer.pad_token = tokenizer.eos_token
58
+
59
+ model = AutoModelForCausalLM.from_pretrained(
60
+ "mistralai/Mistral-7B-Instruct-v0.2",
61
+ torch_dtype=torch.float16,
62
+ device_map="auto",
63
+ low_cpu_mem_usage=True,
64
+ )
65
+
66
  MODEL_LOADED = False
67
 
68
  def analyze_resume(resume_text, job_description):
69
  """Generate ATS analysis"""
70
 
71
  if not MODEL_LOADED:
72
+ return """⚠️ **Using Base Model Only**
73
 
74
+ The fine-tuned adapters couldn't be loaded. The model will still work but responses may be less specific to ATS optimization.
 
75
 
76
+ To see the full fine-tuned version, please contact the developer.
 
 
 
77
 
78
+ ---
 
79
 
80
+ **Analyzing with base Mistral-7B...**
81
  """
82
 
83
  if not resume_text or len(resume_text.strip()) < 50:
 
102
 
103
  try:
104
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
105
+
106
+ # Move to device
107
+ if torch.cuda.is_available():
108
+ inputs = {k: v.cuda() for k, v in inputs.items()}
109
 
110
  with torch.no_grad():
111
  outputs = model.generate(
 
115
  top_p=0.9,
116
  do_sample=True,
117
  pad_token_id=tokenizer.eos_token_id,
118
+ eos_token_id=tokenizer.eos_token_id,
119
  )
120
 
121
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
122
 
123
+ # Extract response
124
  if "[/INST]" in response:
125
  response = response.split("[/INST]")[1].strip()
126
 
127
  return response
128
 
129
  except Exception as e:
130
+ return f"❌ Error: {str(e)}\n\nPlease try with shorter text."
131
 
132
  # Sample data
133
  SAMPLE_RESUME = """Sarah Johnson
 
137
  Software Engineer with 3+ years of experience in full-stack development.
138
 
139
  TECHNICAL SKILLS
140
+ Languages: Python, JavaScript, TypeScript
141
+ Frontend: React, HTML5, CSS3
142
+ Backend: Node.js, Express
143
+ Databases: PostgreSQL, MongoDB
144
+ Tools: Git, Docker, AWS
145
 
146
+ EXPERIENCE
147
 
148
+ Software Engineer | TechCorp | 2021 - Present
149
  • Built web applications serving 100K+ users
150
  • Improved performance by 40%
151
+ • Implemented CI/CD pipelines
152
+ • Collaborated in Agile teams
153
+
154
+ Junior Developer | StartupXYZ | 2020 - 2021
155
+ • Developed REST APIs
156
+ • Created responsive UIs
157
+ • Fixed bugs and added features
158
 
159
  EDUCATION
160
+ BS Computer Science | State University | 2020
161
  """
162
 
163
  SAMPLE_JOB = """Position: Senior Full Stack Developer
 
167
  • Node.js, Express
168
  • MongoDB or PostgreSQL
169
  • REST API design
170
+ • Git, Docker, AWS

171
  • Agile methodologies
172
 
173
  Experience: 3-5 years
174
+
175
+ Responsibilities:
176
+ • Design and develop web applications
177
+ • Write clean, maintainable code
178
+ • Code reviews and mentoring
179
+ • Architecture decisions
180
  """
181
 
182
  # Gradio interface
 
185
  gr.Markdown("""
186
  # 🎯 ATS Resume Optimizer
187
 
188
+ ### AI-Powered Resume Analysis
189
 
190
+ Get instant feedback on your resume:
191
+ - ✅ **ATS Compatibility Score**
192
+ - 🔍 **Missing Keywords**
193
+ - 💡 **Optimization Suggestions**
194
  """)
195
 
196
  if not MODEL_LOADED:
197
  gr.Markdown("""
198
+ > ⚠️ **Note:** Currently running with base model. Fine-tuned adapters couldn't be loaded.
199
+ > The tool will still provide useful analysis but may be less specific.
 
200
  """)
201
 
202
+ gr.Markdown("---")
 
 
 
 
 
 
 
203
 
204
  with gr.Row():
205
  with gr.Column():
206
+ gr.Markdown("### 📄 Your Resume")
207
  resume_input = gr.Textbox(
208
+ label="Paste Resume",
209
  placeholder="Copy and paste your resume...",
210
  lines=12,
211
  value=SAMPLE_RESUME
 
231
 
232
  gr.Markdown("""
233
  ---
234
+ ### 💡 How to Use
235
+
236
+ 1. **Paste your resume** in the left box (or try the sample)
237
+ 2. **Paste job description** in the right box
238
+ 3. Click **"Analyze Resume"**
239
+ 4. Wait 1-2 minutes for analysis
240
 
241
+ ### 🔬 About This Tool
242
 
243
+ Built with Mistral-7B language model for intelligent resume analysis.
244
+ Identifies missing keywords and provides actionable suggestions.
 
 
245
 
246
+ **First analysis takes longer** as the model loads into memory.
247
 
248
  ---
249
 
250
+ 💻 **Tech Stack:** PyTorch • Transformers • PEFT • Gradio
251
+ 🔗 **Links:** [GitHub](#) | [LinkedIn](#) | [Portfolio](#)
252
  """)
253
 
254
+ # Event
255
  analyze_btn.click(
256
  fn=analyze_resume,
257
  inputs=[resume_input, job_input],
258
  outputs=output
259
  )
260
 
261
+ print("\n🚀 Launching Gradio interface...")
262
  demo.launch()