File size: 7,522 Bytes
1d7561a
614bfad
 
07c2bee
1d7561a
bfb7c63
614bfad
bfb7c63
 
07c2bee
 
 
 
c0f64e6
07c2bee
 
c0f64e6
bfb7c63
07c2bee
1d7561a
07c2bee
1d7561a
07c2bee
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7c63
07c2bee
1d7561a
07c2bee
bfb7c63
 
07c2bee
1d7561a
bfb7c63
1d7561a
bfb7c63
07c2bee
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7c63
c0f64e6
 
 
614bfad
bfb7c63
07c2bee
bfb7c63
07c2bee
1d7561a
07c2bee
1d7561a
07c2bee
bfb7c63
07c2bee
bfb7c63
 
c0f64e6
 
614bfad
c0f64e6
 
614bfad
1d7561a
bfb7c63
 
 
c0f64e6
614bfad
c0f64e6
bfb7c63
614bfad
c0f64e6
bfb7c63
614bfad
c0f64e6
614bfad
bfb7c63
 
07c2bee
 
 
 
bfb7c63
 
 
 
 
 
 
 
 
07c2bee
bfb7c63
 
 
 
07c2bee
bfb7c63
 
 
 
c0f64e6
bfb7c63
07c2bee
c0f64e6
 
bfb7c63
 
c0f64e6
bfb7c63
1d7561a
c0f64e6
bfb7c63
07c2bee
 
 
 
 
bfb7c63
07c2bee
bfb7c63
07c2bee
1d7561a
 
07c2bee
 
 
 
 
 
 
c0f64e6
 
07c2bee
c0f64e6
 
bfb7c63
 
 
 
 
 
 
07c2bee
bfb7c63
 
 
07c2bee
 
 
 
 
 
c0f64e6
614bfad
c0f64e6
bfb7c63
 
 
 
 
07c2bee
1d7561a
07c2bee
 
 
 
1d7561a
 
 
 
07c2bee
 
1d7561a
bfb7c63
07c2bee
bfb7c63
 
 
07c2bee
bfb7c63
07c2bee
1d7561a
bfb7c63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
07c2bee
 
 
 
 
 
bfb7c63
07c2bee
bfb7c63
07c2bee
 
bfb7c63
07c2bee
bfb7c63
 
 
07c2bee
 
bfb7c63
 
07c2bee
bfb7c63
 
 
 
 
614bfad
07c2bee
614bfad
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
# app.py - Fixed version with proper adapter loading
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel, PeftConfig
import os

print("πŸš€ ATS Resume Optimizer - Starting...")

# Check adapter files: listing the working directory helps diagnose missing
# adapter artifacts (adapter_config.json etc.) in Space logs.
print("\nπŸ“‹ Files in current directory:")
for f in os.listdir("."):
    print(f"  - {f}")

# Load model with proper config.
# This block sets three module-level globals consumed by the rest of the app:
#   tokenizer, model, MODEL_LOADED (True only if the fine-tuned adapters load).
print("\nπŸ“₯ Loading model configuration...")

try:
    # Load PEFT config first to understand the adapter structure; it also
    # tells us which base model the adapters were trained on.
    peft_config = PeftConfig.from_pretrained(".")
    print("βœ… Adapter config loaded")

    # Load tokenizer from the same base model the adapters expect.
    print("\nπŸ“₯ Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
    # Mistral-style tokenizers ship without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token
    print("βœ… Tokenizer loaded")

    # Load base model in fp16 with device_map="auto" so weights are sharded
    # across available devices; low_cpu_mem_usage reduces peak RAM while loading.
    print("\nπŸ“₯ Loading base model (this takes 2-3 minutes)...")
    model = AutoModelForCausalLM.from_pretrained(
        peft_config.base_model_name_or_path,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    print("βœ… Base model loaded")

    # Load adapters with proper config; wraps the base model with the LoRA
    # weights found in the current directory.
    print("\nπŸ“₯ Loading your fine-tuned adapters...")
    model = PeftModel.from_pretrained(
        model,
        ".",
        config=peft_config,
    )
    # Inference-only: disable dropout etc.
    model.eval()

    print("βœ… Fine-tuned model loaded successfully!")
    MODEL_LOADED = True

# NOTE(review): broad except is a deliberate best-effort fallback — any
# adapter-loading failure (missing files, version mismatch) degrades to the
# plain base model instead of crashing the Space at startup.
except Exception as e:
    print(f"❌ Error loading adapters: {e}")
    print("\n⚠️ Falling back to base model only")

    # Fallback to base model (hard-coded: the PEFT config may be the thing
    # that failed to load, so we cannot rely on peft_config here).
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2",
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )

    MODEL_LOADED = False

def analyze_resume(resume_text, job_description):
    """Generate an ATS compatibility analysis for a resume against a job posting.

    Args:
        resume_text: Raw resume text; truncated to 1500 chars to fit context.
        job_description: Raw job posting text; truncated to 800 chars.

    Returns:
        A markdown-formatted analysis string. Returns a short warning string
        when either input is too short. When only the base model is available
        (MODEL_LOADED is False), the analysis is still produced, prefixed
        with a notice about reduced specificity.
    """
    # Validate inputs first, before any model work (cheap checks up front).
    if not resume_text or len(resume_text.strip()) < 50:
        return "⚠️ Please enter a resume (at least 50 characters)"

    if not job_description or len(job_description.strip()) < 30:
        return "⚠️ Please enter a job description (at least 30 characters)"

    # BUG FIX: the original returned early here with a message promising
    # "Analyzing with base Mistral-7B..." but never ran any analysis, even
    # though the fallback base model was loaded for exactly this purpose.
    # Keep the warning as a prefix and proceed with generation.
    notice = ""
    if not MODEL_LOADED:
        notice = (
            "⚠️ **Using Base Model Only**\n\n"
            "The fine-tuned adapters couldn't be loaded. The analysis below "
            "comes from base Mistral-7B and may be less specific to ATS "
            "optimization.\n\n---\n\n"
        )

    # Truncate to fit the model's context window (see max_length=2048 below).
    resume_text = resume_text[:1500]
    job_description = job_description[:800]

    # Mistral-Instruct chat format: [INST] ... [/INST].
    prompt = f"""<s>[INST] Analyze this resume for ATS compatibility with the job description. Provide an ATS score, identify missing keywords, and suggest improvements.

RESUME:
{resume_text}

JOB DESCRIPTION:
{job_description} [/INST]

"""

    try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)

        # Move input tensors to GPU when available; the model itself was
        # placed via device_map="auto" at load time.
        if torch.cuda.is_available():
            inputs = {k: v.cuda() for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=800,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # The decode includes the echoed prompt; keep only the model's answer.
        if "[/INST]" in response:
            response = response.split("[/INST]")[1].strip()

        return notice + response

    # Best-effort: surface generation failures (e.g. OOM) to the UI instead
    # of crashing the Gradio handler.
    except Exception as e:
        return f"❌ Error: {str(e)}\n\nPlease try with shorter text."

# Sample data pre-filled into the UI textboxes so visitors can try the tool
# with one click. The resume deliberately overlaps the job posting's skills
# (React, Node.js, Docker, AWS, Agile) so the demo yields a meaningful score.
SAMPLE_RESUME = """Sarah Johnson
Email: sarah.j@email.com | Phone: (555) 234-5678

PROFESSIONAL SUMMARY
Software Engineer with 3+ years of experience in full-stack development.

TECHNICAL SKILLS
Languages: Python, JavaScript, TypeScript
Frontend: React, HTML5, CSS3
Backend: Node.js, Express
Databases: PostgreSQL, MongoDB
Tools: Git, Docker, AWS

EXPERIENCE

Software Engineer | TechCorp | 2021 - Present
β€’ Built web applications serving 100K+ users
β€’ Improved performance by 40%
β€’ Implemented CI/CD pipelines
β€’ Collaborated in Agile teams

Junior Developer | StartupXYZ | 2020 - 2021
β€’ Developed REST APIs
β€’ Created responsive UIs
β€’ Fixed bugs and added features

EDUCATION
BS Computer Science | State University | 2020
"""

# Matching sample job posting for the right-hand textbox.
SAMPLE_JOB = """Position: Senior Full Stack Developer

Required Skills:
β€’ React, TypeScript, JavaScript
β€’ Node.js, Express
β€’ MongoDB or PostgreSQL
β€’ REST API design
β€’ Git, Docker, AWS
β€’ Agile methodologies

Experience: 3-5 years

Responsibilities:
β€’ Design and develop web applications
β€’ Write clean, maintainable code
β€’ Code reviews and mentoring
β€’ Architecture decisions
"""

# Gradio interface: two side-by-side textboxes (resume / job description),
# one analyze button, and a result textbox wired to analyze_resume.
with gr.Blocks(title="ATS Resume Optimizer", theme=gr.themes.Soft()) as demo:
    
    gr.Markdown("""
    # 🎯 ATS Resume Optimizer
    
    ### AI-Powered Resume Analysis
    
    Get instant feedback on your resume:
    - βœ… **ATS Compatibility Score**
    - πŸ” **Missing Keywords**
    - πŸ’‘ **Optimization Suggestions**
    """)
    
    # MODEL_LOADED is evaluated once at build time; if the adapters failed to
    # load at startup this banner is baked into the page.
    if not MODEL_LOADED:
        gr.Markdown("""
        > ⚠️ **Note:** Currently running with base model. Fine-tuned adapters couldn't be loaded.
        > The tool will still provide useful analysis but may be less specific.
        """)
    
    gr.Markdown("---")
    
    # Input area: resume on the left, job description on the right.
    with gr.Row():
        with gr.Column():
            gr.Markdown("### πŸ“„ Your Resume")
            resume_input = gr.Textbox(
                label="Paste Resume",
                placeholder="Copy and paste your resume...",
                lines=12,
                value=SAMPLE_RESUME
            )
            
        with gr.Column():
            gr.Markdown("### πŸ’Ό Job Description")
            job_input = gr.Textbox(
                label="Paste Job Description",
                placeholder="Copy and paste job description...",
                lines=12,
                value=SAMPLE_JOB
            )
    
    analyze_btn = gr.Button("πŸš€ Analyze Resume", variant="primary", size="lg")
    
    gr.Markdown("### πŸ“Š Analysis Results")
    output = gr.Textbox(
        label="ATS Analysis",
        lines=15,
        show_copy_button=True
    )
    
    gr.Markdown("""
    ---
    ### πŸ’‘ How to Use
    
    1. **Paste your resume** in the left box (or try the sample)
    2. **Paste job description** in the right box
    3. Click **"Analyze Resume"**
    4. Wait 1-2 minutes for analysis
    
    ### πŸ”¬ About This Tool
    
    Built with Mistral-7B language model for intelligent resume analysis.
    Identifies missing keywords and provides actionable suggestions.
    
    **First analysis takes longer** as the model loads into memory.
    
    ---
    
    πŸ’» **Tech Stack:** PyTorch β€’ Transformers β€’ PEFT β€’ Gradio  
    πŸ”— **Links:** [GitHub](#) | [LinkedIn](#) | [Portfolio](#)
    """)
    
    # Event: clicking the button runs analyze_resume on both inputs and
    # writes the returned string into the output textbox.
    analyze_btn.click(
        fn=analyze_resume,
        inputs=[resume_input, job_input],
        outputs=output
    )

print("\nπŸš€ Launching Gradio interface...")
# Blocking call: serves the app until the process is stopped.
demo.launch()