ananttripathiak committed on
Commit
cf1be5e
·
verified ·
1 Parent(s): 08af218

Create main.py

Browse files
Files changed (1) hide show
  1. api/main.py +276 -0
api/main.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FastAPI Backend
3
+ Main API endpoints for Resume Analyzer.
4
+ """
5
+
6
+ from fastapi import FastAPI, File, UploadFile, Form, HTTPException
7
+ from fastapi.middleware.cors import CORSMiddleware
8
+ from fastapi.responses import JSONResponse
9
+ from pydantic import BaseModel
10
+ from typing import Optional, List
11
+ import os
12
+ import tempfile
13
+ import logging
14
+
15
+ from src.resume_parser import ResumeParser
16
+ from src.nlp_processor import NLPProcessor
17
+ from src.job_matcher import JobMatcher
18
+ from src.ats_scorer import ATSScorer
19
+ from src.utils import setup_logging, log_analysis, validate_file_upload
20
+
21
+ # Setup logging
22
+ setup_logging()
23
+ logger = logging.getLogger(__name__)
24
+
25
+ # Initialize FastAPI app
26
+ app = FastAPI(
27
+ title="Resume Analyzer API",
28
+ description="AI-powered resume analysis and job matching API",
29
+ version="1.0.0"
30
+ )
31
+
32
+ # Add CORS middleware
33
+ app.add_middleware(
34
+ CORSMiddleware,
35
+ allow_origins=["*"],
36
+ allow_credentials=True,
37
+ allow_methods=["*"],
38
+ allow_headers=["*"],
39
+ )
40
+
41
+ # Initialize components
42
+ resume_parser = ResumeParser()
43
+ nlp_processor = NLPProcessor()
44
+ job_matcher = JobMatcher()
45
+ ats_scorer = ATSScorer()
46
+
47
+ logger.info("FastAPI application initialized")
48
+
49
+
50
+ # Pydantic models
51
class AnalysisResponse(BaseModel):
    """Response envelope returned by /api/analyze."""

    success: bool  # True when the analysis completed without error
    data: dict     # analysis payload (skills, ATS score, matches, ...)
    message: str   # human-readable status message
55
+
56
+
57
class HealthResponse(BaseModel):
    """Response model for the health-check endpoints."""

    status: str   # service status string, e.g. "healthy"
    version: str  # API version reported to clients
60
+
61
+
62
+ @app.get("/", response_model=HealthResponse)
63
+ async def root():
64
+ """Health check endpoint."""
65
+ return {
66
+ "status": "healthy",
67
+ "version": "1.0.0"
68
+ }
69
+
70
+
71
+ @app.get("/health", response_model=HealthResponse)
72
+ async def health_check():
73
+ """Detailed health check."""
74
+ return {
75
+ "status": "healthy",
76
+ "version": "1.0.0"
77
+ }
78
+
79
+
80
+ @app.post("/api/analyze", response_model=AnalysisResponse)
81
+ async def analyze_resume(
82
+ file: UploadFile = File(...),
83
+ job_description: Optional[str] = Form(None)
84
+ ):
85
+ """
86
+ Analyze uploaded resume.
87
+
88
+ Args:
89
+ file: Resume file (PDF, DOCX, or TXT)
90
+ job_description: Optional job description for matching
91
+
92
+ Returns:
93
+ Analysis results including ATS score, skills, and matches
94
+ """
95
+ try:
96
+ # Save uploaded file temporarily
97
+ with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as tmp_file:
98
+ content = await file.read()
99
+ tmp_file.write(content)
100
+ tmp_file_path = tmp_file.name
101
+
102
+ # Validate file
103
+ validate_file_upload(tmp_file_path)
104
+
105
+ # Parse resume
106
+ logger.info(f"Parsing resume: {file.filename}")
107
+ parsed_data = resume_parser.parse_file(tmp_file_path)
108
+ resume_text = parsed_data['cleaned_text']
109
+
110
+ # Extract skills
111
+ logger.info("Extracting skills")
112
+ skills = nlp_processor.extract_skills(resume_text)
113
+
114
+ # Extract experience
115
+ experiences = nlp_processor.extract_experience(resume_text)
116
+ experience_years = nlp_processor.calculate_experience_years(resume_text)
117
+
118
+ # Extract education
119
+ education = nlp_processor.extract_education(resume_text)
120
+
121
+ # Calculate ATS score
122
+ logger.info("Calculating ATS score")
123
+ ats_score = ats_scorer.calculate_score(resume_text, job_description)
124
+
125
+ # Job matching (if job description provided)
126
+ job_matches = None
127
+ skill_gap = None
128
+
129
+ if job_description:
130
+ logger.info("Performing job matching")
131
+ similarity = job_matcher.calculate_similarity(resume_text, job_description)
132
+
133
+ # Extract skills from job description
134
+ jd_skills = nlp_processor.extract_skills(job_description)
135
+ all_resume_skills = [skill for skills_list in skills.values() for skill in skills_list]
136
+ all_jd_skills = [skill for skills_list in jd_skills.values() for skill in skills_list]
137
+
138
+ # Skill gap analysis
139
+ skill_gap = job_matcher.analyze_skill_match(all_resume_skills, all_jd_skills)
140
+
141
+ # Generate recommendations
142
+ recommendations = job_matcher.generate_recommendations(
143
+ resume_text, job_description, all_resume_skills
144
+ )
145
+
146
+ job_matches = {
147
+ 'similarity_score': similarity,
148
+ 'match_percentage': similarity * 100,
149
+ 'recommendations': recommendations
150
+ }
151
+
152
+ # Log analysis
153
+ log_analysis(
154
+ file.filename,
155
+ ats_score['overall_score'],
156
+ job_matches['similarity_score'] if job_matches else None
157
+ )
158
+
159
+ # Clean up temp file
160
+ os.unlink(tmp_file_path)
161
+
162
+ # Prepare response
163
+ response_data = {
164
+ 'file_name': file.filename,
165
+ 'metadata': parsed_data['metadata'],
166
+ 'word_count': parsed_data['word_count'],
167
+ 'skills': skills,
168
+ 'experience_years': experience_years,
169
+ 'experiences': experiences[:3], # Limit to top 3
170
+ 'education': education,
171
+ 'ats_score': ats_score,
172
+ 'job_match': job_matches,
173
+ 'skill_gap': skill_gap
174
+ }
175
+
176
+ return {
177
+ 'success': True,
178
+ 'data': response_data,
179
+ 'message': 'Analysis completed successfully'
180
+ }
181
+
182
+ except Exception as e:
183
+ logger.error(f"Analysis failed: {e}", exc_info=True)
184
+ raise HTTPException(status_code=500, detail=str(e))
185
+
186
+
187
+ @app.post("/api/match-jobs")
188
+ async def match_jobs(
189
+ file: UploadFile = File(...),
190
+ job_titles: List[str] = Form(...)
191
+ ):
192
+ """
193
+ Match resume against multiple job titles.
194
+
195
+ Args:
196
+ file: Resume file
197
+ job_titles: List of job titles to match against
198
+
199
+ Returns:
200
+ Ranked list of job matches
201
+ """
202
+ try:
203
+ # Save and parse resume
204
+ with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.filename)[1]) as tmp_file:
205
+ content = await file.read()
206
+ tmp_file.write(content)
207
+ tmp_file_path = tmp_file.name
208
+
209
+ parsed_data = resume_parser.parse_file(tmp_file_path)
210
+ resume_text = parsed_data['cleaned_text']
211
+
212
+ # Create job descriptions from titles
213
+ jobs = [{'title': title, 'description': title} for title in job_titles]
214
+
215
+ # Match jobs
216
+ matches = job_matcher.match_jobs(resume_text, jobs, top_k=len(jobs))
217
+
218
+ # Clean up
219
+ os.unlink(tmp_file_path)
220
+
221
+ return {
222
+ 'success': True,
223
+ 'data': {
224
+ 'matches': matches
225
+ },
226
+ 'message': 'Job matching completed'
227
+ }
228
+
229
+ except Exception as e:
230
+ logger.error(f"Job matching failed: {e}", exc_info=True)
231
+ raise HTTPException(status_code=500, detail=str(e))
232
+
233
+
234
+ @app.get("/api/stats")
235
+ async def get_stats():
236
+ """Get API usage statistics."""
237
+ try:
238
+ # Read analysis logs
239
+ log_file = "logs/analysis_log.jsonl"
240
+ if not os.path.exists(log_file):
241
+ return {
242
+ 'success': True,
243
+ 'data': {
244
+ 'total_analyses': 0,
245
+ 'average_ats_score': 0
246
+ },
247
+ 'message': 'No analyses yet'
248
+ }
249
+
250
+ import json
251
+ analyses = []
252
+ with open(log_file, 'r') as f:
253
+ for line in f:
254
+ analyses.append(json.loads(line))
255
+
256
+ total = len(analyses)
257
+ avg_ats = sum(a['ats_score'] for a in analyses) / total if total > 0 else 0
258
+
259
+ return {
260
+ 'success': True,
261
+ 'data': {
262
+ 'total_analyses': total,
263
+ 'average_ats_score': round(avg_ats, 2)
264
+ },
265
+ 'message': 'Statistics retrieved'
266
+ }
267
+
268
+ except Exception as e:
269
+ logger.error(f"Stats retrieval failed: {e}")
270
+ raise HTTPException(status_code=500, detail=str(e))
271
+
272
+
273
if __name__ == "__main__":
    import uvicorn
    # Dev entry point: bind on all interfaces (e.g. for container use).
    uvicorn.run(app, host="0.0.0.0", port=8000)
276
+