Spaces:
Sleeping
Sleeping
Commit ·
58e308d
1
Parent(s): 4209761
hotfix: added stub response
Browse files- .python-version +1 -0
- Process/views.py +198 -21
- app.py +0 -1
- main.py +6 -0
- pyproject.toml +7 -0
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.12
|
Process/views.py
CHANGED
|
@@ -64,33 +64,210 @@ def process_resume(request):
|
|
| 64 |
logger.info("Processing resume request")
|
| 65 |
data = json.loads(request.body)
|
| 66 |
|
| 67 |
-
user_id = data.get("user_id")
|
| 68 |
-
file_link = data.get("file_link")
|
| 69 |
-
job_description = data.get("job_description")
|
| 70 |
-
logger.info(f"Received data for user_id: {user_id}")
|
| 71 |
|
| 72 |
-
if not all([user_id, file_link, job_description]):
|
| 73 |
-
|
| 74 |
-
|
| 75 |
|
| 76 |
-
logger.info("Extracting Text from the pdf")
|
| 77 |
-
resume = extract_text_from_pdf(file_link)
|
| 78 |
-
logger.info(f"Text extracted from the pdf : {resume}")
|
| 79 |
|
| 80 |
-
logger.info("Extracting resume details")
|
| 81 |
-
st_data = extract_resume_details(resume)
|
| 82 |
-
logger.info("Resume details extraction completed")
|
| 83 |
|
| 84 |
-
logger.info("Generating ATS score")
|
| 85 |
-
ats_score = generate_ats_score(st_data, job_description)
|
| 86 |
-
logger.info("ATS score generation completed")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
|
| 88 |
response_data = {
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
logger.info("Sending successful response")
|
| 95 |
return JsonResponse(response_data, status=200)
|
| 96 |
except json.JSONDecodeError as e:
|
|
|
|
| 64 |
logger.info("Processing resume request")
|
| 65 |
data = json.loads(request.body)
|
| 66 |
|
| 67 |
+
# user_id = data.get("user_id")
|
| 68 |
+
# file_link = data.get("file_link")
|
| 69 |
+
# job_description = data.get("job_description")
|
| 70 |
+
# logger.info(f"Received data for user_id: {user_id}")
|
| 71 |
|
| 72 |
+
# if not all([user_id, file_link, job_description]):
|
| 73 |
+
# logger.warning("Missing required fields in request")
|
| 74 |
+
# return JsonResponse({"error": "Missing required fields"}, status=400)
|
| 75 |
|
| 76 |
+
# logger.info("Extracting Text from the pdf")
|
| 77 |
+
# resume = extract_text_from_pdf(file_link)
|
| 78 |
+
# logger.info(f"Text extracted from the pdf : {resume}")
|
| 79 |
|
| 80 |
+
# logger.info("Extracting resume details")
|
| 81 |
+
# st_data = extract_resume_details(resume)
|
| 82 |
+
# logger.info("Resume details extraction completed")
|
| 83 |
|
| 84 |
+
# logger.info("Generating ATS score")
|
| 85 |
+
# ats_score = generate_ats_score(st_data, job_description)
|
| 86 |
+
# logger.info("ATS score generation completed")
|
| 87 |
+
|
| 88 |
+
# response_data = {
|
| 89 |
+
# "user_id": user_id,
|
| 90 |
+
# "similarity": "100.00",
|
| 91 |
+
# "ats_score": ats_score,
|
| 92 |
+
# "structured_data": st_data,
|
| 93 |
+
# }
|
| 94 |
|
| 95 |
response_data = {
|
| 96 |
+
"user_id": 12345,
|
| 97 |
+
"user_name": "John Doe",
|
| 98 |
+
"similarity": 0.23571285605430603,
|
| 99 |
+
"ats_score": {
|
| 100 |
+
"ats_score": 88.0,
|
| 101 |
+
"detailed_scores": {
|
| 102 |
+
"skills_match": 90.0,
|
| 103 |
+
"experience_relevance": 85.0,
|
| 104 |
+
"education_relevance": 90.0,
|
| 105 |
+
"overall_formatting": 100
|
| 106 |
+
},
|
| 107 |
+
"feedback": {
|
| 108 |
+
"strengths": [
|
| 109 |
+
"Strong AI and software engineering skills",
|
| 110 |
+
"Highly relevant project experience for AI software development",
|
| 111 |
+
"Strong education background with good GPA",
|
| 112 |
+
"Clean and ATS-friendly formatting"
|
| 113 |
+
],
|
| 114 |
+
"improvements": [
|
| 115 |
+
"Add clearer mapping of projects to end-to-end software engineering use cases",
|
| 116 |
+
"Explicitly mention system design and production-scale deployment experience"
|
| 117 |
+
]
|
| 118 |
+
},
|
| 119 |
+
"detailed_feedback": {
|
| 120 |
+
"skills_match": {
|
| 121 |
+
"matching_elements": [
|
| 122 |
+
"Python",
|
| 123 |
+
"FastAPI",
|
| 124 |
+
"Django",
|
| 125 |
+
"Machine Learning",
|
| 126 |
+
"NLP",
|
| 127 |
+
"LLMs",
|
| 128 |
+
"PyTorch",
|
| 129 |
+
"Docker",
|
| 130 |
+
"AWS",
|
| 131 |
+
"React.js",
|
| 132 |
+
"PostgreSQL",
|
| 133 |
+
"MongoDB"
|
| 134 |
+
],
|
| 135 |
+
"missing_elements": [
|
| 136 |
+
"Explicit mention of microservices architecture",
|
| 137 |
+
"Explicit mention of CI/CD pipelines in production"
|
| 138 |
+
],
|
| 139 |
+
"explanation": "The candidate demonstrates a strong match for an AI Software Developer role with expertise in Python, backend frameworks, AI/ML, LLMs, and cloud-native tools. Skills align well with building, deploying, and optimizing AI-driven systems. Adding clearer mentions of microservices and CI/CD in real-world production contexts would further strengthen the profile."
|
| 140 |
+
},
|
| 141 |
+
"experience_relevance": {
|
| 142 |
+
"matching_elements": [
|
| 143 |
+
"Built AI-driven fashion visualization and automation pipelines",
|
| 144 |
+
"Migrated backend systems from Flask to FastAPI with performance improvements",
|
| 145 |
+
"Developed NLP-based summarization systems using PEGASUS, BERTsum, and BART",
|
| 146 |
+
"Built speech-to-text systems with performance optimization",
|
| 147 |
+
"Implemented machine learning models from scratch (MNIST classifier)"
|
| 148 |
+
],
|
| 149 |
+
"missing_elements": [
|
| 150 |
+
"Explicit ownership of large-scale production deployments",
|
| 151 |
+
"Long-term industry experience beyond internships"
|
| 152 |
+
],
|
| 153 |
+
"explanation": "The experience strongly aligns with an AI Software Developer role, covering AI research, backend engineering, system optimization, and applied ML. Internships demonstrate hands-on impact, performance gains, and real-world deployment. While the experience is strong, longer-term full-time production ownership would further improve relevance."
|
| 154 |
+
},
|
| 155 |
+
"education_relevance": {
|
| 156 |
+
"matching_elements": [
|
| 157 |
+
"MTech (Integrated) in Computer Science and Engineering",
|
| 158 |
+
"Strong GPA: 8.59",
|
| 159 |
+
"AI-focused coursework and projects"
|
| 160 |
+
],
|
| 161 |
+
"missing_elements": [],
|
| 162 |
+
"explanation": "The education background is highly relevant for an AI Software Developer role, providing strong foundations in computer science, AI, and engineering concepts."
|
| 163 |
+
},
|
| 164 |
+
"overall_formatting": {
|
| 165 |
+
"matching_elements": [
|
| 166 |
+
"Clear section headings",
|
| 167 |
+
"Name, email, phone, and GitHub present",
|
| 168 |
+
"Well-structured bullet points",
|
| 169 |
+
"Consistent formatting suitable for ATS"
|
| 170 |
+
],
|
| 171 |
+
"missing_elements": [],
|
| 172 |
+
"explanation": "The resume formatting is clean, structured, and fully ATS-compatible, enabling easy parsing by automated systems."
|
| 173 |
+
}
|
| 174 |
+
}
|
| 175 |
+
},
|
| 176 |
+
"structured_data": {
|
| 177 |
+
"name": "KB Harish",
|
| 178 |
+
"email": "harishkb20205@gmail.com",
|
| 179 |
+
"github": "https://github.com/HARISH20205",
|
| 180 |
+
"phone": "+91-824-805-2926",
|
| 181 |
+
"skills": ["Python", "Java", "C", "C++", "JavaScript", "TypeScript",
|
| 182 |
+
"HTML", "CSS", "React.js", "Tailwind CSS", "Firebase",
|
| 183 |
+
"Django", "FastAPI", "Flask", "Express.js", "MongoDB", "PostgreSQL", "Prisma",
|
| 184 |
+
"ML", "NLP", "LLMs (Fine-tuning, Inference, Optimization)", "CNN", "YOLO", "ViT", "PyTorch",
|
| 185 |
+
"AWS", "Docker", "Hugging Face", "Jenkins", "Selenium", "CI/CD",
|
| 186 |
+
"Git", "Linux/Unix", "ROS2", "Raspberry Pi 5"]
|
| 187 |
+
},
|
| 188 |
+
"experience": [
|
| 189 |
+
{
|
| 190 |
+
"title": "Generative AI Intern",
|
| 191 |
+
"company": "TITAN Company Limited",
|
| 192 |
+
"start_date": "Jun 2025",
|
| 193 |
+
"end_date": "Jul 2025",
|
| 194 |
+
"description": [
|
| 195 |
+
"Built an AI-driven fashion visualization pipeline with Runway ML and automated retail ops using n8n, reducing catalog time by 60%, manual effort by 70%, and speeding up product launches by 3x.",
|
| 196 |
+
"Migrated Taneira’s backend from Flask to FastAPI, improving API response time by 40%, scaling throughput by 2.5x, and integrating a modular RAG-based chatbot for AI-driven support."
|
| 197 |
+
]
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"title": "AI Research and Development Intern",
|
| 201 |
+
"company": "eBramha Techworks Private Limited",
|
| 202 |
+
"start_date": "Jun 2024",
|
| 203 |
+
"end_date": "Oct 2024",
|
| 204 |
+
"description": [
|
| 205 |
+
"Conducted comprehensive analysis of advanced NLP models like PEGASUS, BERTsum, and BART; applied insights to optimize summarization tasks, improving accuracy by 25% in real-world use cases.",
|
| 206 |
+
"Developed a speech-to-text system, reducing processing time by 40%, and Constructed an MNIST digit classifier with 95% accuracy using gradient descent and one-hot encoding."
|
| 207 |
+
]
|
| 208 |
+
}
|
| 209 |
+
],
|
| 210 |
+
"education": [
|
| 211 |
+
{
|
| 212 |
+
"institution": "Vellore Institute of Technology (VIT), Vellore, India",
|
| 213 |
+
"degree": "MTech (Integrated) in Computer Science and Engineering",
|
| 214 |
+
"gpa": "8.59",
|
| 215 |
+
"start_date": "Aug 2022",
|
| 216 |
+
"end_date": "Jul 2027"
|
| 217 |
+
}
|
| 218 |
+
],
|
| 219 |
+
"certifications": [
|
| 220 |
+
"Coursera: Supervised Machine Learning: Regression and Classification",
|
| 221 |
+
"Coursera: Advanced Learning Algorithms",
|
| 222 |
+
"Coursera: Generative AI with Large Language Models"
|
| 223 |
+
],
|
| 224 |
+
"areas_of_interest": "",
|
| 225 |
+
"projects": [
|
| 226 |
+
{
|
| 227 |
+
"project": "FrugalSOT",
|
| 228 |
+
"name": "FrugalSOT",
|
| 229 |
+
"description": [
|
| 230 |
+
"Architected an LLM system using Ollama and Raspberry Pi 5, starting with device capability detection to dynamically select models suited for hardware, reducing latency by 70%.",
|
| 231 |
+
"Reduced computational costs by 15% by integrating intelligent model selection with fallback mechanisms, ensuring seamless task execution.",
|
| 232 |
+
"Implemented an adaptive thresholding mechanism that updates thresholds based on prompt history, improving model selection efficiency."
|
| 233 |
+
],
|
| 234 |
+
"link": "https://github.com/HARISH20205/FrugalSOT.git"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"project": "PHYDRA",
|
| 238 |
+
"name": "PHYDRA",
|
| 239 |
+
"description": [
|
| 240 |
+
"Designed a highly scalable Stowage Management System for the ISS, capable of processing over 10 million items in under 5 seconds, ensuring exceptional operational efficiency.",
|
| 241 |
+
"Engineered a high-performance backend using FastAPI, optimized with Python and C++ subprocess orchestration, reducing compute time by 95% and integrating Prisma for seamless database compatibility.",
|
| 242 |
+
"Built an interactive frontend using React and Tailwind CSS for a user-friendly interface."
|
| 243 |
+
],
|
| 244 |
+
"link": "https://github.com/Mantissagithub/PHYDRA.git"
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"project": "LaunchLLM",
|
| 248 |
+
"name": "LaunchLLM",
|
| 249 |
+
"description": [
|
| 250 |
+
"Orchestrated a frictionless LLM deployment toolchain supporting open-source models and GPU customization, reducing manual configuration by over 90% and accelerating time-to-production.",
|
| 251 |
+
"Containerized SGLang using Docker to ensure reproducible, low-latency inference environments, achieving model initialization times under 5 seconds for most mid-tier models.",
|
| 252 |
+
"Developed a FastAPI middleware for efficient routing and metrics, enabling scalable backend infrastructure."
|
| 253 |
+
],
|
| 254 |
+
"link": ""
|
| 255 |
+
}
|
| 256 |
+
],
|
| 257 |
+
"languages": "",
|
| 258 |
+
"awards_and_achievements": "",
|
| 259 |
+
"volunteer_experience": "",
|
| 260 |
+
"hobbies_and_interests": "",
|
| 261 |
+
"publications": "",
|
| 262 |
+
"conferences_and_presentations": "",
|
| 263 |
+
"patents": "",
|
| 264 |
+
"professional_affiliations": "",
|
| 265 |
+
"portfolio_links": [
|
| 266 |
+
"https://frugalsot.vercel.app/",
|
| 267 |
+
"https://www.linkedin.com/in/harish-kb-9417ba252/"
|
| 268 |
+
],
|
| 269 |
+
"summary_or_objective": ""
|
| 270 |
+
}
|
| 271 |
logger.info("Sending successful response")
|
| 272 |
return JsonResponse(response_data, status=200)
|
| 273 |
except json.JSONDecodeError as e:
|
app.py
CHANGED
|
@@ -29,7 +29,6 @@ if __name__ == "__main__":
|
|
| 29 |
logger.info(f"Working directory: {os.getcwd()}")
|
| 30 |
|
| 31 |
# Start the Django development server
|
| 32 |
-
# For better production deployment, consider using gunicorn
|
| 33 |
logger.info("Launching Django server")
|
| 34 |
subprocess.run(
|
| 35 |
["python", "manage.py", "runserver", f"0.0.0.0:{port}"],
|
|
|
|
| 29 |
logger.info(f"Working directory: {os.getcwd()}")
|
| 30 |
|
| 31 |
# Start the Django development server
|
|
|
|
| 32 |
logger.info("Launching Django server")
|
| 33 |
subprocess.run(
|
| 34 |
["python", "manage.py", "runserver", f"0.0.0.0:{port}"],
|
main.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def main():
|
| 2 |
+
print("Hello from resume-ats!")
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
main()
|
pyproject.toml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[project]
|
| 2 |
+
name = "resume-ats"
|
| 3 |
+
version = "0.1.0"
|
| 4 |
+
description = "Add your description here"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.12"
|
| 7 |
+
dependencies = []
|