# Resume-ATS / Process / views.py
# hotfix: added stub response (author: HARISH20205, commit 58e308d)
import logging
import traceback
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
from transformers import AutoTokenizer, AutoModel
import torch
import os
from .ats_parser import extract_resume_details
from .utils import generate_ats_score
from .response import get_response
from .extract import extract_text_from_pdf
# Module-level logging setup.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sentence-embedding model used to compare resumes against job descriptions.
model_name = "sentence-transformers/all-MiniLM-L6-v2"
logger.info(f"Loading model: {model_name}")

# Pre-bind to None so a failed load leaves well-defined globals: callers can
# degrade gracefully instead of hitting a NameError on first use.
tokenizer = None
model = None
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    logger.info("Model loaded successfully")
except Exception as e:
    # Startup must not crash the whole Django app; log and continue degraded.
    logger.error(f"Failed to load model: {e}")
    logger.debug(traceback.format_exc())
def get_embeddings(texts):
    """Return sentence embeddings for a list of strings.

    Args:
        texts: list[str] to embed as one batch.

    Returns:
        A (len(texts), hidden_dim) torch.Tensor of mean-pooled embeddings,
        or None if the model is unavailable or inference fails.
    """
    try:
        logger.debug(f"Generating embeddings for {len(texts)} texts")
        inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
        with torch.no_grad():
            model_output = model(**inputs)
        # Mask-aware mean pooling: a plain .mean(dim=1) would average padding
        # tokens into the embedding, skewing results for batched inputs of
        # different lengths. This is the recommended pooling for
        # sentence-transformers/all-MiniLM-L6-v2 (identical output for
        # single-text batches, where no padding is added).
        mask = inputs["attention_mask"].unsqueeze(-1).float()
        summed = (model_output.last_hidden_state * mask).sum(dim=1)
        counts = mask.sum(dim=1).clamp(min=1e-9)  # avoid divide-by-zero
        return summed / counts
    except Exception as e:
        # Best-effort API: callers treat None as "embedding unavailable".
        logger.error(f"Error generating embeddings: {e}")
        logger.debug(traceback.format_exc())
        return None
def calculate_similarity(job_description, resume_text):
    """Return the cosine similarity between a job description and a resume.

    Args:
        job_description: job-description text.
        resume_text: resume text.

    Returns:
        float cosine similarity in [-1.0, 1.0]; 0.0 on any failure.
    """
    try:
        logger.info("Calculating similarity between job description and resume")
        jd_embedding = get_embeddings([job_description])
        resume_embedding = get_embeddings([resume_text])
        # get_embeddings returns None on failure; handle it explicitly rather
        # than letting the AttributeError on None be swallowed by the generic
        # handler below, which masked the real cause in the logs.
        if jd_embedding is None or resume_embedding is None:
            logger.error("Embedding generation failed; returning 0.0 similarity")
            return 0.0
        # L2-normalize so the dot product below is a cosine similarity.
        jd_embedding = jd_embedding / jd_embedding.norm(dim=1, keepdim=True)
        resume_embedding = resume_embedding / resume_embedding.norm(dim=1, keepdim=True)
        similarity = torch.mm(jd_embedding, resume_embedding.T).item()
        return similarity
    except Exception as e:
        logger.error(f"Error calculating similarity: {e}")
        logger.debug(traceback.format_exc())
        return 0.0
def _stub_response_payload():
    """Build the hard-coded stub payload returned while the real pipeline is off.

    NOTE(review): "experience", "education", "projects", etc. sit at the TOP
    level of the payload rather than inside "structured_data" — possibly
    unintended; confirm the expected schema with the API consumer.
    """
    return {
        "user_id": 12345,
        "user_name": "John Doe",
        "similarity": 0.23571285605430603,
        "ats_score": {
            "ats_score": 88.0,
            "detailed_scores": {
                "skills_match": 90.0,
                "experience_relevance": 85.0,
                "education_relevance": 90.0,
                "overall_formatting": 100
            },
            "feedback": {
                "strengths": [
                    "Strong AI and software engineering skills",
                    "Highly relevant project experience for AI software development",
                    "Strong education background with good GPA",
                    "Clean and ATS-friendly formatting"
                ],
                "improvements": [
                    "Add clearer mapping of projects to end-to-end software engineering use cases",
                    "Explicitly mention system design and production-scale deployment experience"
                ]
            },
            "detailed_feedback": {
                "skills_match": {
                    "matching_elements": [
                        "Python",
                        "FastAPI",
                        "Django",
                        "Machine Learning",
                        "NLP",
                        "LLMs",
                        "PyTorch",
                        "Docker",
                        "AWS",
                        "React.js",
                        "PostgreSQL",
                        "MongoDB"
                    ],
                    "missing_elements": [
                        "Explicit mention of microservices architecture",
                        "Explicit mention of CI/CD pipelines in production"
                    ],
                    "explanation": "The candidate demonstrates a strong match for an AI Software Developer role with expertise in Python, backend frameworks, AI/ML, LLMs, and cloud-native tools. Skills align well with building, deploying, and optimizing AI-driven systems. Adding clearer mentions of microservices and CI/CD in real-world production contexts would further strengthen the profile."
                },
                "experience_relevance": {
                    "matching_elements": [
                        "Built AI-driven fashion visualization and automation pipelines",
                        "Migrated backend systems from Flask to FastAPI with performance improvements",
                        "Developed NLP-based summarization systems using PEGASUS, BERTsum, and BART",
                        "Built speech-to-text systems with performance optimization",
                        "Implemented machine learning models from scratch (MNIST classifier)"
                    ],
                    "missing_elements": [
                        "Explicit ownership of large-scale production deployments",
                        "Long-term industry experience beyond internships"
                    ],
                    "explanation": "The experience strongly aligns with an AI Software Developer role, covering AI research, backend engineering, system optimization, and applied ML. Internships demonstrate hands-on impact, performance gains, and real-world deployment. While the experience is strong, longer-term full-time production ownership would further improve relevance."
                },
                "education_relevance": {
                    "matching_elements": [
                        "MTech (Integrated) in Computer Science and Engineering",
                        "Strong GPA: 8.59",
                        "AI-focused coursework and projects"
                    ],
                    "missing_elements": [],
                    "explanation": "The education background is highly relevant for an AI Software Developer role, providing strong foundations in computer science, AI, and engineering concepts."
                },
                "overall_formatting": {
                    "matching_elements": [
                        "Clear section headings",
                        "Name, email, phone, and GitHub present",
                        "Well-structured bullet points",
                        "Consistent formatting suitable for ATS"
                    ],
                    "missing_elements": [],
                    "explanation": "The resume formatting is clean, structured, and fully ATS-compatible, enabling easy parsing by automated systems."
                }
            }
        },
        "structured_data": {
            "name": "KB Harish",
            "email": "harishkb20205@gmail.com",
            "github": "https://github.com/HARISH20205",
            "phone": "+91-824-805-2926",
            "skills": ["Python", "Java", "C", "C++", "JavaScript", "TypeScript",
                       "HTML", "CSS", "React.js", "Tailwind CSS", "Firebase",
                       "Django", "FastAPI", "Flask", "Express.js", "MongoDB", "PostgreSQL", "Prisma",
                       "ML", "NLP", "LLMs (Fine-tuning, Inference, Optimization)", "CNN", "YOLO", "ViT", "PyTorch",
                       "AWS", "Docker", "Hugging Face", "Jenkins", "Selenium", "CI/CD",
                       "Git", "Linux/Unix", "ROS2", "Raspberry Pi 5"]
        },
        "experience": [
            {
                "title": "Genrative AI Intern",
                "company": "TITAN Company Limited",
                "start_date": "Jun 2025",
                "end_date": "Jul 2025",
                "description": [
                    "Built an AI-driven fashion visualization pipeline with Runway ML and automated retail ops using n8n, reducing catalog time by 60%, manual effort by 70%, and speeding up product launches by 3x.",
                    "Migrated Taneira’s backend from Flask to FastAPI, improving API response time by 40%, scaling throughput by 2.5x, and integrating a modular RAG-based chatbot for AI-driven support."
                ]
            },
            {
                "title": "AI Research and Development Intern",
                "company": "eBramha Techworks Private Limited",
                "start_date": "Jun 2024",
                "end_date": "Oct 2024",
                "description": [
                    "Conducted comprehensive analysis of advanced NLP models like PEGASUS, BERTsum, and BART; applied insights to optimize summarization tasks, improving accuracy by 25% in real-world use cases.",
                    "Developed a speech-to-text system, reducing processing time by 40%, and Constructed an MNIST digit classifier with 95% accuracy using gradient descent and one-hot encoding."
                ]
            }
        ],
        "education": [
            {
                "institution": "Vellore Institute of Technology (VIT), Vellore, India",
                "degree": "MTech (Integrated) in Computer Science and Engineering",
                "gpa": "8.59",
                "start_date": "Aug 2022",
                "end_date": "Jul 2027"
            }
        ],
        "certifications": [
            "Coursera: Supervised Machine Learning: Regression and Classification",
            "Coursera: Advanced Learning Algorithms",
            "Coursera: Generative AI with Large Language Models"
        ],
        "areas_of_interest": "",
        "projects": [
            {
                "project": "FrugalSOT",
                "name": "FrugalSOT",
                "description": [
                    "Architected an LLM system using Ollama and Raspberry Pi 5, starting with device capability detection to dynamically select models suited for hardware, reducing latency by 70%.",
                    "Reduced computational costs by 15% by integrating intelligent model selection with fallback mechanisms, ensuring seamless task execution.",
                    "Implemented an adaptive thresholding mechanism that updates thresholds based on prompt history, improving model selection efficiency."
                ],
                "link": "https://github.com/HARISH20205/FrugalSOT.git"
            },
            {
                "project": "PHYDRA",
                "name": "PHYDRA",
                "description": [
                    "Designed a highly scalable Stowage Management System for the ISS, capable of processing over 10 million items in under 5 seconds, ensuring exceptional operational efficiency.",
                    "Engineered a high-performance backend using FastAPI, optimized with Python and C++ subprocess orchestration, reducing compute time by 95% and integrating Prisma for seamless database compatibility.",
                    "Built an interactive frontend using React and Tailwind CSS for a user-friendly interface."
                ],
                "link": "https://github.com/Mantissagithub/PHYDRA.git"
            },
            {
                "project": "LaunchLLM",
                "name": "LaunchLLM",
                "description": [
                    "Orchestrated a frictionless LLM deployment toolchain supporting open-source models and GPU customization, reducing manual configuration by over 90% and accelerating time-to-production.",
                    "Containerized SGLang using Docker to ensure reproducible, low-latency inference environments, achieving model initialization times under 5 seconds for most mid-tier models.",
                    "Developed a FastAPI middleware for efficient routing and metrics, enabling scalable backend infrastructure."
                ],
                "link": ""
            }
        ],
        "languages": "",
        "awards_and_achievements": "",
        "volunteer_experience": "",
        "hobbies_and_interests": "",
        "publications": "",
        "conferences_and_presentations": "",
        "patents": "",
        "professional_affiliations": "",
        "portfolio_links": [
            "https://frugalsot.vercel.app/",
            "https://www.linkedin.com/in/harish-kb-9417ba252/"
        ],
        "summary_or_objective": ""
    }


@csrf_exempt
def process_resume(request):
    """Handle POST requests with a resume/job-description payload.

    The real extraction/scoring pipeline is temporarily disabled (stub hotfix);
    the request body is still parsed so malformed JSON gets a 400, then a fixed
    stub payload is returned. Non-POST methods get 405; unexpected errors 500.
    """
    if request.method != "POST":
        logger.warning(f"Unsupported method: {request.method}")
        return JsonResponse({"message": "Only POST requests are allowed"}, status=405)
    try:
        logger.info("Processing resume request")
        # Parsed only for validation while the pipeline below is disabled.
        data = json.loads(request.body)
        # --- Real pipeline, temporarily disabled by the stub hotfix ---
        # user_id = data.get("user_id")
        # file_link = data.get("file_link")
        # job_description = data.get("job_description")
        # logger.info(f"Received data for user_id: {user_id}")
        # if not all([user_id, file_link, job_description]):
        #     logger.warning("Missing required fields in request")
        #     return JsonResponse({"error": "Missing required fields"}, status=400)
        # resume = extract_text_from_pdf(file_link)
        # st_data = extract_resume_details(resume)
        # ats_score = generate_ats_score(st_data, job_description)
        # response_data = {
        #     "user_id": user_id,
        #     "similarity": "100.00",
        #     "ats_score": ats_score,
        #     "structured_data": st_data,
        # }
        response_data = _stub_response_payload()
        logger.info("Sending successful response")
        return JsonResponse(response_data, status=200)
    except json.JSONDecodeError as e:
        logger.error(f"Invalid JSON received: {e}")
        return JsonResponse({"error": "Invalid JSON format"}, status=400)
    except Exception as e:
        # Log full details server-side, but do NOT echo exception internals to
        # the client (the original leaked the exception text in the 500 body).
        logger.error(f"Error processing resume: {e}")
        logger.debug(traceback.format_exc())
        return JsonResponse({"error": "Internal error while processing resume"}, status=500)
def verify_api(request):
    """Liveness endpoint: confirm the API is reachable.

    Accepts GET only; any other method gets a 405 error response.
    """
    logger.info(f"API verification request received via {request.method}")
    if request.method != "GET":
        logger.warning(f"Unsupported method for API verification: {request.method}")
        return JsonResponse({"error": "Only GET requests are allowed"}, status=405)
    return JsonResponse({"message": "yaay working-GET "}, status=200)
def home(request):
    """Root endpoint: return a welcome message.

    Accepts GET only; any other method gets a 405 error response.
    """
    logger.info(f"Home request received via {request.method}")
    if request.method != "GET":
        logger.warning(f"Unsupported method for home: {request.method}")
        return JsonResponse({"error": "Only GET requests are allowed"}, status=405)
    return JsonResponse({"message": "Welcome To Resume-ATS"}, status=200)