# app.py (Hugging Face Space)
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
# ---- import your real logic ----
from model.semantic.skills import extract_skills
from model.semantic.embeddings import embed_texts
from model.semantic.similarity import semantic_similarity
from model.semantic.scoring import compute_scores
from model.semantic.jd import prepare_jd_chunks
from model.llm.prompt import build_prompt
from model.llm.client import run_llm
from model.llm.parser import parse_llm_output
from model.bias.detector import run_bias_rules
from model.bias.prompt import build_bias_prompt
from model.bias.parser import parse_bias_output
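# (The model.* packages above are assumed to live in this Space's repo
#  alongside app.py; they are local modules, not published dependencies.)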

app = FastAPI()

# -------- Request schema --------
class ResumeRequest(BaseModel):
    resume_text: str
    resume_sections: dict
    job_description: str
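
# Example payload (hypothetical values, shaped to match the schema above):
# {
#   "resume_text": "Jane Doe, data engineer with Python and SQL ...",
#   "resume_sections": {
#     "skills": ["Python", "SQL"],
#     "experience": ["Built ETL pipelines at Acme ..."],
#     "projects": ["Resume parser side project ..."]
#   },
#   "job_description": "Data engineer role requiring Python, SQL, Airflow ..."
# }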
# -------- Health check --------
@app.get("/health")
def health():
return {"status": "ok"}
# -------- Inference endpoint --------
@app.post("/analyze")
def analyze_resume(req: ResumeRequest):
    structured_resume = req.resume_sections

    # Skill matching: pull skills from the structured resume and from the JD.
    resume_skills = extract_skills(structured_resume.get("skills", []))
    jd_skills = extract_skills([req.job_description])

    # Semantic similarity is computed over experience/project chunks vs. JD chunks.
    resume_chunks = (
        structured_resume.get("experience", [])
        + structured_resume.get("projects", [])
    )
    if not resume_chunks:
        raise HTTPException(
            status_code=400,
            detail="resume_sections contains no 'experience' or 'projects' entries",
        )

    resume_vecs = embed_texts(resume_chunks)
    jd_vecs = embed_texts(prepare_jd_chunks(req.job_description))
    semantic_score = semantic_similarity(resume_vecs, jd_vecs)
    scores = compute_scores(resume_skills, jd_skills, semantic_score)

    # LLM explanation: degrade gracefully rather than failing the whole request.
    try:
        prompt = build_prompt(
            structured_resume=structured_resume,
            skills_found=list(resume_skills),
            scores=scores,
            job_description=req.job_description,
        )
        explanation = parse_llm_output(run_llm(prompt))
    except Exception as e:
        explanation = {"error": str(e)}

    # Bias check: run the cheap rule pass first; only call the LLM if it fires.
    bias_findings = run_bias_rules(
        resume_text=req.resume_text,
        jd_text=req.job_description,
        scores=scores,
    )
    if bias_findings:
        bias_report = parse_bias_output(
            run_llm(build_bias_prompt(bias_findings))
        )
    else:
        bias_report = {
            "detected_biases": [],
            "risk_level": "low",
            "explanations": [],
            "mitigation_suggestions": [],
        }

    return {
        "scores": scores,
        "semantic_score": semantic_score,
        "explanation": explanation,
        "bias_report": bias_report,
    }
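

# Local entry point: a minimal sketch for running outside the Space. HF Spaces
# usually start the server themselves (e.g. `uvicorn app:app` in a Dockerfile),
# so this guard is only a convenience, and port 7860 is the Spaces convention
# rather than a requirement.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)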