# RecrAI-backend / llm_client.py
import json
import os
from typing import Dict, Any
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate
from models_schemas import AnalyzeResponse
from prompts import PROMPT_TEMPLATE, SCHEMA_JSON, PROMPT_SCORE

# Model and sampling configuration, overridable via environment variables.
GROQ_MODEL_ID = os.getenv("GROQ_MODEL_ID", "deepseek-r1-distill-llama-70b")
TEMPERATURE = float(os.getenv("TEMPERATURE", "0.7"))
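
# Note (assumption, not part of the original file): langchain_groq's ChatGroq
# reads the Groq API key from the GROQ_API_KEY environment variable unless an
# explicit api_key is passed, so a typical local setup might look like:
#
#   export GROQ_API_KEY="gsk_..."                            # required by ChatGroq
#   export GROQ_MODEL_ID="deepseek-r1-distill-llama-70b"     # optional override
#   export TEMPERATURE="0.2"                                  # optional override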


def _load_llm() -> ChatGroq:
    """Instantiate the Groq-hosted chat model used for CV analysis."""
    return ChatGroq(
        model=GROQ_MODEL_ID,
        temperature=TEMPERATURE,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )


def _strip_think(text: str) -> str:
    """Drop the model's <think>...</think> reasoning block, keeping only the final answer."""
    if "</think>" in text:
        return text.split("</think>")[-1].strip()
    return text.strip()


def _force_schema_fields(d: Dict[str, Any]) -> Dict[str, Any]:
    """Backfill any keys the LLM omitted so AnalyzeResponse construction does not fail."""
    keys = [
        "name", "area", "summary", "skills", "education", "interview_questions",
        "strengths", "areas_for_development", "important_considerations",
        "final_recommendations", "score",
    ]
    for k in keys:
        if k not in d:
            # List-valued fields default to [], the score to 0.0, everything else to "".
            d[k] = [] if k in {
                "skills", "interview_questions", "strengths",
                "areas_for_development", "important_considerations",
            } else (0.0 if k == "score" else "")
    return d


def analyze_cv_with_llm(cv_text: str, job_details: str) -> AnalyzeResponse:
    """Run the CV and job description through the Groq LLM and parse its JSON reply."""
    llm = _load_llm()
    prompt = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
    chain = prompt | llm
    output = chain.invoke({
        "schema": SCHEMA_JSON,
        "prompt_score": PROMPT_SCORE,
        "cv": cv_text,
        "job": job_details,
    })

    # Strip any reasoning preamble, then extract the outermost JSON object.
    raw = _strip_think(output.content)
    start = raw.find("{")
    end = raw.rfind("}")
    if start == -1 or end == -1:
        return AnalyzeResponse()
    json_str = raw[start:end + 1]
    try:
        data = json.loads(json_str)
    except Exception:
        # Malformed JSON from the model: fall back to an empty response.
        return AnalyzeResponse()
    data = _force_schema_fields(data)
    return AnalyzeResponse(**data)
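

# Minimal usage sketch (not part of the original module). It assumes GROQ_API_KEY
# is set and that a plain-text CV and job description are available; the file name
# "sample_cv.txt" and the job string below are hypothetical.
if __name__ == "__main__":
    with open("sample_cv.txt", encoding="utf-8") as f:
        cv = f.read()
    job = "Senior Python backend developer: FastAPI, PostgreSQL, cloud deployment"
    result = analyze_cv_with_llm(cv, job)
    print(result)  # AnalyzeResponse from models_schemas; inspect its fields as needed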