# Smart_CV_Analyzer / utils.py
# Utility helpers: LLM-backed suggestion generators (skills, certifications,
# scholarships, education, visas, jobs) and a simple heuristic CV scorer.
import requests
import spacy
from transformers import pipeline
# Load spaCy model for NLP tasks (tokenization, lemmas, stop-word flags
# used by score_cv). Requires the `en_core_web_sm` package to be installed.
nlp_spacy = spacy.load("en_core_web_sm")
# Initialize lightweight LLM pipeline (Falcon 1B) on CPU; max_new_tokens=512
# caps the generation length for every call made through `llm`.
# NOTE(review): both models load eagerly at import time, so importing this
# module is slow and memory-heavy — confirm that is acceptable for callers.
llm = pipeline("text-generation", model="tiiuae/falcon-rw-1b", device="cpu", max_new_tokens=512)
def generate_llm_response(prompt: str) -> str:
    """Generate a completion for *prompt* with the Falcon 1B pipeline.

    Returns only the newly generated text. The HF text-generation pipeline
    prepends the prompt to ``generated_text`` by default, which would make
    every line-splitting helper below return the prompt's own lines as
    "suggestions"; ``return_full_text=False`` disables that.

    Args:
        prompt: The instruction/context string sent to the model.

    Returns:
        The model's generated continuation as a plain string.
    """
    response = llm(
        prompt,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # exclude the echoed prompt from the output
    )
    return response[0]['generated_text']
def get_skills_suggestions(cv_text: str) -> list:
    """Ask the LLM to extract professional skills from raw CV text.

    Returns one skill per non-empty line of the model's response.
    """
    prompt = f"Extract and list relevant professional skills from this CV text:\n{cv_text}"
    raw = generate_llm_response(prompt)
    skills = []
    for line in raw.split('\n'):
        cleaned = line.strip()
        if cleaned:
            skills.append(cleaned)
    return skills
def get_certifications_suggestions(skills: list) -> list:
    """Ask the LLM for certifications matching the given skill list.

    Returns one certification per non-empty line of the model's response.
    """
    prompt = f"Suggest certifications relevant to these skills:\n{', '.join(skills)}"
    raw = generate_llm_response(prompt)
    # filter(None, ...) drops lines that are empty after stripping.
    return list(filter(None, (line.strip() for line in raw.split('\n'))))
def get_scholarships_suggestions(field: str) -> list:
    """Ask the LLM for scholarships available in *field*.

    Returns one scholarship per non-empty line of the model's response.
    """
    prompt = f"List scholarships available for the field: {field}"
    answer = generate_llm_response(prompt)
    stripped = map(str.strip, answer.split('\n'))
    return [entry for entry in stripped if entry]
def get_education_opportunities(field: str) -> list:
    """Ask the LLM for education opportunities and courses in *field*.

    Returns one suggestion per non-empty line of the model's response.
    """
    prompt = f"Suggest education opportunities and relevant courses for the field: {field}"
    answer = generate_llm_response(prompt)
    opportunities = []
    for raw_line in answer.split('\n'):
        item = raw_line.strip()
        if not item:
            continue
        opportunities.append(item)
    return opportunities
def get_visa_opportunities(country: str) -> list:
    """Ask the LLM for visa/immigration options for professionals in *country*.

    Returns one option per non-empty line of the model's response.
    """
    prompt = f"List visa and immigration options for skilled professionals in {country}"
    answer = generate_llm_response(prompt)
    return [option.strip() for option in answer.split('\n') if option.strip()]
def get_job_listings(keywords: str, location: str, limit: int = 5) -> list:
    """Generate mock job listings with the LLM (no real job-board API yet).

    Asks the model for up to *limit* openings matching *keywords* in
    *location*; returns one entry per non-empty line of the response.
    """
    prompt = f"List {limit} recent job openings for '{keywords}' in {location}, include job title and brief description."
    answer = generate_llm_response(prompt)
    listings = []
    for raw_line in answer.split('\n'):
        entry = raw_line.strip()
        if entry:
            listings.append(entry)
    return listings
def score_cv(cv_text: str) -> int:
    """Score a CV by lexical richness.

    Counts distinct lemmas among alphabetic, non-stop-word tokens of the
    lower-cased text, capped at 100. An empty string scores 0.

    Args:
        cv_text: Raw CV text.

    Returns:
        An integer score in the range [0, 100].
    """
    doc = nlp_spacy(cv_text.lower())
    # Set comprehension instead of set([...]): one pass, no throwaway list.
    unique_lemmas = {token.lemma_ for token in doc if not token.is_stop and token.is_alpha}
    return min(100, len(unique_lemmas))