Spaces:
Sleeping
Sleeping
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_core.messages import HumanMessage | |
| from utils import readPrompts | |
| import os | |
| import base64 | |
| import pdfplumber | |
| from io import BytesIO | |
class ResumeEditService:
    """Rewrite a PDF resume to target a specific job description using an LLM.

    Loads the prompt template from ``prompts.yaml`` in the current working
    directory at construction time and keeps a single configured
    ``ChatGoogleGenerativeAI`` instance for all requests.
    """

    def __init__(self):
        prompts = readPrompts(path=os.path.join(os.getcwd(), "prompts.yaml"))
        self.prompt_template = prompts.get("prompt")
        if self.prompt_template is None:
            # Fail fast with a clear message instead of an opaque
            # AttributeError on .replace() at request time.
            raise ValueError("prompts.yaml is missing the required 'prompt' key")
        self.llm = self._initialize_llm()

    def _initialize_llm(self):
        """Initialize the Google Generative AI chat model.

        Returns:
            A configured ``ChatGoogleGenerativeAI`` instance. The API key is
            read from the ``GOOGLE_API_KEY`` environment variable; if unset,
            ``None`` is passed through so the library's own credential
            resolution (if any) can apply.
        """
        return ChatGoogleGenerativeAI(
            model="gemini-3-flash-preview",
            temperature=0.3,
            google_api_key=os.getenv("GOOGLE_API_KEY"),
        )

    def _extract_text_from_pdf_base64(self, base64_pdf: str) -> str:
        """Extract text from a base64-encoded PDF.

        Args:
            base64_pdf: Base64-encoded PDF string.

        Returns:
            Extracted text, with pages separated by newlines and
            surrounding whitespace stripped. Pages with no extractable
            text contribute an empty segment.

        Raises:
            binascii.Error: If ``base64_pdf`` is not valid base64.
        """
        pdf_bytes = base64.b64decode(base64_pdf)
        # Join with a newline so words at a page boundary are not glued
        # together (the previous += "" concatenation had no separator),
        # and avoid quadratic string concatenation.
        pages = []
        with pdfplumber.open(BytesIO(pdf_bytes)) as pdf:
            for page in pdf.pages:
                pages.append(page.extract_text() or "")
        return "\n".join(pages).strip()

    def generate_optimized_resume(self, resume_base64: str, job_description: str, current_date: str) -> str:
        """Generate an optimized resume based on a job description.

        Args:
            resume_base64: Base64-encoded PDF resume.
            job_description: The target job description.
            current_date: Current date, substituted for timeline adjustments.

        Returns:
            Raw text response from the LLM (LaTeX code in markdown format).
        """
        current_resume = self._extract_text_from_pdf_base64(resume_base64)

        # Manual string replacement (avoiding template parsing issues with
        # braces that appear naturally in resume/LaTeX text).
        prompt = self.prompt_template.replace("{{CURRENT_DATE}}", current_date)
        prompt = prompt.replace("{{JOB_DESCRIPTION}}", job_description)
        prompt = prompt.replace("{{CURRENT_RESUME}}", current_resume)

        response = self.llm.invoke([HumanMessage(content=prompt)])

        # LangChain chat models may return message content either as a plain
        # string (the common case for Gemini) or as a list of content parts.
        # The previous `response.content[0]["text"]` raised TypeError on the
        # string form (string indices must be integers).
        content = response.content
        if isinstance(content, str):
            return content
        return "".join(
            part.get("text", "") if isinstance(part, dict) else str(part)
            for part in content
        )