# Resume Tailor — Streamlit app.
# (Header noise from the original paste — "Spaces:" / "Runtime error" console
# output — removed so the file is valid Python.)
| # Update imports | |
| import streamlit as st | |
| import requests | |
| import json | |
| import webbrowser | |
| from io import StringIO | |
| from bs4 import BeautifulSoup | |
| import google.generativeai as genai | |
| import os | |
| # Initialize session state | |
| if 'original_resume' not in st.session_state: | |
| st.session_state['original_resume'] = None | |
| if 'keywords' not in st.session_state: | |
| st.session_state['keywords'] = None | |
| if 'tailored_resume' not in st.session_state: | |
| st.session_state['tailored_resume'] = None | |
def scrape_website(url, timeout=30):
    """Fetch a web page and return its visible text with markup stripped.

    Args:
        url: Absolute URL of the page to fetch.
        timeout: Seconds to wait for the HTTP response. Without this,
            ``requests.get`` can block indefinitely on a stalled server.

    Returns:
        The page's concatenated text content.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        requests.RequestException: On network failure or timeout.
    """
    # A browser-like User-Agent avoids trivial bot blocks on job boards.
    headers = {"User-Agent": "Mozilla/5.0 (compatible; ResumeTailor/1.0)"}
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    return soup.get_text()
def trim_text(text, max_length=3000):
    """Shorten *text* by keeping its head and tail, joined with an ellipsis.

    Text at or under *max_length* is returned unchanged; otherwise the first
    and last ``max_length // 2`` characters are kept around a ``...`` marker.
    """
    if len(text) <= max_length:
        return text
    keep = max_length // 2
    head, tail = text[:keep], text[-keep:]
    return f"{head}...{tail}"
# Configure Gemini
def init_gemini(api_key):
    """Configure the Gemini SDK and return a JSON-emitting model handle."""
    genai.configure(api_key=api_key)
    # response_mime_type forces JSON output so downstream json.loads works.
    return genai.GenerativeModel(
        model_name="gemini-2.0-flash-exp",
        generation_config={
            "temperature": 0.7,
            "top_p": 0.95,
            "top_k": 40,
            "max_output_tokens": 8192,
            "response_mime_type": "application/json",
        },
    )
def extract_keywords(job_description, model):
    """Ask the model for ATS keywords grouped by priority.

    Returns the parsed JSON object, expected to carry 'high', 'medium'
    and 'low' keyword arrays.
    """
    prompt = (
        "Extract key ATS keywords from job posting. "
        "Return JSON with 'high', 'medium', 'low' arrays:\n"
        f"{job_description}"
    )
    response = model.generate_content(prompt)
    return json.loads(response.text)
def validate_resume_schema(resume_data, original_schema):
    """Return True when *resume_data* mirrors *original_schema*'s structure.

    "Structure" means: identical nested dict keys, list-element shape
    (judged from the first element only), and leaf type names. Leaf values
    themselves are ignored.
    """
    def _shape(node):
        # Describe a value by its container layout and leaf type names.
        if isinstance(node, dict):
            return {key: _shape(value) for key, value in node.items()}
        if isinstance(node, list):
            return [_shape(node[0])] if node else []
        return type(node).__name__
    return _shape(original_schema) == _shape(resume_data)
def adapt_resume(resume_data, keywords, job_description, model, max_retries=3):
    """Rewrite a resume with the model while enforcing the original JSON schema.

    Args:
        resume_data: Parsed resume JSON (dict) to optimize.
        keywords: Dict with 'high'/'medium'/'low' keyword arrays.
        job_description: Raw text of the target job posting.
        model: Gemini model handle whose response carries JSON in ``.text``.
        max_retries: Attempts before giving up on a schema-conformant result.

    Returns:
        The tailored resume dict, structurally identical to the input.

    Raises:
        ValueError: If every attempt produced output with a different schema.
        Exception: Whatever the final attempt raised (model error, bad JSON).
    """
    original_schema = resume_data.copy()
    # The prompt is loop-invariant, so build it once outside the retry loop.
    prompt = f"""As a CV expert, optimize the provided resume JSON for the target role.
Enhance sections (summary, experience, volunteer, interests, awards, projects, skills) by incorporating provided keywords:
- High priority (3x weight)
- Medium priority (2x weight)
- Low priority (1x weight)
Rules:
- Keep all original facts and information
- Maintain exact JSON structure and all existing keys
- Use natural language from the keywords list
- Do not add fictional content
Base Schema: {json.dumps(original_schema)}
Keywords: {json.dumps(keywords)}
Job Description: {job_description}"""
    for attempt in range(max_retries):
        try:
            response = model.generate_content(prompt)
            tailored_resume = json.loads(response.text)
            if validate_resume_schema(tailored_resume, original_schema):
                return tailored_resume
            # Schema mismatch: silently retry until attempts are exhausted.
        except Exception:
            if attempt == max_retries - 1:
                # Bare `raise` keeps the original traceback intact
                # (unlike the previous `raise e`).
                raise
    raise ValueError("Schema validation failed")
def calculate_resume_match(resume_data, keywords):
    """Score how well a resume covers the weighted keyword lists.

    Keywords are matched case-insensitively as substrings of the resume's
    JSON serialization. Returns a ``(score, matches)`` pair where score is
    normalized to 0-100 and matches maps each priority level to the list
    of keywords found.

    Raises:
        ValueError: If *keywords* lacks any of the 'high'/'medium'/'low' keys.
    """
    # Weight multipliers per priority level.
    weights = {"high": 3, "medium": 2, "low": 1}
    if not all(level in keywords for level in ('high', 'medium', 'low')):
        raise ValueError("Keywords must contain 'high', 'medium', and 'low' arrays")
    haystack = json.dumps(resume_data).lower()
    matches = {}
    total_score = 0
    for level in ('high', 'medium', 'low'):
        found = [kw.lower() for kw in keywords[level] if kw.lower() in haystack]
        matches[level] = found
        total_score += len(found) * weights[level]
    # Normalize against the best possible score; empty keyword sets score 0.
    max_possible = sum(len(keywords[level]) * weights[level] for level in ('high', 'medium', 'low'))
    normalized_score = (total_score / max_possible * 100) if max_possible > 0 else 0
    return normalized_score, matches
def create_match_visualization(original_score, tailored_score, keywords, original_matches, tailored_matches):
    """Render a before/after comparison of resume-to-keyword match quality."""
    # Overall score comparison, side by side.
    st.markdown("### π Resume Match Analysis")
    left, right = st.columns(2)
    with left:
        st.metric(
            "Original Resume Match Score",
            f"{original_score:.1f}%"
        )
    with right:
        st.metric(
            "Tailored Resume Match Score",
            f"{tailored_score:.1f}%",
            delta=f"+{tailored_score - original_score:.1f}%"
        )
    # Per-priority keyword breakdown: one tab per level.
    st.markdown("### π― Keyword Matches")
    tabs = st.tabs(["High Priority π΄", "Medium Priority π‘", "Low Priority π’"])
    for tab, level in zip(tabs, ('high', 'medium', 'low')):
        with tab:
            before_col, after_col = st.columns(2)
            before = set(original_matches[level])
            gained = set(tailored_matches[level]) - before
            with before_col:
                st.markdown("#### Original Matching Keywords")
                if before:
                    for keyword in before:
                        st.markdown(f"β `{keyword}`")
                else:
                    st.info("No matches found")
            with after_col:
                st.markdown("#### Added the following Keywords")
                if gained:
                    for keyword in gained:
                        st.markdown(f"β `{keyword}`")
                else:
                    st.info("No new matches")
# Page config — must be the first Streamlit call in the script.
st.set_page_config(page_title="Resume Tailor", page_icon="π", layout="wide")
# Header
st.title("π Curriculum Customization Tool")
st.markdown("### Transform your resume for your dream job")
# Sidebar: usage instructions, about text, and a disclaimer.
with st.sidebar:
    st.markdown("### π How to Use")
    st.markdown("""
    1. **Prepare Your Resume**:
       - Create a basic resume at [rxresu.me](https://rxresu.me)
       - Export it as JSON (not PDF)
    2. **Get Job Details**:
       - Copy the job posting URL
    3. **Use the Tool**:
       - Upload your resume JSON
       - Paste the job URL
       - Click 'Tailor Resume'
    4. **Final Steps**:
       - Download the tailored JSON
       - Import back to rxresu.me
       - Export as PDF for application
    """)
    st.markdown("### βΉοΈ About")
    st.markdown("""
    This tool uses Google's Gemini model to optimize your resume for ATS systems.

    π Open Source: Feel free to modify and adapt this tool to your needs.
    The source code is available and customizable.

    π§ Contact: For questions or suggestions, reach out to:
    ai@holaivan.tech
    """)
    # Disclaimer
    st.warning("""
    β οΈ **Disclaimer**
    This tool is for educational purposes only.
    AI-based tools can produce unexpected results.
    Always verify the output before using.
    """)
# Read the Gemini API key from Streamlit secrets. Use .get() so a missing
# key yields None and the friendly error below renders — subscript access
# (st.secrets["..."]) raises before the message could ever be shown.
api_key = st.secrets.get("google_api_key")
if not api_key:
    st.error("API key not found in secrets. Please add your API key to the secrets.")
# Main input section: job URL on the left, resume upload on the right.
url_col, file_col = st.columns(2)
with url_col:
    job_url = st.text_input("Job Posting URL", placeholder="https://...")
with file_col:
    resume_file = st.file_uploader("Upload Resume (JSON)", type="json")
    if resume_file:
        # Decode the uploaded bytes and parse the resume JSON into session state.
        buffer = StringIO(resume_file.getvalue().decode("utf-8"))
        st.session_state['original_resume'] = json.load(buffer)
# Process button: on click, run the scrape -> extract-keywords -> tailor
# pipeline, then render the match analysis and a download button.
# All three inputs (URL, API key, resume upload) are required.
if st.button("π― Tailor Resume", type="primary", use_container_width=True):
    if job_url and api_key and resume_file:
        try:
            with st.status("π Processing...") as status:
                # Build the Gemini model handle from the configured API key.
                model = init_gemini(api_key)
                status.update(label="Analyzing job posting...")
                job_description = scrape_website(job_url)
                keywords = extract_keywords(job_description, model)
                st.session_state['keywords'] = keywords
                status.update(label="Tailoring resume...")
                tailored_resume = adapt_resume(
                    st.session_state['original_resume'],
                    keywords,
                    job_description,
                    model
                )
                st.session_state['tailored_resume'] = tailored_resume
                status.update(label="β Done!", state="complete")
            # Results section
            st.markdown("---")
            st.markdown("## π Results")
            # Score both versions against the same keywords so the
            # visualization can show the before/after improvement.
            original_score, original_matches = calculate_resume_match(
                st.session_state['original_resume'],
                st.session_state['keywords']
            )
            tailored_score, tailored_matches = calculate_resume_match(
                st.session_state['tailored_resume'],
                st.session_state['keywords']
            )
            create_match_visualization(
                original_score,
                tailored_score,
                st.session_state['keywords'],
                original_matches,
                tailored_matches
            )
            # Download section
            st.markdown("### π₯ Download")
            # NOTE(review): webbrowser.open_new_tab runs on the machine
            # hosting Streamlit, not the visitor's browser — this only
            # works for local single-user runs; confirm intended deployment.
            if st.download_button(
                "β¬οΈ Download Tailored Resume",
                data=json.dumps(st.session_state['tailored_resume'], indent=4),
                file_name="tailored_resume.json",
                mime="application/json",
                use_container_width=True
            ):
                webbrowser.open_new_tab("https://rxresu.me/")
                st.info("π Resume Builder opened in new tab")
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
    else:
        st.error("Please provide all required inputs")