import streamlit as st
import os
import traceback
from dotenv import load_dotenv
import PyPDF2
import docx
import markdown
import re
from datetime import datetime, timezone
import uuid
# import csv  # Not strictly needed if only using HF Datasets for feedback
import pandas as pd
import plotly.express as px
import io
import tempfile
import time
from fpdf import FPDF
import json
from st_copy_to_clipboard import st_copy_to_clipboard  # For copy-to-clipboard functionality
from huggingface_hub import HfApi, hf_hub_download, HfFolder  # For HF Datasets
from huggingface_hub.utils import RepositoryNotFoundError, EntryNotFoundError  # For HF Datasets

# Load environment variables
load_dotenv()

# --- Constants related to HF Dataset ---
HF_FEEDBACK_DATASET_REPO_ID = os.getenv("HF_FEEDBACK_DATASET_REPO_ID", "Krepselis/feedback_log")  # Default if not in .env
HF_FEEDBACK_FILENAME_IN_REPO = os.getenv("HF_FEEDBACK_FILENAME_IN_REPO", "feedback_log.parquet")  # Default
HF_TOKEN_FOR_DATASET = os.getenv("HF_FEEDBACK_DATASET_TOKEN")  # Must be set for writing

# --- Import your custom modules ---
try:
    from cv_match import find_similar_jobs, generate_embedding_for_skills
    from extract_skills_from_cv_file import get_extracted_skills_from_file
    from cover_letter_generator import CoverLetterGenerator
except ImportError as import_err:
    st.error(f"**Initialization Error:** Could not import custom modules: {import_err}")
    st.info("Ensure 'cv_match.py', 'extract_skills_from_cv_file.py', and 'cover_letter_generator.py' are in the app folder.")
    st.stop()
except Exception as general_import_err:
    st.error(f"**Initialization Error:** Unexpected error importing custom modules: {general_import_err}")
    traceback.print_exc()
    st.stop()

# Page config
st.set_page_config(
    page_title="CV Job Matcher | Denmark",
    page_icon="🎯",
    layout="wide",
    initial_sidebar_state="expanded"
)

# --- Constants and Normalization Data ---
APP_DIR = os.path.dirname(os.path.abspath(__file__))
SIMILARITY_THRESHOLD = 40.0  # Default similarity threshold
MAX_JOBS_TO_DISPLAY_PER_PAGE = 5
TOP_N_RESULTS_FROM_SEARCH = int(os.getenv('TOP_N_RESULTS_FOR_APP_QUERY', '200'))  # How many to fetch initially
CANONICAL_LANGUAGES_FOR_FILTER = ["English", "Danish", "German", "Spanish", "French", "Norwegian", "Swedish"]


# --- Helper Functions ---
def get_job_languages_from_metadata(job_match_dict):  # Expects the whole job_match dict
    extracted_languages_set = set()
    # Direct list of languages (if the LLM extracts this way or from pre-processing).
    # The 'language_requirements' key comes from the LLM extraction process and is
    # stored in Chroma metadata (now top-level in job_match_dict).
    direct_langs = job_match_dict.get("language_requirements")
    if isinstance(direct_langs, list):
        for lang_entry in direct_langs:
            if isinstance(lang_entry, str) and lang_entry.strip():
                lang_name = lang_entry.strip()
                for canonical in CANONICAL_LANGUAGES_FOR_FILTER:
                    if canonical.lower() == lang_name.lower():
                        extracted_languages_set.add(canonical)
                        break
            elif isinstance(lang_entry, dict) and "language" in lang_entry:  # If it's a list of dicts
                lang_name = lang_entry.get("language")
                if isinstance(lang_name, str) and lang_name.strip():
                    for canonical in CANONICAL_LANGUAGES_FOR_FILTER:
                        if canonical.lower() == lang_name.strip().lower():
                            extracted_languages_set.add(canonical)
                            break
    # Check for specific proficiency fields (e.g., lang_danish_proficiency).
    # These would also be top-level in job_match_dict if they came from Chroma metadata.
    for key, value in job_match_dict.items():
        if key.startswith("lang_") and key.endswith("_proficiency"):
            lang_name_from_key = key.replace("lang_", "").replace("_proficiency", "")
            for canonical in CANONICAL_LANGUAGES_FOR_FILTER:
                if canonical.lower() == lang_name_from_key.lower():
                    extracted_languages_set.add(canonical)
                    break
    # Use the detected ad language as a final hint
    detected_ad_lang_raw = job_match_dict.get("detected_ad_language")  # Assuming lowercase key
    if isinstance(detected_ad_lang_raw, str) and detected_ad_lang_raw.strip() and detected_ad_lang_raw.lower() != "unknown":
        for canonical in CANONICAL_LANGUAGES_FOR_FILTER:
            if canonical.lower() == detected_ad_lang_raw.strip().lower():
                extracted_languages_set.add(canonical)
                break
    return sorted(list(extracted_languages_set))
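
# Illustrative sketch (not called by the app): a doctest showing the metadata
# shapes get_job_languages_from_metadata understands. The sample keys mirror
# the Chroma metadata fields read above; the values are hypothetical.
def _example_language_extraction():
    """Run with `python -m doctest app.py` to sanity-check locally.

    >>> sample_match = {
    ...     "language_requirements": ["english", {"language": "Danish"}],
    ...     "lang_german_proficiency": "B2",
    ...     "detected_ad_language": "Swedish",
    ... }
    >>> get_job_languages_from_metadata(sample_match)
    ['Danish', 'English', 'German', 'Swedish']
    """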

@st.cache_resource
def get_cover_letter_generator():
    try:
        return CoverLetterGenerator()
    except Exception as e:
        st.error(f"Cover Letter Generator Error: {e}. Check OPENAI_API_KEY.")
        return None


cover_letter_gen = get_cover_letter_generator()


class PDF(FPDF):
    def header(self):
        pass

    def footer(self):
        pass


def create_pdf_from_text(text_content):
    try:
        pdf = PDF()
        pdf.add_page()
        font_path = os.path.join(APP_DIR, "DejaVuSans.ttf")
        font_name = "DejaVu"
        unicode_font_loaded = False
        try:
            if os.path.exists(font_path):
                pdf.add_font(font_name, "", font_path, uni=True)
            else:
                raise RuntimeError(f"Font file {font_path} not found. Ensure it's in: {APP_DIR}")
            pdf.set_font(font_name, size=11)
            unicode_font_loaded = True
        except RuntimeError:
            print(f"Warning: Custom font {font_name} at {font_path} not found. Falling back to Arial.")
            try:
                pdf.set_font("Arial", size=11)  # Common fallback
            except RuntimeError:
                pdf.set_font("Helvetica", size=11)  # Another common fallback
        # The core fonts only cover Latin-1, so sanitize the text unless the
        # Unicode TrueType font was loaded successfully.
        encoded_text = text_content if unicode_font_loaded else text_content.encode('latin-1', 'replace').decode('latin-1')
        pdf.multi_cell(0, 5, encoded_text)  # multi_cell handles text wrapping
        pdf_output_bytes = pdf.output(dest='S')
        if not pdf_output_bytes:
            st.error("PDF generation resulted in empty output.")
            return None
        return pdf_output_bytes
    except Exception as e:
        st.error(f"Error generating PDF: {e}")
        print(f"PDF error: {traceback.format_exc()}")
        return None


def read_cv_file(uploaded_file):
    if not uploaded_file:
        return None
    try:
        file_name = uploaded_file.name
        file_ext = os.path.splitext(file_name)[1].lower()
        cv_text = ""
        with st.spinner(f"Reading `{file_name}`..."):
            content_bytes = uploaded_file.getvalue()
            if file_ext == '.pdf':
                reader = PyPDF2.PdfReader(io.BytesIO(content_bytes))
                cv_text = "".join([(p.extract_text() or "") for p in reader.pages])
            elif file_ext == '.docx':
                document = docx.Document(io.BytesIO(content_bytes))
                cv_text = "\n".join([p.text for p in document.paragraphs])
            elif file_ext == '.md':
                html = markdown.markdown(content_bytes.decode("utf-8", errors="ignore"))
                cv_text = re.sub('<[^>]*>', ' ', html).strip()
            elif file_ext == '.txt':
                cv_text = content_bytes.decode("utf-8", errors="ignore")
            else:
                st.error(f"Unsupported file type: `{file_ext}`.")
                return None
        final_text = cv_text.strip()
        if not final_text:
            st.warning("No text extracted from CV.")
            return None
        return final_text
    except Exception as e:
        st.error(f"Error reading '{uploaded_file.name}'.")
        print(f"Read error: {traceback.format_exc()}")
        return None
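
# create_pdf_from_text is not wired into the UI below; this is a minimal
# sketch (assumption: fpdf returns the document as bytes/bytearray) of how it
# could back a download button for a generated cover letter.
def _example_pdf_download(letter_text):
    # Illustrative only (not called by the app).
    pdf_bytes = create_pdf_from_text(letter_text)
    if pdf_bytes:
        st.download_button(
            label="Download Cover Letter (PDF)",
            data=bytes(pdf_bytes),
            file_name="cover_letter.pdf",
            mime="application/pdf",
        )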

def initialize_hf_dataset_feedback_file():
    if not HF_TOKEN_FOR_DATASET:
        print("ERROR: HF_TOKEN_FOR_DATASET not set. Cannot initialize HF Dataset for feedback.")
        # st.error("Feedback system (Dataset) not configured: Missing token.")  # Avoid st calls here if run at startup
        return False
    try:
        hf_hub_download(
            repo_id=HF_FEEDBACK_DATASET_REPO_ID,
            filename=HF_FEEDBACK_FILENAME_IN_REPO,
            repo_type="dataset",
            token=HF_TOKEN_FOR_DATASET
        )
        print(f"INFO: Feedback file '{HF_FEEDBACK_FILENAME_IN_REPO}' found in dataset '{HF_FEEDBACK_DATASET_REPO_ID}'.")
        return True
    except EntryNotFoundError:
        print(f"INFO: Feedback file '{HF_FEEDBACK_FILENAME_IN_REPO}' not found in dataset. Will be created on first feedback.")
        # Create an empty file with just the header
        header = ["timestamp", "session_id", "cv_upload_time", "job_chroma_id", "predicted_score", "rank_displayed", "rating"]
        empty_df = pd.DataFrame(columns=header)
        api = HfApi(token=HF_TOKEN_FOR_DATASET)
        with io.BytesIO() as parquet_buffer:
            empty_df.to_parquet(parquet_buffer, index=False)
            parquet_buffer.seek(0)
            try:
                api.upload_file(
                    path_or_fileobj=parquet_buffer,
                    path_in_repo=HF_FEEDBACK_FILENAME_IN_REPO,
                    repo_id=HF_FEEDBACK_DATASET_REPO_ID,
                    repo_type="dataset",
                    commit_message="Initialize empty feedback log"
                )
                print(f"INFO: Initialized empty feedback file '{HF_FEEDBACK_FILENAME_IN_REPO}' in dataset.")
                return True
            except Exception as e_upload:
                print(f"ERROR initializing empty feedback file in dataset: {e_upload}")
                return False
    except RepositoryNotFoundError:
        # st.error(f"Feedback Dataset Repo '{HF_FEEDBACK_DATASET_REPO_ID}' not found. Please create it on Hugging Face Hub.")
        print(f"ERROR: Feedback Dataset Repo '{HF_FEEDBACK_DATASET_REPO_ID}' not found.")
        return False
    except Exception as e:
        # st.error(f"Error checking feedback dataset: {e}")
        print(f"ERROR checking feedback dataset: {e}")
        traceback.print_exc()
        return False

def record_feedback_hf_dataset(session_id_val, cv_upload_time_val, job_chroma_id_val, rating_val, predicted_score_val, rank_displayed_val):
    if not HF_TOKEN_FOR_DATASET:
        st.error("Feedback system (Dataset) not configured: Missing token.")
        return False
    api = HfApi(token=HF_TOKEN_FOR_DATASET)
    feedback_entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "session_id": session_id_val or "",
        "cv_upload_time": cv_upload_time_val or "",
        "job_chroma_id": job_chroma_id_val or "",
        "predicted_score": predicted_score_val if predicted_score_val is not None else pd.NA,
        "rank_displayed": rank_displayed_val if rank_displayed_val is not None else pd.NA,
        "rating": rating_val or ""
    }
    new_feedback_df = pd.DataFrame([feedback_entry])
    try:
        try:
            existing_file_path = hf_hub_download(
                repo_id=HF_FEEDBACK_DATASET_REPO_ID,
                filename=HF_FEEDBACK_FILENAME_IN_REPO,
                repo_type="dataset",
                token=HF_TOKEN_FOR_DATASET
            )
            existing_df = pd.read_parquet(existing_file_path)
            updated_df = pd.concat([existing_df, new_feedback_df], ignore_index=True)
        except EntryNotFoundError:
            print(f"INFO: '{HF_FEEDBACK_FILENAME_IN_REPO}' not found for append. Creating new.")
            updated_df = new_feedback_df
        except Exception as e_read:
            print(f"Error reading existing feedback parquet: {e_read}. Creating a new one.")
            updated_df = new_feedback_df
        with io.BytesIO() as parquet_buffer:
            updated_df.to_parquet(parquet_buffer, index=False)
            parquet_buffer.seek(0)
            api.upload_file(
                path_or_fileobj=parquet_buffer,
                path_in_repo=HF_FEEDBACK_FILENAME_IN_REPO,
                repo_id=HF_FEEDBACK_DATASET_REPO_ID,
                repo_type="dataset",
                commit_message=f"Append feedback for job {job_chroma_id_val}"
            )
        st.toast("Feedback saved to Dataset!", icon="💾")
        print(f"INFO: Recorded feedback to HF Dataset: {feedback_entry}")
        return True
    except Exception as e:
        st.error(f"Error saving feedback to Dataset: {e}")
        print(f"ERROR recording feedback to HF Dataset: {e}")
        traceback.print_exc()
        return False
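
# A minimal sketch (not called by the app) for inspecting the feedback log
# locally using the same repo/env configuration as above; handy when
# debugging the download/append/re-upload pattern used in the function above.
def _example_inspect_feedback_log():
    path = hf_hub_download(
        repo_id=HF_FEEDBACK_DATASET_REPO_ID,
        filename=HF_FEEDBACK_FILENAME_IN_REPO,
        repo_type="dataset",
        token=HF_TOKEN_FOR_DATASET,
    )
    df = pd.read_parquet(path)
    print(df.tail())  # most recent feedback entries
    print(df["rating"].value_counts())  # overall up/down split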

def load_and_process_feedback_hf_dataset():
    internal_df_columns = ["ts", "sid", "cv_ts", "jid", "pred_score", "rank_disp", "rt"]
    default_result = {
        "aggregates": {"per_job": {}, "total_up": 0, "total_down": 0},
        "dataframe": pd.DataFrame(columns=internal_df_columns)
    }
    if not HF_TOKEN_FOR_DATASET:
        print("DEBUG: load_and_process_feedback_hf_dataset - HF_TOKEN_FOR_DATASET not set.")
        return default_result
    try:
        print(f"DEBUG: Attempting to download {HF_FEEDBACK_FILENAME_IN_REPO} from {HF_FEEDBACK_DATASET_REPO_ID}")
        with st.spinner("Loading feedback data from Hugging Face Hub..."):
            feedback_file_path = hf_hub_download(
                repo_id=HF_FEEDBACK_DATASET_REPO_ID,
                filename=HF_FEEDBACK_FILENAME_IN_REPO,
                repo_type="dataset",
                token=HF_TOKEN_FOR_DATASET,
                force_download=True
            )
            df = pd.read_parquet(feedback_file_path)
        print(f"DEBUG: Pandas read_parquet successful. Shape: {df.shape}, Columns: {df.columns.tolist()}")
        if df.empty:
            print("DEBUG: DataFrame from HF Dataset is empty.")
            return default_result
        rename_map = {
            "timestamp": "ts", "session_id": "sid", "cv_upload_time": "cv_ts",
            "job_chroma_id": "jid", "predicted_score": "pred_score",
            "rank_displayed": "rank_disp", "rating": "rt"
        }
        for expected_col in rename_map.keys():
            if expected_col not in df.columns:
                print(f"WARNING: Expected column '{expected_col}' not in Parquet. Adding NA.")
                df[expected_col] = pd.NA
        df_renamed = df.rename(columns=rename_map)
        total_up_votes = 0
        total_down_votes = 0
        per_job_feedback = {}
        if 'rt' in df_renamed.columns:
            total_up_votes = int(df_renamed['rt'].value_counts().get('up', 0))
            total_down_votes = int(df_renamed['rt'].value_counts().get('down', 0))
        if 'jid' in df_renamed.columns and 'rt' in df_renamed.columns:
            try:
                job_counts = df_renamed.groupby('jid')['rt'].value_counts().unstack(fill_value=0)
                if 'up' not in job_counts.columns:
                    job_counts['up'] = 0
                if 'down' not in job_counts.columns:
                    job_counts['down'] = 0
                per_job_feedback = job_counts.apply(lambda row: {"up": int(row['up']), "down": int(row['down'])}, axis=1).to_dict()
            except Exception as e_agg:
                print(f"DEBUG: Error during per-job aggregation: {e_agg}")
        aggregates = {"per_job": per_job_feedback, "total_up": total_up_votes, "total_down": total_down_votes}
        if 'ts' in df_renamed.columns:
            df_renamed['ts'] = pd.to_datetime(df_renamed['ts'], errors='coerce')
            df_renamed.dropna(subset=['ts'], inplace=True)
        print(f"DEBUG: load_and_process_feedback_hf_dataset - Processed Aggregates: TotalUp={total_up_votes}, TotalDown={total_down_votes}")
        return {"aggregates": aggregates, "dataframe": df_renamed}
    except EntryNotFoundError:
        print(f"DEBUG: {HF_FEEDBACK_FILENAME_IN_REPO} not found in dataset '{HF_FEEDBACK_DATASET_REPO_ID}'. Initializing.")
        initialize_hf_dataset_feedback_file()
        return default_result
    except RepositoryNotFoundError:
        st.error(f"Feedback Dataset Repo '{HF_FEEDBACK_DATASET_REPO_ID}' not found.")
        print(f"ERROR: Feedback Dataset Repo '{HF_FEEDBACK_DATASET_REPO_ID}' not found.")
        return default_result
    except Exception as e:
        print(f"DEBUG: Exception loading/processing feedback from HF Dataset: {e}")
        traceback.print_exc()
        st.warning(f"Could not process feedback from Dataset: {e}")
        return default_result


# --- Streamlit App State Initialization ---
if 'session_id' not in st.session_state:
    st.session_state.session_id = str(uuid.uuid4())
if 'cv_upload_time' not in st.session_state:
    st.session_state.cv_upload_time = None
if 'feedback_given_jobs' not in st.session_state:
    st.session_state.feedback_given_jobs = {}
if 'cv_skills' not in st.session_state:
    st.session_state.cv_skills = None
if 'generated_cover_letters' not in st.session_state:
    st.session_state.generated_cover_letters = {}
if 'all_job_matches_cache' not in st.session_state:
    st.session_state.all_job_matches_cache = None
if 'cv_text_cache' not in st.session_state:
    st.session_state.cv_text_cache = None
if 'feedback_loaded' not in st.session_state:
    feedback_result = load_and_process_feedback_hf_dataset()
    st.session_state.feedback_aggregates = feedback_result["aggregates"]
    st.session_state.feedback_df = feedback_result["dataframe"]
    st.session_state.feedback_loaded = True

# --- App Header & Intro ---
st.title("👨‍💼🇩🇰 CV Job Matcher")
st.subheader("Unlock Your Next Career Move in Denmark!")
st.markdown("Upload your CV, and our AI will find jobs that truly match your skills, help you understand why, and even draft a cover letter.")
st.markdown("---")

# --- Prerequisite Checks ---
if not all([os.getenv('EMBEDDING_API_URL'), os.getenv('CHROMA_HOST'), os.getenv('CHROMA_PORT'), os.getenv('CHROMA_COLLECTION')]):
    st.error("Backend services are not fully configured. Please check secrets. The app cannot function without them.")
    st.stop()
if not os.getenv("OPENAI_API_KEY"):
    st.warning("OpenAI API key not found. Cover letter generation will be disabled.", icon="🔒")
    cover_letter_gen = None
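
# For reference, a sample .env covering every environment variable this app
# reads; the values shown are placeholders, not real endpoints or tokens.
#
#   EMBEDDING_API_URL=http://localhost:8000/embed
#   CHROMA_HOST=localhost
#   CHROMA_PORT=8001
#   CHROMA_COLLECTION=jobs
#   OPENAI_API_KEY=sk-...
#   HF_FEEDBACK_DATASET_REPO_ID=Krepselis/feedback_log
#   HF_FEEDBACK_FILENAME_IN_REPO=feedback_log.parquet
#   HF_FEEDBACK_DATASET_TOKEN=hf_...
#   TOP_N_RESULTS_FOR_APP_QUERY=200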
# --- Main App Layout ---
with st.sidebar:
    st.header("🚀 Get Started")
    uploaded_file = st.file_uploader(
        "1. Upload Your CV",
        type=['pdf', 'docx', 'txt', 'md'],
        key="cv_uploader_key",
        help="Supports PDF, DOCX, Markdown, and TXT files.",
        on_change=lambda: st.session_state.update(
            all_job_matches_cache=None, generated_cover_letters={},
            cv_text_cache=None, cv_skills=None, feedback_given_jobs={}
        )
    )
    st.markdown("---")
    st.header("🔍 Filter Job Matches")
    unique_locations_options = []
    unique_categories_options = []
    if st.session_state.all_job_matches_cache:
        unique_locations_options = sorted(list(set(job.get('area', 'N/A') for job in st.session_state.all_job_matches_cache if job.get('area'))))
        raw_categories_from_cache = [job.get('category') for job in st.session_state.all_job_matches_cache]
        unique_categories_options = sorted(list(set(cat for cat in raw_categories_from_cache if isinstance(cat, str) and cat.strip())))
    selected_locations = st.multiselect("Job Locations (Area)", options=unique_locations_options, placeholder="Any Location" if unique_locations_options else "Upload CV")
    selected_categories = st.multiselect("Job Categories", options=unique_categories_options, placeholder="Any Category" if unique_categories_options else "Upload CV")
    selected_languages = st.multiselect("Required Languages", options=CANONICAL_LANGUAGES_FOR_FILTER, placeholder="Any Language")
    st.markdown("---")
    st.info("Tip: Upload your CV first, then apply filters.")
    st.caption(f"Displays the top {MAX_JOBS_TO_DISPLAY_PER_PAGE} matches after filters, from up to {TOP_N_RESULTS_FROM_SEARCH} initial results.")

placeholder_processing_status = st.empty()

if uploaded_file is not None:
    if st.session_state.all_job_matches_cache is None:
        with placeholder_processing_status.container():
            with st.spinner(f"Analyzing `{uploaded_file.name}`... This might take a moment."):
                st.session_state.cv_upload_time = datetime.now(timezone.utc).isoformat()
                st.session_state.feedback_given_jobs = {}
                st.session_state.generated_cover_letters = {}
                cv_text = read_cv_file(uploaded_file)
                st.session_state.cv_text_cache = cv_text
                if cv_text:
                    with st.spinner("🤖 Extracting skills from your CV..."):
                        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[1]) as tmp_file:
                            tmp_file.write(uploaded_file.getvalue())
                            temp_cv_path = tmp_file.name
                        cv_skills_list = get_extracted_skills_from_file(temp_cv_path)
                        if os.path.exists(temp_cv_path):
                            os.unlink(temp_cv_path)
                    if not cv_skills_list:
                        placeholder_processing_status.error("Could not extract skills from CV.")
                        st.stop()
                    st.session_state.cv_skills = cv_skills_list
                    if len(cv_skills_list) <= 50:
                        with st.expander("💡 View Extracted CV Skills", expanded=False):
                            st.caption(f"{len(cv_skills_list)} skills: {', '.join(cv_skills_list)}")
                    else:
                        st.sidebar.caption(f"Extracted {len(cv_skills_list)} skills from CV.")
                    with st.spinner("🧬 Generating CV skills embedding..."):
                        cv_skill_embedding_vec = generate_embedding_for_skills(cv_skills_list)
                    if cv_skill_embedding_vec is None:
                        placeholder_processing_status.error("Could not generate CV embedding.")
                        st.stop()
                    with st.spinner('🧠 Searching for matching jobs in database...'):
                        # Call find_similar_jobs and unpack the (matches, status) tuple
                        matches_list, search_status_msg = find_similar_jobs(
                            cv_skills=cv_skills_list,
                            cv_embedding=cv_skill_embedding_vec,
                            top_n=TOP_N_RESULTS_FROM_SEARCH,
                            active_only=True  # Use active_only, not filter_active_only
                        )
                        st.session_state.all_job_matches_cache = matches_list if matches_list is not None else []
                        print(f"Search status from cv_match: {search_status_msg}")
                else:
                    placeholder_processing_status.error("Could not read CV content.")
                    st.session_state.all_job_matches_cache = []
        if st.session_state.all_job_matches_cache:
            placeholder_processing_status.success(f"CV processing complete! Found {len(st.session_state.all_job_matches_cache)} potential matches. Filters updated.")
            time.sleep(1)
            placeholder_processing_status.empty()
            st.rerun()
        else:
            placeholder_processing_status.warning("CV processing finished, but no initial matches found or an error occurred.")
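
# For reference, each entry in all_job_matches_cache is expected to be a dict
# shaped roughly like the hypothetical example below (keys inferred from the
# accesses in the display loop further down; values are illustrative only):
#
#   {
#       "chroma_id": "job_123", "title": "Data Engineer", "company": "Acme ApS",
#       "area": "Copenhagen", "category": "IT", "status": "active",
#       "application_url": "https://example.com/apply", "score": 72.5,
#       "contributing_skills": [("python", 0.81), ("etl", 0.77)],
#       "document": "<full job ad text>",
#       "language_requirements": ["English"], "detected_ad_language": "english",
#   }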
# Apply sidebar filters to the cached matches (if any), then keep only
# results at or above the similarity threshold.
current_matches_to_filter = []
final_display_matches = []
if st.session_state.all_job_matches_cache is not None:
    current_matches_to_filter = list(st.session_state.all_job_matches_cache)
    if selected_locations:
        current_matches_to_filter = [job for job in current_matches_to_filter if job.get('area') in selected_locations]
    if selected_categories:
        current_matches_to_filter = [job for job in current_matches_to_filter if job.get('category') in selected_categories]
    if selected_languages:
        current_matches_to_filter = [job for job in current_matches_to_filter if any(lang in selected_languages for lang in get_job_languages_from_metadata(job))]
    final_display_matches = [j for j in current_matches_to_filter if isinstance(j.get('score'), (int, float)) and j.get('score', 0) >= SIMILARITY_THRESHOLD]
    final_display_matches.sort(key=lambda x: x.get('score', 0), reverse=True)
    final_display_matches = final_display_matches[:MAX_JOBS_TO_DISPLAY_PER_PAGE]

tab_results, tab_feedback_analytics = st.tabs(["🎯 Matching Jobs", "📊 Feedback Analytics"])

with tab_results:
    if not uploaded_file:
        st.info("👋 Upload your CV using the sidebar to find matching jobs!")
    elif not st.session_state.all_job_matches_cache and uploaded_file:
        st.warning("Initial job search yielded no results or an error occurred.", icon="🤷")
    if final_display_matches:
        st.success(f"Displaying top {len(final_display_matches)} of {len(current_matches_to_filter)} filtered matches (Score ≥ {SIMILARITY_THRESHOLD:.0f}%). Total initial matches: {len(st.session_state.all_job_matches_cache)}")
        for i, job_match in enumerate(final_display_matches):
            job_unique_id = job_match.get('chroma_id', job_match.get('job_id', f"job_fallback_{i}_{uuid.uuid4()}"))
            job_title = job_match.get('title', 'N/A')
            job_company = job_match.get('company', 'N/A')
            job_area_display = job_match.get('area', 'N/A')
            job_category_display = job_match.get('category', 'N/A')
            job_status = job_match.get('status', 'unknown').capitalize()
            job_url_primary = job_match.get('application_url', job_match.get('url', '#'))
            job_score = job_match.get('score', 0.0)
            contributing_skills = job_match.get('contributing_skills', [])
            job_description_text = job_match.get('document', '')  # 'document' as per the updated cv_match.py
            job_languages_display = get_job_languages_from_metadata(job_match)
            job_feedback_stats = st.session_state.feedback_aggregates["per_job"].get(job_unique_id, {"up": 0, "down": 0})
            with st.container(border=True):
                main_cols = st.columns([5, 2])
                with main_cols[0]:
                    st.markdown(f"#### {i+1}. {job_title}")
{job_title}") st.caption(f"**🏒 Company:** {job_company} | **πŸ“ Location:** {job_area_display} | **🏷️ Category:** {job_category_display} | **🚦 Status:** `{job_status}`") if job_languages_display: st.caption(f"**πŸ—£οΈ Languages:** {', '.join(job_languages_display)}") with st.expander("πŸ”¬ See Matching Skills Analysis", expanded=False): if contributing_skills: st.markdown("**Key CV skills contributing to this match:**") for skill_text, skill_sim_score in contributing_skills: st.markdown(f"- `{skill_text}` (Contribution: {skill_sim_score:.2f})") elif not contributing_skills: st.caption("Skill contribution analysis not available or no strong contributing skills.") action_cols = st.columns([1, 1]) with action_cols[0]: if job_url_primary and job_url_primary != '#': st.link_button("Apply Now πŸš€", url=job_url_primary, type="primary", use_container_width=True) else: st.button("Apply Now", disabled=True, use_container_width=True, help="Application link unavailable.") with action_cols[1]: cl_button_key = f"cl_btn_{job_unique_id}" disable_cl = not (cover_letter_gen and job_description_text and st.session_state.get('cv_text_cache')) cl_help_text = "Job/CV text missing." if not (job_description_text and st.session_state.get('cv_text_cache')) else "OpenAI API key missing." if not cover_letter_gen else "Draft cover letter" if st.button("Draft Cover Letter πŸ“„", key=cl_button_key, use_container_width=True, disabled=disable_cl, help=cl_help_text): with st.spinner("πŸ–‹οΈ Drafting cover letter..."): gen_letter = cover_letter_gen.generate_cover_letter(job_description_text, st.session_state.cv_text_cache) st.session_state.generated_cover_letters[job_unique_id] = gen_letter if (gen_letter and not gen_letter.lower().startswith("error:")) else "Error: Could not generate cover letter." if job_unique_id in st.session_state.generated_cover_letters: letter_content = st.session_state.generated_cover_letters[job_unique_id] if not letter_content.lower().startswith("error:"): st.markdown("**Generated Cover Letter Draft:**") st.text_area("", value=letter_content, height=250, key=f"cl_txt_{job_unique_id}", help="AI draft. 
                            st_copy_to_clipboard(letter_content, key=f"copy_widget_{job_unique_id}")
                        else:
                            st.error(letter_content, icon="⚠️")
                with main_cols[1]:
                    st.metric("Match Score", f"{job_score:.1f}%", help="How well your CV skills match this job's profile.")
                    st.markdown("**Rate this match:**")
                    fb_key_suffix = f"fb_{job_unique_id}_{st.session_state.cv_upload_time or str(time.time())}"
                    current_rating = st.session_state.feedback_given_jobs.get(job_unique_id)
                    fb_cols = st.columns(2)
                    with fb_cols[0]:
                        if st.button("👍", key=f"up_{fb_key_suffix}", disabled=(current_rating is not None), use_container_width=True, type="primary" if current_rating == "up" else "secondary", help="Good match!"):
                            if record_feedback_hf_dataset(st.session_state.session_id, st.session_state.cv_upload_time, job_unique_id, "up", predicted_score_val=job_score, rank_displayed_val=str(i + 1)):
                                st.session_state.feedback_given_jobs[job_unique_id] = "up"
                                st.rerun()
                    with fb_cols[1]:
                        if st.button("👎", key=f"down_{fb_key_suffix}", disabled=(current_rating is not None), use_container_width=True, type="primary" if current_rating == "down" else "secondary", help="Not a good match."):
                            if record_feedback_hf_dataset(st.session_state.session_id, st.session_state.cv_upload_time, job_unique_id, "down", predicted_score_val=job_score, rank_displayed_val=str(i + 1)):
                                st.session_state.feedback_given_jobs[job_unique_id] = "down"
                                st.rerun()
                    st.caption(f"Community Votes: 👍{job_feedback_stats.get('up', 0)} | 👎{job_feedback_stats.get('down', 0)}")
                    if current_rating:
                        st.success(f"You rated: {'👍' if current_rating == 'up' else '👎'}")
    elif st.session_state.all_job_matches_cache is not None and not final_display_matches and (selected_locations or selected_categories or selected_languages):
        st.info("No jobs match your current filter criteria.", icon="🧐")
    elif st.session_state.all_job_matches_cache is not None and not final_display_matches and uploaded_file:
        st.info(f"Found {len(st.session_state.all_job_matches_cache)} potential matches, but none scored above {SIMILARITY_THRESHOLD:.0f}%. Try adjusting filters or uploading a different CV.", icon="📉")
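
# A minimal helper sketch (not called by the app) isolating the
# resample/unstack pattern the analytics tab below uses for its daily trend
# chart; it expects the internal column names produced earlier
# ("ts" datetime, "rt" rating).
def _example_daily_feedback_trend(feedback_df):
    daily = feedback_df.set_index('ts').resample('D')['rt'].value_counts().unstack(fill_value=0)
    for col in ('up', 'down'):
        if col not in daily.columns:
            daily[col] = 0  # ensure both rating columns exist
    return daily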
with tab_feedback_analytics:
    st.header("📈 Community Feedback Analytics")
    st.markdown("Insights from user feedback (powered by Hugging Face Datasets). Your ratings help improve this overview!")
    st.divider()
    feedback_aggregates = st.session_state.get("feedback_aggregates", {"total_up": 0, "total_down": 0, "per_job": {}})
    feedback_df_display = st.session_state.get("feedback_df", pd.DataFrame())
    total_up = feedback_aggregates.get("total_up", 0)
    total_down = feedback_aggregates.get("total_down", 0)
    total_votes = total_up + total_down
    if st.button("🔄 Refresh Feedback Data"):
        with st.spinner("Reloading feedback from Hugging Face Hub..."):
            feedback_result_refresh = load_and_process_feedback_hf_dataset()
            st.session_state.feedback_aggregates = feedback_result_refresh["aggregates"]
            st.session_state.feedback_df = feedback_result_refresh["dataframe"]
            # Recalculate after refresh
            total_up = st.session_state.feedback_aggregates.get("total_up", 0)
            total_down = st.session_state.feedback_aggregates.get("total_down", 0)
            total_votes = total_up + total_down
            feedback_df_display = st.session_state.feedback_df
        st.rerun()
    if total_votes > 0:
        st.markdown("**Key Metrics**")
        satisfaction_score = (total_up / total_votes) * 100
        m_col1, m_col2 = st.columns(2)
        m_col1.metric("Total Feedback Votes", total_votes)
        m_col2.metric("Overall Satisfaction", f"{satisfaction_score:.1f}%")
        st.divider()
        st.markdown("**Feedback Distribution**")
        pie_data = pd.DataFrame({'Rating Type': ['Good Matches 👍', 'Bad Matches 👎'], 'Votes': [total_up, total_down]})
        try:
            fig_pie = px.pie(pie_data, values='Votes', names='Rating Type', color='Rating Type', color_discrete_map={'Good Matches 👍': '#2ECC71', 'Bad Matches 👎': '#E74C3C'}, hole=0.3)
            fig_pie.update_layout(legend_title_text='Feedback', margin=dict(t=20, b=20, l=0, r=0))
            fig_pie.update_traces(textposition='inside', textinfo='percent+label')
            st.plotly_chart(fig_pie, use_container_width=True)
        except Exception as plot_err:
            st.error(f"Pie chart error: {plot_err}")
        st.divider()
        st.markdown("**Feedback Trend Over Time (Daily)**")
        if (feedback_df_display is not None and not feedback_df_display.empty
                and 'ts' in feedback_df_display.columns
                and pd.api.types.is_datetime64_any_dtype(feedback_df_display['ts'])):
            try:
                daily_fb = feedback_df_display.set_index('ts').resample('D')['rt'].value_counts().unstack(fill_value=0)
                if 'up' not in daily_fb.columns:
                    daily_fb['up'] = 0
                if 'down' not in daily_fb.columns:
                    daily_fb['down'] = 0
                daily_fb = daily_fb.rename(columns={'up': 'Good 👍', 'down': 'Bad 👎'})
                if not daily_fb.empty and (daily_fb['Good 👍'].sum() > 0 or daily_fb['Bad 👎'].sum() > 0):
                    fig_time = px.area(daily_fb, y=['Good 👍', 'Bad 👎'], labels={"ts": "Date", "value": "Ratings", "variable": "Rating"}, color_discrete_map={'Good 👍': '#2ECC71', 'Bad 👎': '#E74C3C'}, markers=True)
                    fig_time.update_layout(hovermode="x unified", legend_title_text='Rating', yaxis_title="Number of Ratings")
                    st.plotly_chart(fig_time, use_container_width=True)
                else:
                    st.info("Not enough data for trend chart after resampling.", icon="📈")
            except Exception as time_plot_err:
                st.error(f"Trend chart error: {time_plot_err}")
        else:
            st.info("Not enough timestamp data, or the DataFrame is empty, for the trend chart.", icon="⏳")
    else:
        st.info("📊 No feedback data recorded in the Dataset yet. Be the first to rate some matches!", icon="✏️")

# Footer
st.markdown("---")
st.caption("CV Job Matcher | An MLOps Project | For Educational Purposes")