Spaces:
Sleeping
Sleeping
import json
import os
import tempfile

import gradio as gr
import pandas as pd

from comparison_utils import (
    compare_with_chatgpt_job_title,
    compare_with_chatgpt_education,
    compare_with_chatgpt_location,
    compare_age_range_with_description
)
from job_description_extractor import JobDescriptionExtractor
from model_trainer import ModelTrainer
from resume_extractor import ResumeExtractor
from synthetic_data import create_synthetic_data
def main(resume_text, job_description):
    """Score a resume against a job description.

    Extracts structured fields from both texts, trains a model on synthetic
    data generated from the job profile, and predicts a match target.

    Parameters
    ----------
    resume_text : str
        Raw resume text.
    job_description : str
        Raw job-description text.

    Returns
    -------
    dict
        Extracted fields from both documents plus ``predicted_target``,
        converted to a native Python scalar so it is JSON-serializable.
    """
    # SECURITY FIX: the API key was hard-coded (and therefore leaked) in
    # source control. Read it from the environment instead.
    openai_api_key = os.environ.get("OPENAI_API_KEY", "")
    ner_model_name_or_path = "NLPclass/Named-entity-recognition"
    skill_model_name_or_path = "GalalEwida/lm-ner-skills-recognition"

    resume_extractor = ResumeExtractor(ner_model_name_or_path, openai_api_key)
    job_description_extractor = JobDescriptionExtractor(openai_api_key)

    full_name, loc, age, skills, education_resume, title_job_resume = \
        resume_extractor.extract_resume_info(resume_text, skill_model_name_or_path)
    job_skills, education_job, title_job, location, age_DS = \
        job_description_extractor.extract_job_info(job_description, skill_model_name_or_path)

    # Pairwise similarity scores between resume fields and job requirements.
    education_match = compare_with_chatgpt_education(education_resume, education_job, openai_api_key)
    title_job_match = compare_with_chatgpt_job_title(title_job_resume, title_job, openai_api_key)
    title_loc_match = compare_with_chatgpt_location(loc, location, openai_api_key)
    title_age_match = compare_age_range_with_description(age, age_DS, openai_api_key)

    # Train a fresh model on synthetic data derived from this job profile.
    synthetic_data = create_synthetic_data(job_skills, education_job, title_job, location, age_DS)
    synthetic_data.to_csv('synthetic_data.csv')
    model_trainer = ModelTrainer(synthetic_data)
    best_model = model_trainer.train_models()

    # Feature row: one-hot skill membership plus the four similarity scores.
    input_data = {skill: 1 if skill in skills else 0 for skill in job_skills}
    input_data[education_job] = education_match
    input_data[title_job] = title_job_match
    input_data[location] = title_loc_match
    input_data[age_DS] = title_age_match

    input_df = pd.DataFrame([input_data])
    input_df.to_csv('input_df.csv')
    predicted_target = best_model.predict(input_df)

    # ROBUSTNESS FIX: sklearn-style predictors usually return NumPy scalars,
    # which json.dump (used by the caller) cannot serialize; .item() converts
    # them to native Python types.
    target = predicted_target[0]
    if hasattr(target, "item"):
        target = target.item()

    return {
        "full_name": full_name,
        "location": loc,
        "age": age,
        "age_DS": age_DS,
        "skills": skills,
        "education_resume": education_resume,
        "title_job_resume": title_job_resume,
        "job_skills": job_skills,
        "education_job": education_job,
        "title_job": title_job,
        "location_job": location,
        "predicted_target": target
    }
def process_text(resume_text, job_description_text):
    """Run the full pipeline and return the path of a downloadable file.

    On success the file is a UTF-8 JSON dump of the extracted fields; on
    any failure it is a plain-text file containing the error message.
    """
    try:
        result = main(resume_text, job_description_text)
        # Persist the JSON output to a temporary file for download.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".json", mode='w', encoding='utf-8') as handle:
            json.dump(result, handle, ensure_ascii=False, indent=4)
        return handle.name
    except Exception as exc:
        # Hand back a temporary text file carrying the error message instead.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w', encoding='utf-8') as handle:
            handle.write(f"Error: {exc}")
        return handle.name
# Gradio UI: two multiline text inputs (resume, job description) mapped to a
# single downloadable result file produced by process_text.
_input_boxes = [
    gr.Textbox(lines=10, placeholder="لطفاً رزومه خود را وارد کنید..."),
    gr.Textbox(lines=10, placeholder="لطفاً توضیحات شغلی را وارد کنید..."),
]

iface = gr.Interface(
    fn=process_text,
    inputs=_input_boxes,
    outputs=gr.File(label="دانلود فایل JSON"),
    title="پردازش رزومه و توضیحات شغلی",
    description="این ابزار رزومه و توضیحات شغلی شما را پردازش کرده و امتیازات مشابهت را محاسبه میکند.",
)

if __name__ == "__main__":
    iface.launch()