Hugging Face Spaces status: Runtime error
import os

import fitz  # PyMuPDF
import gradio as gr
import openai
import requests
import torch
from huggingface_hub import login
from transformers import BloomForCausalLM, BloomTokenizerFast, pipeline

from models import (
    evaluate_with_bloom,
    evaluate_with_gpt,
    evaluate_with_jabir,  # called below but missing from the original import (NameError)
    evaluate_with_llama,
)
def extract_text_from_pdf(pdf_file):
    """Return the plain text of every page of a PDF, concatenated in page order.

    Parameters
    ----------
    pdf_file : str | file-like
        Anything accepted by ``fitz.open`` (a path or an uploaded file object
        from the Gradio ``gr.File`` input — TODO confirm which the caller passes).

    Returns
    -------
    str
        The text of all pages joined together; empty string for an empty PDF.
    """
    # Bug fix: the original never closed the document, leaking the underlying
    # file handle on every call. fitz.Document is a context manager.
    with fitz.open(pdf_file) as document:
        # Iterate pages directly instead of range(len(...)) + load_page, and
        # join once instead of quadratic string +=.
        return "".join(page.get_text() for page in document)
def evaluate_all_models(pdf_file, job_description):
    """Run the resume through every available model and combine the reports.

    Parameters
    ----------
    pdf_file : str | file-like
        Resume PDF, forwarded unchanged to each ``evaluate_with_*`` helper.
    job_description : str
        Job description text, forwarded unchanged to each helper.

    Returns
    -------
    str
        A single text report with one labelled section per model.
    """
    gpt_result = evaluate_with_gpt(pdf_file, job_description)
    bloom_result = evaluate_with_bloom(pdf_file, job_description)
    # NOTE(review): evaluate_with_jabir was never imported from `models` in the
    # original file — confirm it exists there.
    jabir_result = evaluate_with_jabir(pdf_file, job_description)
    llama_result = evaluate_with_llama(pdf_file, job_description)
    # Bug fix: the original f-string referenced `llam_result` (typo), raising
    # NameError before anything was returned.
    return (
        f"GPT-4o Result:\n{gpt_result}\n\n"
        f"Bloom Result:\n{bloom_result}\n\n"
        f"jabir Result:\n{jabir_result}\n\n"
        f"llama Result:\n{llama_result}"
    )
# Map each radio-button choice to its evaluator. A dict dispatch replaces the
# original five-way chain of nested conditional expressions in a lambda.
_MODEL_DISPATCH = {
    "GPT-4o": evaluate_with_gpt,
    "Bloom": evaluate_with_bloom,
    "jabir": evaluate_with_jabir,
    "llama": evaluate_with_llama,
}


def _evaluate(pdf, jd, model):
    """Run the selected model on (pdf, jd); any other choice (e.g. "All")
    falls through to evaluate_all_models, matching the original lambda."""
    handler = _MODEL_DISPATCH.get(model)
    return handler(pdf, jd) if handler else evaluate_all_models(pdf, jd)


iface = gr.Interface(
    fn=_evaluate,
    inputs=[
        gr.File(label="Upload Resume PDF"),
        gr.Textbox(lines=10, label="Job Description"),
        gr.Radio(
            choices=["GPT-4o", "Bloom", "jabir", "llama", "All"],
            label="Choose Model",
        ),
    ],
    outputs="text",
    title="Resume Evaluator",
)

iface.launch()