# AI-Interviewer / app.py
# (Hugging Face Space page residue preserved as a comment:
#  author "Spooke", commit message "Update app.py", commit 4f766ea, verified)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from docx import Document
from PyPDF2 import PdfReader
# Load the Hugging Face model and tokenizer
# NOTE(review): GPT-J-6B is a ~24 GB, non-instruction-tuned base model; both
# downloads happen once at import time and can take minutes and exhaust RAM on
# small hosts — confirm the deployment target can actually hold this model.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
def analyze_resume(file):
    """Analyze an uploaded resume (.txt, .docx, or .pdf) and return model feedback.

    Args:
        file: Uploaded file value from ``gr.File``; ``file.name`` holds the
            path of the temp file Gradio saved the upload to.

    Returns:
        str: Model-generated feedback, or a human-readable error message.
    """
    try:
        # Dispatch on extension, case-insensitively (".PDF" uploads are valid too).
        name = file.name.lower()
        if name.endswith(".txt"):
            # Open by path: Gradio file values are path-backed and do not
            # reliably expose a readable binary stream via .read().
            with open(file.name, "r", encoding="utf-8", errors="replace") as fh:
                resume_content = fh.read()
        elif name.endswith(".docx"):
            doc = Document(file.name)
            resume_content = "\n".join(p.text for p in doc.paragraphs)
        elif name.endswith(".pdf"):
            reader = PdfReader(file.name)
            # extract_text() returns None for image-only pages; coerce to ""
            # so join() cannot raise TypeError.
            resume_content = "\n".join(
                (page.extract_text() or "") for page in reader.pages
            )
        else:
            return "Unsupported file format. Please upload a .txt, .docx, or .pdf file."

        # Guard against scanned PDFs / empty files with no extractable text.
        if not resume_content.strip():
            return "The uploaded file contains no extractable text."

        # Prepare the input for the model.
        input_text = (
            f"You are an expert resume reviewer. Analyze the following resume and provide detailed feedback "
            f"on its strengths, weaknesses, and areas for improvement. Ensure your feedback is professional and actionable:\n\n{resume_content}"
        )
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)
        # Generate feedback using the model.
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens: decoding outputs[0] whole
        # would echo the full prompt (including the resume) back to the user.
        feedback = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
        return feedback.strip()
    except Exception as e:
        # Surface the failure in the UI textbox instead of crashing the app.
        return f"Error analyzing resume: {e}"
def generate_questions(role):
    """Generate up to 5 interview questions for a given job role.

    Args:
        role: Job title or role description typed by the user.

    Returns:
        str: Newline-separated questions (at most 5), or an error message.
    """
    try:
        # Reject empty/whitespace-only input early with a clear message.
        role = role.strip()
        if not role:
            return "Please enter a job role."
        # Prepare the input for the model.
        input_text = f"Generate 5 interview questions for a {role}."
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)
        # Generate questions using the model.
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the generated continuation: decoding the whole sequence
        # would return the prompt itself as the first "question".
        generated = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
        # Keep at most 5 non-empty lines.
        questions = [q.strip() for q in generated.split("\n") if q.strip()]
        return "\n".join(questions[:5])
    except Exception as e:
        # Surface the failure in the UI textbox instead of crashing the app.
        return f"Error generating questions: {e}"
# --- Gradio interface -------------------------------------------------------
# Two independent sections wired to the handlers above: resume analysis and
# per-role interview-question generation.
with gr.Blocks() as demo:
    gr.Markdown("# AI-Powered Interview System")

    # Section 1: upload a resume, get model feedback.
    gr.Markdown("### Resume Analysis")
    uploaded_resume = gr.File(label="Upload Resume")
    feedback_box = gr.Textbox(label="Feedback", lines=10, interactive=False)
    run_analysis = gr.Button("Analyze Resume")
    run_analysis.click(analyze_resume, inputs=uploaded_resume, outputs=feedback_box)

    # Section 2: type a job role, get generated questions.
    gr.Markdown("### Interview Question Generator")
    job_role = gr.Textbox(label="Enter Job Role")
    questions_box = gr.Textbox(label="Generated Questions", lines=10, interactive=False)
    run_generation = gr.Button("Generate Questions")
    run_generation.click(generate_questions, inputs=job_role, outputs=questions_box)
# Run the Gradio app only when executed as a script (not when imported).
if __name__ == "__main__":
    demo.launch()