import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from docx import Document
from PyPDF2 import PdfReader
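# Note: the PyPDF2 project has since been renamed to pypdf; this import still
# works, but new installs may prefer "from pypdf import PdfReader" (the
# PdfReader API used below is the same in both).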

# Load the Hugging Face model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
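# GPT-J-6B takes roughly 24 GB of RAM in float32. A lower-memory load
# (a sketch, assuming a CUDA GPU with torch installed; the float16 weights
# revision is documented on the model card) would be:
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       "EleutherAI/gpt-j-6B",
#       revision="float16",          # pre-converted half-precision weights
#       torch_dtype=torch.float16,   # roughly halves memory vs. float32
#       low_cpu_mem_usage=True,
#   ).to("cuda")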

def analyze_resume(file):
    try:
        if file is None:
            return "Please upload a resume file."
        # Depending on the Gradio version, the File component passes either a
        # filepath string or a file-like object with a .name attribute.
        file_path = file if isinstance(file, str) else file.name
        # Extract text from the uploaded file
        if file_path.endswith(".txt"):
            with open(file_path, "r", encoding="utf-8") as f:
                resume_content = f.read()
        elif file_path.endswith(".docx"):
            doc = Document(file_path)
            resume_content = "\n".join(paragraph.text for paragraph in doc.paragraphs)
        elif file_path.endswith(".pdf"):
            reader = PdfReader(file_path)
            # extract_text() can return None for image-only pages
            resume_content = "\n".join(page.extract_text() or "" for page in reader.pages)
        else:
            return "Unsupported file format. Please upload a .txt, .docx, or .pdf file."

        if not resume_content.strip():
            return "No readable text could be extracted from the file."

        # Prepare the input for the model
        input_text = (
            f"You are an expert resume reviewer. Analyze the following resume and provide detailed feedback "
            f"on its strengths, weaknesses, and areas for improvement. Ensure your feedback is professional and actionable:\n\n{resume_content}"
        )
        # Truncate long resumes so prompt plus generation fits GPT-J's context window
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)

        # Generate feedback using the model
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens so the prompt is not echoed back
        feedback = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
        return feedback
    except Exception as e:
        return f"Error analyzing resume: {e}"

def generate_questions(role):
    try:
        if not role or not role.strip():
            return "Please enter a job role."
        # Prepare the input for the model
        input_text = f"Generate 5 interview questions for a {role.strip()}."
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)

        # Generate questions using the model
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the new tokens (not the prompt), then keep up to 5 non-empty lines
        questions = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True).split("\n")
        return "\n".join([q.strip() for q in questions if q.strip()][:5])
    except Exception as e:
        return f"Error generating questions: {e}"

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# AI-Powered Interview System")
    gr.Markdown("### Resume Analysis")
    resume_file = gr.File(label="Upload Resume", file_types=[".txt", ".docx", ".pdf"])
    resume_feedback = gr.Textbox(label="Feedback", lines=10, interactive=False)
    analyze_button = gr.Button("Analyze Resume")
    analyze_button.click(analyze_resume, inputs=resume_file, outputs=resume_feedback)

    gr.Markdown("### Interview Question Generator")
    role_input = gr.Textbox(label="Enter Job Role")
    questions_output = gr.Textbox(label="Generated Questions", lines=10, interactive=False)
    generate_button = gr.Button("Generate Questions")
    generate_button.click(generate_questions, inputs=role_input, outputs=questions_output)

# Run the Gradio app
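# (launch() also accepts options such as share=True, for a temporary public
# link, or server_name="0.0.0.0", to listen on all interfaces, if the app
# needs to be reachable beyond localhost.)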
if __name__ == "__main__":
    demo.launch()