Spaces:
Runtime error
Runtime error
Upload 2 files
Browse files- Requirements.txt +25 -0
- app.py +82 -0
Requirements.txt
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Create an AI powered Interview System, to conduct real time interviews, dynamically generate questions based on selected job roles, evaluate candidate responses and provide feedback.
|
| 2 |
+
# The scope and features of this application should be user authentication with a secure login system for candidates, allow the candidates to choose from different job roles.
|
| 3 |
+
# The system should be able to generate questions dynamically based on the job role selected by the candidate.
|
| 4 |
+
# The system should ask questions one by one, analyze responses using NLP, give real-time feedback based on candidate responses. And the admin should be able to add or modify interview questions and topics for different job roles.
|
| 5 |
+
# Implement a scoring system to assess candidates' responses, should be able to get audio and video based responses.
|
| 6 |
+
# Tech stack used should be python, database should be mongoDB.
|
| 7 |
+
# For deployment, the application must be hosted and accessible online.
|
| 8 |
+
# The user experience should be intuitive and professional UI-UX.
|
| 9 |
+
# Implement an AI based resume analysis feature to provide feedback before the interview and candidates to receive AI generated suggestions on improving their responses.
|
| 10 |
+
|
| 11 |
+
PyQt5==5.15.9 # For GUI development
|
| 12 |
+
pymongo==4.5.0 # For MongoDB interaction
|
| 13 |
+
spacy==3.6.0 # For NLP processing
|
| 14 |
+
transformers==4.34.0 # For Hugging Face GPT models
|
| 15 |
+
speechrecognition==3.8.1 # For audio response processing
|
| 16 |
+
opencv-python==4.8.1.78 # For video response processing
|
| 17 |
+
pyinstaller==5.13.0 # For packaging the application
|
| 18 |
+
openai>=1.0.0 # For OpenAI API integration
|
| 19 |
+
python-docx==0.8.11 # For handling .docx files
|
| 20 |
+
PyPDF2==3.0.0 # For handling .pdf files
|
| 21 |
+
torch>=1.9.0 # For PyTorch backend
|
| 22 |
+
# Upgrade pip to avoid version conflicts
|
| 23 |
+
pydub==0.25.1 # For audio processing
|
| 24 |
+
moviepy==1.0.3 # For video processing
|
| 25 |
+
gradio==3.41.2 # For creating a web-based interface
|
app.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from docx import Document
from PyPDF2 import PdfReader

# Load the Hugging Face model and tokenizer once at import time; both
# analyze_resume() and generate_questions() share these module globals.
# NOTE(review): GPT-Neo 1.3B is a multi-GB download and is loaded eagerly
# here -- the hosting environment must have enough disk/RAM; confirm this
# is acceptable for the target Space hardware.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
|
| 9 |
+
|
| 10 |
+
def analyze_resume(file):
    """Extract text from an uploaded resume and return AI-generated feedback.

    Supports .txt, .docx, and .pdf uploads (extension matched case-insensitively).

    Args:
        file: File-like object from the Gradio ``File`` component; must expose
            a ``name`` attribute carrying the original filename.

    Returns:
        str: Model-generated feedback on the resume, or a human-readable
        error message if the file is unsupported or processing fails.
    """
    try:
        # Case-insensitive extension check so "Resume.PDF" etc. are accepted.
        filename = file.name.lower()
        if filename.endswith(".txt"):
            resume_content = file.read().decode("utf-8")
        elif filename.endswith(".docx"):
            doc = Document(file)
            resume_content = "\n".join(paragraph.text for paragraph in doc.paragraphs)
        elif filename.endswith(".pdf"):
            reader = PdfReader(file)
            # extract_text() may return None for image-only pages; coalesce
            # to "" so the join cannot raise TypeError.
            resume_content = "\n".join(page.extract_text() or "" for page in reader.pages)
        else:
            return "Unsupported file format. Please upload a .txt, .docx, or .pdf file."

        # Prompt instructing the model to act as a resume reviewer.
        input_text = (
            f"You are an expert resume reviewer. Analyze the following resume and provide detailed feedback "
            f"on its strengths, weaknesses, and areas for improvement. Ensure your feedback is professional and actionable:\n\n{resume_content}"
        )
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)

        # Sampled generation (temperature 0.7) for varied, non-deterministic feedback.
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens: causal LMs echo the prompt
        # in `outputs`, so slicing off the input length avoids returning the
        # instruction text and the resume itself back to the user.
        feedback = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
        return feedback
    except Exception as e:
        # Surface parsing/model failures to the UI instead of crashing the app.
        return f"Error analyzing resume: {e}"
|
| 44 |
+
|
| 45 |
+
def generate_questions(role):
    """Generate up to 5 interview questions for the given job role.

    Args:
        role (str): Job role entered by the candidate (e.g. "Data Scientist").

    Returns:
        str: Newline-separated interview questions (at most 5), or a
        human-readable error message if generation fails.
    """
    try:
        # Prompt the causal LM to produce role-specific questions.
        input_text = f"Generate 5 interview questions for a {role}."
        inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)

        # Sampled generation so repeated requests yield varied questions.
        outputs = model.generate(
            inputs,
            max_new_tokens=300,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens: causal LMs echo the prompt,
        # and without slicing it off the prompt line would be counted as one
        # of the 5 returned "questions".
        generated = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
        questions = [q.strip() for q in generated.split("\n") if q.strip()]
        return "\n".join(questions[:5])
    except Exception as e:
        # Surface model failures to the UI instead of crashing the app.
        return f"Error generating questions: {e}"
|
| 64 |
+
|
| 65 |
+
# Build the web UI: a resume-analysis section followed by an interview
# question-generator section, both backed by the shared GPT-Neo model.
with gr.Blocks() as demo:
    gr.Markdown("# AI-Powered Interview System")

    # --- Resume analysis section ---
    gr.Markdown("### Resume Analysis")
    uploaded_resume = gr.File(label="Upload Resume")
    feedback_box = gr.Textbox(label="Feedback", lines=10, interactive=False)
    run_analysis = gr.Button("Analyze Resume")
    run_analysis.click(analyze_resume, inputs=uploaded_resume, outputs=feedback_box)

    # --- Interview question generator section ---
    gr.Markdown("### Interview Question Generator")
    job_role_box = gr.Textbox(label="Enter Job Role")
    questions_box = gr.Textbox(label="Generated Questions", lines=10, interactive=False)
    run_generation = gr.Button("Generate Questions")
    run_generation.click(generate_questions, inputs=job_role_box, outputs=questions_box)
|
| 79 |
+
|
| 80 |
+
# Run the Gradio app only when executed as a script (not when imported);
# demo.launch() starts the local web server / Spaces endpoint.
if __name__ == "__main__":
    demo.launch()
|