Krish30 commited on
Commit
a2aee01
·
verified ·
1 Parent(s): c04ab19

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +144 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import re

import streamlit as st
from PyPDF2 import PdfReader
import google.generativeai as genai

# Configure the Generative AI API.
# SECURITY FIX: the API key was previously hard-coded in source (and therefore
# committed/leaked). Read it from the environment instead; the old key should
# be revoked.
_api_key = os.getenv("GOOGLE_API_KEY")
if not _api_key:
    st.error("GOOGLE_API_KEY environment variable is not set.")
    st.stop()
genai.configure(api_key=_api_key)

# Prompt used to generate the mock-test questions from a resume + job description.
input_prompt = """
You are an AI question generator. Generate a technical mock test with 30 questions based on the following instructions:
1. Analyze the resume to identify the candidate's skills, experience, projects, and certifications.
2. Generate questions that thoroughly test the candidate's knowledge and expertise in these areas.
3. Analyze the job description to extract key requirements and generate questions that evaluate whether the candidate meets these requirements.
4. Ensure the questions assess the candidate's capability and alignment with the job's expectations, aiding the hiring team in evaluating the validity of the candidate's claims.
Resume:
{resume_text}
Job Description:
{jd_text}
"""

# Prompt used to score the candidate's answers; the app later parses the
# "N. [score] - answer" lines this format requests.
answer_prompt_template = """
You are an AI evaluator. Below are questions and a student's answers. Score each answer on a scale of 0 to 1, where 1 is completely correct and 0 is incorrect. Provide the correct answer for each question as well.
Questions and Answers:
{questions_and_answers}
Provide your response in the following format:
1. [Score] - [Probable Correct Answer]
2. [Score] - [Probable Correct Answer]
...
"""
# Function to extract text from PDF
def input_pdf_text(file):
    """Extract all text from an uploaded PDF.

    Parameters:
        file: a file-like object (as provided by st.file_uploader).

    Returns:
        The concatenated text of all pages, or a string beginning with
        "Error extracting text:" on failure — callers rely on that prefix
        to detect errors, so the contract is preserved.
    """
    try:
        pdf = PdfReader(file)
        # PyPDF2's extract_text() may return None for pages with no
        # extractable text; coerce to "" so the join cannot raise TypeError.
        return "".join(page.extract_text() or "" for page in pdf.pages)
    except Exception as e:
        return f"Error extracting text: {e}"
# Function to generate questions using Generative AI
def get_gemini_response(prompt):
    """Send *prompt* to the Gemini model and return the first candidate's text.

    Returns:
        The text of the first part of the first candidate, or None when the
        response contains no usable candidate (e.g. the prompt was blocked).
    """
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(prompt)
    # Guard against an empty candidate list — indexing [0] unconditionally
    # raised IndexError when the model blocked or filtered the prompt.
    if not response.candidates:
        return None
    candidate = response.candidates[0]
    # Also require a non-empty parts list before indexing into it.
    if hasattr(candidate, "content") and hasattr(candidate.content, "parts") and candidate.content.parts:
        return candidate.content.parts[0].text
    return None
# --- Session state (persists across Streamlit reruns) ---
if "questions" not in st.session_state:
    st.session_state.questions = None  # list[str] of generated questions, or None
if "user_answers" not in st.session_state:
    st.session_state.user_answers = {}  # maps question number (1-based) -> answer text
if "test_submitted" not in st.session_state:
    # NOTE(review): this flag is read below but never set to True anywhere in
    # this file — confirm whether submitting a test was meant to re-enable
    # question generation.
    st.session_state.test_submitted = False

# --- Streamlit UI ---
st.title("Technical Mock Test Generator")

st.write("Upload a resume and a job description to generate a technical mock test based on the skills and requirements.")

resume_file = st.file_uploader("Upload Resume (PDF)", type=["pdf"])
jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])

if resume_file and jd_file:
    # (Re)generate questions when none exist yet or a finished test is being retaken.
    if st.session_state.questions is None or st.session_state.test_submitted:
        resume_text = input_pdf_text(resume_file)
        jd_text = input_pdf_text(jd_file)

        # input_pdf_text signals failure with an "Error extracting text:" prefix.
        # Check the prefix rather than a substring so a document that merely
        # contains the word "Error" is not misreported as unreadable.
        if resume_text.startswith("Error") or jd_text.startswith("Error"):
            st.error("Error reading one or both files. Please check the files and try again.")
        else:
            prompt = input_prompt.format(resume_text=resume_text, jd_text=jd_text)
            st.write("Generating questions...")

            try:
                # Generate questions
                questions_content = get_gemini_response(prompt)

                if not questions_content:
                    st.error("Unexpected response format. Please check the Generative AI configuration.")
                else:
                    # Pull out the "N. question" lines, then strip the numbering.
                    numbered = re.findall(r'\d+\.\s.*', questions_content)
                    questions = [re.sub(r'^\d+\.\s', '', item).strip() for item in numbered]

                    # Save questions and a blank answer slot per question.
                    st.session_state.questions = questions
                    st.session_state.user_answers = {n + 1: "" for n in range(len(questions))}
                    st.session_state.test_submitted = False

            except Exception as e:
                st.error(f"Error generating questions: {e}")

    # Display questions, each with its own answer box.
    if st.session_state.questions:
        st.write("### Mock Test Questions")
        for idx, question in enumerate(st.session_state.questions, 1):
            st.write(f"**Q{idx}:** {question}")
            st.session_state.user_answers[idx] = st.text_input(
                f"Your Answer for Q{idx}:",
                value=st.session_state.user_answers[idx],
                key=f"q_{idx}"
            )

    # Submit button — only meaningful when questions exist; previously this
    # crashed with TypeError (iterating None) if generation had failed.
    if st.session_state.questions and st.button("Submit Test"):
        # Prepare input for LLM evaluation
        qa_pairs = "\n".join(
            f"{idx}. Q: {question} A: {st.session_state.user_answers[idx]}"
            for idx, question in enumerate(st.session_state.questions, 1)
        )
        eval_prompt = answer_prompt_template.format(questions_and_answers=qa_pairs)

        try:
            # Get LLM evaluation
            eval_response = get_gemini_response(eval_prompt)

            # Debug: Display raw response
            st.write("### Raw Evaluation Response")
            st.write(eval_response)

            # Parse "N. [score] - answer" lines; guard against a None response
            # (re.findall on None raised TypeError before).
            results = re.findall(r'(\d+)\.\s\[([\d\.]+)\]\s-\s(.*)', eval_response or "")

            # Display score and feedback
            if results:
                total_score = sum(float(score) for _, score, _ in results)
                attempted = sum(1 for answer in st.session_state.user_answers.values() if answer.strip())
                st.subheader(f"Your Score: {total_score:.2f} / {len(results)}")
                st.write(f"Total Questions Attempted: {attempted} / {len(results)}")

                # Display probable correct answers
                st.subheader("Probable Correct Answers")
                for idx, (_, score, correct_answer) in enumerate(results, 1):
                    st.write(f"Q{idx}: {correct_answer} (Score: {score})")

            else:
                st.error("Unexpected evaluation response format.")

        except Exception as e:
            st.error(f"Error evaluating answers: {e}")
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ google-generativeai
4
+ PyPDF2
5
+ python-dotenv
6
+ fpdf
7
+ python-multipart
8
+ streamlit