Spaces:
Runtime error
Runtime error
import json
import os
import shelve

import streamlit as st
# Google's Generative AI SDK (Gemini).
import google.generativeai as genai

# SECURITY FIX: never hard-code an API key in source (the original embedded a
# live key, which must be considered leaked and revoked). Read it from the
# environment instead — set GOOGLE_API_KEY before launching the app.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", ""))
def generate_questions(company_name, role_applied_for, job_description, interview_type, focus_areas):
    """
    Generate custom interview questions with the Gemini API.

    Args:
        company_name: Company the candidate is interviewing with.
        role_applied_for: Title of the target role.
        job_description: Free-text job description.
        interview_type: Kind of interview (e.g. technical, behavioral).
        focus_areas: Topics the candidate wants to emphasise.

    Returns:
        The model's response text (a bulleted list of questions), or a
        human-readable message if generation was blocked or produced no text.
    """
    generation_config = {
        "temperature": 0.9,
        "top_p": 1,
        "top_k": 1,
        "max_output_tokens": 2048,
    }
    safety_settings = [
        {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
        {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
    ]
    model = genai.GenerativeModel(
        model_name="gemini-1.0-pro",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    # Explicitly request a bulleted list so the caller can split on newlines.
    prompt_parts = [
        "Based on the following details, generate a list of interview questions:",
        f"Company Name: {company_name}",
        f"Role Applied For: {role_applied_for}",
        f"Job Description: {job_description}",
        f"Interview Type: {interview_type}",
        f"Focus Areas: {focus_areas}",
        "Format the questions as a bulleted list.",
    ]
    complete_prompt = "\n".join(prompt_parts)
    response = model.generate_content(complete_prompt)
    # BUG FIX: response.text raises ValueError when the candidate was blocked
    # by safety settings or returned no parts — surface a readable message
    # instead of crashing the Streamlit app with a runtime error.
    try:
        return response.text
    except ValueError:
        return "No questions could be generated (the response was blocked or empty)."
def parse_response_text(response_text):
    """
    Manually parse a structured response text into a nested dict.

    Expected shape (per the prompt's requested format)::

        Section Name: {
        "Question 1": "text...",
        "Question 2": "text...",

    Args:
        response_text: Raw model output to parse.

    Returns:
        dict mapping section name -> {question number -> question text}.
        Unrecognised lines are ignored; empty input yields ``{}``.
    """
    questions_dict = {}
    current_section = ""
    for line in response_text.split("\n"):
        stripped = line.strip()
        if stripped.endswith(": {"):
            # Section header, e.g. 'Technical: {'.
            current_section = stripped.split(":", 1)[0].strip()
            questions_dict[current_section] = {}
        elif stripped.startswith('"Question'):
            # BUG FIX: split only on the FIRST colon — question text that
            # itself contains ':' used to raise "too many values to unpack".
            question_number, _, question_text = stripped.partition(":")
            # BUG FIX: remove the trailing comma BEFORE the quotes; the old
            # strip('"').strip(",") order left a dangling '"' on '"text",'.
            question_text = question_text.strip().rstrip(",").strip('"')
            key = question_number.strip().strip('"')
            # BUG FIX: a question appearing before any header used to raise
            # KeyError on questions_dict[""]; setdefault tolerates it.
            questions_dict.setdefault(current_section, {})[key] = question_text
    return questions_dict
def app():
    """Streamlit page: load saved candidate details and run interview-round prep."""
    st.title("Interview Round 1 Preparation")

    # Load the data persisted by the bio page.
    with shelve.open("datastore") as db:
        candidate_bio = db.get("candidate_bio", {})

    if not candidate_bio:
        # Guard clause: without saved details the page used to render nothing,
        # leaving the user with a blank screen and no explanation.
        st.info("No saved candidate information found. Please fill in your bio first.")
        return

    st.write("## Loaded Information for Interview Preparation")
    company_name = candidate_bio.get("company_name", "Not specified")
    interviewing_department = candidate_bio.get("interviewing_department", "Not specified")
    role_applied_for = candidate_bio.get("role_applied_for", "Not specified")
    job_description = candidate_bio.get("job_description", "Not specified")
    interview_type = candidate_bio.get("interview_type", "Not specified")
    focus_areas = candidate_bio.get("focus_areas", "Not specified")

    st.write(f"**Company Name:** {company_name}")
    st.write(f"**Interviewing Department:** {interviewing_department}")
    st.write(f"**Role Applied For:** {role_applied_for}")
    st.write(f"**Job Description:** {job_description}")
    st.write(f"**Type of Interview:** {interview_type}")
    st.write(f"**Focus Areas for Preparation:** {focus_areas}")

    # BUG FIX: the original nested the question list and the "Submit Responses"
    # button inside the "Generate" button's if-block. Streamlit reruns the whole
    # script on every widget interaction, so when the user clicked Submit the
    # generate button evaluated False again and the questions (and the submit
    # handler) disappeared — responses could never be collected. Persisting the
    # generated text in session_state makes it survive reruns.
    if st.button("Generate Interview Questions"):
        st.session_state["questions_text"] = generate_questions(
            company_name, role_applied_for, job_description, interview_type, focus_areas
        )

    questions_text = st.session_state.get("questions_text")
    if questions_text:
        st.write(questions_text)
        st.write("## Generated Interview Questions")
        questions_list = questions_text.split("\n")
        responses = {}
        for i, question in enumerate(questions_list, start=1):
            st.write(f"**Question {i}:** {question}")
            # Keyed inputs keep each answer stable across reruns.
            response = st.text_input(f"Your answer to Question {i}", key=f"response_{i}")
            responses[f"Question {i}"] = response

        if st.button("Submit Responses"):
            st.write("## Your Responses")
            for question, response in responses.items():
                st.write(f"{question}: {response}")


if __name__ == "__main__":
    app()