"""Streamlit UI for an AI-driven automated project interviewer.

Flow: the student uploads a screenshot of their project (slide / code /
diagram) plus an audio explanation.  On submit, the screenshot is OCR'd
into a running context, the audio is transcribed, and the project's
interview engine produces a follow-up question and an evaluation of the
student's response.
"""

import streamlit as st

from ocr import extract_screen_text
from stt import transcribe_audio
from interview_engine import generate_question
from evaluation import evaluate_response

# Accumulated interview context (OCR'd screen text), persisted across
# Streamlit reruns via session state.
if "context_store" not in st.session_state:
    st.session_state.context_store = ""

st.title("🎤 AI-Driven Automated Project Interviewer")
st.write(
    "Upload a screenshot of your project (slides/code/diagrams) and explain it verbally. "
    "The AI will ask follow-up questions and provide feedback."
)

# Upload widgets: screen image and spoken explanation.
screen_file = st.file_uploader(
    "Upload Screen / Slide / Code Screenshot", type=["png", "jpg", "jpeg"]
)
audio_file = st.file_uploader(
    "Explain Your Project (Audio file)", type=["wav", "mp3", "m4a"]
)


def _ingest_screen(uploaded_file) -> None:
    """OCR the uploaded screenshot and append its text to the session context.

    NOTE(review): this appends on every submit, so re-submitting the same
    screenshot duplicates its text in the context — consider tracking the
    last-processed file if that becomes a problem.
    """
    screen_text = extract_screen_text(uploaded_file)
    st.session_state.context_store += "\n" + screen_text


def _run_interview(uploaded_audio) -> None:
    """Transcribe the audio answer, then render a follow-up question and feedback."""
    student_response = transcribe_audio(uploaded_audio)

    # Both the question and the evaluation are grounded in the full
    # accumulated context, not just the latest screenshot.
    question = generate_question(st.session_state.context_store, student_response)
    score = evaluate_response(st.session_state.context_store, student_response)

    st.subheader("AI Interviewer Question:")
    st.write(question)
    st.subheader("Evaluation & Feedback:")
    st.json(score)


if st.button("Submit"):
    if screen_file is not None:
        _ingest_screen(screen_file)

    if audio_file is not None:
        _run_interview(audio_file)
    else:
        # Audio is required for the interview step; the screenshot alone
        # only extends the context.
        st.warning("Please upload an audio explanation to get a question and feedback.")