| import streamlit as st |
| from langchain.prompts import PromptTemplate |
| from langchain.chains import LLMChain |
| from langchain_google_genai import ChatGoogleGenerativeAI |
| import fitz |
| import json |
| import docx |
| import os |
|
|
|
|
| |
# --- Page & sidebar UI -------------------------------------------------------
# (The original title emoji was mojibake ("π"); restored to a document icon.)
st.title("📄 File-based MCQ Generator")

st.sidebar.title("Upload & Settings")

# Source document; only PDF and Word (.docx) are handled by extract_text().
uploaded_file = st.sidebar.file_uploader("Upload a file (PDF or Word)", type=["pdf", "docx"])

# How many questions the model should produce (1-20, default 5).
number_of_questions = st.sidebar.slider("Number of questions", min_value=1, max_value=20, value=5)
|
|
| |
# Initialise per-session quiz state exactly once per browser session.
_STATE_DEFAULTS = {
    "mcqs": [],            # generated question dicts
    "current_q": 0,        # index of the question currently shown
    "user_answers": {},    # question index -> option the user picked
    "quiz_finished": False,
}
for _key, _default in _STATE_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default
|
|
| |
# Read the Gemini API key from the environment (never hard-code secrets).
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Chat model used for question generation; temperature 0.7 trades a little
# determinism for more varied question wording.
llm = ChatGoogleGenerativeAI(
    google_api_key=GOOGLE_API_KEY,
    model="gemini-2.0-flash",
    temperature=0.7,
)
|
|
# Prompt instructing the model to emit a bare JSON array of MCQ objects.
# NOTE: the doubled braces {{ }} escape literal braces in the JSON example;
# only {number} and {text} are template variables.
MCQ_TEMPLATE = """
You are an expert MCQ generator. Generate {number} unique multiple-choice questions from the given text.
Each question must have exactly 1 correct answer and 3 incorrect options.
Strictly return output in the following JSON format (no explanations, no markdown):

[
  {{
    "question": "What is ...?",
    "options": ["Option A", "Option B", "Option C", "Option D"],
    "answer": "Option D"
  }},
  ...
]

TEXT:
{text}
"""

# from_template infers the input variables ({text}, {number}) from the string,
# equivalent to spelling out input_variables explicitly.
prompt = PromptTemplate.from_template(MCQ_TEMPLATE)

# Chain invoked by the "Generate MCQs" sidebar button.
mcq_chain = LLMChain(llm=llm, prompt=prompt)
|
|
| |
def extract_text(file):
    """Return the plain text of an uploaded PDF or Word (.docx) file.

    Parameters
    ----------
    file : file-like
        An object with a ``name`` attribute and a ``read()`` method, as
        provided by ``st.file_uploader``.

    Returns
    -------
    str
        The extracted text, or an empty string for unsupported extensions.
    """
    # Lower-case so ``.PDF`` / ``.DOCX`` uploads are also recognised.
    name = file.name.lower()
    if name.endswith(".pdf"):
        file_bytes = file.read()
        # Context manager guarantees the PyMuPDF document is closed
        # (the original leaked the handle).
        with fitz.open(stream=file_bytes, filetype="pdf") as doc:
            return "".join(page.get_text() for page in doc)
    if name.endswith(".docx"):
        document = docx.Document(file)
        return "\n".join(paragraph.text for paragraph in document.paragraphs)
    # Unsupported type: caller treats "" as "nothing to generate from".
    return ""
|
|
| |
# --- MCQ generation ----------------------------------------------------------
if st.sidebar.button("Generate MCQs"):
    if uploaded_file is None:
        st.error("Please upload a file.")
    else:
        with st.spinner("Extracting text and generating MCQs..."):
            text = extract_text(uploaded_file)
            try:
                response = mcq_chain.run(text=text, number=str(number_of_questions))
                # The model sometimes wraps the JSON in a markdown code fence.
                # Locate the array bounds instead of the original brittle
                # fixed-offset slice response[8:-3], which broke whenever the
                # model answered without a ```json fence.
                start = response.find("[")
                end = response.rfind("]")
                if start == -1 or end <= start:
                    raise ValueError("Model response did not contain a JSON array.")
                st.session_state.mcqs = json.loads(response[start:end + 1])
                st.session_state.current_q = 0
                st.session_state.user_answers = {}
                st.session_state.quiz_finished = False
                st.success("✅ MCQs generated successfully!")
            except Exception as e:
                # Surface LLM / JSON-parsing errors to the user instead of crashing.
                st.error(f"Error generating MCQs: {e}")
|
|
| |
# --- Quiz rendering: one question at a time ----------------------------------
if st.session_state.mcqs and not st.session_state.quiz_finished:
    idx = st.session_state.current_q
    q_data = st.session_state.mcqs[idx]

    st.subheader(f"Question {idx + 1}: {q_data['question']}")

    # A keyed form per question so the radio selection resets between questions.
    with st.form(key=f"form_{idx}"):
        selected_option = st.radio("Choose an answer:", q_data["options"], key=f"radio_{idx}")
        submitted = st.form_submit_button("Next")

    if submitted:
        st.session_state.user_answers[idx] = selected_option
        if idx < len(st.session_state.mcqs) - 1:
            st.session_state.current_q += 1
            # NOTE(review): the next question only appears on the automatic
            # rerun triggered by the form submit; if the UI ever looks one
            # step behind, add st.rerun() here — confirm against the
            # installed Streamlit version.
        else:
            st.session_state.quiz_finished = True
            # Original emoji was mojibake ("π"); restored.
            st.success("🎉 Quiz completed!")
|
|
| |
# --- Results ------------------------------------------------------------------
if st.session_state.quiz_finished:
    st.header("📊 Quiz Results")
    score = 0
    total = len(st.session_state.mcqs)

    for i, q in enumerate(st.session_state.mcqs):
        user_ans = st.session_state.user_answers.get(i)  # None if never answered
        correct_ans = q["answer"]
        if user_ans == correct_ans:
            score += 1
        st.markdown(f"**Q{i+1}: {q['question']}**")
        st.markdown(f"- Your answer: {user_ans}")
        st.markdown(f"- Correct answer: {correct_ans}")
        st.markdown("---")

    # The original literal was split across two physical lines (unterminated
    # string with a mojibake "β" emoji); repaired here.
    st.success(f"✅ You scored {score} out of {total}")
|
|