File size: 4,061 Bytes
582bf6b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
# modules/hf_llm_generator.py
import os
import requests
import json
import streamlit as st
import time

API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-large" # Example model, can be changed

def query_huggingface_model(payload, hf_api_key: str, timeout: float = 30.0):
    """Send a query to the Hugging Face Inference API.

    Args:
        payload: JSON-serializable request body for the inference endpoint.
        hf_api_key: Hugging Face API token, sent as a Bearer credential.
        timeout: Seconds to wait for the HTTP response before aborting.

    Returns:
        The decoded JSON response on success, or a dict with an "error"
        key when no API key was supplied.

    Raises:
        requests.exceptions.RequestException: on network failures,
            timeouts, or HTTP error status codes (via raise_for_status).
    """
    if not hf_api_key:
        return {"error": "Hugging Face API key not provided."}

    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    # A request without a timeout can hang the Streamlit app indefinitely
    # if the inference endpoint stalls, so always bound the wait.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    response.raise_for_status() # Raise an exception for HTTP errors
    return response.json()

def generate_mcq_with_llm(topic: str, difficulty: str, hf_api_key: str) -> dict:
    """Generate a single MCQ (question, options, answer, explanation) via the LLM.

    Args:
        topic: Subject of the question, interpolated into the prompt.
        difficulty: Difficulty label, interpolated into the prompt.
        hf_api_key: Hugging Face API token.

    Returns:
        A dict with keys "question", "options", "correct_answer",
        "explanation", "topic", "difficulty" — or None on any failure
        (missing key, API error, or malformed model output).
    """
    if not hf_api_key:
        return None # Indicate failure if API key is missing

    prompt = f"""

    Generate a multiple-choice question about "{topic}" at a "{difficulty}" difficulty level.

    Provide 4 options, indicate the correct answer, and give a brief explanation.



    Format the output as a JSON object with the following keys:

    "question": "...",

    "options": ["...", "...", "...", "..."],

    "correct_answer": "...",

    "explanation": "..."

    """

    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 250,
            "temperature": 0.7,
            "do_sample": True
        }
    }

    # Pre-initialize so the JSONDecodeError handler below can never
    # reference an unbound local, whatever the failure path.
    generated_text = ""
    try:
        response = query_huggingface_model(payload, hf_api_key)

        if "error" in response: # Handle API key not found or other API errors
            st.error(f"Hugging Face API error: {response['error']}")
            return None

        # Hugging Face API responses can vary. We need to parse the text output.
        # Assuming the model returns a string that needs to be parsed as JSON.
        generated_text = response[0]['generated_text']

        # Models often wrap the JSON in prose or markdown code fences.
        # Parse only the outermost {...} span so such output still succeeds;
        # fall back to the raw text when no braces are found.
        start = generated_text.find("{")
        end = generated_text.rfind("}")
        json_str = generated_text[start:end + 1] if start != -1 and end > start else generated_text

        mcq_data = json.loads(json_str)

        # Basic validation: all four expected keys must be present.
        if all(k in mcq_data for k in ["question", "options", "correct_answer", "explanation"]):
            return {
                "question": mcq_data["question"],
                "options": mcq_data["options"],
                "correct_answer": mcq_data["correct_answer"],
                "explanation": mcq_data["explanation"],
                "topic": topic,
                "difficulty": difficulty,
            }
        else:
            st.warning("LLM generated incomplete or malformed JSON. Retrying...")
            return None # Indicate failure

    except json.JSONDecodeError:
        st.error(f"Failed to decode JSON from LLM response: {generated_text}")
        return None
    except requests.exceptions.RequestException as e:
        st.error(f"Hugging Face API request failed: {e}")
        return None
    except Exception as e:
        st.error(f"An unexpected error occurred during LLM question generation: {e}")
        return None

def generate_quiz_set_with_llm(topic: str, difficulty: str, num_questions: int = 5, hf_api_key: str = None) -> list:
    """Generate a set of MCQ questions using an LLM.

    Args:
        topic: Subject for every question in the set.
        difficulty: Difficulty label forwarded to the per-question generator.
        num_questions: Number of questions to attempt (default 5).
        hf_api_key: Hugging Face API token; required in practice despite
            the None default.

    Returns:
        A list of MCQ dicts. May be shorter than num_questions (or empty):
        generation stops at the first question that exhausts its retries,
        and returns [] immediately when the API key is missing.
    """
    if not hf_api_key:
        st.error("Hugging Face API key not found. Please set the HF_API_KEY environment variable.")
        return []

    max_retries = 3  # attempts per question before giving up

    quiz_set = []
    for i in range(num_questions):
        mcq = None
        for attempt in range(max_retries):
            mcq = generate_mcq_with_llm(topic, difficulty, hf_api_key)
            if mcq is not None:
                break
            # Back off before the next attempt, but not after the final
            # one — the old code wasted a sleep when no retry remained.
            if attempt < max_retries - 1:
                time.sleep(1)
        if mcq:
            quiz_set.append(mcq)
        else:
            st.warning(f"Could not generate question {i+1} after retries for topic '{topic}'.")
            # Abort the rest of the set rather than hammering a failing API.
            break
    return quiz_set