# modules/hf_llm_generator.py
| import os | |
| import requests | |
| import json | |
| import streamlit as st | |
| import time | |
# Hugging Face Inference API endpoint; the trailing model segment can be
# swapped for any other text2text-generation model.
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-large"  # Example model, can be changed
def query_huggingface_model(payload, hf_api_key: str, timeout: float = 30.0):
    """Send a query to the Hugging Face Inference API.

    Args:
        payload: JSON-serializable request body for the inference endpoint.
        hf_api_key: Hugging Face API token; if falsy, no request is made.
        timeout: Seconds to wait for the HTTP response. requests has no
            default timeout, so omitting one can hang the caller forever.

    Returns:
        The decoded JSON response on success, or a dict with an "error"
        key when the API key is missing.

    Raises:
        requests.exceptions.RequestException: on network failure, timeout,
            or an HTTP error status (via raise_for_status).
    """
    if not hf_api_key:
        return {"error": "Hugging Face API key not provided."}
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json",
    }
    # Explicit timeout so a stalled endpoint cannot block the app indefinitely.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    response.raise_for_status()  # Raise an exception for HTTP errors
    return response.json()
| def generate_mcq_with_llm(topic: str, difficulty: str, hf_api_key: str) -> dict: | |
| """ | |
| Generates a single MCQ question, options, correct answer, and explanation using an LLM. | |
| """ | |
| if not hf_api_key: | |
| return None # Indicate failure if API key is missing | |
| prompt = f""" | |
| Generate a multiple-choice question about "{topic}" at a "{difficulty}" difficulty level. | |
| Provide 4 options, indicate the correct answer, and give a brief explanation. | |
| Format the output as a JSON object with the following keys: | |
| "question": "...", | |
| "options": ["...", "...", "...", "..."], | |
| "correct_answer": "...", | |
| "explanation": "..." | |
| """ | |
| payload = { | |
| "inputs": prompt, | |
| "parameters": { | |
| "max_new_tokens": 250, | |
| "temperature": 0.7, | |
| "do_sample": True | |
| } | |
| } | |
| try: | |
| response = query_huggingface_model(payload, hf_api_key) | |
| if "error" in response: # Handle API key not found or other API errors | |
| st.error(f"Hugging Face API error: {response['error']}") | |
| return None | |
| # Hugging Face API responses can vary. We need to parse the text output. | |
| # Assuming the model returns a string that needs to be parsed as JSON. | |
| generated_text = response[0]['generated_text'] | |
| # Attempt to parse the generated text as JSON | |
| mcq_data = json.loads(generated_text) | |
| # Basic validation | |
| if all(k in mcq_data for k in ["question", "options", "correct_answer", "explanation"]): | |
| return { | |
| "question": mcq_data["question"], | |
| "options": mcq_data["options"], | |
| "correct_answer": mcq_data["correct_answer"], | |
| "explanation": mcq_data["explanation"], | |
| "topic": topic, | |
| "difficulty": difficulty, | |
| } | |
| else: | |
| st.warning("LLM generated incomplete or malformed JSON. Retrying...") | |
| return None # Indicate failure | |
| except json.JSONDecodeError: | |
| st.error(f"Failed to decode JSON from LLM response: {generated_text}") | |
| return None | |
| except requests.exceptions.RequestException as e: | |
| st.error(f"Hugging Face API request failed: {e}") | |
| return None | |
| except Exception as e: | |
| st.error(f"An unexpected error occurred during LLM question generation: {e}") | |
| return None | |
def generate_quiz_set_with_llm(topic: str, difficulty: str, num_questions: int = 5, hf_api_key: str = None) -> list:
    """
    Build a list of up to *num_questions* MCQs for *topic* at *difficulty*.

    Each question gets up to 3 generation attempts (with a 1-second pause
    after a failed attempt). Generation stops early — returning the partial
    set — as soon as one question cannot be produced. Returns an empty list
    when no API key is supplied.
    """
    if not hf_api_key:
        st.error("Hugging Face API key not found. Please set the HF_API_KEY environment variable.")
        return []
    questions = []
    for index in range(num_questions):
        generated = None
        for _attempt in range(3):
            generated = generate_mcq_with_llm(topic, difficulty, hf_api_key)
            if generated is not None:
                break
            time.sleep(1)  # Wait before retrying
        if not generated:
            st.warning(f"Could not generate question {index+1} after retries for topic '{topic}'.")
            break
        questions.append(generated)
    return questions