from flask import Flask, Blueprint, request, jsonify, current_app
import openai
import random
import os
from flask_cors import CORS
# --- Blueprint ---
reading_bp = Blueprint("reading", __name__)

# Module-level app retained for backward compatibility with code that imports
# `app`; the standalone runner at the bottom builds its own configured instance.
app = Flask(__name__)
CORS(app)

# Fallback key read once at import time; per-request Flask config takes
# precedence (see _ensure_openai_key).
# SECURITY: a hard-coded API key previously lived here in a comment — removed.
# Any key that was ever committed should be considered leaked and rotated.
_OPENAI_API_KEY_FALLBACK = os.getenv("OPENAI_API_KEY", "")
def _ensure_openai_key():
    """Refresh openai.api_key from Flask app config, else the env fallback."""
    # current_app is a proxy that is falsy outside an application context,
    # so this is safe to call from non-request code paths too.
    configured = None
    if current_app:
        configured = current_app.config.get("OPENAI_API_KEY")
    key = configured or _OPENAI_API_KEY_FALLBACK
    if key:
        openai.api_key = key
# Function to generate content dynamically based on the topic and difficulty level
# Difficulty -> prompt template. Unknown difficulties fall back to "hard",
# matching the original if/elif/else behavior.
_CONTENT_PROMPTS = {
    "easy": "Write a very simple and basic explanation about {topic} for children aged 6-8. Use very simple words and short sentences.",
    "medium": "Write a detailed and engaging explanation about {topic} for children aged 9-12. Use simple words but include more details.",
    "hard": "Write an in-depth explanation about {topic} for children aged 13-16. Use more complex words and provide deeper insights into the topic.",
}


def generate_content(topic, difficulty):
    """Generate a reading passage about *topic* tuned to *difficulty*.

    Args:
        topic: Subject of the passage (interpolated into the prompt).
        difficulty: "easy", "medium", or "hard" (anything else is treated
            as hard, as before).

    Returns:
        The generated passage text, or an "Error generating content: ..."
        string on any API failure — callers rely on the string form rather
        than an exception, so this is deliberate best-effort handling.
    """
    _ensure_openai_key()
    try:
        template = _CONTENT_PROMPTS.get(difficulty, _CONTENT_PROMPTS["hard"])
        instruction = template.format(topic=topic)
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a friendly teacher explaining concepts to students."},
                {"role": "user", "content": instruction},
            ],
            max_tokens=700,
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Surface the failure as content rather than a 500 (route returns it as-is).
        return f"Error generating content: {str(e)}"
# Function to generate multiple-choice questions from content based on difficulty level
# Difficulty -> question-generation directive. Unknown difficulties fall back
# to "hard", matching the original if/elif/else behavior.
_QUESTION_PROMPTS = {
    "easy": "Generate 3 very simple multiple-choice questions based on the content. The questions should be very easy to understand.",
    "medium": "Generate 3 multiple-choice questions with moderate difficulty based on the content.",
    "hard": "Generate 3 challenging multiple-choice questions that require deep understanding of the content.",
}


def generate_questions(content, difficulty):
    """Generate 3 multiple-choice questions for *content* at *difficulty*.

    Args:
        content: The passage to quiz on (typically from generate_content).
        difficulty: "easy", "medium", or "hard" (anything else is treated
            as hard).

    Returns:
        The model's formatted question text, or an
        "Error generating questions: ..." string on any API failure.
    """
    _ensure_openai_key()
    try:
        question_instruction = _QUESTION_PROMPTS.get(difficulty, _QUESTION_PROMPTS["hard"])
        # BUG FIX: previously the content was split on ".", shuffled, and only
        # 3 random fragments were joined into the prompt, so the questions
        # could target arbitrary slivers of the passage and varied between
        # identical requests. Send the full content instead.
        prompt = (
            f"{question_instruction}\nContent:\n{content}\n\n"
            "Format the output like this:\n\n"
            "1. Question: What is XYZ?\nOptions: [Option 1, Option 2, Option 3, Option 4]\nCorrect Answer: Option 1\n\n"
            "2. Question: Why does XYZ happen?\nOptions: [Option 1, Option 2, Option 3, Option 4]\nCorrect Answer: Option 2"
        )
        response = openai.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant who generates educational multiple-choice questions."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=700,
            temperature=0.7,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Best-effort: route returns this string in place of questions.
        return f"Error generating questions: {str(e)}"
@reading_bp.route('/generate_content', methods=['POST'])
def generate_content_route():
    """POST {topic, difficulty?} -> {"content": <passage>}.

    Returns 400 with an {"error": ...} payload on missing topic or invalid
    difficulty.
    """
    # silent=True: malformed JSON or a wrong Content-Type yields None instead
    # of Flask aborting with 400/415, so we always return our own error shape.
    data = request.get_json(silent=True) or {}
    topic = data.get('topic')
    difficulty = data.get('difficulty', 'medium')  # default to medium if omitted
    if not topic:
        return jsonify({"error": "Topic is required"}), 400
    if difficulty not in ("easy", "medium", "hard"):
        return jsonify({"error": "Invalid difficulty level. Choose 'easy', 'medium', or 'hard'."}), 400
    content = generate_content(topic, difficulty)
    return jsonify({"content": content})
@reading_bp.route('/generate_questions', methods=['POST'])
def generate_questions_route():
    """POST {content, difficulty?} -> {"questions": <formatted MCQs>}.

    Returns 400 with an {"error": ...} payload on missing content or invalid
    difficulty.
    """
    # silent=True: malformed JSON or a wrong Content-Type yields None instead
    # of Flask aborting with 400/415, so we always return our own error shape.
    data = request.get_json(silent=True) or {}
    content = data.get('content')
    difficulty = data.get('difficulty', 'medium')  # default to medium if omitted
    if not content:
        return jsonify({"error": "Content is required"}), 400
    if difficulty not in ("easy", "medium", "hard"):
        return jsonify({"error": "Invalid difficulty level. Choose 'easy', 'medium', or 'hard'."}), 400
    questions = generate_questions(content, difficulty)
    return jsonify({"questions": questions})
@reading_bp.route('/validate_answer', methods=['POST'])
def validate_answer():
    """POST {question: {correct_answer: ...}, selected_answer} ->
    {"is_correct": bool, "correct_answer": str}.

    NOTE(review): the response echoes the correct answer to the client, which
    the frontend appears to rely on — confirm this is intended.
    """
    data = request.get_json(silent=True) or {}
    question = data.get('question')
    selected_answer = data.get('selected_answer')
    if not question or not selected_answer:
        return jsonify({"error": "Question and answer are required"}), 400
    # BUG FIX: a malformed question payload (not a dict, or missing the
    # "correct_answer" key) previously raised KeyError/TypeError -> HTTP 500.
    if not isinstance(question, dict) or "correct_answer" not in question:
        return jsonify({"error": "Question and answer are required"}), 400
    # Strip both sides so stray whitespace doesn't fail the comparison.
    correct_answer = str(question["correct_answer"]).strip()
    selected_answer = str(selected_answer).strip()
    # Debug aid: log the expected answer on the server console only.
    print(f"Correct Answer: {correct_answer}")
    is_correct = selected_answer == correct_answer
    return jsonify({"is_correct": is_correct, "correct_answer": correct_answer})
# --- Optional: allow this file to run standalone locally while still using the blueprint ---
if __name__ == '__main__':
    # Build a fresh app for standalone runs so it carries the env-sourced key
    # and the registered blueprint (the bare module-level `app` has neither).
    standalone_app = Flask(__name__)
    CORS(standalone_app)
    # For local runs, pull key from env; no hard-coding.
    standalone_app.config["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY", "")
    standalone_app.register_blueprint(reading_bp, url_prefix='')
    standalone_app.run(host='0.0.0.0', port=5001, debug=True)