# app.py — Hugging Face Space by Adedoyinjames (commit 814a607, verified).
# NOTE: the lines above the imports were web-page residue from the HF file
# viewer; converted to this comment so the module parses as Python.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Small instruct model chosen to fit the Hugging Face free CPU tier.
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"

# Half precision only when a CUDA device is present; plain CPUs run fp32.
_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=_dtype,
    device_map="auto",
    trust_remote_code=True,
)
# ==================== YOUR EXACT SYSTEM PROMPT ====================
# Prompt template sent as the *user* message of each request. The
# ${query} and ${context} placeholders are substituted at request time
# by respond(); the template bytes themselves must not be edited, since
# the model's output-format contract ([SCENE]...[/SCENE] JSON) lives here.
BASE_SYSTEM = """You are an expert AI tutor and SVG illustrator. Create a comprehensive, multi-part interactive lesson based on the user's request.
User Request: "${query}"
Previous Context:
${context}
INSTRUCTIONS:
1. Generate as many scenes (slides) as necessary to provide a deeply comprehensive, master-level explanation. Go over and beyond to achieve absolute clarity and in-depth understanding. Do NOT limit yourself to a small number of slides.
2. For each scene, provide 'spokenText' (what the tutor says) and 'svgCode' (the visual).
3. You are an ELITE, world-class SVG illustrator and data visualization expert. The SVGs MUST be highly detailed, visually stunning, modern, and extremely accurate to the description. Use gradients, shadows, precise geometry, and professional color palettes. The visual quality should rival premium educational textbooks.
4. You MUST use SVG animations (<animate>, <animateTransform>, or CSS @keyframes inside <style>) to make the diagrams dynamic and clearly illustrate the concepts (e.g., flowing arrows, moving parts, pulsing highlights).
5. Randomly include a multiple-choice 'question' in about 20% to 30% of the scenes. The question MUST strictly test knowledge covered in the immediately preceding slides. Do not ask about concepts that haven't been explained yet. Occasionally, instead of a knowledge question, ask a simple check-in question like "Are you following along?" or "Would you like me to clarify anything?" with options like ["Yes, I'm following", "Please clarify"].
6. The SVG must use viewBox='0 0 800 600' and be centralized.
CRITICAL OUTPUT FORMAT:
You MUST output the lesson as a stream of individual scenes wrapped in [SCENE] and [/SCENE] tags. Inside the tags, provide a valid JSON object.
Example:
[SCENE]
{
"spokenText": "Welcome to the lesson...",
"svgCode": "<svg viewBox='0 0 800 600'>...</svg>",
"question": null
}
[/SCENE]
"""
def respond(message, history):
    """Build the lesson prompt from the chat history and generate a reply.

    Args:
        message: The latest user message (str).
        history: Gradio chat history as (user_msg, bot_msg) pairs.
            NOTE(review): assumes the tuple-style history format; if the
            ChatInterface is switched to message-dict mode, this unpacking
            would fail — confirm against the installed Gradio version.

    Returns:
        The decoded model completion (str), prompt tokens excluded.
    """
    # Keep only the last 3 exchanges, with assistant turns truncated to
    # 800 chars, to save memory and speed up generation on CPU.
    context_parts = []
    for user_msg, bot_msg in history[-3:]:
        context_parts.append(f"User: {user_msg}\nAssistant: {bot_msg[:800]}...")
    context_str = "\n\n".join(context_parts) if context_parts else "No previous context."

    # BUG FIX: the original targets ("\( {query}" and " \){context}") were
    # LaTeX-escape residue and never matched, so the prompt was sent with
    # its placeholders unfilled and the user's message was silently dropped.
    # BASE_SYSTEM actually uses ${query} and ${context}.
    user_content = (
        BASE_SYSTEM
        .replace("${query}", message)
        .replace("${context}", context_str)
    )

    messages = [
        {"role": "system", "content": "You are an expert AI tutor and SVG illustrator."},
        {"role": "user", "content": user_content},
    ]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    # Inference only: disable autograd bookkeeping to cut memory and latency.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=4096,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            repetition_penalty=1.1,
        )

    # Decode only the newly generated tokens (everything after the prompt).
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1]:],
        skip_special_tokens=True,
    )
    return response
# ==================== GRADIO INTERFACE ====================
# FIX: `theme` is a ChatInterface/Blocks *constructor* argument, not a
# `launch()` argument — passing it to launch() is rejected on current
# Gradio versions. Moved gr.themes.Soft() into the constructor.
demo = gr.ChatInterface(
    fn=respond,
    title="🧠 Qwen 1.5B • AI Tutor & SVG Illustrator (CPU friendly)",
    description="""Running on Qwen2.5-1.5B-Instruct for Hugging Face free CPU tier (16 GB RAM).
Ask any topic and receive multi-scene lessons with animated SVG code exactly as per your prompt.""",
    examples=[
        ["Explain photosynthesis with animated SVG diagrams"],
        ["Teach me how transformers work step by step"],
        ["Create a lesson on basic algebra with illustrations"],
        ["Explain quantum entanglement using animated diagrams"],
    ],
    theme=gr.themes.Soft(),
)

if __name__ == "__main__":
    demo.launch(share=False)