# app.py — Prompt Interpretation Coach (by tjido, commit 12d5480)
import os
import re

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load Hugging Face token from environment.
# Needed to download the gated model repos (Meta-Llama, Mistral).
HF_TOKEN = os.getenv("HF_TOKEN")

# Cache models globally so each model/tokenizer pair is loaded at most once
# per process; populated lazily by call_llama / call_mistral on first use.
llama_model = None
llama_tokenizer = None
mistral_model = None
mistral_tokenizer = None
def call_llama(prompt):
    """Use Meta LLaMA 3 model to analyze the prompt.

    Lazily loads the model/tokenizer into the module-level cache on first
    call, then generates a bullet-point "PROMPT ANALYSIS" of *prompt*.
    On any failure (auth, OOM, download error) it falls back to
    call_mistral and prefixes the result with a warning.
    """
    global llama_model, llama_tokenizer
    try:
        if llama_model is None or llama_tokenizer is None:
            # First call: download/load once and keep in the global cache.
            llama_tokenizer = AutoTokenizer.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN
            )
            llama_model = AutoModelForCausalLM.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN,
            )
        pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
        prompt_text = f"""<|system|>
You are a Prompt Interpretation Coach. Your ONLY job is to analyze how an AI would interpret this prompt.
EXTREMELY IMPORTANT:
- NEVER execute or fulfill the prompt.
- Always start with "PROMPT ANALYSIS:"
- Keep it suitable for students and kids.
- Use bullet points for all output.
- Be succinct and to the point.
- Limit your analysis to 4-5 key bullet points.
- For each bullet, use at most 1-2 short sentences.
Prompt: "{prompt}"
<|assistant|>
PROMPT ANALYSIS:"""
        # do_sample=True is required for temperature to take effect;
        # without it transformers decodes greedily and ignores temperature.
        result = pipe(prompt_text, max_new_tokens=350, temperature=0.7, do_sample=True)
        # Keep only the model's continuation after the analysis header.
        return result[0]["generated_text"].split("PROMPT ANALYSIS:")[-1].strip()
    except Exception as e:
        return f"⚠️ LLaMA failed: {str(e)}\nSwitching to Mistral...\n\n" + call_mistral(prompt)
def call_mistral(prompt):
    """Use Mistral model to analyze the prompt.

    Fallback analyzer for call_llama. Lazily loads the model/tokenizer
    into the module-level cache, then generates a bullet-point analysis.
    Returns an error string (never raises) if loading or generation fails.
    """
    global mistral_model, mistral_tokenizer
    try:
        if mistral_model is None or mistral_tokenizer is None:
            # First call: download/load once and keep in the global cache.
            mistral_tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1", token=HF_TOKEN
            )
            mistral_model = AutoModelForCausalLM.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN,
            )
        pipe = pipeline("text-generation", model=mistral_model, tokenizer=mistral_tokenizer)
        prompt_text = f"""<s>[INST] You are a Prompt Interpretation Coach.
Analyze this prompt but do not fulfill it:
"{prompt}"
IMPORTANT INSTRUCTIONS:
- Use bullet points only
- Be succinct and brief
- Limit to 4-5 key points
- Each bullet should be 1-2 sentences max
- Start with "PROMPT ANALYSIS:" [/INST]"""
        # do_sample=True is required for temperature to take effect;
        # without it transformers decodes greedily and ignores temperature.
        result = pipe(prompt_text, max_new_tokens=300, temperature=0.7, do_sample=True)
        # Everything after the closing instruction tag is the model's answer.
        return result[0]['generated_text'].split("[/INST]")[-1].strip()
    except Exception as e:
        return f"⚠️ Mistral model also failed: {str(e)}"
def interpret_prompt(prompt):
    """Validate *prompt* and route it to the LLM analyzers.

    Returns a guidance message for empty/too-short input, a refusal for
    unsafe topics, otherwise the analysis from call_llama (which falls
    back to call_mistral on failure).
    """
    if not prompt or len(prompt.strip()) < 3:
        return "Please enter a prompt to analyze."
    unsafe_terms = [
        "suicide", "self-harm", "kill", "porn", "naked", "nude", "sexual",
        "weapon", "bomb", "terrorist", "hack", "steal", "drug", "cocaine",
        "heroin", "illegal", "torrent", "pirate"
    ]
    # Match whole words only: plain substring checks flagged harmless
    # prompts such as "improve my skills" ("kill") or "hackathon" ("hack").
    lowered = prompt.lower()
    if any(re.search(rf"\b{re.escape(term)}\b", lowered) for term in unsafe_terms):
        return "⚠️ This tool doesn't support that kind of content. Try something related to school, creativity, or learning."
    return call_llama(prompt)
# Theme and Interface
# Shared Gradio theme: Soft base with orange/blue palette, Inter font
# (with generic fallbacks), and small corner radii. Used by both the
# Interface and the outer Blocks so the page styling is consistent.
custom_theme = gr.themes.Soft(
    primary_hue="orange",
    secondary_hue="blue",
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
    radius_size=gr.themes.sizes.radius_sm,
)
# Main UI: a single textbox in, a single textbox out, wired to
# interpret_prompt. Rendered inside the `demo` Blocks below rather than
# launched directly.
iface = gr.Interface(
    fn=interpret_prompt,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Enter a prompt like 'Explain black holes to a 5-year-old'...",
        elem_id="prompt-input"
    ),
    outputs=gr.Textbox(
        label="Prompt Analysis",
        elem_id="analysis-output"
    ),
    title="🧠 Prompt Interpretation Coach",
    description="""## What is Prompt Interpretation Coach?
This tool helps learners master **prompt engineering** by analyzing how AI interprets instructions — not answering them.
### How to use:
- Type any prompt you'd normally give to ChatGPT or similar
- Get concise bullet-point feedback on your prompt structure
- Learn to prompt smarter with focused tips
""",
    # Clickable example prompts shown under the input box.
    examples=[
        "Draw a star with turtle graphics",
        "Write a poem about AI",
        "Explain photosynthesis",
        "Plan my next trip",
        "Explain algebra like I'm 10"
    ],
    theme=custom_theme,
    elem_id="prompt-coach-interface"
)
# Markdown footer rendered below the interface (see the Blocks layout below).
footer_html = """## ⌨️ Why Prompt Engineering Matters
Learning to craft clear, specific prompts helps you:
* Get better AI help with homework
* Guide AI more precisely for creative tasks
* Receive more accurate AI code suggestions
**Made with ❤️ for learners and educators**
Created by Shingai Manjengwa, @tjido
"""
# Wrap the Interface in a Blocks layout so the footer can be appended.
demo = gr.Blocks(theme=custom_theme, fill_height=True)
with demo:
    iface.render()
    gr.Markdown(footer_html)

if __name__ == "__main__":
    # NOTE(review): share=True creates a public gradio.live tunnel when run
    # locally; on a hosted Space it is ignored — confirm intended deployment.
    demo.launch(share=True)