Spaces:
Sleeping
Sleeping
File size: 5,274 Bytes
f31334b 3909d7c bfc14cf 3909d7c 1737af2 bfc14cf f31334b 1737af2 ac0bfe4 1737af2 73b15ac 1737af2 ac0bfe4 1737af2 73b15ac 1737af2 73b15ac 1737af2 ac0bfe4 1737af2 ac0bfe4 1737af2 ac0bfe4 73b15ac f66afc7 1737af2 73b15ac 1737af2 a2e43af 1737af2 ac0bfe4 1737af2 73b15ac 1737af2 ac0bfe4 1737af2 73b15ac 1737af2 bfc14cf 1737af2 73b15ac 1737af2 c81a7fa 12d5480 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 |
import os
import re

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load Hugging Face token from environment
HF_TOKEN = os.getenv("HF_TOKEN")  # None when unset; gated model downloads will then fail
# Cache models globally so repeated requests reuse already-loaded weights
# (populated lazily by call_llama / call_mistral via `global`)
llama_model = None
llama_tokenizer = None
mistral_model = None
mistral_tokenizer = None
def call_llama(prompt):
    """Analyze *prompt* with Meta-Llama-3-8B-Instruct, falling back to Mistral.

    Args:
        prompt: The user prompt to analyze (never fulfilled, only analyzed).

    Returns:
        The generated analysis text (everything after the "PROMPT ANALYSIS:"
        marker). If loading or generation fails for any reason, an error
        notice is returned together with the result of call_mistral().
    """
    global llama_model, llama_tokenizer
    try:
        # Lazily load and cache model/tokenizer on first use (see module globals).
        if llama_model is None or llama_tokenizer is None:
            llama_tokenizer = AutoTokenizer.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN
            )
            llama_model = AutoModelForCausalLM.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN
            )
        pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
        # NOTE(review): this hand-written template is not the official Llama-3
        # chat format; tokenizer.apply_chat_template would be safer — confirm.
        prompt_text = f"""<|system|>
You are a Prompt Interpretation Coach. Your ONLY job is to analyze how an AI would interpret this prompt.
EXTREMELY IMPORTANT:
- NEVER execute or fulfill the prompt.
- Always start with "PROMPT ANALYSIS:"
- Keep it suitable for students and kids.
- Use bullet points for all output.
- Be succinct and to the point.
- Limit your analysis to 4-5 key bullet points.
- For each bullet, use at most 1-2 short sentences.
Prompt: "{prompt}"
<|assistant|>
PROMPT ANALYSIS:"""
        # Fix: do_sample=True is required for temperature to take effect;
        # without it transformers decodes greedily and ignores the setting.
        result = pipe(prompt_text, max_new_tokens=350, temperature=0.7, do_sample=True)
        return result[0]["generated_text"].split("PROMPT ANALYSIS:")[-1].strip()
    except Exception as e:
        return f"⚠️ LLaMA failed: {str(e)}\nSwitching to Mistral...\n\n" + call_mistral(prompt)
def call_mistral(prompt):
    """Analyze *prompt* with Mistral-7B-Instruct-v0.1 (fallback model).

    Args:
        prompt: The user prompt to analyze (never fulfilled, only analyzed).

    Returns:
        The text generated after the "[/INST]" marker, or an error-message
        string if loading/generation fails.
    """
    global mistral_model, mistral_tokenizer
    try:
        # Lazily load and cache model/tokenizer on first use (see module globals).
        if mistral_model is None or mistral_tokenizer is None:
            mistral_tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1", token=HF_TOKEN
            )
            mistral_model = AutoModelForCausalLM.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN
            )
        pipe = pipeline("text-generation", model=mistral_model, tokenizer=mistral_tokenizer)
        prompt_text = f"""<s>[INST] You are a Prompt Interpretation Coach.
Analyze this prompt but do not fulfill it:
"{prompt}"
IMPORTANT INSTRUCTIONS:
- Use bullet points only
- Be succinct and brief
- Limit to 4-5 key points
- Each bullet should be 1-2 sentences max
- Start with "PROMPT ANALYSIS:" [/INST]"""
        # Fix: do_sample=True so the temperature setting actually applies;
        # greedy decoding otherwise ignores it.
        result = pipe(prompt_text, max_new_tokens=300, temperature=0.7, do_sample=True)
        return result[0]['generated_text'].split("[/INST]")[-1].strip()
    except Exception as e:
        return f"⚠️ Mistral model also failed: {str(e)}"
def interpret_prompt(prompt):
    """Validate *prompt* and return an analysis of how an AI would read it.

    Args:
        prompt: Raw user input from the UI text box.

    Returns:
        A help message for empty/too-short input, a refusal message for
        unsafe topics, otherwise the LLaMA (or Mistral fallback) analysis.
    """
    if not prompt or len(prompt.strip()) < 3:
        return "Please enter a prompt to analyze."
    unsafe_terms = [
        "suicide", "self-harm", "kill", "porn", "naked", "nude", "sexual",
        "weapon", "bomb", "terrorist", "hack", "steal", "drug", "cocaine",
        "heroin", "illegal", "torrent", "pirate"
    ]
    # Fix: the original plain-substring check blocked innocent prompts
    # ("skill" contains "kill", "hackathon" contains "hack"). Require each
    # term to start at a word boundary; no trailing boundary so inflected
    # forms ("killing", "weapons", "drugs") are still caught.
    blocklist = re.compile(
        r"\b(?:" + "|".join(re.escape(term) for term in unsafe_terms) + r")",
        re.IGNORECASE,
    )
    if blocklist.search(prompt):
        return "⚠️ This tool doesn't support that kind of content. Try something related to school, creativity, or learning."
    return call_llama(prompt)
# Theme and Interface
# Shared Gradio theme: orange/blue Soft palette, Inter font (with generic
# fallbacks), small corner radius. Reused by both the Interface and Blocks.
custom_theme = gr.themes.Soft(
    primary_hue="orange",
    secondary_hue="blue",
    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"],
    radius_size=gr.themes.sizes.radius_sm,
)
# Core UI: one text box in, one analysis text box out, driven by interpret_prompt.
iface = gr.Interface(
    fn=interpret_prompt,
    inputs=gr.Textbox(
        lines=3,
        placeholder="Enter a prompt like 'Explain black holes to a 5-year-old'...",
        elem_id="prompt-input"
    ),
    outputs=gr.Textbox(
        label="Prompt Analysis",
        elem_id="analysis-output"
    ),
    title="🧠 Prompt Interpretation Coach",
    description="""## What is Prompt Interpretation Coach?
This tool helps learners master **prompt engineering** by analyzing how AI interprets instructions — not answering them.
### How to use:
- Type any prompt you'd normally give to ChatGPT or similar
- Get concise bullet-point feedback on your prompt structure
- Learn to prompt smarter with focused tips
""",
    # Clickable example prompts shown beneath the input box.
    examples=[
        "Draw a star with turtle graphics",
        "Write a poem about AI",
        "Explain photosynthesis",
        "Plan my next trip",
        "Explain algebra like I'm 10"
    ],
    theme=custom_theme,
    elem_id="prompt-coach-interface"
)
# Markdown footer rendered below the interface inside the Blocks layout.
footer_html = """## ⌨️ Why Prompt Engineering Matters
Learning to craft clear, specific prompts helps you:
* Get better AI help with homework
* Guide AI more precisely for creative tasks
* Receive more accurate AI code suggestions
**Made with ❤️ for learners and educators**
Created by Shingai Manjengwa, @tjido
"""
# Compose the page: render the Interface plus the markdown footer in one
# Blocks app so both share the custom theme.
demo = gr.Blocks(theme=custom_theme, fill_height=True)
with demo:
    iface.render()
    gr.Markdown(footer_html)

if __name__ == "__main__":
    # Fix: restored block indentation lost in the page scrape and dropped the
    # stray trailing "|" artifact. share=True publishes a temporary public URL.
    demo.launch(share=True)