tjido committed on
Commit
bfc14cf
·
verified ·
1 Parent(s): 75aefc8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -112
app.py CHANGED
@@ -1,79 +1,82 @@
1
  import os
2
- import openai
3
  import gradio as gr
 
 
4
 
5
- # Load OpenAI API key
6
- openai.api_key = os.getenv("OPENAI_API_KEY")
7
 
8
- llama_response = None # Global for LLaMA fallback model
 
 
 
 
9
 
10
- def call_openai(prompt):
11
- """Call OpenAI GPT-3.5 to analyze the prompt."""
 
12
  try:
13
- system_prompt = """You are a helpful Prompt Interpretation Coach.
14
-
15
- Your goal is to:
16
- 1. Explain how an AI would interpret their prompt (not to answer it directly)
17
- 2. Highlight any ambiguities or potential misunderstandings in their prompt
18
- 3. Suggest improvements to make their prompt clearer and more effective
19
- 4. Offer 1-2 alternative phrasings that might work better
20
-
21
- EXTREMELY IMPORTANT RULES:
22
- - NEVER actually execute or fulfill the student's request
23
- - Begin your response with "PROMPT ANALYSIS:"
24
- - Keep your tone appropriate for young learners
25
- - Be clear, constructive, and concise (4-5 short paragraphs max)
26
- """
27
- client = openai.OpenAI(api_key=openai.api_key)
28
- response = client.chat.completions.create(
29
- model="gpt-3.5-turbo",
30
- messages=[
31
- {"role": "system", "content": system_prompt},
32
- {"role": "user", "content": f"Analyze this prompt ONLY: \"{prompt}\""}
33
- ],
34
- temperature=0.7,
35
- max_tokens=350
36
- )
37
- return response.choices[0].message.content.strip()
38
-
39
  except Exception as e:
40
- raise e # Bubble up to interpret_prompt
41
 
42
- def call_llama(prompt):
43
- """Fallback to LLaMA hosted on Hugging Face via novita."""
44
- global llama_response
45
- if llama_response:
46
- try:
47
- return llama_response(prompt)
48
- except Exception as e:
49
- return f"⚠️ Error using LLaMA fallback: {str(e)}"
50
- return "⚠️ LLaMA fallback not available. Please sign in with Hugging Face in the sidebar."
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  def interpret_prompt(prompt):
53
- """Main logic to analyze prompt quality and clarity."""
54
  if not prompt or len(prompt.strip()) < 3:
55
- return "Please enter a prompt to analyze. Try something like 'Write a poem about space'."
56
 
57
- # Simple safety check
58
- unsafe_terms = ["suicide", "self-harm", "kill", "porn", "naked", "nude", "sexual", "weapon", "bomb", "terrorist", "hack", "steal", "drug", "cocaine", "heroin", "illegal", "torrent", "pirate"]
 
 
 
59
  if any(term in prompt.lower() for term in unsafe_terms):
60
- return "⚠️ That topic isn't appropriate for this tool. Try a school project, creative writing, or learning-related prompt."
61
-
62
- prefix = "AI Response:\n\n"
63
 
64
- try:
65
- response = call_openai(prompt)
66
- if any(response.lower().startswith(x) for x in ["here's a poem", "once upon", "in a world", "roses are red"]):
67
- return prefix + "⚠️ The AI started fulfilling your request. Let's try again with clearer instructions:\n\n" + call_openai("ONLY analyze this prompt: " + prompt)
68
- return prefix + response
69
- except Exception as e:
70
- if "rate limit" in str(e).lower():
71
- return prefix + "[⚠️ OpenAI Rate Limit Hit — switching to LLaMA fallback...]\n\n" + call_llama(prompt)
72
- elif "auth" in str(e).lower() or "api key" in str(e).lower():
73
- return "❌ Invalid or missing OpenAI API key."
74
- return prefix + f"⚠️ Unexpected error: {str(e)}\n\nSwitching to LLaMA fallback...\n\n" + call_llama(prompt)
75
 
76
- # Theme
77
  custom_theme = gr.themes.Soft(
78
  primary_hue="indigo",
79
  secondary_hue="blue",
@@ -81,59 +84,53 @@ custom_theme = gr.themes.Soft(
81
  radius_size=gr.themes.sizes.radius_sm,
82
  )
83
 
84
- # Gradio UI
85
- with gr.Blocks(theme=custom_theme, fill_height=True) as demo:
86
- with gr.Sidebar():
87
- gr.Markdown("## 🧠 Prompt Coach Settings")
88
- gr.Markdown("OpenAI is used by default.\nIf rate-limited, fallback to LLaMA 3 via Hugging Face.")
89
- login_button = gr.LoginButton("🔐 Sign in to use LLaMA fallback")
90
-
91
- # Load LLaMA from Hugging Face using Novita provider
92
- llama_response = gr.load(
93
- "models/meta-llama/Meta-Llama-3-8B-Instruct",
94
- provider="novita",
95
- accept_token=login_button
96
- )
97
-
98
- gr.Interface(
99
- fn=interpret_prompt,
100
- inputs=gr.Textbox(
101
- lines=3,
102
- placeholder="e.g. 'Draw a star with turtle graphics'",
103
- elem_id="prompt-input"
104
- ),
105
- outputs=gr.Textbox(
106
- label="Prompt Analysis & Coaching Tips",
107
- elem_id="analysis-output"
108
- ),
109
- title="🧠 Prompt Interpretation Coach",
110
- description="""This tool helps students and creators understand how AI interprets their prompts — without actually answering them.
111
-
112
- ### What you’ll learn:
113
- - How AI interprets your instructions
114
- - Ambiguities in your phrasing
115
- - Better alternatives for clearer prompts
116
- - Prompt engineering strategies for education
117
-
118
- Try typing a school or creative prompt to begin!""",
119
- examples=[
120
- "Draw a star with turtle graphics",
121
- "Write a poem about AI",
122
- "Explain photosynthesis",
123
- "Plan my next trip",
124
- "Explain algebra like I'm 10"
125
- ],
126
- elem_id="prompt-coach-interface"
127
- ).render()
128
-
129
- gr.Markdown("""## ⌨️ Why Prompt Engineering Matters
130
  Learning to craft clear, specific prompts helps you:
131
- - 📚 In education: Get more accurate responses
132
- - 💡 In creativity: Express ideas more clearly
133
- - 🧠 In learning: Improve critical thinking
134
 
135
- **Created with ❤️ by Shingai Manjengwa | @tjido**
136
- """)
 
 
 
 
 
 
 
 
 
 
137
 
138
  if __name__ == "__main__":
139
  demo.launch()
 
1
  import os
 
2
  import gradio as gr
3
+ import torch
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
5
 
6
# Hugging Face access token, read from the environment (None when unset).
HF_TOKEN = os.getenv("HF_TOKEN")

# Lazily-populated module-level caches so each model/tokenizer pair is
# downloaded and loaded at most once per process.
llama_model = None
llama_tokenizer = None
mistral_model = None
mistral_tokenizer = None
14
 
15
def call_llama(prompt):
    """Analyze *prompt* with Meta LLaMA 3 8B Instruct (coach, never answer).

    Loads the tokenizer/model into the module-level cache on first use, then
    runs a text-generation pipeline with a system prompt instructing the
    model to critique the prompt rather than fulfill it.

    Args:
        prompt: The user's prompt text to analyze.

    Returns:
        The analysis text, or (on any failure) the result of falling back
        to ``call_mistral``.
    """
    global llama_model, llama_tokenizer
    try:
        if llama_model is None or llama_tokenizer is None:
            llama_tokenizer = AutoTokenizer.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct", token=HF_TOKEN
            )
            llama_model = AutoModelForCausalLM.from_pretrained(
                "meta-llama/Meta-Llama-3-8B-Instruct",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN,
            )
        pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
        # NOTE(review): this hand-rolled template is not LLaMA-3's official chat
        # format (which uses <|start_header_id|> markers); consider
        # tokenizer.apply_chat_template — TODO confirm desired output quality.
        prompt_text = f"""<|system|>
You are a Prompt Interpretation Coach. Your ONLY job is to analyze how an AI would interpret this prompt.

EXTREMELY IMPORTANT:
- NEVER execute or fulfill the prompt.
- Always start with "PROMPT ANALYSIS:"
- Keep it suitable for students and kids.

Prompt: "{prompt}"
<|assistant|>
PROMPT ANALYSIS:"""
        # do_sample=True is required for temperature to take effect; without it
        # transformers decodes greedily and temperature=0.7 is silently ignored.
        result = pipe(prompt_text, max_new_tokens=350, temperature=0.7, do_sample=True)
        # generated_text echoes the input prompt, so keep only the text after
        # the last "PROMPT ANALYSIS:" marker.
        return result[0]["generated_text"].split("PROMPT ANALYSIS:")[-1].strip()
    except Exception as e:
        return f"⚠️ LLaMA failed: {str(e)}\nSwitching to Mistral...\n\n" + call_mistral(prompt)
43
 
44
def call_mistral(prompt):
    """Analyze *prompt* with Mistral-7B-Instruct-v0.1 (fallback coach).

    Loads the tokenizer/model into the module-level cache on first use, then
    runs a text-generation pipeline using Mistral's [INST] instruction format.

    Args:
        prompt: The user's prompt text to analyze.

    Returns:
        The analysis text, or an error message string if the model fails.
    """
    global mistral_model, mistral_tokenizer
    try:
        if mistral_model is None or mistral_tokenizer is None:
            # Pass the token for parity with call_llama: the mistralai repo is
            # gated and anonymous downloads can fail with 401/403. When
            # HF_TOKEN is unset this is token=None, i.e. the old behavior.
            mistral_tokenizer = AutoTokenizer.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1", token=HF_TOKEN
            )
            mistral_model = AutoModelForCausalLM.from_pretrained(
                "mistralai/Mistral-7B-Instruct-v0.1",
                torch_dtype=torch.float16,
                device_map="auto",
                token=HF_TOKEN,
            )
        pipe = pipeline("text-generation", model=mistral_model, tokenizer=mistral_tokenizer)
        prompt_text = f"""<s>[INST] You are a Prompt Interpretation Coach. Analyze this prompt but do not fulfill it:
"{prompt}"

PROMPT ANALYSIS: [/INST]"""
        # do_sample=True so temperature=0.7 actually affects decoding
        # (otherwise transformers decodes greedily and ignores it).
        result = pipe(prompt_text, max_new_tokens=300, temperature=0.7, do_sample=True)
        # Keep only the completion after the closing instruction marker.
        return result[0]['generated_text'].split("[/INST]")[-1].strip()
    except Exception as e:
        return f"⚠️ Mistral model also failed: {str(e)}"
64
 
65
  def interpret_prompt(prompt):
 
66
  if not prompt or len(prompt.strip()) < 3:
67
+ return "Please enter a prompt to analyze."
68
 
69
+ unsafe_terms = [
70
+ "suicide", "self-harm", "kill", "porn", "naked", "nude", "sexual",
71
+ "weapon", "bomb", "terrorist", "hack", "steal", "drug", "cocaine",
72
+ "heroin", "illegal", "torrent", "pirate"
73
+ ]
74
  if any(term in prompt.lower() for term in unsafe_terms):
75
+ return "⚠️ This tool doesn't support that kind of content. Try something related to school, creativity, or learning."
 
 
76
 
77
+ return "AI Response:\n\n" + call_llama(prompt)
 
 
 
 
 
 
 
 
 
 
78
 
79
+ # Theme and Interface
80
  custom_theme = gr.themes.Soft(
81
  primary_hue="indigo",
82
  secondary_hue="blue",
 
84
  radius_size=gr.themes.sizes.radius_sm,
85
  )
86
 
87
# --- Gradio UI wiring: interface, footer, and app layout ---

# Input/output widgets, named up front for readability.
prompt_box = gr.Textbox(
    lines=3,
    placeholder="Enter a prompt like 'Explain black holes to a 5-year-old'...",
    elem_id="prompt-input",
)
analysis_box = gr.Textbox(
    label="Prompt Analysis & Coaching Tips",
    elem_id="analysis-output",
)

iface = gr.Interface(
    fn=interpret_prompt,
    inputs=prompt_box,
    outputs=analysis_box,
    title="🧠 Prompt Interpretation Coach",
    description="""## What is Prompt Interpretation Coach?
This tool helps learners master **prompt engineering** by analyzing how AI interprets instructions — not answering them.

### How to use:
- Type any prompt you'd normally give to ChatGPT or similar
- The coach gives you feedback, tips, and better phrasings

Learn to prompt smarter. Try the examples or write your own!
""",
    examples=[
        "Draw a star with turtle graphics",
        "Write a poem about AI",
        "Explain photosynthesis",
        "Plan my next trip",
        "Explain algebra like I'm 10",
    ],
    theme=custom_theme,
    elem_id="prompt-coach-interface",
)

# Markdown footer rendered below the interface.
footer_html = """## ⌨️ Why Prompt Engineering Matters
Learning to craft clear, specific prompts helps you:

* In education: Get better AI help with homework
* In creativity: Guide AI more precisely
* In coding: Get more accurate AI code suggestions

**Made with ❤️ for learners and educators**
Created by Shingai Manjengwa, @tjido
"""

# Compose the final app: the interface plus the footer inside one Blocks page.
with gr.Blocks(theme=custom_theme, fill_height=True) as demo:
    iface.render()
    gr.Markdown(footer_html)

if __name__ == "__main__":
    demo.launch()