# Source: Hugging Face Space upload of app.py (revision 8ecafd5).
import gradio as gr
import os
from groq import Groq
# Read the Groq API key from the environment and fail fast at import time,
# so the Space never starts without working credentials.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY environment variable not set")

# Shared Groq client and the model used for every optimization request.
client = Groq(api_key=GROQ_API_KEY)
MODEL = "llama-3.3-70b-versatile"
print(f"✅ Groq client initialized with model: {MODEL}")
# System prompt sent with every request. It instructs the model to reply in
# the exact "**ANALYSIS:** / **OPTIMIZED PROMPT:** / **CHANGES MADE:**"
# layout that optimize_prompt() parses; do not change the markers without
# updating the parsing code as well.
OPTIMIZER_SYSTEM_PROMPT = """You are an expert prompt engineer. Your task is to analyze and optimize user prompts to get better results from AI language models.
When given a prompt, you will:
1. Analyze its weaknesses (vagueness, missing context, unclear output format, etc.)
2. Rewrite it as an optimized version applying best practices
3. Explain what changes you made and why
## Optimization Techniques to Apply:
### 1. Clarity & Specificity
- Replace vague terms with specific ones
- Add necessary context the AI needs
- Define any ambiguous terms
### 2. Role/Persona Framing
- Add "You are a [relevant expert]..." when it would improve response quality
- Match expertise level to the task
### 3. Output Format Instructions
- Specify desired format (bullet points, paragraphs, table, JSON, etc.)
- Include length guidance if appropriate
- Request specific sections or structure
### 4. Constraints & Guardrails
- Add what to include AND what to exclude
- Specify tone (professional, casual, technical, etc.)
- Set boundaries for scope
### 5. Task Decomposition
- Break complex requests into numbered steps
- Add "Think step by step" for reasoning tasks
- Sequence multi-part requests logically
## Response Format:
You MUST respond in this exact format:
**ANALYSIS:**
[2-3 sentences identifying the main weaknesses of the original prompt]
**OPTIMIZED PROMPT:**
[The complete rewritten prompt - ready to copy and use]
**CHANGES MADE:**
- [Change 1]: [Brief explanation why]
- [Change 2]: [Brief explanation why]
- [Change 3]: [Brief explanation why]
[Continue for all significant changes]
## Important Rules:
- The optimized prompt should be self-contained (user can copy-paste it directly)
- Don't over-engineer simple prompts - match complexity to the task
- Preserve the user's original intent completely
- Keep the optimized prompt concise but complete"""
def optimize_prompt(user_prompt: str, context: str = "") -> dict:
    """Ask the Groq model to rewrite *user_prompt* as an optimized prompt.

    Args:
        user_prompt: The raw prompt the user wants improved.
        context: Optional extra information about how the prompt will be used.

    Returns:
        dict with string values under the keys "analysis",
        "optimized_prompt", "changes", and "raw_response" (the unparsed
        model output). Network/API errors propagate to the caller.
    """
    if not user_prompt.strip():
        # Nothing to optimize; return a well-formed but empty result.
        return {
            "analysis": "No prompt provided.",
            "optimized_prompt": "",
            "changes": "",
            "raw_response": ""
        }

    user_message = "Please optimize this prompt:\n\n" + user_prompt
    if context.strip():
        user_message += "\n\nAdditional context: " + context

    response = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": OPTIMIZER_SYSTEM_PROMPT},
            {"role": "user", "content": user_message}
        ],
        max_tokens=2000,
        temperature=0.7
    )
    raw_response = response.choices[0].message.content
    return _parse_optimizer_response(raw_response)


def _parse_optimizer_response(raw_response: str) -> dict:
    """Split the model's reply into the three sections requested of it.

    Expects the "**ANALYSIS:** / **OPTIMIZED PROMPT:** / **CHANGES MADE:**"
    layout demanded by OPTIMIZER_SYSTEM_PROMPT. If the markers are missing,
    falls back to returning the whole reply as the optimized prompt so no
    model output is lost.
    """
    result = {
        "analysis": "",
        "optimized_prompt": "",
        "changes": "",
        "raw_response": raw_response
    }
    if "**ANALYSIS:**" in raw_response:
        # maxsplit=1 keeps any later (stray) occurrence of a marker inside
        # the remainder instead of silently discarding the text after it.
        remainder = raw_response.split("**ANALYSIS:**", 1)[1]
        if "**OPTIMIZED PROMPT:**" in remainder:
            analysis_part, remainder = remainder.split("**OPTIMIZED PROMPT:**", 1)
            result["analysis"] = analysis_part.strip()
            if "**CHANGES MADE:**" in remainder:
                prompt_part, changes_part = remainder.split("**CHANGES MADE:**", 1)
                result["optimized_prompt"] = prompt_part.strip()
                result["changes"] = changes_part.strip()
            else:
                result["optimized_prompt"] = remainder.strip()
    if not result["optimized_prompt"]:
        # Model ignored the requested format: surface the raw text verbatim.
        result["optimized_prompt"] = raw_response
        result["analysis"] = "Could not parse structured response."
    return result
def process_optimization(prompt: str, context: str) -> tuple:
    """Gradio click handler: optimize the prompt and format the outputs.

    Returns a (analysis_markdown, optimized_prompt, changes_markdown)
    triple. On any failure the error text goes into the first slot and the
    other two are left blank.
    """
    # Guard clause: nothing to do for a blank prompt.
    if not prompt.strip():
        return ("⚠️ Please enter a prompt to optimize.", "", "")
    try:
        parsed = optimize_prompt(prompt, context)
        return (
            "### 🔍 Analysis\n\n" + parsed["analysis"],
            parsed["optimized_prompt"],
            "### 📝 Changes Made\n\n" + parsed["changes"],
        )
    except Exception as e:
        return (f"❌ Error: {str(e)}", "", "")
# [prompt, context] pairs offered as one-click examples in the UI
# (fed to gr.Examples below); context may be empty.
EXAMPLES = [
    ["write about dogs", ""],
    ["help me with my code", ""],
    ["explain quantum computing", "For a blog post aimed at beginners"],
    ["write an email to my boss", "Requesting time off next week"],
    ["create a marketing plan", "For a new mobile app startup"],
    ["summarize this article", ""],
    ["give me recipe ideas", "Vegetarian, quick weeknight meals"],
    ["help me prepare for interview", "Software engineering position at Google"],
]
# Build the Gradio UI: header, a two-column input/results row, then two
# collapsible reference accordions.
demo = gr.Blocks()

with demo:
    gr.Markdown("""
# ✨ Prompt Optimizer
Transform basic prompts into powerful, well-structured instructions that get better results from AI.
[![Powered by Groq](https://console.groq.com/powered-by-groq-dark.svg)](https://groq.com)
**How it works:** Enter your rough prompt → AI analyzes weaknesses → Returns an optimized version with explanations.
**Optimization Techniques Applied:**
- 🎯 Clarity & Specificity
- 🎭 Role/Persona Framing
- 📋 Output Format Instructions
- 🚧 Constraints & Guardrails
- 🔢 Task Decomposition
""")
    with gr.Row():
        # Left column: prompt + optional context inputs and examples.
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Input")
            prompt_input = gr.Textbox(
                label="Your Prompt",
                placeholder="Enter the prompt you want to optimize...\n\nExample: 'write about dogs'",
                lines=4
            )
            context_input = gr.Textbox(
                label="Additional Context (Optional)",
                placeholder="Any extra context about how you'll use this prompt...",
                lines=2
            )
            optimize_btn = gr.Button("✨ Optimize Prompt", variant="primary")
            gr.Markdown("### 💡 Example Prompts")
            # Clicking an example fills both textboxes.
            gr.Examples(examples=EXAMPLES, inputs=[prompt_input, context_input], label="")
        # Right column: analysis, copyable optimized prompt, change list.
        with gr.Column(scale=1):
            gr.Markdown("### 🎯 Results")
            analysis_output = gr.Markdown(label="Analysis")
            optimized_output = gr.Textbox(
                label="Optimized Prompt (Ready to Copy)",
                lines=10,
                show_copy_button=True,
                interactive=False  # output-only: users copy it, not edit it
            )
            changes_output = gr.Markdown(label="Changes Made")
    # Wire the button to the handler; outputs map 1:1 to the result tuple.
    optimize_btn.click(
        fn=process_optimization,
        inputs=[prompt_input, context_input],
        outputs=[analysis_output, optimized_output, changes_output]
    )
    with gr.Accordion("📚 Prompt Engineering Tips", open=False):
        gr.Markdown("""
### Best Practices for Writing Prompts
**1. Be Specific** - ❌ "Write about history" → ✅ "Write a 500-word overview of the causes of World War I"
**2. Define the Output Format** - ❌ "Give me some ideas" → ✅ "Give me 5 ideas as a numbered list"
**3. Set the Role/Persona** - ❌ "Explain machine learning" → ✅ "You are a CS professor. Explain ML to a first-year student"
**4. Add Constraints** - ❌ "Write a story" → ✅ "Write a 300-word short story set in Tokyo with a surprise ending"
**5. Break Down Complex Tasks** - ❌ "Analyze this data" → ✅ "1) Summarize trends. 2) Identify issues. 3) Provide recommendations."
""")
    with gr.Accordion("🔧 Technical Details", open=False):
        gr.Markdown("""
| Component | Details |
|-----------|---------|
| **LLM Backend** | Groq API |
| **Model** | Llama 3.3 70B Versatile |
| **Optimization Techniques** | 5 (Clarity, Role, Format, Constraints, Decomposition) |
""")
# Standard Hugging Face Space entry point: bind all interfaces on port 7860.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)