import gradio as gr
import os
from groq import Groq
# Read the Groq API key from the environment and fail fast at import time,
# so a misconfigured deployment surfaces a clear error instead of a 401 later.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY environment variable not set")

# Shared Groq client and the chat model used for all optimization requests.
client = Groq(api_key=GROQ_API_KEY)
MODEL = "llama-3.3-70b-versatile"
print(f"✅ Groq client initialized with model: {MODEL}")
# System prompt for the optimizer. It instructs the model to reply with the
# exact **ANALYSIS:** / **OPTIMIZED PROMPT:** / **CHANGES MADE:** markers that
# the response-parsing code in optimize_prompt() splits on — keep them in sync.
OPTIMIZER_SYSTEM_PROMPT = """You are an expert prompt engineer. Your task is to analyze and optimize user prompts to get better results from AI language models.
When given a prompt, you will:
1. Analyze its weaknesses (vagueness, missing context, unclear output format, etc.)
2. Rewrite it as an optimized version applying best practices
3. Explain what changes you made and why
## Optimization Techniques to Apply:
### 1. Clarity & Specificity
- Replace vague terms with specific ones
- Add necessary context the AI needs
- Define any ambiguous terms
### 2. Role/Persona Framing
- Add "You are a [relevant expert]..." when it would improve response quality
- Match expertise level to the task
### 3. Output Format Instructions
- Specify desired format (bullet points, paragraphs, table, JSON, etc.)
- Include length guidance if appropriate
- Request specific sections or structure
### 4. Constraints & Guardrails
- Add what to include AND what to exclude
- Specify tone (professional, casual, technical, etc.)
- Set boundaries for scope
### 5. Task Decomposition
- Break complex requests into numbered steps
- Add "Think step by step" for reasoning tasks
- Sequence multi-part requests logically
## Response Format:
You MUST respond in this exact format:
**ANALYSIS:**
[2-3 sentences identifying the main weaknesses of the original prompt]
**OPTIMIZED PROMPT:**
[The complete rewritten prompt - ready to copy and use]
**CHANGES MADE:**
- [Change 1]: [Brief explanation why]
- [Change 2]: [Brief explanation why]
- [Change 3]: [Brief explanation why]
[Continue for all significant changes]
## Important Rules:
- The optimized prompt should be self-contained (user can copy-paste it directly)
- Don't over-engineer simple prompts - match complexity to the task
- Preserve the user's original intent completely
- Keep the optimized prompt concise but complete"""
def _parse_optimizer_response(raw_response: str) -> dict:
    """Split the model's markdown reply into its labelled sections.

    The system prompt requests **ANALYSIS:** / **OPTIMIZED PROMPT:** /
    **CHANGES MADE:** markers; this extracts each part. If no optimized
    prompt can be recovered, the raw text is returned as the prompt with
    a note in the analysis field so the user still gets usable output.

    Args:
        raw_response: The full text returned by the model.

    Returns:
        A dict with keys ``analysis``, ``optimized_prompt``, ``changes``
        and ``raw_response`` (all strings, possibly empty).
    """
    result = {
        "analysis": "",
        "optimized_prompt": "",
        "changes": "",
        "raw_response": raw_response,
    }
    analysis_marker = "**ANALYSIS:**"
    prompt_marker = "**OPTIMIZED PROMPT:**"
    changes_marker = "**CHANGES MADE:**"
    if analysis_marker in raw_response:
        # The membership test guarantees split() yields at least two parts,
        # so the original `len(parts) > 1` re-check was redundant.
        remainder = raw_response.split(analysis_marker, 1)[1]
        if prompt_marker in remainder:
            analysis_part, remainder = remainder.split(prompt_marker, 1)
            result["analysis"] = analysis_part.strip()
            if changes_marker in remainder:
                prompt_part, changes_part = remainder.split(changes_marker, 1)
                result["optimized_prompt"] = prompt_part.strip()
                result["changes"] = changes_part.strip()
            else:
                # Model omitted the changes section; everything after the
                # prompt marker is treated as the optimized prompt.
                result["optimized_prompt"] = remainder.strip()
    if not result["optimized_prompt"]:
        # Fallback for unstructured replies: hand back the raw response.
        result["optimized_prompt"] = raw_response
        result["analysis"] = "Could not parse structured response."
    return result


def optimize_prompt(user_prompt: str, context: str = "") -> dict:
    """Send *user_prompt* to the LLM and return the parsed optimization.

    Args:
        user_prompt: The prompt the user wants improved.
        context: Optional extra information about how the prompt is used.

    Returns:
        A dict with keys ``analysis``, ``optimized_prompt``, ``changes``
        and ``raw_response``.

    Raises:
        Whatever the Groq client raises on API failure (the UI-facing
        caller, process_optimization, handles these).
    """
    if not user_prompt.strip():
        # Nothing to optimize; return an empty, well-formed result.
        return {
            "analysis": "No prompt provided.",
            "optimized_prompt": "",
            "changes": "",
            "raw_response": "",
        }
    user_message = "Please optimize this prompt:\n\n" + user_prompt
    if context.strip():
        user_message += "\n\nAdditional context: " + context
    response = client.chat.completions.create(
        model=MODEL,
        messages=[
            {"role": "system", "content": OPTIMIZER_SYSTEM_PROMPT},
            {"role": "user", "content": user_message},
        ],
        max_tokens=2000,
        temperature=0.7,
    )
    return _parse_optimizer_response(response.choices[0].message.content)
def process_optimization(prompt: str, context: str) -> tuple:
    """Run the optimizer and format the three Gradio output values.

    Returns an (analysis_markdown, optimized_prompt, changes_markdown)
    tuple; on blank input or any optimizer failure the first element
    carries a user-facing message and the other two are empty strings.
    """
    if not prompt.strip():
        return ("⚠️ Please enter a prompt to optimize.", "", "")
    try:
        result = optimize_prompt(prompt, context)
        return (
            "### 🔍 Analysis\n\n" + result["analysis"],
            result["optimized_prompt"],
            "### 📝 Changes Made\n\n" + result["changes"],
        )
    except Exception as e:
        # Surface the failure in the analysis pane rather than crashing the UI.
        return (f"❌ Error: {str(e)}", "", "")
# (prompt, context) pairs shown as one-click examples in the UI;
# each inner list matches the [prompt_input, context_input] components.
EXAMPLES = [
    ["write about dogs", ""],
    ["help me with my code", ""],
    ["explain quantum computing", "For a blog post aimed at beginners"],
    ["write an email to my boss", "Requesting time off next week"],
    ["create a marketing plan", "For a new mobile app startup"],
    ["summarize this article", ""],
    ["give me recipe ideas", "Vegetarian, quick weeknight meals"],
    ["help me prepare for interview", "Software engineering position at Google"],
]
# --- Gradio UI ------------------------------------------------------------
demo = gr.Blocks()
with demo:
    # Header / feature overview.
    gr.Markdown("""
# ✨ Prompt Optimizer
Transform basic prompts into powerful, well-structured instructions that get better results from AI.
[](https://groq.com)
**How it works:** Enter your rough prompt → AI analyzes weaknesses → Returns an optimized version with explanations.
**Optimization Techniques Applied:**
- 🎯 Clarity & Specificity
- 🎭 Role/Persona Framing
- 📋 Output Format Instructions
- 🚧 Constraints & Guardrails
- 🔢 Task Decomposition
""")
    with gr.Row():
        # Left column: prompt + optional context input.
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Input")
            prompt_input = gr.Textbox(
                label="Your Prompt",
                placeholder="Enter the prompt you want to optimize...\n\nExample: 'write about dogs'",
                lines=4
            )
            context_input = gr.Textbox(
                label="Additional Context (Optional)",
                placeholder="Any extra context about how you'll use this prompt...",
                lines=2
            )
            optimize_btn = gr.Button("✨ Optimize Prompt", variant="primary")
            gr.Markdown("### 💡 Example Prompts")
            gr.Examples(examples=EXAMPLES, inputs=[prompt_input, context_input], label="")
        # Right column: the three result panes.
        with gr.Column(scale=1):
            gr.Markdown("### 🎯 Results")
            analysis_output = gr.Markdown(label="Analysis")
            optimized_output = gr.Textbox(
                label="Optimized Prompt (Ready to Copy)",
                lines=10,
                show_copy_button=True,
                interactive=False
            )
            changes_output = gr.Markdown(label="Changes Made")
    # Wire the button to the optimization pipeline.
    optimize_btn.click(
        fn=process_optimization,
        inputs=[prompt_input, context_input],
        outputs=[analysis_output, optimized_output, changes_output]
    )
    with gr.Accordion("📚 Prompt Engineering Tips", open=False):
        gr.Markdown("""
### Best Practices for Writing Prompts
**1. Be Specific** - ❌ "Write about history" → ✅ "Write a 500-word overview of the causes of World War I"
**2. Define the Output Format** - ❌ "Give me some ideas" → ✅ "Give me 5 ideas as a numbered list"
**3. Set the Role/Persona** - ❌ "Explain machine learning" → ✅ "You are a CS professor. Explain ML to a first-year student"
**4. Add Constraints** - ❌ "Write a story" → ✅ "Write a 300-word short story set in Tokyo with a surprise ending"
**5. Break Down Complex Tasks** - ❌ "Analyze this data" → ✅ "1) Summarize trends. 2) Identify issues. 3) Provide recommendations."
""")
    with gr.Accordion("🔧 Technical Details", open=False):
        gr.Markdown("""
| Component | Details |
|-----------|---------|
| **LLM Backend** | Groq API |
| **Model** | Llama 3.3 70B Versatile |
| **Optimization Techniques** | 5 (Clarity, Role, Format, Constraints, Decomposition) |
""")

if __name__ == "__main__":
    # 0.0.0.0:7860 is the standard host/port binding for Hugging Face Spaces.
    demo.launch(server_name="0.0.0.0", server_port=7860)