faizee07 commited on
Commit
cd816ec
·
verified ·
1 Parent(s): 1c0373a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +154 -149
app.py CHANGED
@@ -2,18 +2,15 @@ import asyncio
2
  import gradio as gr
3
  from browser_use import Agent
4
  from langchain_openai import ChatOpenAI
5
- from langchain_huggingface import HuggingFaceEndpoint
6
- from langchain_community.llms import HuggingFaceHub
7
  import re
8
  import os
9
  import subprocess
10
  import sys
11
 
12
- # Install Playwright on startup
13
  def install_playwright():
14
  try:
15
  subprocess.run([sys.executable, "-m", "playwright", "install", "chromium"], check=True)
16
- subprocess.run([sys.executable, "-m", "playwright", "install-deps", "chromium"], check=True)
17
  print("βœ… Playwright installed successfully")
18
  except Exception as e:
19
  print(f"⚠️ Playwright installation warning: {e}")
@@ -23,53 +20,12 @@ install_playwright()
23
 
24
  # Model mapping for HuggingFace Inference API
25
  MODELS = {
26
- "Qwen 2.5 72B (Recommended)": "Qwen/Qwen2.5-72B-Instruct",
27
- "Meta Llama 3.1 70B": "meta-llama/Meta-Llama-3.1-70B-Instruct",
28
- "Llama 3.2 3B (Fast)": "meta-llama/Llama-3.2-3B-Instruct",
29
  "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
30
- "Phi-3 Mini": "microsoft/Phi-3-mini-4k-instruct",
31
  }
32
 
33
- def get_llm(model_name: str, hf_token: str):
34
- """Create LLM instance with proper configuration"""
35
- try:
36
- # Method 1: Try OpenAI-compatible endpoint
37
- llm = ChatOpenAI(
38
- base_url="https://api-inference.huggingface.co/v1/",
39
- api_key=hf_token,
40
- model=MODELS[model_name],
41
- temperature=0.3,
42
- max_tokens=2048,
43
- timeout=120,
44
- )
45
- return llm, "openai"
46
- except Exception as e:
47
- print(f"OpenAI endpoint failed: {e}")
48
-
49
- try:
50
- # Method 2: Try HuggingFace native endpoint
51
- llm = HuggingFaceEndpoint(
52
- repo_id=MODELS[model_name],
53
- huggingfacehub_api_token=hf_token,
54
- temperature=0.3,
55
- max_new_tokens=2048,
56
- timeout=120,
57
- )
58
- return llm, "hf_endpoint"
59
- except Exception as e2:
60
- print(f"HF Endpoint failed: {e2}")
61
-
62
- # Method 3: Fallback to HuggingFaceHub
63
- llm = HuggingFaceHub(
64
- repo_id=MODELS[model_name],
65
- huggingfacehub_api_token=hf_token,
66
- model_kwargs={
67
- "temperature": 0.3,
68
- "max_new_tokens": 2048,
69
- }
70
- )
71
- return llm, "hf_hub"
72
-
73
  async def generate_meme_async(query: str, model_name: str):
74
  """Async function to generate meme"""
75
 
@@ -77,48 +33,62 @@ async def generate_meme_async(query: str, model_name: str):
77
  hf_token = os.environ.get("HF_TOKEN", "")
78
 
79
  if not hf_token:
80
- return None, "❌ Error: HuggingFace token not found!\n\nPlease set HF_TOKEN in Space Settings β†’ Repository Secrets.\nGet your token from: https://huggingface.co/settings/tokens"
 
81
 
82
- if not query:
83
- return None, "❌ Error: Please enter a meme idea!"
 
84
 
85
  try:
86
  yield None, f"πŸ”„ Initializing {model_name}..."
87
 
88
- # Get LLM with fallback methods
89
- llm, method = get_llm(model_name, hf_token)
 
 
 
 
 
 
 
90
 
91
- yield None, f"πŸ€– Using {model_name} via {method}\n🌐 Browsing ImgFlip..."
92
 
93
- task_description = f"""You are a meme generator expert. Your task: Create a meme about "{query}"
94
 
95
- Follow these exact steps:
96
- 1. Go to https://imgflip.com/memegenerator
97
- 2. Browse popular meme templates or search for relevant template
98
- 3. Click on a template that fits the theme of: "{query}"
99
- 4. Add appropriate Top Text and Bottom Text related to: "{query}"
100
- 5. Click "Generate Meme" button
101
- 6. Wait for the meme to be created
102
- 7. Copy the final meme URL
 
 
 
103
 
104
- The meme should be funny and relatable to: {query}
 
 
 
105
 
106
- Return ONLY the final imgflip meme URL (format: https://imgflip.com/i/xxxxx or https://i.imgflip.com/xxxxx.jpg)"""
107
 
108
- yield None, "🎨 Generating meme... This may take 30-90 seconds..."
109
 
110
  agent = Agent(
111
  task=task_description,
112
  llm=llm,
113
- max_actions_per_step=5,
114
- max_failures=30,
115
  use_vision=False
116
  )
117
 
118
  history = await agent.run()
119
  final_result = history.final_result()
120
 
121
- yield None, "πŸ” Extracting meme URL..."
122
 
123
  # Extract meme URL with multiple patterns
124
  patterns = [
@@ -141,55 +111,56 @@ Return ONLY the final imgflip meme URL (format: https://imgflip.com/i/xxxxx or h
141
  # Download the image to display
142
  import requests
143
  try:
 
144
  response = requests.get(meme_url, timeout=10)
145
  if response.status_code == 200:
146
  temp_path = f"/tmp/meme_{meme_id}.jpg"
147
  with open(temp_path, 'wb') as f:
148
  f.write(response.content)
149
- yield temp_path, f"βœ… Success! Meme generated using {model_name}\n\nURL: {meme_url}"
150
  else:
151
- yield None, f"βœ… Meme created but couldn't download.\n\nDirect link: {meme_url}"
152
- except:
153
- yield None, f"βœ… Meme URL: {meme_url}\n\n(Right-click to open)"
154
  else:
155
- yield None, f"⚠️ Meme generation completed but URL not found.\n\nAgent output:\n{final_result[:500]}\n\nTry again or use a different model."
156
 
157
  except Exception as e:
158
  error_msg = str(e)
159
 
160
- if "rate limit" in error_msg.lower():
161
- yield None, "❌ Rate limit exceeded.\n\n⏰ Please wait 60 seconds and try again.\n\nTip: HuggingFace free tier has rate limits."
162
  elif "token" in error_msg.lower() or "unauthorized" in error_msg.lower() or "401" in error_msg:
163
- yield None, "❌ Authentication failed.\n\nPlease check:\n1. HF_TOKEN is set in Space Settings β†’ Repository Secrets\n2. Token has 'read' permission\n3. Token is valid\n\nGet token: https://huggingface.co/settings/tokens"
164
  elif "model" in error_msg.lower() or "404" in error_msg:
165
- yield None, f"❌ Model '{model_name}' not accessible.\n\nTry:\n1. Use 'Llama 3.2 3B (Fast)' - most reliable\n2. Wait a moment and retry\n3. Check model status on HuggingFace"
166
  elif "timeout" in error_msg.lower():
167
- yield None, "❌ Request timeout.\n\nThe model took too long to respond.\n\nTry:\n1. Use a faster model (Llama 3.2 3B)\n2. Simplify your meme idea\n3. Try again in a moment"
168
  else:
169
- yield None, f"❌ Error: {error_msg[:300]}\n\nTroubleshooting:\n1. Check HF_TOKEN in Space secrets\n2. Try 'Llama 3.2 3B (Fast)' model\n3. Wait 30 seconds and retry\n4. Simplify your meme description"
170
 
171
- def generate_meme(query: str, model_name: str):
172
- """Wrapper function for Gradio with streaming"""
173
- generator = generate_meme_async(query, model_name)
174
-
175
- # Create event loop if needed
176
  try:
177
  loop = asyncio.get_event_loop()
 
 
178
  except RuntimeError:
179
  loop = asyncio.new_event_loop()
180
  asyncio.set_event_loop(loop)
181
 
182
- # Run async generator
183
- async def run():
184
- async for image, status in generator:
185
- yield image, status
186
 
187
- # Execute and yield results
188
- for result in loop.run_until_complete(collect_results(run())):
189
  yield result
190
 
191
- async def collect_results(async_gen):
192
- """Collect async generator results"""
193
  results = []
194
  async for item in async_gen:
195
  results.append(item)
@@ -197,24 +168,50 @@ async def collect_results(async_gen):
197
 
198
  # Example meme ideas
199
  examples = [
200
- ["When you finally fix the bug at 3 AM", "Llama 3.2 3B (Fast)"],
201
- ["Junior dev vs Senior dev looking at the same error", "Llama 3.2 3B (Fast)"],
202
- ["My code in development vs in production", "Qwen 2.5 72B (Recommended)"],
203
- ["Trying to explain AI to my parents", "Llama 3.2 3B (Fast)"],
204
- ["Me before coffee vs after coffee", "Mistral 7B"],
205
- ["Client: Can you add one small feature? The feature:", "Llama 3.2 3B (Fast)"],
 
 
206
  ]
207
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  # Create Gradio interface
209
- with gr.Blocks(theme=gr.themes.Soft(), title="πŸ₯Έ AI Meme Generator") as demo:
210
 
211
- gr.Markdown("""
212
- # πŸ₯Έ AI Meme Generator
213
- ### Powered by Free HuggingFace Models + Browser Automation πŸ€—
214
-
215
- This AI agent automatically browses ImgFlip and creates custom memes based on your description!
 
 
216
 
217
- ⚠️ **First time setup:** Add your HuggingFace token to Space Settings β†’ Repository Secrets β†’ Name: `HF_TOKEN`
 
218
  """)
219
 
220
  with gr.Row():
@@ -223,14 +220,14 @@ with gr.Blocks(theme=gr.themes.Soft(), title="πŸ₯Έ AI Meme Generator") as demo:
223
  label="🎨 Describe Your Meme Idea",
224
  placeholder="Example: When the client says 'just one small change'...",
225
  lines=3,
226
- info="Be specific! Describe the situation, emotion, or comparison you want to meme-ify"
227
  )
228
 
229
  model_dropdown = gr.Dropdown(
230
  choices=list(MODELS.keys()),
231
- value="Llama 3.2 3B (Fast)",
232
  label="πŸ€– Select AI Model (All Free!)",
233
- info="Llama 3.2 3B is fastest and most reliable for free tier"
234
  )
235
 
236
  with gr.Row():
@@ -239,81 +236,89 @@ with gr.Blocks(theme=gr.themes.Soft(), title="πŸ₯Έ AI Meme Generator") as demo:
239
 
240
  with gr.Column(scale=1):
241
  gr.Markdown("""
242
- ### πŸ“– Quick Setup Guide
243
 
244
- **First Time Setup:**
245
- 1. Get FREE token: [HuggingFace Tokens](https://huggingface.co/settings/tokens)
246
- 2. Copy the token (starts with `hf_...`)
247
- 3. Go to this Space's **Settings** β†’ **Repository Secrets**
248
- 4. Add secret: Name=`HF_TOKEN`, Value=(your token)
249
- 5. Restart the Space
250
 
251
- **Usage Tips:**
252
- βœ… Use "Llama 3.2 3B (Fast)" for best free tier results
253
- βœ… Be specific: "Boss walks in while I'm on YouTube"
254
- βœ… Include emotions: happy, shocked, confused, etc.
255
- βœ… Generation takes 30-90 seconds - be patient!
256
 
257
- **Popular Formats:**
258
- - Before/After situations
259
- - Expectation vs Reality
260
- - Two-panel comparisons
261
- - Relatable struggles
262
 
263
- ⏰ **Please wait 30-90 seconds per generation**
 
 
 
 
264
  """)
265
 
266
  with gr.Row():
267
- output_image = gr.Image(label="πŸ–ΌοΈ Generated Meme", type="filepath", height=400)
268
 
269
- status_output = gr.Textbox(label="πŸ“Š Status & Progress", lines=4, max_lines=8)
270
 
271
- gr.Markdown("""
272
- ### πŸ’‘ Example Meme Ideas (Click to try!)
273
- """)
274
 
275
  gr.Examples(
276
  examples=examples,
277
  inputs=[query_input, model_dropdown],
278
  label="Click any example to auto-fill",
279
- examples_per_page=6
280
  )
281
 
282
  gr.Markdown("""
283
  ---
284
 
285
- ### πŸ”§ Troubleshooting
 
 
 
 
 
 
 
 
 
 
 
286
 
287
- | Issue | Solution |
288
- |-------|----------|
289
- | ❌ Token not found | Add `HF_TOKEN` to Space Settings β†’ Repository Secrets |
290
- | ❌ Rate limit | Wait 60 seconds (free tier limits) |
291
- | ❌ Model error | Switch to "Llama 3.2 3B (Fast)" |
292
- | ⏰ Slow generation | Normal! Browser automation takes 30-90 sec |
293
- | πŸ–ΌοΈ No image | Try again or check the direct URL in status |
294
 
295
  ---
296
 
297
  <div style='text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 10px; color: white;'>
298
  <p><strong>πŸ€— 100% Free & Open Source</strong></p>
299
- <p>Powered by: HuggingFace Inference API β€’ Browser-Use β€’ LangChain β€’ Gradio</p>
300
- <p>⭐ Star on GitHub if you like this project!</p>
301
- <p><small>⚠️ Free tier has rate limits. Please be patient between generations.</small></p>
302
  </div>
303
  """)
304
 
305
- # Connect button to function
306
  generate_btn.click(
307
- fn=generate_meme,
308
  inputs=[query_input, model_dropdown],
309
- outputs=[output_image, status_output]
 
310
  )
311
 
312
- # Launch the app
313
  if __name__ == "__main__":
314
  demo.launch(
315
  server_name="0.0.0.0",
316
  server_port=7860,
317
- share=False,
318
  show_error=True
319
  )
 
2
  import gradio as gr
3
  from browser_use import Agent
4
  from langchain_openai import ChatOpenAI
 
 
5
  import re
6
  import os
7
  import subprocess
8
  import sys
9
 
10
# Install Playwright on startup (for non-Docker environments)
def install_playwright():
    """Best-effort install of the Chromium browser needed by browser-use."""
    cmd = [sys.executable, "-m", "playwright", "install", "chromium"]
    try:
        subprocess.run(cmd, check=True)
    except Exception as e:
        # Deliberately non-fatal: a failed install is reported as a
        # warning so the app can still start (e.g. when the browser is
        # already present in the image).
        print(f"⚠️ Playwright installation warning: {e}")
    else:
        print("βœ… Playwright installed successfully")
 
20
 
21
# Model mapping for HuggingFace Inference API.
# Keys are the human-readable dropdown labels; values are HF repo ids.
# NOTE: insertion order matters — `list(MODELS.keys())` defines the
# dropdown choice order.
MODELS: dict[str, str] = {
    "Llama 3.2 3B (Fast & Reliable)": "meta-llama/Llama-3.2-3B-Instruct",
    "Qwen 2.5 72B (Most Powerful)": "Qwen/Qwen2.5-72B-Instruct",
    "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
    "Phi-3.5 Mini": "microsoft/Phi-3.5-mini-instruct",
}
28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  async def generate_meme_async(query: str, model_name: str):
30
  """Async function to generate meme"""
31
 
 
33
  hf_token = os.environ.get("HF_TOKEN", "")
34
 
35
  if not hf_token:
36
+ yield None, "❌ Error: HuggingFace token not found!\n\n**Setup Required:**\n1. Go to Space Settings β†’ Repository Secrets\n2. Add secret: Name=`HF_TOKEN`, Value=(your token)\n3. Get token from: https://huggingface.co/settings/tokens\n4. Restart the Space"
37
+ return
38
 
39
+ if not query or len(query.strip()) < 5:
40
+ yield None, "❌ Error: Please enter a more detailed meme idea (at least 5 characters)!"
41
+ return
42
 
43
  try:
44
  yield None, f"πŸ”„ Initializing {model_name}..."
45
 
46
+ # Create LLM with OpenAI-compatible endpoint
47
+ llm = ChatOpenAI(
48
+ base_url="https://api-inference.huggingface.co/v1/",
49
+ api_key=hf_token,
50
+ model=MODELS[model_name],
51
+ temperature=0.3,
52
+ max_tokens=2048,
53
+ timeout=180,
54
+ )
55
 
56
+ yield None, f"πŸ€– Using {model_name}\n🌐 Starting browser automation..."
57
 
58
+ task_description = f"""You are an expert meme creator. Create a funny meme about: "{query}"
59
 
60
+ **Your task:**
61
+ 1. Navigate to https://imgflip.com/memegenerator
62
+ 2. Look at the popular meme templates on the page
63
+ 3. Select a template that best fits the theme: "{query}"
64
+ 4. Click on the selected template
65
+ 5. In the text boxes, add:
66
+ - Top Text: Setup/first part related to "{query}"
67
+ - Bottom Text: Punchline/second part related to "{query}"
68
+ 6. Click "Generate Meme" button
69
+ 7. Wait for generation to complete
70
+ 8. Copy the final meme URL
71
 
72
+ **Important:**
73
+ - Make the text funny and relevant to: {query}
74
+ - Keep text concise (max 10 words per box)
75
+ - Return ONLY the final meme URL
76
 
77
+ Expected URL format: https://imgflip.com/i/xxxxx or https://i.imgflip.com/xxxxx.jpg"""
78
 
79
+ yield None, "🎨 AI is creating your meme...\n\n⏰ This takes 30-90 seconds\nπŸ“ The AI is browsing ImgFlip, selecting template, and adding text..."
80
 
81
  agent = Agent(
82
  task=task_description,
83
  llm=llm,
84
+ max_actions_per_step=10,
 
85
  use_vision=False
86
  )
87
 
88
  history = await agent.run()
89
  final_result = history.final_result()
90
 
91
+ yield None, "πŸ” Extracting meme URL from results..."
92
 
93
  # Extract meme URL with multiple patterns
94
  patterns = [
 
111
  # Download the image to display
112
  import requests
113
  try:
114
+ yield None, "πŸ“₯ Downloading your meme..."
115
  response = requests.get(meme_url, timeout=10)
116
  if response.status_code == 200:
117
  temp_path = f"/tmp/meme_{meme_id}.jpg"
118
  with open(temp_path, 'wb') as f:
119
  f.write(response.content)
120
+ yield temp_path, f"βœ… **Success!** Meme generated using {model_name}\n\nπŸ”— **Direct Link:** {meme_url}\n\nπŸ’‘ **Tip:** Right-click image to save or share!"
121
  else:
122
+ yield None, f"βœ… Meme created successfully!\n\nπŸ”— **Direct Link:** {meme_url}\n\n(Click link to view)"
123
+ except Exception as download_error:
124
+ yield None, f"βœ… Meme created!\n\nπŸ”— **Direct Link:** {meme_url}\n\n(Right-click to open in new tab)\n\nDownload error: {str(download_error)}"
125
  else:
126
+ yield None, f"⚠️ Meme generation completed but URL not found in response.\n\n**Agent Output:**\n{final_result[:500]}...\n\n**Suggestions:**\n1. Try again with a simpler meme idea\n2. Use a different model\n3. Wait a moment and retry"
127
 
128
  except Exception as e:
129
  error_msg = str(e)
130
 
131
+ if "rate limit" in error_msg.lower() or "429" in error_msg:
132
+ yield None, "❌ **Rate Limit Exceeded**\n\n⏰ HuggingFace free tier limit reached.\n\n**Solutions:**\n1. Wait 60 seconds and try again\n2. Try a smaller model (Llama 3.2 3B)\n3. Use your own HF Pro account for higher limits"
133
  elif "token" in error_msg.lower() or "unauthorized" in error_msg.lower() or "401" in error_msg:
134
+ yield None, "❌ **Authentication Failed**\n\n**Checklist:**\n1. Is HF_TOKEN set in Space Settings β†’ Repository Secrets?\n2. Does your token have 'read' permission?\n3. Is the token still valid?\n\nπŸ”— Get/check token: https://huggingface.co/settings/tokens"
135
  elif "model" in error_msg.lower() or "404" in error_msg:
136
+ yield None, f"❌ **Model Not Accessible**\n\nThe selected model '{model_name}' is not available.\n\n**Try:**\n1. Switch to 'Llama 3.2 3B (Fast & Reliable)'\n2. Wait a moment for model to warm up\n3. Check model status on HuggingFace"
137
  elif "timeout" in error_msg.lower():
138
+ yield None, "❌ **Request Timeout**\n\nThe AI took too long to respond (>180s).\n\n**Solutions:**\n1. Try 'Llama 3.2 3B (Fast & Reliable)' - it's faster\n2. Simplify your meme description\n3. Wait 30 seconds and try again"
139
  else:
140
+ yield None, f"❌ **Error Occurred**\n\n```\n{error_msg[:400]}\n```\n\n**Troubleshooting Steps:**\n1. Verify HF_TOKEN is set correctly in Space secrets\n2. Try 'Llama 3.2 3B (Fast & Reliable)' model\n3. Wait 30 seconds between attempts\n4. Simplify your meme description\n5. Check Space logs for more details"
141
 
142
+ def generate_meme_wrapper(query: str, model_name: str):
143
+ """Synchronous wrapper for Gradio"""
144
+ # Create or get event loop
 
 
145
  try:
146
  loop = asyncio.get_event_loop()
147
+ if loop.is_closed():
148
+ raise RuntimeError("Event loop is closed")
149
  except RuntimeError:
150
  loop = asyncio.new_event_loop()
151
  asyncio.set_event_loop(loop)
152
 
153
+ # Run the async generator
154
+ async def run_generator():
155
+ async for result in generate_meme_async(query, model_name):
156
+ yield result
157
 
158
+ # Execute generator
159
+ for result in loop.run_until_complete(async_to_sync(run_generator())):
160
  yield result
161
 
162
async def async_to_sync(async_gen):
    """Collect every item from *async_gen* and return them as a list.

    The caller awaits this coroutine via ``run_until_complete`` and then
    iterates the returned list, so the collected items MUST be returned
    (the visible original built the list but the return was missing).
    """
    return [item async for item in async_gen]
 
168
 
169
# Example meme ideas shown in the gr.Examples gallery.
# Each entry is [meme idea, model dropdown label].
_FAST = "Llama 3.2 3B (Fast & Reliable)"
examples = [
    ["When you finally fix the bug at 3 AM", _FAST],
    ["Junior dev vs Senior dev looking at the same error", _FAST],
    ["My code in development vs in production", _FAST],
    ["Trying to explain AI to my parents", "Mistral 7B"],
    ["Me before coffee vs after coffee", _FAST],
    ["Client: Can you add one small feature? The feature:", "Qwen 2.5 72B (Most Powerful)"],
    ["When the code works on first try", _FAST],
    ["Developer debugging at 2 AM", _FAST],
]
180
 
181
# Custom CSS injected into the Blocks layout via gr.Blocks(css=custom_css).
# .header-box styles the gr.HTML banner; .status-box is available for the
# status/progress area.
custom_css = """
.gradio-container {
    max-width: 1200px !important;
}
.header-box {
    text-align: center;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 30px;
    border-radius: 15px;
    color: white;
    margin-bottom: 20px;
}
.status-box {
    background: #f0f7ff;
    padding: 15px;
    border-radius: 10px;
    border-left: 4px solid #667eea;
}
"""
201
+
202
  # Create Gradio interface
203
+ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="πŸ₯Έ AI Meme Generator") as demo:
204
 
205
+ gr.HTML("""
206
+ <div class="header-box">
207
+ <h1>πŸ₯Έ AI Meme Generator</h1>
208
+ <h3>Powered by Free HuggingFace Models + Browser Automation πŸ€—</h3>
209
+ <p>This AI agent automatically browses ImgFlip and creates custom memes!</p>
210
+ </div>
211
+ """)
212
 
213
+ gr.Markdown("""
214
+ ⚠️ **First Time Setup:** Add `HF_TOKEN` to Space Settings β†’ Repository Secrets ([Get Token](https://huggingface.co/settings/tokens))
215
  """)
216
 
217
  with gr.Row():
 
220
  label="🎨 Describe Your Meme Idea",
221
  placeholder="Example: When the client says 'just one small change'...",
222
  lines=3,
223
+ info="Be specific! Describe the situation, emotion, or comparison"
224
  )
225
 
226
  model_dropdown = gr.Dropdown(
227
  choices=list(MODELS.keys()),
228
+ value="Llama 3.2 3B (Fast & Reliable)",
229
  label="πŸ€– Select AI Model (All Free!)",
230
+ info="Llama 3.2 3B recommended for speed and reliability"
231
  )
232
 
233
  with gr.Row():
 
236
 
237
  with gr.Column(scale=1):
238
  gr.Markdown("""
239
+ ### πŸ“– Quick Setup
240
 
241
+ **1. Get HuggingFace Token:**
242
+ - Go to [HF Tokens](https://huggingface.co/settings/tokens)
243
+ - Click "New token"
244
+ - Copy the token
 
 
245
 
246
+ **2. Add to Space:**
247
+ - Settings β†’ Repository Secrets
248
+ - Name: `HF_TOKEN`
249
+ - Value: (paste token)
250
+ - Save & Restart
251
 
252
+ **3. Generate Memes!**
253
+ - Enter your idea
254
+ - Click generate
255
+ - Wait 30-90 seconds
 
256
 
257
+ **πŸ’‘ Pro Tips:**
258
+ - Use "Llama 3.2 3B" for best results
259
+ - Be specific and descriptive
260
+ - Include emotions/scenarios
261
+ - Be patient (browser automation takes time!)
262
  """)
263
 
264
  with gr.Row():
265
+ output_image = gr.Image(label="πŸ–ΌοΈ Your Generated Meme", type="filepath", height=400)
266
 
267
+ status_output = gr.Textbox(label="πŸ“Š Status & Progress", lines=6, max_lines=10)
268
 
269
+ gr.Markdown("### πŸ’‘ Example Ideas (Click to Try!)")
 
 
270
 
271
  gr.Examples(
272
  examples=examples,
273
  inputs=[query_input, model_dropdown],
274
  label="Click any example to auto-fill",
275
+ examples_per_page=8
276
  )
277
 
278
  gr.Markdown("""
279
  ---
280
 
281
+ ### πŸ”§ Troubleshooting Guide
282
+
283
+ | Problem | Solution |
284
+ |---------|----------|
285
+ | ❌ Token not found | Add `HF_TOKEN` in Space Settings β†’ Repository Secrets |
286
+ | ❌ Rate limit error | Wait 60 seconds (free tier limits apply) |
287
+ | ❌ Model not working | Switch to "Llama 3.2 3B (Fast & Reliable)" |
288
+ | ⏰ Taking too long | Normal! Browser automation needs 30-90 seconds |
289
+ | πŸ–ΌοΈ No image shown | Check the direct URL in status message |
290
+ | πŸ”΄ Build failed | Make sure Dockerfile is present in repository |
291
+
292
+ ### πŸš€ How It Works
293
 
294
+ 1. **You describe** what meme you want
295
+ 2. **AI browses** ImgFlip automatically
296
+ 3. **Selects template** that fits your idea
297
+ 4. **Adds funny text** based on your description
298
+ 5. **Generates & downloads** your custom meme!
 
 
299
 
300
  ---
301
 
302
  <div style='text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 10px; color: white;'>
303
  <p><strong>πŸ€— 100% Free & Open Source</strong></p>
304
+ <p>Tech Stack: HuggingFace β€’ Browser-Use β€’ LangChain β€’ Gradio β€’ Playwright</p>
305
+ <p>⭐ Star this Space if you like it!</p>
306
+ <p><small>Free tier has rate limits - please be patient between generations</small></p>
307
  </div>
308
  """)
309
 
310
+ # Event handler
311
  generate_btn.click(
312
+ fn=generate_meme_wrapper,
313
  inputs=[query_input, model_dropdown],
314
+ outputs=[output_image, status_output],
315
+ show_progress=True
316
  )
317
 
318
# Launch the Gradio app when executed as a script.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)