faizee07 committed on
Commit
09b4c4b
Β·
verified Β·
1 Parent(s): 2c2a5aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +140 -177
app.py CHANGED
@@ -1,161 +1,143 @@
1
- import asyncio
2
  import gradio as gr
3
- from browser_use import Agent
4
- from langchain_openai import ChatOpenAI
5
- import re
6
  import os
7
  import requests
 
8
 
9
- # Model mapping
10
- MODELS = {
11
- "Llama 3.2 3B (Fast)": "meta-llama/Llama-3.2-3B-Instruct",
12
- "Qwen 2.5 72B": "Qwen/Qwen2.5-72B-Instruct",
13
- "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
14
- "Phi-3.5 Mini": "microsoft/Phi-3.5-mini-instruct",
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  }
16
 
17
- async def generate_meme_async(query: str, model_name: str):
18
- """Generate meme using AI browser automation"""
19
-
20
- hf_token = os.environ.get("HF_TOKEN", "")
21
-
22
- if not hf_token:
23
- yield None, "❌ **Setup Required**\n\nAdd HF_TOKEN to Space Settings β†’ Repository Secrets\n\nGet token: https://huggingface.co/settings/tokens"
24
- return
25
-
26
- if not query or len(query.strip()) < 5:
27
- yield None, "❌ Please enter a detailed meme idea (minimum 5 characters)"
28
- return
29
 
30
  try:
31
- yield None, f"πŸ”„ Initializing {model_name}..."
32
-
33
- llm = ChatOpenAI(
34
- base_url="https://api-inference.huggingface.co/v1/",
35
- api_key=hf_token,
36
- model=MODELS[model_name],
37
- temperature=0.3,
38
- max_tokens=2048,
39
- timeout=180,
40
- )
41
-
42
- yield None, f"πŸ€– Using {model_name}\n🌐 Starting browser..."
43
-
44
- task = f"""Create a meme about: "{query}"
45
 
46
- Steps:
47
- 1. Go to https://imgflip.com/memegenerator
48
- 2. Select a popular meme template that fits: "{query}"
49
- 3. Click on the template
50
- 4. Add Top Text (setup) and Bottom Text (punchline) about: "{query}"
51
- 5. Click "Generate Meme"
52
- 6. Return the meme URL
53
 
54
- Make it funny and relevant to: {query}
55
- Return only the URL."""
56
 
57
- yield None, "🎨 AI creating meme...\n\n⏰ Takes 30-90 seconds"
58
-
59
- agent = Agent(
60
- task=task,
61
- llm=llm,
62
- max_actions_per_step=10,
63
- use_vision=False
64
- )
65
-
66
- history = await agent.run()
67
- result = history.final_result()
68
 
69
- yield None, "πŸ” Extracting meme URL..."
 
 
 
70
 
71
- # Find meme URL
72
- patterns = [
73
- r'https://i\.imgflip\.com/(\w+)\.jpg',
74
- r'https://imgflip\.com/i/(\w+)',
75
- r'imgflip\.com/i/(\w+)',
76
- ]
77
 
78
- meme_id = None
79
- for pattern in patterns:
80
- match = re.search(pattern, result)
81
- if match:
82
- meme_id = match.group(1)
83
- break
84
 
85
- if meme_id:
86
- meme_url = f"https://i.imgflip.com/{meme_id}.jpg"
87
-
88
- try:
89
- yield None, "πŸ“₯ Downloading meme..."
90
- response = requests.get(meme_url, timeout=10)
91
- if response.status_code == 200:
92
- temp_path = f"/tmp/meme_{meme_id}.jpg"
93
- with open(temp_path, 'wb') as f:
94
- f.write(response.content)
95
- yield temp_path, f"βœ… **Success!**\n\nGenerated with {model_name}\n\nπŸ”— {meme_url}"
96
- else:
97
- yield None, f"βœ… Meme created!\n\nπŸ”— **Link:** {meme_url}"
98
- except:
99
- yield None, f"βœ… Meme created!\n\nπŸ”— **Link:** {meme_url}"
100
- else:
101
- yield None, f"⚠️ Could not find meme URL\n\nAgent output:\n{result[:300]}..."
102
 
103
  except Exception as e:
104
- error = str(e)
105
-
106
- if "rate limit" in error.lower() or "429" in error:
107
- yield None, "❌ **Rate Limit**\n\nWait 60 seconds and try again\n\nTip: Use Llama 3.2 3B (faster)"
108
- elif "token" in error.lower() or "401" in error:
109
- yield None, "❌ **Auth Failed**\n\nCheck HF_TOKEN in Space Secrets\n\nToken needs 'read' permission"
110
- elif "timeout" in error.lower():
111
- yield None, "❌ **Timeout**\n\nTry Llama 3.2 3B or simplify your idea"
112
- else:
113
- yield None, f"❌ **Error**\n\n{error[:200]}...\n\nTry: Llama 3.2 3B or wait 30s"
114
 
115
- def generate_meme_wrapper(query: str, model_name: str):
116
- """Sync wrapper for Gradio"""
117
- try:
118
- loop = asyncio.get_event_loop()
119
- if loop.is_closed():
120
- raise RuntimeError
121
- except:
122
- loop = asyncio.new_event_loop()
123
- asyncio.set_event_loop(loop)
124
-
125
- async def run():
126
- async for result in generate_meme_async(query, model_name):
127
- yield result
 
 
 
 
 
 
 
 
 
 
128
 
129
- async def collect():
130
- async for item in run():
131
- yield item
132
-
133
- for result in loop.run_until_complete(async_list(collect())):
134
- yield result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
 
136
- async def async_list(gen):
137
- """Convert async gen to list"""
138
- items = []
139
- async for item in gen:
140
- items.append(item)
141
- yield item
142
 
143
  # Examples
144
  examples = [
145
- ["When you finally fix the bug at 3 AM", "Llama 3.2 3B (Fast)"],
146
- ["Junior dev vs Senior dev", "Llama 3.2 3B (Fast)"],
147
- ["My code in dev vs production", "Llama 3.2 3B (Fast)"],
148
- ["Trying to explain AI to parents", "Mistral 7B"],
149
- ["Me before vs after coffee", "Llama 3.2 3B (Fast)"],
150
  ]
151
 
152
  # UI
153
  with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
154
 
155
  gr.HTML("""
156
- <div style='text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 30px; border-radius: 15px; color: white; margin-bottom: 20px;'>
 
157
  <h1>πŸ₯Έ AI Meme Generator</h1>
158
- <h3>Free HuggingFace Models + Browser Automation</h3>
159
  </div>
160
  """)
161
 
@@ -163,19 +145,26 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
163
 
164
  with gr.Row():
165
  with gr.Column(scale=2):
166
- query_input = gr.Textbox(
167
  label="🎨 Meme Idea",
168
- placeholder="When the client says 'just one small change'...",
169
- lines=3
170
  )
171
 
172
- model_dropdown = gr.Dropdown(
173
- choices=list(MODELS.keys()),
174
- value="Llama 3.2 3B (Fast)",
175
- label="πŸ€– AI Model"
176
- )
 
 
 
 
 
 
 
177
 
178
- generate_btn = gr.Button("πŸš€ Generate", variant="primary", size="lg")
179
 
180
  with gr.Column(scale=1):
181
  gr.Markdown("""
@@ -183,62 +172,36 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
183
 
184
  **1. Get Token:**
185
  - [HF Tokens](https://huggingface.co/settings/tokens)
186
- - Click "New token"
187
  - Copy it
188
 
189
  **2. Add to Space:**
190
  - Settings β†’ Secrets
191
  - Name: `HF_TOKEN`
192
  - Value: (paste)
 
193
 
194
  **3. Generate!**
195
- - Describe meme
196
- - Wait 30-90s
 
197
 
198
- **Tips:**
199
- - Use Llama 3.2 3B
200
- - Be specific
201
- - Include emotion
202
  """)
203
 
204
- with gr.Row():
205
- output_image = gr.Image(label="πŸ–ΌοΈ Generated Meme", type="filepath", height=400)
206
-
207
- status_output = gr.Textbox(label="πŸ“Š Status", lines=6)
208
 
209
  gr.Examples(
210
  examples=examples,
211
- inputs=[query_input, model_dropdown],
212
- label="πŸ’‘ Examples"
213
  )
214
 
215
- gr.Markdown("""
216
- ---
217
-
218
- ### πŸ”§ Troubleshooting
219
-
220
- | Issue | Fix |
221
- |-------|-----|
222
- | Token not found | Add HF_TOKEN in Secrets |
223
- | Rate limit | Wait 60s |
224
- | Slow | Use Llama 3.2 3B |
225
- | Build error | Check Dockerfile committed |
226
-
227
- ---
228
-
229
- <div style='text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 10px; color: white;'>
230
- <p><strong>πŸ€— 100% Free & Open Source</strong></p>
231
- <p>HuggingFace β€’ Browser-Use β€’ LangChain β€’ Gradio</p>
232
- <p>⭐ Star if you like it!</p>
233
- </div>
234
- """)
235
-
236
- generate_btn.click(
237
- fn=generate_meme_wrapper,
238
- inputs=[query_input, model_dropdown],
239
- outputs=[output_image, status_output],
240
- show_progress=True
241
  )
242
 
243
  if __name__ == "__main__":
244
- demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
 
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
 
 
3
  import os
4
  import requests
5
+ import random
6
 
7
# Initialize HF client
def get_client():
    """Create a HuggingFace InferenceClient from the HF_TOKEN secret.

    Returns:
        tuple: ``(client, error)`` — exactly one element is None.
        ``(InferenceClient, None)`` when the token is configured,
        ``(None, user-facing message)`` when it is missing.
    """
    hf_token = os.environ.get("HF_TOKEN", "")
    if hf_token:
        return InferenceClient(token=hf_token), None
    return None, "❌ Add HF_TOKEN to Space Settings → Repository Secrets"
13
+
14
# Meme templates mapping
# Display name (shown in the UI dropdown) -> numeric template_id string
# expected by the ImgFlip caption_image API. Dict order defines dropdown order.
MEME_TEMPLATES = {
    "Drake": "181913649",
    "Distracted Boyfriend": "112126428",
    "Two Buttons": "87743020",
    "Expanding Brain": "93895088",
    "Change My Mind": "129242436",
    "Disaster Girl": "97984",
    "Success Kid": "61544",
    "Bad Luck Brian": "61585",
    "Philosoraptor": "61516",
    "One Does Not Simply": "61579",
}
27
 
28
def generate_meme_text(idea: str, model: str):
    """Generate top/bottom meme captions for *idea* with a chat model.

    Args:
        idea: free-text meme idea from the user.
        model: HF Hub model id (e.g. "meta-llama/Llama-3.2-3B-Instruct").

    Returns:
        tuple: ``(top_text, bottom_text, error)`` — ``error`` is None on
        success; on failure both texts are None and ``error`` is a
        user-facing message.
    """
    client, error = get_client()
    if error:
        return None, None, error

    try:
        prompt = f"""Generate meme text for: "{idea}"

Return ONLY in this exact format:
TOP: [text for top of meme]
BOTTOM: [text for bottom of meme]

Make it funny, relatable, and max 10 words each line."""

        # Stream the completion and accumulate the delta chunks.
        response = ""
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            model=model,
            max_tokens=200,
            stream=True,
        ):
            if message.choices[0].delta.content:
                response += message.choices[0].delta.content

        # Parse the TOP:/BOTTOM: lines. Strip each line first: models
        # frequently indent or pad their output, which would defeat a bare
        # startswith() check. Slice the prefix off instead of using
        # str.replace(), which would also delete "TOP:"/"BOTTOM:"
        # occurrences *inside* the caption text itself.
        top_text = ""
        bottom_text = ""
        for raw_line in response.strip().split('\n'):
            line = raw_line.strip()
            if line.startswith("TOP:"):
                top_text = line[len("TOP:"):].strip()
            elif line.startswith("BOTTOM:"):
                bottom_text = line[len("BOTTOM:"):].strip()

        if not top_text or not bottom_text:
            return None, None, "❌ Could not parse AI response. Try again."

        return top_text, bottom_text, None

    except Exception as e:
        return None, None, f"❌ Error: {str(e)}"
 
 
 
 
 
 
 
 
 
71
 
72
def create_meme(idea: str, template: str, model: str):
    """Generate a complete meme image for a user idea.

    Pipeline: AI captions via generate_meme_text(), render through the
    ImgFlip caption_image API, then download the image locally so Gradio
    can display it as a filepath.

    Args:
        idea: free-text meme idea.
        template: display name — a key of MEME_TEMPLATES.
        model: HF Hub model id forwarded to generate_meme_text().

    Returns:
        tuple: ``(image_path_or_None, status_message)`` matching the two
        Gradio outputs.
    """
    # Reject empty AND whitespace-only ideas (a bare `not idea` lets "   " through).
    if not idea or not idea.strip():
        return None, "❌ Enter a meme idea!"

    # Get AI-generated captions.
    top, bottom, error = generate_meme_text(idea, model)
    if error:
        return None, error

    # Unknown template names fall back to Drake (181913649).
    template_id = MEME_TEMPLATES.get(template, "181913649")

    # Create meme using ImgFlip API.
    # NOTE(review): demo credentials are hard-coded here; for a real
    # deployment move them to Space secrets (e.g. IMGFLIP_USER/IMGFLIP_PASS).
    url = "https://api.imgflip.com/caption_image"

    payload = {
        'template_id': template_id,
        'username': 'imgflip_huggingface',
        'password': 'imgflip_huggingface',
        'text0': top,
        'text1': bottom,
    }

    try:
        response = requests.post(url, data=payload, timeout=10)
        data = response.json()

        # .get() instead of data['success']: a malformed payload now takes
        # the explicit error branch rather than raising KeyError into the
        # generic handler below.
        if data.get('success'):
            meme_url = data['data']['url']

            # Download the image so Gradio can serve it locally.
            img_response = requests.get(meme_url, timeout=10)
            if img_response.status_code == 200:
                temp_path = f"/tmp/meme_{random.randint(1000,9999)}.jpg"
                with open(temp_path, 'wb') as f:
                    f.write(img_response.content)

                return temp_path, f"✅ Success!\n\nTop: {top}\nBottom: {bottom}\n\nURL: {meme_url}"
            # Download failed — still hand the hosted URL back to the user.
            return None, f"✅ Meme created!\n\nURL: {meme_url}"

        return None, f"❌ ImgFlip API error: {data.get('error_message', 'Unknown')}"

    except Exception as e:
        return None, f"❌ Error creating meme: {str(e)}"
118
 
119
# Models
# Display name (UI dropdown) -> HF Hub model id passed to chat_completion.
# Dict order defines dropdown order.
MODELS = {
    "Qwen 2.5 72B": "Qwen/Qwen2.5-72B-Instruct",
    "Llama 3.2 3B": "meta-llama/Llama-3.2-3B-Instruct",
    "Mistral 7B": "mistralai/Mistral-7B-Instruct-v0.3",
}
125
 
126
# Examples
# Preset rows for gr.Examples — each entry is [idea, template, model display
# name], matching the (idea, template, model) input components in order.
examples = [
    ["When you finally fix the bug at 3 AM", "Success Kid", "Llama 3.2 3B"],
    ["Trying to explain code to non-developers", "Distracted Boyfriend", "Qwen 2.5 72B"],
    ["My code vs production", "Drake", "Llama 3.2 3B"],
]
132
 
133
  # UI
134
  with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
135
 
136
  gr.HTML("""
137
+ <div style='text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
138
+ padding: 30px; border-radius: 15px; color: white; margin-bottom: 20px;'>
139
  <h1>πŸ₯Έ AI Meme Generator</h1>
140
+ <h3>Powered by HuggingFace + ImgFlip API</h3>
141
  </div>
142
  """)
143
 
 
145
 
146
  with gr.Row():
147
  with gr.Column(scale=2):
148
+ idea = gr.Textbox(
149
  label="🎨 Meme Idea",
150
+ placeholder="When you find a bug in production...",
151
+ lines=2
152
  )
153
 
154
+ with gr.Row():
155
+ template = gr.Dropdown(
156
+ choices=list(MEME_TEMPLATES.keys()),
157
+ value="Drake",
158
+ label="πŸ–ΌοΈ Template"
159
+ )
160
+
161
+ model = gr.Dropdown(
162
+ choices=list(MODELS.keys()),
163
+ value="Llama 3.2 3B",
164
+ label="πŸ€– AI Model"
165
+ )
166
 
167
+ btn = gr.Button("πŸš€ Generate Meme", variant="primary", size="lg")
168
 
169
  with gr.Column(scale=1):
170
  gr.Markdown("""
 
172
 
173
  **1. Get Token:**
174
  - [HF Tokens](https://huggingface.co/settings/tokens)
175
+ - Create "Read" token
176
  - Copy it
177
 
178
  **2. Add to Space:**
179
  - Settings β†’ Secrets
180
  - Name: `HF_TOKEN`
181
  - Value: (paste)
182
+ - Restart
183
 
184
  **3. Generate!**
185
+ - Pick template
186
+ - Enter idea
187
+ - Click generate
188
 
189
+ ⚑ **Instant results!**
 
 
 
190
  """)
191
 
192
+ output_img = gr.Image(label="Generated Meme", type="filepath")
193
+ output_status = gr.Textbox(label="Status", lines=4)
 
 
194
 
195
  gr.Examples(
196
  examples=examples,
197
+ inputs=[idea, template, model]
 
198
  )
199
 
200
+ btn.click(
201
+ fn=lambda i, t, m: create_meme(i, t, MODELS[m]),
202
+ inputs=[idea, template, model],
203
+ outputs=[output_img, output_status]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  )
205
 
206
  if __name__ == "__main__":
207
+ demo.launch()