faizee07 commited on
Commit
6fe5e5e
·
verified ·
1 Parent(s): a2ea47b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -85
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
  import os
4
  import requests
5
  import random
@@ -9,78 +9,92 @@ import re
9
 
10
  def get_client():
11
  """Get Hugging Face client, handle token error."""
12
- token = os.environ.get("HF_TOKEN", "")
 
 
 
 
 
 
13
  if not token:
14
  return None, "❌ **HuggingFace Token Required**\n\n**Setup:**\n1. Go to Space Settings → Repository Secrets\n2. Add secret: Name=`HF_TOKEN`, Value=(your token)\n3. Get token: https://huggingface.co/settings/tokens\n4. Restart Space"
15
  return InferenceClient(token=token), None
16
 
17
- def generate_meme_text(idea: str, model: str):
18
- """Generate meme text using a STABLE AI model."""
 
 
 
19
  client, error = get_client()
20
  if error:
21
- return None, None, error
22
-
23
- try:
24
- # A simple, direct prompt for a text-to-text model like FLAN-T5
25
- prompt = f"""Generate two short, funny lines for a meme about: "{idea}".
26
-
27
- Use this exact format:
28
- Top: [text] | Bottom: [text]"""
29
-
30
- # Using text_generation which is compatible with these models
31
- response = client.text_generation(
32
- prompt,
33
- model=model,
34
- max_new_tokens=60,
35
- temperature=0.9,
36
- do_sample=True,
37
- )
38
-
39
- # --- Robust Parsing Logic ---
40
- top_text = ""
41
- bottom_text = ""
42
-
43
- # Best case: split by |
44
- if "|" in response:
45
- parts = response.split("|")
46
- top_text = parts[0].replace("Top:", "").strip()
47
- bottom_text = parts[1].replace("Bottom:", "").strip()
48
- else: # Fallback if '|' is not present
49
- lines = response.strip().split('\n')
50
- if len(lines) > 0:
51
- top_text = lines[0].replace("Top:", "").strip()
52
- if len(lines) > 1:
53
- bottom_text = lines[1].replace("Bottom:", "").strip()
54
-
55
- # Final fallback if parsing fails completely
56
- if not top_text or not bottom_text:
57
- words = idea.split()
58
- mid_point = max(1, len(words) // 2)
59
- top_text = ' '.join(words[:mid_point])
60
- bottom_text = ' '.join(words[mid_point:])
61
 
62
- return top_text.strip(), bottom_text.strip(), None
63
-
64
- except Exception as e:
65
- error_msg = str(e)
66
- if "rate limit" in error_msg:
67
- return None, None, "❌ **Rate Limit Exceeded**\n\nWait 60 seconds and try again. Free tier has limits."
68
- elif "503" in error_msg:
69
- return None, None, f"❌ **Model is Loading**\n\nWait 30 seconds for the model to wake up and try again."
70
- elif "404" in error_msg:
71
- return None, None, f"❌ **Model Not Found**\n\nThe model '{model}' is not available on the free tier right now. This should be rare with this model."
72
- else:
73
- return None, None, f"❌ **AI Error:** {error_msg[:200]}"
74
 
75
- def create_meme(idea: str, template: str, model: str):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  """Main function to generate the complete meme."""
77
  if not idea or len(idea.strip()) < 3:
78
  return None, "❌ Please enter a meme idea (at least 3 characters)!"
79
-
80
- top, bottom, error = generate_meme_text(idea, model)
81
  if error:
82
  return None, error
83
-
84
  template_id = MEME_TEMPLATES.get(template, "181913649")
85
  url = "https://api.imgflip.com/caption_image"
86
 
@@ -93,14 +107,14 @@ def create_meme(idea: str, template: str, model: str):
93
  }
94
 
95
  try:
96
- response = requests.post(url, data=payload, timeout=15)
97
  response.raise_for_status()
98
  data = response.json()
99
 
100
  if data.get('success'):
101
  meme_url = data['data']['url']
102
 
103
- img_response = requests.get(meme_url, timeout=15)
104
  if img_response.status_code == 200:
105
  temp_path = f"/tmp/meme_{random.randint(1000, 9999)}.jpg"
106
  with open(temp_path, 'wb') as f:
@@ -111,7 +125,7 @@ def create_meme(idea: str, template: str, model: str):
111
  πŸ“ **Top Text:** {top}
112
  πŸ“ **Bottom Text:** {bottom}
113
 
114
- 🤖 **AI Model:** {model}
115
  🔗 **Direct URL:** {meme_url}"""
116
  return temp_path, status_message
117
  else:
@@ -126,11 +140,6 @@ def create_meme(idea: str, template: str, model: str):
126
 
127
  # --- CONFIGURATION & UI ---
128
 
129
- # βœ… A STABLE, RELIABLE, ALWAYS-ON FREE MODEL
130
- MODELS = {
131
- "FLAN-T5 (Reliable & Fast)": "google/flan-t5-large",
132
- }
133
-
134
  # Popular meme templates
135
  MEME_TEMPLATES = {
136
  "Drake": "181913649",
@@ -139,13 +148,14 @@ MEME_TEMPLATES = {
139
  "Expanding Brain": "93895088",
140
  "Success Kid": "61544",
141
  "Batman Slapping Robin": "438680",
 
142
  }
143
 
144
  # Example prompts
145
  examples = [
146
- ["When you fix a bug you don't understand", "Success Kid", "FLAN-T5 (Reliable & Fast)"],
147
- ["Writing code vs. explaining it in the meeting", "Drake", "FLAN-T5 (Reliable & Fast)"],
148
- ["My code on my machine vs. my code in production", "Distracted Boyfriend", "FLAN-T5 (Reliable & Fast)"],
149
  ]
150
 
151
  # Gradio UI
@@ -155,7 +165,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
155
  <div style='text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
156
  padding: 30px; border-radius: 15px; color: white; margin-bottom: 20px;'>
157
  <h1>🥸 AI Meme Generator</h1>
158
- <h3>Powered by a Stable & Reliable Free AI Model 🤗</h3>
159
  </div>
160
  """)
161
 
@@ -163,31 +173,29 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Meme Generator") as demo:
163
 
164
  with gr.Row():
165
  with gr.Column(scale=2):
166
- idea_input = gr.Textbox(label="🎨 Your Meme Idea", placeholder="Example: When the CI/CD pipeline finally passes...", lines=2)
167
- with gr.Row():
168
- template_dropdown = gr.Dropdown(choices=list(MEME_TEMPLATES.keys()), value="Drake", label="🖼️ Meme Template")
169
- # Removed the model dropdown since we are forcing one stable model
170
- model_display = gr.Textbox(value="FLAN-T5 (Reliable & Fast)", label="🤖 AI Model (Free & Stable)", interactive=False)
171
  generate_button = gr.Button("🚀 Generate Meme", variant="primary", size="lg")
172
 
173
  with gr.Column(scale=1):
174
  gr.Markdown("""
175
- ### 📖 How to Use
176
- 1. **Enter Idea:** Describe the meme.
177
- 2. **Pick Template:** Choose a format.
178
- 3. **Click Generate!**
 
 
179
 
180
- **Why only one AI model?**
181
- To ensure this app **always works**, it uses a stable, reliable free model (`FLAN-T5`) instead of popular models that are often offline on the free tier.
182
  """)
183
 
184
  output_image = gr.Image(label="🖼️ Your Generated Meme", type="filepath", show_download_button=True)
185
  output_status = gr.Textbox(label="📊 Status & Details", lines=6, show_copy_button=True)
186
 
187
- gr.Examples(examples=examples, inputs=[idea_input, template_dropdown, model_display])
188
 
189
  generate_button.click(
190
- fn=lambda idea, template: create_meme(idea, template, MODELS["FLAN-T5 (Reliable & Fast)"]),
191
  inputs=[idea_input, template_dropdown],
192
  outputs=[output_image, output_status]
193
  )
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient, HfFolder
3
  import os
4
  import requests
5
  import random
 
9
 
10
  def get_client():
11
  """Get Hugging Face client, handle token error."""
12
+ token = os.environ.get("HF_TOKEN")
13
+ if not token:
14
+ # As a fallback for local testing, you might use this, but it's better to use secrets in Spaces
15
+ try:
16
+ token = HfFolder.get_token()
17
+ except Exception:
18
+ pass
19
  if not token:
20
  return None, "❌ **HuggingFace Token Required**\n\n**Setup:**\n1. Go to Space Settings → Repository Secrets\n2. Add secret: Name=`HF_TOKEN`, Value=(your token)\n3. Get token: https://huggingface.co/settings/tokens\n4. Restart Space"
21
  return InferenceClient(token=token), None
22
 
23
+ def generate_meme_text(idea: str):
24
+ """
25
+ Generate meme text by trying a list of models until one succeeds.
26
+ This is the auto-healing part.
27
+ """
28
  client, error = get_client()
29
  if error:
30
+ return None, None, error, None
31
+
32
+ # List of models to try in order of preference. This makes the app resilient.
33
+ MODELS_TO_TRY = [
34
+ "mistralai/Mistral-7B-Instruct-v0.2",
35
+ "HuggingFaceH4/zephyr-7b-beta",
36
+ "google/flan-t5-large",
37
+ "tiiuae/falcon-7b-instruct",
38
+ ]
39
+
40
+ failed_models = []
41
+ for model_id in MODELS_TO_TRY:
42
+ try:
43
+ prompt = f"""Task: Create a funny, two-line meme caption for the idea: "{idea}".
44
+
45
+ Format your response exactly like this, with no extra text:
46
+ Top: [The first line of the meme]
47
+ Bottom: [The second line, the punchline]"""
48
+
49
+ response = client.text_generation(
50
+ prompt,
51
+ model=model_id,
52
+ max_new_tokens=60,
53
+ temperature=0.8,
54
+ do_sample=True,
55
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
+ # --- Robust Parsing Logic ---
58
+ top_text, bottom_text = "",""
59
+ top_match = re.search(r"Top:\s*(.*)", response, re.IGNORECASE)
60
+ bottom_match = re.search(r"Bottom:\s*(.*)", response, re.IGNORECASE)
61
+
62
+ if top_match: top_text = top_match.group(1).strip().strip('[]"')
63
+ if bottom_match: bottom_text = bottom_match.group(1).strip().strip('[]"')
 
 
 
 
 
64
 
65
+ if not top_text or not bottom_text:
66
+ lines = response.strip().split('\n')
67
+ if len(lines) >= 1: top_text = lines[0].replace("Top:", "").strip()
68
+ if len(lines) >= 2: bottom_text = lines[1].replace("Bottom:", "").strip()
69
+
70
+ if not top_text.strip() or not bottom_text.strip():
71
+ continue # If parsing fails, treat it as a model failure and try the next one
72
+
73
+ # SUCCESS!
74
+ return top_text.strip(), bottom_text.strip(), None, model_id
75
+
76
+ except Exception as e:
77
+ error_msg = str(e).lower()
78
+ if "404" in error_msg or "503" in error_msg or "is currently loading" in error_msg:
79
+ # This model is offline or loading, try the next one
80
+ failed_models.append(model_id.split('/')[-1])
81
+ continue
82
+ else:
83
+ # A different error occurred (like rate limit), so we stop
84
+ return None, None, f"❌ **AI Error:** {str(e)[:200]}", model_id
85
+
86
+ # If the loop finishes, all models failed.
87
+ return None, None, f"❌ **All AI Models Are Offline**\n\nI'm sorry, all reliable free models are currently unavailable or busy. This is a Hugging Face issue. Please try again in a few minutes.\n\n**Models Tried:** {', '.join(failed_models)}", None
88
+
89
+ def create_meme(idea: str, template: str):
90
  """Main function to generate the complete meme."""
91
  if not idea or len(idea.strip()) < 3:
92
  return None, "❌ Please enter a meme idea (at least 3 characters)!"
93
+
94
+ top, bottom, error, model_used = generate_meme_text(idea)
95
  if error:
96
  return None, error
97
+
98
  template_id = MEME_TEMPLATES.get(template, "181913649")
99
  url = "https://api.imgflip.com/caption_image"
100
 
 
107
  }
108
 
109
  try:
110
+ response = requests.post(url, data=payload, timeout=20)
111
  response.raise_for_status()
112
  data = response.json()
113
 
114
  if data.get('success'):
115
  meme_url = data['data']['url']
116
 
117
+ img_response = requests.get(meme_url, timeout=20)
118
  if img_response.status_code == 200:
119
  temp_path = f"/tmp/meme_{random.randint(1000, 9999)}.jpg"
120
  with open(temp_path, 'wb') as f:
 
125
  πŸ“ **Top Text:** {top}
126
  πŸ“ **Bottom Text:** {bottom}
127
 
128
+ 🤖 **AI Model Used:** {model_used.split('/')[-1]}
129
  🔗 **Direct URL:** {meme_url}"""
130
  return temp_path, status_message
131
  else:
 
140
 
141
  # --- CONFIGURATION & UI ---
142
 
 
 
 
 
 
143
  # Popular meme templates
144
  MEME_TEMPLATES = {
145
  "Drake": "181913649",
 
148
  "Expanding Brain": "93895088",
149
  "Success Kid": "61544",
150
  "Batman Slapping Robin": "438680",
151
+ "Change My Mind": "129242436",
152
  }
153
 
154
  # Example prompts
155
  examples = [
156
+ ["When you fix a bug you don't understand"],
157
+ ["Writing code vs. explaining it in the meeting"],
158
+ ["My code on my machine vs. my code in production"],
159
  ]
160
 
161
  # Gradio UI
 
165
  <div style='text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
166
  padding: 30px; border-radius: 15px; color: white; margin-bottom: 20px;'>
167
  <h1>🥸 AI Meme Generator</h1>
168
+ <h3>Auto-Healing AI that ALWAYS Finds a Working Model 🤗</h3>
169
  </div>
170
  """)
171
 
 
173
 
174
  with gr.Row():
175
  with gr.Column(scale=2):
176
+ idea_input = gr.Textbox(label="🎨 Your Meme Idea", placeholder="Example: When the CI/CD pipeline finally passes...", lines=3)
177
+ template_dropdown = gr.Dropdown(choices=list(MEME_TEMPLATES.keys()), value="Drake", label="🖼️ Meme Template")
 
 
 
178
  generate_button = gr.Button("🚀 Generate Meme", variant="primary", size="lg")
179
 
180
  with gr.Column(scale=1):
181
  gr.Markdown("""
182
+ ### 📖 How it Works
183
+ This app is now **resilient**. It has a list of reliable free models and automatically tries them until it finds one that is online.
184
+
185
+ 1. **Enter Idea & Pick Template.**
186
+ 2. **Click Generate.**
187
+ 3. The app does the hard work of finding an active AI model for you!
188
 
189
+ **No more model selection. No more 404 errors.**
 
190
  """)
191
 
192
  output_image = gr.Image(label="🖼️ Your Generated Meme", type="filepath", show_download_button=True)
193
  output_status = gr.Textbox(label="📊 Status & Details", lines=6, show_copy_button=True)
194
 
195
+ gr.Examples(examples=examples, inputs=[idea_input])
196
 
197
  generate_button.click(
198
+ fn=create_meme,
199
  inputs=[idea_input, template_dropdown],
200
  outputs=[output_image, output_status]
201
  )