rickveloper committed on
Commit
8708059
·
verified ·
1 Parent(s): a4436c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +70 -53
app.py CHANGED
@@ -6,15 +6,16 @@ import gradio as gr
6
  # ==============================
7
  # Secrets / Config
8
  # ==============================
9
- HF_TOKEN = os.getenv("HF_TOKEN") # Space secret
10
- DEFAULT_MODEL_ID = "stabilityai/sd-turbo" # fast + public
11
-
12
- # Use ONLY model IDs that are real + public
13
- MODEL_CHOICES = {
14
- "sd-1.5 (runwayml)": "runwayml/stable-diffusion-v1-5",
15
- "sd-2.1 (stabilityai)": "stabilityai/stable-diffusion-2-1",
16
- }
17
-
 
18
 
19
  # ==============================
20
  # Fonts
@@ -80,9 +81,21 @@ def draw_block(draw, text, img_w, y, font, fill, stroke_fill, stroke_width, alig
80
  draw.text((x, curr_y), line, font=font, fill=fill,
81
  stroke_width=stroke_width, stroke_fill=stroke_fill)
82
  curr_y += heights[i] + int(font.size * 0.25)
83
- total_h = sum(heights) + (len(heights)-1) * int(font.size*0.25)
84
- return curr_y, total_h
85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  def smart_split_text(prompt: str):
87
  p = (prompt or "").strip()
88
  if not p:
@@ -98,9 +111,9 @@ def smart_split_text(prompt: str):
98
  return p.upper(), ""
99
 
100
  # ==============================
101
- # HF Inference API
102
  # ==============================
103
- def generate_via_hf_inference(model_id: str, prompt: str, width: int, height: int) -> Image.Image:
104
  if not HF_TOKEN:
105
  raise RuntimeError("HF token not set")
106
  url = f"https://api-inference.huggingface.co/models/{model_id}"
@@ -108,19 +121,53 @@ def generate_via_hf_inference(model_id: str, prompt: str, width: int, height: in
108
  payload = {
109
  "inputs": prompt,
110
  "options": {"wait_for_model": True},
111
- # These params are accepted by sd-turbo and ignored by some others (fine)
112
  "parameters": {"width": int(width), "height": int(height)}
113
  }
114
  r = requests.post(url, headers=headers, json=payload, timeout=180)
115
  if r.status_code != 200:
116
- raise RuntimeError(f"HF API {r.status_code}: {r.text[:160]}")
117
  return Image.open(BytesIO(r.content)).convert("RGB")
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  # ==============================
120
  # Core pipeline (returns image + status)
121
  # ==============================
122
  def generate_and_meme(
123
- prompt, preset_name, use_ai, model_label, width, height,
124
  font_size, stroke_width, text_color, outline_color,
125
  align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual
126
  ):
@@ -132,27 +179,18 @@ def generate_and_meme(
132
  style_suffix = PRESETS.get(preset_name or "None", "")
133
  gen_prompt = (base + " " + style_suffix).strip()
134
 
135
- # 1) Image
136
  if use_ai:
137
- model_id = MODEL_CHOICES.get(model_label, DEFAULT_MODEL_ID)
138
- try:
139
- img = generate_via_hf_inference(model_id, gen_prompt, width, height)
140
- status = f"✅ AI image via **{model_id}** | token={'present' if bool(HF_TOKEN) else 'missing'}"
141
- except Exception as e:
142
- img = gradient_from_prompt(gen_prompt, w=width, h=height)
143
- status = f"⚠️ Fallback to gradient. Model=**{model_id}** | token={'present' if bool(HF_TOKEN) else 'missing'} | Reason: {str(e)[:180]}"
144
  else:
145
- img = gradient_from_prompt(gen_prompt, w=width, h=height)
146
- status = "ℹ️ AI generator is OFF"
147
 
148
- # 2) Text
149
  if use_prompt_for_text:
150
  top_text, bottom_text = smart_split_text(base)
151
  else:
152
  top_text = (top_text_manual or "").upper()
153
  bottom_text = (bottom_text_manual or "").upper()
154
 
155
- # 3) Draw
156
  img = img.convert("RGB")
157
  draw = ImageDraw.Draw(img)
158
  w_img, h_img = img.size
@@ -171,25 +209,10 @@ def generate_and_meme(
171
 
172
  return img, status
173
 
174
- # ==============================
175
- # Style presets
176
- # ==============================
177
- PRESETS = {
178
- "None": "",
179
- "Retro Comic": "bold comic outline, grain, high contrast, 35mm scan",
180
- "Vaporwave": "vaporwave, neon pink and cyan, miami sunset, synth grid",
181
- "Game Boy": "pixel art, 4-color green palette, dithering",
182
- "Newspaper Halftone": "b&w halftone dots, newsprint texture",
183
- "Cyberpunk Neon": "neon city at night, purple blue rim light, rain",
184
- "90s Web": "bevel buttons, gradients, clipart stars, lens flare",
185
- "Synthwave Grid": "purple/indigo sky, glowing sun, mountains, grid floor",
186
- }
187
-
188
  # ==============================
189
  # Retro theme + CSS
190
  # ==============================
191
  THEME = gr.themes.Soft(primary_hue="indigo", secondary_hue="violet", neutral_hue="slate")
192
-
193
  CUSTOM_CSS = """
194
  @import url('https://fonts.googleapis.com/css2?family=Press+Start+2P&display=swap');
195
  :root { --radius: 14px; }
@@ -218,13 +241,7 @@ with gr.Blocks(theme=THEME, css=CUSTOM_CSS) as demo:
218
  with gr.Column(scale=1, elem_classes=["crt"]):
219
  prompt = gr.Textbox(label="Your idea (one prompt)", value="cat typing on a laptop at midnight")
220
  preset = gr.Dropdown(choices=list(PRESETS.keys()), value="Retro Comic", label="Style preset")
221
- use_ai = gr.Checkbox(label="Use AI generator (HF Inference API, needs secret)", value=False)
222
-
223
- model_label = gr.Dropdown(
224
- choices=list(MODEL_CHOICES.keys()),
225
- value="sd-turbo (fast)",
226
- label="Model (only used when AI is ON)"
227
- )
228
 
229
  with gr.Row():
230
  width = gr.Slider(384, 1024, value=768, step=64, label="Width")
@@ -249,16 +266,16 @@ with gr.Blocks(theme=THEME, css=CUSTOM_CSS) as demo:
249
 
250
  with gr.Column(scale=1, elem_classes=["crt"]):
251
  out = gr.Image(type="pil", label="Preview / Download", height=540, show_download_button=True)
252
- status = gr.Markdown("…") # shows why it fell back / which model was used
253
  generate = gr.Button("✨ Generate Image + Meme", variant="primary")
254
 
255
- inputs = [prompt, preset, use_ai, model_label, width, height,
256
  font_size, stroke_width, text_color, outline_color,
257
  align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual]
258
 
259
  generate.click(fn=generate_and_meme, inputs=inputs, outputs=[out, status])
260
 
261
- for comp in [preset, model_label, use_prompt_for_text, top_text_manual, bottom_text_manual,
262
  font_size, stroke_width, text_color, outline_color, align, top_nudge, bottom_nudge]:
263
  comp.change(fn=generate_and_meme, inputs=inputs, outputs=[out, status], show_progress=False)
264
 
 
6
  # ==============================
7
  # Secrets / Config
8
  # ==============================
9
+ HF_TOKEN = os.getenv("HF_TOKEN") # optional, via Repo Secrets
10
+ # Try these Inference API models in order; skip silently on 404/403/5xx
11
+ INFERENCE_CANDIDATES = [
12
+ "stabilityai/stable-diffusion-2-1",
13
+ "runwayml/stable-diffusion-v1-5",
14
+ "stabilityai/sd-turbo", # may 404 for some accounts
15
+ ]
16
+ # Public Space as last resort (no token). If they change UI, we still fail gracefully.
17
+ PUBLIC_SPACE_ID = "black-forest-labs/FLUX.1-schnell"
18
+ PUBLIC_SPACE_APIS = ["/predict", "/run"]
19
 
20
  # ==============================
21
  # Fonts
 
81
  draw.text((x, curr_y), line, font=font, fill=fill,
82
  stroke_width=stroke_width, stroke_fill=stroke_fill)
83
  curr_y += heights[i] + int(font.size * 0.25)
84
+ return curr_y, sum(heights) + (len(heights)-1) * int(font.size*0.25)
 
85
 
86
+ # ==============================
87
+ # Styles / text split
88
+ # ==============================
89
+ PRESETS = {
90
+ "None": "",
91
+ "Retro Comic": "bold comic outline, grain, high contrast, 35mm scan",
92
+ "Vaporwave": "vaporwave, neon pink and cyan, miami sunset, synth grid",
93
+ "Game Boy": "pixel art, 4-color green palette, dithering",
94
+ "Newspaper Halftone": "b&w halftone dots, newsprint texture",
95
+ "Cyberpunk Neon": "neon city at night, purple blue rim light, rain",
96
+ "90s Web": "bevel buttons, gradients, clipart stars, lens flare",
97
+ "Synthwave Grid": "purple/indigo sky, glowing sun, mountains, grid floor",
98
+ }
99
  def smart_split_text(prompt: str):
100
  p = (prompt or "").strip()
101
  if not p:
 
111
  return p.upper(), ""
112
 
113
  # ==============================
114
+ # Generators (multi-fallback)
115
  # ==============================
116
+ def call_inference_api(model_id: str, prompt: str, width: int, height: int) -> Image.Image:
117
  if not HF_TOKEN:
118
  raise RuntimeError("HF token not set")
119
  url = f"https://api-inference.huggingface.co/models/{model_id}"
 
121
  payload = {
122
  "inputs": prompt,
123
  "options": {"wait_for_model": True},
 
124
  "parameters": {"width": int(width), "height": int(height)}
125
  }
126
  r = requests.post(url, headers=headers, json=payload, timeout=180)
127
  if r.status_code != 200:
128
+ raise RuntimeError(f"{r.status_code}:{r.text[:160]}")
129
  return Image.open(BytesIO(r.content)).convert("RGB")
130
 
131
+ def call_public_space(prompt: str, width: int, height: int) -> Image.Image:
132
+ # no token required
133
+ from gradio_client import Client
134
+ client = Client(PUBLIC_SPACE_ID)
135
+ # try multiple API names to survive UI tweaks
136
+ last_err = None
137
+ for api in PUBLIC_SPACE_APIS:
138
+ try:
139
+ res = client.predict(prompt, width, height, api_name=api)
140
+ if isinstance(res, list): res = res[0]
141
+ return Image.open(res).convert("RGB")
142
+ except Exception as e:
143
+ last_err = e
144
+ continue
145
+ raise RuntimeError(f"public-space-fail:{last_err}")
146
+
147
+ def generate_image_auto(prompt: str, width: int, height: int):
148
+ # 1) try HF Inference API candidates
149
+ tried = []
150
+ if HF_TOKEN:
151
+ for mid in INFERENCE_CANDIDATES:
152
+ try:
153
+ img = call_inference_api(mid, prompt, width, height)
154
+ return img, f"✅ Inference API: **{mid}** (token present)"
155
+ except Exception as e:
156
+ tried.append(f"{mid}→{str(e)[:80]}")
157
+ # 2) try public Space (no token)
158
+ try:
159
+ img = call_public_space(prompt, width, height)
160
+ return img, f"✅ Public Space: **{PUBLIC_SPACE_ID}**"
161
+ except Exception as e:
162
+ tried.append(f"{PUBLIC_SPACE_ID}→{str(e)[:80]}")
163
+ # 3) fallback gradient
164
+ return gradient_from_prompt(prompt, w=width, h=height), f"⚠️ Fallback gradient | tried: {', '.join(tried)}"
165
+
166
  # ==============================
167
  # Core pipeline (returns image + status)
168
  # ==============================
169
  def generate_and_meme(
170
+ prompt, preset_name, use_ai, width, height,
171
  font_size, stroke_width, text_color, outline_color,
172
  align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual
173
  ):
 
179
  style_suffix = PRESETS.get(preset_name or "None", "")
180
  gen_prompt = (base + " " + style_suffix).strip()
181
 
 
182
  if use_ai:
183
+ img, status = generate_image_auto(gen_prompt, width, height)
 
 
 
 
 
 
184
  else:
185
+ img, status = gradient_from_prompt(gen_prompt, w=width, h=height), "ℹ️ AI generator is OFF"
 
186
 
187
+ # text
188
  if use_prompt_for_text:
189
  top_text, bottom_text = smart_split_text(base)
190
  else:
191
  top_text = (top_text_manual or "").upper()
192
  bottom_text = (bottom_text_manual or "").upper()
193
 
 
194
  img = img.convert("RGB")
195
  draw = ImageDraw.Draw(img)
196
  w_img, h_img = img.size
 
209
 
210
  return img, status
211
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  # ==============================
213
  # Retro theme + CSS
214
  # ==============================
215
  THEME = gr.themes.Soft(primary_hue="indigo", secondary_hue="violet", neutral_hue="slate")
 
216
  CUSTOM_CSS = """
217
  @import url('https://fonts.googleapis.com/css2?family=Press+Start+2P&display=swap');
218
  :root { --radius: 14px; }
 
241
  with gr.Column(scale=1, elem_classes=["crt"]):
242
  prompt = gr.Textbox(label="Your idea (one prompt)", value="cat typing on a laptop at midnight")
243
  preset = gr.Dropdown(choices=list(PRESETS.keys()), value="Retro Comic", label="Style preset")
244
+ use_ai = gr.Checkbox(label="Use AI image (auto-fallbacks, no key required)", value=True)
 
 
 
 
 
 
245
 
246
  with gr.Row():
247
  width = gr.Slider(384, 1024, value=768, step=64, label="Width")
 
266
 
267
  with gr.Column(scale=1, elem_classes=["crt"]):
268
  out = gr.Image(type="pil", label="Preview / Download", height=540, show_download_button=True)
269
+ status = gr.Markdown("…")
270
  generate = gr.Button("✨ Generate Image + Meme", variant="primary")
271
 
272
+ inputs = [prompt, preset, use_ai, width, height,
273
  font_size, stroke_width, text_color, outline_color,
274
  align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual]
275
 
276
  generate.click(fn=generate_and_meme, inputs=inputs, outputs=[out, status])
277
 
278
+ for comp in [preset, use_prompt_for_text, top_text_manual, bottom_text_manual,
279
  font_size, stroke_width, text_color, outline_color, align, top_nudge, bottom_nudge]:
280
  comp.change(fn=generate_and_meme, inputs=inputs, outputs=[out, status], show_progress=False)
281