File size: 12,524 Bytes
0758129
 
c53f3cc
0758129
c53f3cc
0758129
7cd5b28
0758129
7cd5b28
 
8708059
 
 
 
7cd5b28
8708059
a4436c6
0758129
 
 
c53f3cc
 
 
 
eaad39c
c53f3cc
 
a6e8352
c53f3cc
 
0758129
 
 
 
7cd5b28
 
a6e8352
0758129
a6e8352
eaad39c
 
 
 
 
 
 
 
0758129
eaad39c
 
 
 
 
 
 
 
c53f3cc
 
 
 
 
eaad39c
 
c53f3cc
eaad39c
 
c53f3cc
 
5998398
 
c53f3cc
5998398
c53f3cc
 
eaad39c
5998398
c53f3cc
 
eaad39c
8708059
eaad39c
8708059
 
 
 
 
 
 
 
 
 
 
 
 
eaad39c
 
7cd5b28
eaad39c
 
 
 
 
 
 
 
 
 
0758129
8708059
0758129
8708059
0758129
7cd5b28
0758129
 
7cd5b28
 
0758129
 
7cd5b28
0758129
eaad39c
0c9c5cf
 
8708059
0c9c5cf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8708059
 
 
7cd5b28
8708059
 
 
 
 
 
7cd5b28
 
 
8708059
0c9c5cf
 
8708059
7cd5b28
 
8708059
 
0758129
dd3b062
0758129
eaad39c
8708059
c53f3cc
eaad39c
c53f3cc
a6e8352
 
 
 
5998398
eaad39c
 
 
 
8708059
eaad39c
8708059
eaad39c
7cd5b28
eaad39c
 
 
0758129
 
eaad39c
0758129
 
 
 
 
c53f3cc
 
 
0758129
 
eaad39c
0758129
eaad39c
0758129
 
eaad39c
dd3b062
c53f3cc
0758129
 
 
a6e8352
c53f3cc
eaad39c
c53f3cc
 
eaad39c
 
 
 
a6e8352
 
 
eaad39c
a6e8352
eaad39c
 
c53f3cc
 
0758129
 
 
032d725
eaad39c
 
c53f3cc
032d725
eaad39c
 
 
8708059
0758129
eaad39c
 
 
2350fb4
eaad39c
 
 
 
2350fb4
eaad39c
0758129
eaad39c
0758129
032d725
eaad39c
 
0758129
032d725
 
 
 
eaad39c
 
8708059
eaad39c
 
8708059
eaad39c
 
c53f3cc
dd3b062
c53f3cc
8708059
eaad39c
dd3b062
c53f3cc
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
import os, hashlib, textwrap, requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
import gradio as gr

# ==============================
# Config / Secrets
# ==============================
# HF token is optional: without it the Inference API tier is skipped and
# generation falls through to the public Space, then the gradient fallback.
HF_TOKEN = os.getenv("HF_TOKEN")  # optional
# Try these Inference API model IDs first (will skip on 404/403/5xx)
INFERENCE_CANDIDATES = [
    "stabilityai/stable-diffusion-2-1",
    "runwayml/stable-diffusion-v1-5",
]
# Public Space fallback (no token). We'll DISCOVER a valid api_name at runtime.
# NOTE(review): call_public_space below actually hard-codes api_name="/infer";
# no runtime discovery is visible in this file — confirm intent.
PUBLIC_SPACE_ID = "black-forest-labs/FLUX.1-schnell"

# ==============================
# Fonts
# ==============================
# Preferred TrueType fonts, probed in order; bold DejaVu first.
CANDIDATE_FONTS = [
    "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
    "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
]
def get_font(size: int):
    """Return the first available candidate font at *size*, else PIL's default."""
    pt = int(size)
    for path in CANDIDATE_FONTS:
        if os.path.exists(path):
            return ImageFont.truetype(path, size=pt)
    # No TTF found (e.g. minimal container): bitmap fallback, size ignored.
    return ImageFont.load_default()

# ==============================
# Utils
# ==============================
def i(v):
    """Coerce *v* (int, float, or numeric string) to a rounded int.

    Falls back to a direct int() cast when the float path fails.
    """
    try:
        return int(round(float(v)))
    except Exception:
        return int(v)

def gradient_from_prompt(prompt: str, w=768, h=768) -> Image.Image:
    """Deterministic vertical gradient derived from a SHA-256 of the prompt.

    This is the final, no-network fallback so the app always yields an image.
    The two endpoint colors come from different slices of the hex digest and
    are brightened slightly so the result is never near-black.
    """
    w, h = i(w), i(h)
    digest = hashlib.sha256((prompt or "meme").encode()).hexdigest()
    c1 = tuple(int(digest[k:k + 2], 16) for k in (0, 2, 4))
    c2 = tuple(int(digest[k:k + 2], 16) for k in (6, 8, 10))
    c1 = tuple(min(255, int(v * 1.2)) for v in c1)
    c2 = tuple(min(255, int(v * 1.1)) for v in c2)
    img = Image.new("RGB", (w, h), c1)
    # Guard h == 1: the original divided by (h - 1) == 0.
    denom = max(1, h - 1)
    # Fill one horizontal row at a time (h paste calls) instead of the
    # original per-pixel Python loop (w * h assignments) — same output,
    # dramatically fewer interpreter-level operations.
    for y in range(h):
        t = y / denom
        row = (
            int(c1[0] * (1 - t) + c2[0] * t),
            int(c1[1] * (1 - t) + c2[1] * t),
            int(c1[2] * (1 - t) + c2[2] * t),
        )
        img.paste(row, (0, y, w, y + 1))
    return img

def wrap_lines(draw, text, img_w, font, stroke):
    """Word-wrap *text* to a character width scaled from the image width.

    Returns (lines, heights): the wrapped lines and, per line, the bottom
    coordinate of its text bounding box (used downstream as line height).
    Blank paragraphs are preserved as empty lines.
    """
    width_chars = max(12, min(30, img_w // 30))
    lines = []
    for para in (text or "").split("\n"):
        chunks = textwrap.wrap(para, width=width_chars)
        if chunks:
            lines.extend(chunks)
        else:
            lines.append("")
    heights = []
    for ln in lines:
        box = draw.textbbox((0, 0), ln, font=font, stroke_width=stroke)
        heights.append(box[3])
    return lines, heights

def draw_block(draw, text, img_w, y, font, fill, stroke_fill, stroke_width, align="center"):
    """Render wrapped *text* starting at vertical offset *y*.

    Returns (next_y, total_height), where total_height includes the
    inter-line spacing (25% of the font size between consecutive lines).
    """
    lines, heights = wrap_lines(draw, text, img_w, font, stroke_width)
    spacing = int(font.size * 0.25)
    cursor = y
    for idx, line in enumerate(lines):
        box = draw.textbbox((0, 0), line, font=font, stroke_width=stroke_width)
        line_w = box[2] - box[0]
        if align == "left":
            x = int(img_w * 0.05)  # 5% left margin
        elif align == "center":
            x = (img_w - line_w) / 2
        else:
            # "right" (and any unrecognized value, matching original fallthrough)
            x = img_w - int(img_w * 0.05) - line_w
        draw.text((x, cursor), line, font=font, fill=fill,
                  stroke_width=stroke_width, stroke_fill=stroke_fill)
        cursor += heights[idx] + spacing
    total = sum(heights) + (len(heights) - 1) * spacing
    return cursor, total

# ==============================
# Styles / text split
# ==============================
# Preset name -> style suffix appended to the user's prompt before generation.
# "None" maps to the empty string, leaving the prompt untouched.
PRESETS = {
    "None": "",
    "Retro Comic": "bold comic outline, grain, high contrast, 35mm scan",
    "Vaporwave": "vaporwave, neon pink and cyan, miami sunset, synth grid",
    "Game Boy": "pixel art, 4-color green palette, dithering",
    "Newspaper Halftone": "b&w halftone dots, newsprint texture",
    "Cyberpunk Neon": "neon city at night, purple blue rim light, rain",
    "90s Web": "bevel buttons, gradients, clipart stars, lens flare",
    "Synthwave Grid": "purple/indigo sky, glowing sun, mountains, grid floor",
}
def smart_split_text(prompt: str):
    """Derive uppercase (top, bottom) meme captions from the prompt.

    Splits once on the first separator found (|, " - ", " — ", :, ;).
    Otherwise prompts longer than six words are halved word-wise, and
    short prompts become top-only text with an empty bottom.
    """
    text = (prompt or "").strip()
    if not text:
        return "TOP TEXT", "BOTTOM TEXT"
    for sep in ("|", " - ", " — ", ":", ";"):
        if sep in text:
            top, bottom = text.split(sep, 1)
            return top.strip().upper(), bottom.strip().upper()
    words = text.split()
    if len(words) <= 6:
        return text.upper(), ""
    mid = len(words) // 2
    return " ".join(words[:mid]).upper(), " ".join(words[mid:]).upper()

# ==============================
# Generators (multi-fallback)
# ==============================
def call_inference_api(model_id: str, prompt: str, width: int, height: int) -> Image.Image:
    """POST the prompt to the HF Inference API for *model_id*.

    Raises RuntimeError("no-token") without a token, or
    RuntimeError("<model_id>:<status>") on any non-200 response, so the
    caller can record the failure and try the next backend.
    """
    if not HF_TOKEN:
        raise RuntimeError("no-token")
    resp = requests.post(
        f"https://api-inference.huggingface.co/models/{model_id}",
        headers={"Authorization": f"Bearer {HF_TOKEN}"},
        json={
            "inputs": prompt,
            "options": {"wait_for_model": True},
            "parameters": {"width": int(width), "height": int(height)},
        },
        timeout=180,
    )
    if resp.status_code != 200:
        raise RuntimeError(f"{model_id}:{resp.status_code}")
    return Image.open(BytesIO(resp.content)).convert("RGB")

def call_public_space(prompt: str, width: int, height: int) -> Image.Image:
    """Use the FLUX public Space directly via its /infer endpoint.

    Returns the generated image as RGB; raises RuntimeError if the Space
    returns an empty result.
    """
    from gradio_client import Client
    # FIX: use the module-level constant instead of re-hardcoding the Space
    # id, so the fallback target is configured in exactly one place.
    client = Client(PUBLIC_SPACE_ID)
    # order: prompt, seed, randomize_seed, width, height, num_inference_steps
    result, _seed = client.predict(
        prompt,
        0,            # seed (0 = let Space choose unless randomize_seed=False)
        True,         # randomize_seed
        int(width),
        int(height),
        4,            # num_inference_steps (keep tiny for speed on mobile)
        api_name="/infer"
    )
    # The client may hand back a dict, a list of dicts/paths, or a bare path.
    path = None
    if isinstance(result, dict):
        path = result.get("path") or result.get("url")
    elif isinstance(result, list) and result:
        item = result[0]
        if isinstance(item, dict):
            path = item.get("path") or item.get("url")
        else:
            path = item
    else:
        path = result
    if not path:
        raise RuntimeError("public-space returned empty result")
    # Image is already imported at module level; the old in-function
    # re-import shadowed it redundantly and has been removed.
    return Image.open(path).convert("RGB")

def generate_image_auto(prompt: str, width: int, height: int):
    """Try each image backend in order; always return (image, status markdown).

    Order: HF Inference API candidates (token required) -> public FLUX Space
    -> deterministic local gradient (never fails). Failures are collected in
    *tried* and surfaced in the fallback status string.
    """
    tried = []
    # 1) Inference API candidates (if token present)
    if HF_TOKEN:
        for mid in INFERENCE_CANDIDATES:
            try:
                img = call_inference_api(mid, prompt, width, height)
                return img, f"✅ Inference API: **{mid}** (token present)"
            except Exception as e:
                tried.append(f"{mid}{str(e)}")
                continue
    # 2) Public Space dynamic
    try:
        img = call_public_space(prompt, width, height)
        # FIX: this return was dedented to column 0 inside the try block —
        # a SyntaxError that prevented the module from loading at all.
        return img, "✅ Public Space: FLUX /infer"
    except Exception as e:
        tried.append(f"{PUBLIC_SPACE_ID}{str(e)}")
    # 3) Gradient
    return gradient_from_prompt(prompt, w=width, h=height), f"⚠️ Fallback gradient | tried: {', '.join(tried)}"

# ==============================
# Core pipeline (returns image + status)
# ==============================
def generate_and_meme(
    prompt, preset_name, use_ai, width, height,
    font_size, stroke_width, text_color, outline_color,
    align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual
):
    """End-to-end pipeline: build the base image, overlay top/bottom meme
    text, and return (PIL image, status markdown string)."""
    width, height = i(width), i(height)
    top_nudge, bottom_nudge = i(top_nudge), i(bottom_nudge)
    stroke_width = i(stroke_width)

    base = (prompt or "").strip()
    gen_prompt = (base + " " + PRESETS.get(preset_name or "None", "")).strip()

    if use_ai:
        img, status = generate_image_auto(gen_prompt, width, height)
    else:
        img = gradient_from_prompt(gen_prompt, w=width, h=height)
        status = "ℹ️ AI generator is OFF"

    # Captions: either derived from the raw prompt, or taken verbatim
    # from the manual fields (uppercased either way).
    if use_prompt_for_text:
        top_text, bottom_text = smart_split_text(base)
    else:
        top_text = (top_text_manual or "").upper()
        bottom_text = (bottom_text_manual or "").upper()

    img = img.convert("RGB")
    draw = ImageDraw.Draw(img)
    w_img, h_img = img.size

    # font_size is expressed as a percentage of the image width (min 12 px).
    font = get_font(max(12, int((w_img * float(font_size)) / 100)))
    stroke = int(max(0, stroke_width))

    # Top caption: 3% top margin plus the user's pixel nudge.
    draw_block(draw, top_text, w_img, int(h_img * 0.03) + top_nudge,
               font, text_color, outline_color, stroke, align=align)

    # Bottom caption: measure first so the block can be anchored to the
    # bottom edge, then draw upward from that start position.
    _, heights = wrap_lines(draw, bottom_text, w_img, font, stroke)
    block_h = sum(heights) + (len(heights) - 1) * int(font.size * 0.25)
    bottom_y = int(h_img - block_h - h_img * 0.03) - bottom_nudge
    draw_block(draw, bottom_text, w_img, bottom_y,
               font, text_color, outline_color, stroke, align=align)

    return img, status

# ==============================
# Retro theme + CSS
# ==============================
# Gradio Soft theme tinted indigo/violet to match the CRT card styling below.
THEME = gr.themes.Soft(primary_hue="indigo", secondary_hue="violet", neutral_hue="slate")
# Custom CSS: pixel font for the title, dark radial background, and a ".crt"
# card class whose ::before overlay draws horizontal scanlines.
CUSTOM_CSS = """
@import url('https://fonts.googleapis.com/css2?family=Press+Start+2P&display=swap');
:root { --radius: 14px; }
* { -webkit-tap-highlight-color: transparent; }
body { background: radial-gradient(1200px 600px at 50% -10%, #0d1220 10%, #05060b 70%); }
.gradio-container { max-width: 900px; margin: 0 auto; padding: 12px; }
h2, p { text-align: center; color: #cde3ff; text-shadow: 0 0 10px rgba(80,120,255,.25); }
h2 { font-family: 'Press Start 2P', system-ui, sans-serif; letter-spacing: 1px; font-size: 18px; }
.crt { position: relative; border: 2px solid #2a3350; border-radius: 12px; overflow: hidden;
       box-shadow: 0 0 0 1px #0f1427 inset, 0 0 40px rgba(60,80,255,.25); }
.crt::before { content: ""; position: absolute; inset: 0; pointer-events: none;
  background: repeating-linear-gradient(180deg, rgba(255,255,255,0.05), rgba(255,255,255,0.05) 1px, transparent 1px, transparent 3px);
  mix-blend-mode: overlay; opacity: .25; }
label { color: #a9b7ff !important; }
.gr-button { font-weight: 800; border-radius: 12px; }
"""

# ==============================
# App
# ==============================
with gr.Blocks(theme=THEME, css=CUSTOM_CSS) as demo:
    gr.Markdown("<h2>🕹️ MEME LAB — RETRO EDITION</h2>"
                "<p>One prompt → generate image → auto meme text. Style presets for instant vibes.</p>")

    with gr.Row():
        # Left column: generation settings and caption controls.
        with gr.Column(scale=1, elem_classes=["crt"]):
            prompt = gr.Textbox(label="Your idea (one prompt)", value="cat typing on a laptop at midnight")
            preset = gr.Dropdown(choices=list(PRESETS.keys()), value="Retro Comic", label="Style preset")
            use_ai = gr.Checkbox(label="Use AI image (auto-fallbacks, no key required)", value=True)

            with gr.Row():
                width = gr.Slider(384, 1024, value=768, step=64, label="Width")
                height = gr.Slider(384, 1024, value=768, step=64, label="Height")

            gr.Markdown("### Meme Text")
            use_prompt_for_text = gr.Checkbox(label="Auto from prompt", value=True)
            top_text_manual = gr.Textbox(label="Top text (if not auto)", value="", interactive=True)
            bottom_text_manual = gr.Textbox(label="Bottom text (if not auto)", value="", interactive=True)

            align = gr.Radio(choices=["left", "center", "right"], value="center", label="Text alignment")
            font_size = gr.Slider(8, 24, value=10, step=1, label="Font size (% of width)")
            stroke_width = gr.Slider(0, 16, value=4, step=1, label="Outline thickness")

            with gr.Row():
                text_color = gr.ColorPicker(value="#FFFFFF", label="Text color")
                outline_color = gr.ColorPicker(value="#000000", label="Outline color")

            with gr.Row():
                top_nudge = gr.Slider(-300, 300, value=0, step=1, label="Top nudge (px)")
                bottom_nudge = gr.Slider(-300, 300, value=0, step=1, label="Bottom nudge (px)")

        # Right column: preview image, backend status line, and the trigger.
        with gr.Column(scale=1, elem_classes=["crt"]):
            out = gr.Image(type="pil", label="Preview / Download", height=540, show_download_button=True)
            status = gr.Markdown("…")
            generate = gr.Button("✨ Generate Image + Meme", variant="primary")

    # Order must match generate_and_meme's positional parameter list exactly.
    inputs = [prompt, preset, use_ai, width, height,
              font_size, stroke_width, text_color, outline_color,
              align, top_nudge, bottom_nudge, use_prompt_for_text, top_text_manual, bottom_text_manual]

    generate.click(fn=generate_and_meme, inputs=inputs, outputs=[out, status])

    # Live re-render when any styling/caption control changes.
    # NOTE(review): this re-runs the full pipeline — including AI generation
    # when use_ai is on — on every tweak; confirm that's intended.
    for comp in [preset, use_prompt_for_text, top_text_manual, bottom_text_manual,
                 font_size, stroke_width, text_color, outline_color, align, top_nudge, bottom_nudge]:
        comp.change(fn=generate_and_meme, inputs=inputs, outputs=[out, status], show_progress=False)

if __name__ == "__main__":
    demo.launch()