File size: 3,136 Bytes
f9f54d2
 
 
ac813ec
f9f54d2
 
 
eae1bb8
f9f54d2
 
 
 
 
ac813ec
f9f54d2
 
 
 
 
 
 
 
 
ac813ec
 
 
 
 
 
 
 
 
 
 
f9f54d2
 
 
 
 
 
ac813ec
 
 
f9f54d2
 
ac813ec
 
 
 
 
 
 
 
f9f54d2
 
 
 
 
 
 
 
 
 
 
 
eae1bb8
ca75acb
f9f54d2
 
 
ac813ec
 
 
 
 
 
 
f9f54d2
 
 
eae1bb8
f9f54d2
 
 
 
 
ca75acb
f9f54d2
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import gradio as gr
from diffusers import DDPMPipeline
import torch
from PIL import Image, ImageDraw
import os

# --- CONFIGURATION ---
# Hugging Face Hub repo id of the unconditional diffusion model to serve.
MODEL_ID = "FlameF0X/Stable-Lime-v1.1"
# Prefer GPU when available; everything below (pipeline + inference) follows this.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

print(f"πŸ‹ Initializing Stable-Lime Protocol on {DEVICE}...")

# --- LOAD MODEL ---
# `pipeline` stays None on failure; generate_lime() checks for that and
# returns a diagnostic image instead of crashing the app.
pipeline = None
try:
    # Load the pipeline directly from your Hub repo
    # (downloads/caches weights on first run — may take a while).
    pipeline = DDPMPipeline.from_pretrained(MODEL_ID)
    pipeline.to(DEVICE)
    print("βœ… Lime Status: ONLINE")
except Exception as e:
    # Broad catch is deliberate at startup: network errors, missing repo,
    # OOM on .to(DEVICE) — the app should still launch and report the fault.
    print(f"❌ CRITICAL FAILURE: Could not load the Lime. Error: {e}")
    pipeline = None

def create_error_image(message):
    """Generate a fallback image containing the error message to debug API issues.

    Ensures the frontend always receives a displayable image (instead of a
    broken link / null response) when the model fails to load or infer.

    Args:
        message: Short error code/text to render onto the image.

    Returns:
        PIL.Image.Image: a dark 512x512 RGB image with the message drawn on it
        (or a plain dark image if text drawing itself fails).
    """
    img = Image.new('RGB', (512, 512), color=(20, 0, 0))
    d = ImageDraw.Draw(img)
    try:
        # Basic text drawing if font loading fails
        d.text((20, 250), f"SYSTEM FAILURE:\n{message}", fill=(255, 50, 50))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; drawing remains best-effort.
        pass
    return img

def generate_lime():
    """Run one unconditional sampling pass and return the generated image.

    Falls back to a diagnostic image (via create_error_image) when the
    model failed to load or inference raises, so the frontend always
    has something displayable.
    """
    if pipeline is None:
        # Model never came online; surface a visible diagnostic rather
        # than a broken link/null response.
        return create_error_image("MODEL_LOAD_FAIL")

    print("πŸ‹ Generating new specimen...")
    try:
        # Fewer denoising steps on CPU keeps latency tolerable.
        num_steps = 50 if DEVICE == "cpu" else 100
        result = pipeline(num_inference_steps=num_steps)
        return result.images[0]
    except Exception as exc:
        print(f"Generation Error: {exc}")
        return create_error_image("INFERENCE_ERR")

# --- CUSTOM CSS ---
# Injected into the Gradio page via gr.Blocks(css=...): dark terminal-style
# theme, lime-green accent button (.lime-btn), muted footer text.
custom_css = """
body { background-color: #0d0d0d; color: #e0e0e0; font-family: 'Courier New', monospace; }
.gradio-container { max-width: 700px !important; margin-top: 40px !important; }
h1 { color: #ff5722; text-align: center; font-weight: bold; letter-spacing: 2px; }
.lime-btn { background-color: #32CD32 !important; color: #000 !important; font-weight: bold; border: 1px solid #32CD32; }
.lime-btn:hover { box-shadow: 0 0 15px #32CD32; }
.footer { text-align: center; margin-top: 20px; font-size: 0.8em; color: #666; }
"""

# --- THE UI ---
# Single-column layout: output image on top, trigger button below, footer last.
with gr.Blocks(css=custom_css, title="Stable-Lime v1.1") as demo:
    gr.HTML("<h1>> STABLE-LIME v1.1 <</h1>")
    
    with gr.Row():
        with gr.Column():
            # KEY CHANGE: type="filepath" ensures the API receives a string path
            # instead of a complex object or base64 string.
            lime_output = gr.Image(
                label="Generated Artifact", 
                type="filepath", 
                elem_id="lime-out"
            )
            
            generate_btn = gr.Button("INITIALIZE GENERATION", elem_classes="lime-btn")
    
    gr.HTML("<div class='footer'>Running on Unconditional U-Net Architecture | Powered by Limes</div>")

    # Wire up the button
    # No inputs: generate_lime takes no arguments; its PIL return value is
    # rendered into lime_output by Gradio.
    generate_btn.click(
        fn=generate_lime,
        inputs=None,
        outputs=[lime_output]
    )

# Launch the app
# Guarded so importing this module (e.g. by a Space runner) doesn't auto-launch.
if __name__ == "__main__":
    demo.launch()