Mehak-Mazhar committed on
Commit
1e87a0e
·
verified ·
1 Parent(s): 24b81db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -108
app.py CHANGED
@@ -1,140 +1,73 @@
1
  # -*- coding: utf-8 -*-
2
  """
3
- Gradio Space: Text → Image using FLUX.1 (Hugging Face Inference API)
4
- Attractive interface with custom styling and footer label: "designed by Mehak Mazhar"
5
-
6
- How to use:
7
- 1. Install dependencies: pip install gradio requests pillow
8
- 2. Get a Hugging Face API token (if you want to use the hosted FLUX.1 models) and either set it as an env var HF_TOKEN or paste it into the 'HF Token' field in the UI.
9
- 3. Run: python app.py
10
-
11
- Notes: this script calls the Hugging Face Inference API for the model 'black-forest-labs/FLUX.1-schnell' by default.
12
- You can change the MODEL variable to any compatible image generation model hosted on Hugging Face or point to your own inference server.
13
  """
14
 
15
  import os
16
  import io
17
- import base64
18
- import random
19
  import requests
20
  from PIL import Image
21
  import gradio as gr
22
 
23
  # --- Configuration ---
24
- MODEL = os.environ.get("FLUX_MODEL", "black-forest-labs/FLUX.1-schnell")
25
  HF_API_URL = f"https://api-inference.huggingface.co/models/{MODEL}"
26
 
27
- # --- Helper to call Hugging Face API ---
28
- def call_hf_image_api(prompt, token, width, height, guidance_scale, steps, seed, negative_prompt=None):
29
- headers = {"Authorization": f"Bearer {token}"} if token else {}
 
 
 
30
  payload = {
31
  "inputs": prompt,
32
- "options": {"wait_for_model": True},
33
  "parameters": {
34
  "width": int(width),
35
  "height": int(height),
36
  "guidance_scale": float(guidance_scale),
37
- "num_inference_steps": int(steps),
38
- "seed": None if seed is None else int(seed),
39
- }
40
  }
41
- if negative_prompt:
42
- payload["parameters"]["negative_prompt"] = negative_prompt
43
 
44
- resp = requests.post(HF_API_URL, headers=headers, json=payload, stream=True, timeout=120)
45
- resp.raise_for_status()
 
46
 
47
- content_type = resp.headers.get("content-type", "")
48
- if "application/json" in content_type:
49
- data = resp.json()
50
- if isinstance(data, dict):
51
- for k in ("image", "images", "generated_images", "artifacts"):
52
- if k in data:
53
- imgs = data[k]
54
- if isinstance(imgs, list) and imgs:
55
- b64 = imgs[0].get("data") if isinstance(imgs[0], dict) else imgs[0]
56
- if isinstance(b64, str):
57
- return Image.open(io.BytesIO(base64.b64decode(b64)))
58
- for v in data.values():
59
- if isinstance(v, str) and v.strip().startswith("iVBOR"):
60
- return Image.open(io.BytesIO(base64.b64decode(v)))
61
- raise ValueError("Could not parse image from JSON response")
62
- else:
63
- return Image.open(io.BytesIO(resp.content))
64
 
65
  # --- Gradio UI ---
66
- css = r'''
67
- body { background: linear-gradient(135deg, #fff7e6 0%, #fffaf0 50%, #fff7fd 100%); }
68
- .gradio-container { font-family: 'Inter', system-ui, -apple-system, 'Segoe UI', Roboto, 'Helvetica Neue', Arial; }
69
- .header { display:flex; align-items:center; gap:16px; }
70
- .logo { width:64px; height:64px; border-radius:14px; box-shadow: 0 6px 18px rgba(0,0,0,0.08); }
71
- .card { background: rgba(255,255,255,0.9); border-radius:16px; padding:18px; box-shadow: 0 12px 30px rgba(0,0,0,0.06); }
72
- .footer { text-align:center; font-size:12px; color:#555; margin-top:12px; }
73
- .footer strong { color:#333; }
74
- .generator-btn { border-radius: 12px; padding:10px 18px; }
75
- '''
76
 
77
- with gr.Blocks(css=css, title="Flux TextImage — designed by Mehak Mazhar") as demo:
78
- with gr.Row(elem_id="top-row"):
79
- with gr.Column(scale=1):
80
- gr.HTML(
81
- "<div class='header'>"
82
- "<img class='logo' src='https://raw.githubusercontent.com/black-forest-labs/flux/main/logo.png' "
83
- "alt='Flux logo' onerror=\"this.style.display='none'\"> "
84
- "<div><h2 style='margin:0'>FLUX.1 Text → Image</h2>"
85
- "<p style='margin:0;color:#555;'>Generate high-quality images from text (Hugging Face inference API)</p></div>"
86
- "</div>"
87
- )
88
 
89
  with gr.Row():
90
- with gr.Column(scale=1, min_width=360):
91
- prompt = gr.Textbox(
92
- label="Prompt",
93
- placeholder="A serene mountain lake at sunset, digital painting, ultra-detailed",
94
- lines=4
95
- )
96
- negative = gr.Textbox(
97
- label="Negative prompt (optional)",
98
- placeholder="blurry, lowres, text, watermark",
99
- lines=2
100
- )
101
- hf_token = gr.Textbox(
102
- label="Hugging Face API Token (optional)",
103
- placeholder="Paste your HF token here or set HF_TOKEN env var",
104
- type="password"
105
- )
106
- with gr.Row():
107
- width = gr.Dropdown(choices=[256,384,512,768,1024], value=512, label="Width")
108
- height = gr.Dropdown(choices=[256,384,512,768,1024], value=512, label="Height")
109
- with gr.Row():
110
- steps = gr.Slider(10, 150, value=28, step=1, label="Steps")
111
- guidance = gr.Slider(1.0, 30.0, value=7.5, step=0.1, label="Guidance scale")
112
- with gr.Row():
113
- seed = gr.Number(value=None, precision=0, label="Seed (leave blank for random)")
114
- gen_btn = gr.Button("Generate", elem_classes="generator-btn")
115
- gr.Markdown("**Tip:** Use vivid, descriptive prompts. Try styles like `cinematic lighting`, `digital oil painting`, or `ultra-detailed`.")
116
-
117
- with gr.Column(scale=1, min_width=360):
118
- gallery = gr.Gallery(label="Generated images", show_label=True, elem_id="gallery").style(grid=[2], height="640px")
119
- out_log = gr.Textbox(label="Status / Debug log", lines=4, interactive=False)
120
-
121
- gr.HTML("<div class='footer'><p><strong>designed by Mehak Mazhar</strong></p></div>")
122
 
123
- def generate_image(prompt_text, negative_text, hf_token_text, width_v, height_v, steps_v, guidance_v, seed_v):
124
- token = hf_token_text.strip() or os.environ.get("HF_TOKEN")
125
- if not token:
126
- return None, "ERROR: No Hugging Face token provided. Set HF_TOKEN or paste it into the UI."
127
- try:
128
- if seed_v in (None, "", 0):
129
- seed_val = random.randint(0, 2**31 - 1)
130
- else:
131
- seed_val = int(seed_v)
132
- img = call_hf_image_api(prompt_text, token, width_v, height_v, guidance_v, steps_v, seed_val, negative_prompt=negative_text)
133
- return [img], f"OK — seed={seed_val}, model={MODEL}"
134
- except Exception as e:
135
- return None, f"API error: {e}"
136
 
137
- gen_btn.click(fn=generate_image, inputs=[prompt, negative, hf_token, width, height, steps, guidance, seed], outputs=[gallery, out_log])
 
 
 
 
138
 
139
  if __name__ == "__main__":
140
  demo.launch(server_name="0.0.0.0", share=False)
 
1
  # -*- coding: utf-8 -*-
2
  """
3
+ Gradio Space: Text → Image using Stable Diffusion (Hugging Face Inference API)
4
+ UI designed by Mehak Mazhar
 
 
 
 
 
 
 
 
5
  """
6
 
7
  import os
8
  import io
 
 
9
  import requests
10
  from PIL import Image
11
  import gradio as gr
12
 
13
# --- Configuration ---
# The model id can be overridden through the SD_MODEL environment variable;
# it defaults to Stable Diffusion v1.5 on the Hub.
MODEL = os.environ.get("SD_MODEL", "runwayml/stable-diffusion-v1-5")

# Hosted Inference API endpoint for the configured model.
HF_API_URL = "https://api-inference.huggingface.co/models/{}".format(MODEL)
17
# --- API call function ---
def generate_image(prompt, token, width, height, guidance_scale, steps):
    """Generate one image from *prompt* via the Hugging Face Inference API.

    Parameters mirror the Gradio inputs:
        prompt: text prompt sent as the model input.
        token: HF API token; when blank, falls back to the HF_TOKEN env var.
        width, height: output dimensions in pixels (coerced to int).
        guidance_scale: classifier-free guidance strength (coerced to float).
        steps: number of inference steps (coerced to int).

    Returns:
        (PIL.Image or None, status message). Never raises — all failures are
        reported through the status string so the UI always gets something
        printable.
    """
    # Strip whitespace and fall back to the environment so a pasted token
    # is optional when the Space is configured with an HF_TOKEN secret.
    token = (token or "").strip() or os.environ.get("HF_TOKEN", "")
    if not token:
        return None, "❌ Please provide a Hugging Face API token."

    headers = {"Authorization": f"Bearer {token}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "width": int(width),
            "height": int(height),
            "guidance_scale": float(guidance_scale),
            "num_inference_steps": int(steps)
        },
        # Block until the model is loaded instead of failing with a 503
        # while the Inference API cold-starts it.
        "options": {"wait_for_model": True}
    }

    try:
        response = requests.post(HF_API_URL, headers=headers, json=payload, timeout=60)
        response.raise_for_status()

        # The API may answer 200 with a JSON status/error body instead of
        # raw image bytes; surface that message rather than letting
        # Image.open fail with a cryptic "cannot identify image file".
        if "application/json" in response.headers.get("content-type", ""):
            return None, f"⚠️ Error: {response.json()}"

        image = Image.open(io.BytesIO(response.content))
        return image, " Image generated successfully!"
    except Exception as e:
        # Catch-all is deliberate: the UI contract is (image, status), so
        # network errors, timeouts and decode failures all become a status.
        return None, f"⚠️ Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
# --- Gradio UI ---
css = """
body { background-color: #fff7e6; }
h1 { color: #a0522d; font-weight: bold; }
"""

# Square resolutions supported by the hosted model; shared by both dropdowns.
_SIZES = [256, 384, 512, 768, 1024]

with gr.Blocks(css=css, title="Stable Diffusion Text-to-Image") as demo:
    gr.HTML("<h1>Stable Diffusion — designed by Mehak Mazhar</h1>")

    with gr.Row():
        # Left column: all generation inputs.
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="A futuristic city at night", lines=3)
            hf_token = gr.Textbox(label="Hugging Face API Token", placeholder="Enter your HF token", type="password")
            width = gr.Dropdown(choices=_SIZES, value=512, label="Width")
            height = gr.Dropdown(choices=_SIZES, value=512, label="Height")
            guidance = gr.Slider(1.0, 15.0, value=7.5, step=0.1, label="Guidance Scale")
            steps = gr.Slider(10, 100, value=30, step=1, label="Steps")
            generate_btn = gr.Button("Generate Image", variant="primary")

        # Right column: generated image plus a human-readable status line.
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
            status = gr.Textbox(label="Status", interactive=False)

    # Wire the button to the API helper; outputs match its (image, status) return.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, hf_token, width, height, guidance, steps],
        outputs=[output_image, status],
    )
71
 
72
  if __name__ == "__main__":
73
  demo.launch(server_name="0.0.0.0", share=False)