Sugam7 commited on
Commit
7fc7d4e
·
verified ·
1 Parent(s): 32806b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +132 -29
app.py CHANGED
@@ -3,8 +3,10 @@ import numpy as np
3
  import random
4
  import spaces
5
  import torch
6
- from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
7
- from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
 
 
8
 
9
  # Initialize model and settings
10
  dtype = torch.bfloat16
@@ -15,36 +17,85 @@ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_d
15
  MAX_SEED = np.iinfo(np.int32).max
16
  MAX_IMAGE_SIZE = 2048
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  @spaces.GPU(duration=190)
19
- def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, output_format="png", progress=gr.Progress(track_tqdm=True)):
 
 
 
20
  if randomize_seed:
21
  seed = random.randint(0, MAX_SEED)
22
- generator = torch.Generator(device=device).manual_seed(seed)
23
- image = pipe(
24
- prompt=prompt,
25
- width=width,
26
- height=height,
27
- num_inference_steps=num_inference_steps,
28
- generator=generator,
29
- guidance_scale=guidance_scale
30
- ).images[0]
31
-
32
- # Convert image to desired format
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  if output_format.lower() != "png":
34
- image = image.convert(output_format.upper())
35
 
36
- return image, seed
37
 
 
38
  examples = [
39
- "a tiny astronaut hatching from an egg on the moon",
40
- "a cat holding a sign that says hello world",
41
- "an anime illustration of a wiener schnitzel",
42
  ]
43
 
 
44
  css = """
45
  #col-container {
46
  margin: 0 auto;
47
- max-width: 800px;
48
  padding: 20px;
49
  background-color: #f9f9f9;
50
  border-radius: 10px;
@@ -75,9 +126,9 @@ with gr.Blocks(css=css) as demo:
75
 
76
  # Title and Description
77
  gr.Markdown(f"""
78
- <h1 id="title">FLUX.1 [dev] - Advanced Text-to-Image Generator</h1>
79
  <p style="text-align:center; color:#555;">
80
- Experience the power of a 12B param rectified flow transformer. Customize your prompts and settings to generate unique images every time.
81
  </p>
82
  """)
83
 
@@ -90,12 +141,21 @@ with gr.Blocks(css=css) as demo:
90
  container=False,
91
  interactive=True
92
  )
93
-
 
 
 
 
 
 
 
 
 
94
  run_button = gr.Button("Generate", elem_id="generate-button")
95
 
96
- # Output image and settings
97
  with gr.Row(elem_id="output-container"):
98
- result = gr.Image(label="Generated Image", show_label=False).style(height=400)
99
  output_format = gr.Radio(
100
  label="Output Format",
101
  choices=["png", "jpeg", "bmp"],
@@ -151,21 +211,64 @@ with gr.Blocks(css=css) as demo:
151
  interactive=True
152
  )
153
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  # Interactive Examples
155
  gr.Examples(
156
  examples=examples,
157
  fn=infer,
158
- inputs=[prompt],
159
- outputs=[result, seed],
160
  cache_examples="lazy",
161
  label="Try these examples:"
162
  )
163
 
 
 
 
 
 
 
 
 
 
164
  # Link button to trigger inference
165
  run_button.click(
166
  fn=infer,
167
- inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, output_format],
168
- outputs=[result, seed]
169
  )
170
 
171
  demo.launch()
 
3
  import random
4
  import spaces
5
  import torch
6
+ from diffusers import DiffusionPipeline
7
+ from PIL import Image, ImageEnhance
8
+ import io
9
+ import zipfile
10
 
11
  # Initialize model and settings
12
  dtype = torch.bfloat16
 
17
  MAX_SEED = np.iinfo(np.int32).max
18
  MAX_IMAGE_SIZE = 2048
19
 
20
# Helper function to apply image filters
def apply_filters(image, brightness, contrast, saturation):
    """Apply brightness, contrast and saturation adjustments to a PIL image.

    Each factor of 1.0 leaves that aspect of the image unchanged; the three
    enhancers are applied in brightness -> contrast -> saturation order.
    Returns the adjusted image.
    """
    adjustments = (
        (ImageEnhance.Brightness, brightness),
        (ImageEnhance.Contrast, contrast),
        (ImageEnhance.Color, saturation),
    )
    for enhancer_cls, factor in adjustments:
        image = enhancer_cls(image).enhance(factor)
    return image
32
+
33
# Helper function to create a ZIP file of images
def create_zip(images):
    """Bundle the given images into an in-memory ZIP archive.

    Each image is serialized as PNG and stored as image_<n>.png (1-based).
    Returns the archive contents as raw bytes.
    """
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "a", zipfile.ZIP_DEFLATED) as archive:
        for index, picture in enumerate(images, start=1):
            png_bytes = io.BytesIO()
            picture.save(png_bytes, format="PNG")
            archive.writestr(f"image_{index}.png", png_bytes.getvalue())
    return zip_buffer.getvalue()
42
+
43
@spaces.GPU(duration=190)
def infer(prompt, num_outputs=1, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, output_format="png", brightness=1.0, contrast=1.0, saturation=1.0, style="None", progress=gr.Progress(track_tqdm=True)):
    """Generate `num_outputs` images from `prompt` with the FLUX pipeline.

    Each variation uses a consecutive seed (starting from `seed`, or a random
    one when `randomize_seed` is set). Brightness/contrast/saturation sliders
    and an optional preset style are applied as post-processing.

    Returns:
        (images, seeds, zip_path): the generated PIL images, the seed used for
        each one, and the filesystem path of a ZIP archive containing them
        (a path, because gr.File outputs expect a file path, not raw bytes).
    """
    import tempfile  # local import: only needed to persist the ZIP for gr.File

    images = []
    seeds = []

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    for _ in range(num_outputs):
        generator = torch.Generator(device=device).manual_seed(seed)
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
        ).images[0]

        # Post-process with the user-controlled filter sliders.
        image = apply_filters(image, brightness, contrast, saturation)

        # Preset styles ("Vivid Colors" is offered in the UI but has no
        # effect yet — kept as a no-op to match the existing behavior).
        if style == "Black & White":
            image = image.convert("L").convert("RGB")
        elif style == "Sepia":
            # BUG FIX: the previous code dotted the RGB channels with a single
            # 3-vector, collapsing the image to one grayscale-like channel
            # (mode "L"). A sepia tone needs the full 3x3 transform so the
            # result stays a toned RGB image.
            rgb = np.asarray(image.convert("RGB"), dtype=np.float64)
            sepia_matrix = np.array([
                [0.393, 0.769, 0.189],
                [0.349, 0.686, 0.168],
                [0.272, 0.534, 0.131],
            ])
            toned = np.clip(rgb @ sepia_matrix.T, 0, 255)
            image = Image.fromarray(toned.astype("uint8"))

        images.append(image)
        seeds.append(seed)
        seed += 1  # Increment seed so each variation is distinct

    if output_format.lower() != "png":
        # BUG FIX: Image.convert() takes a *mode* ("RGB", "L", ...), not a
        # file format — convert("JPEG") raises ValueError. JPEG/BMP output
        # needs RGB mode; the actual file format is applied on save.
        images = [img.convert("RGB") for img in images]

    # BUG FIX: gr.File expects a filesystem path, not raw bytes, so write the
    # ZIP archive to a temporary file and return its path.
    with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as tmp:
        tmp.write(create_zip(images))
        zip_path = tmp.name

    return images, seeds, zip_path
86
 
87
+ # Example prompts for users to try
88
  examples = [
89
+ ["a tiny astronaut hatching from an egg on the moon", 1],
90
+ ["a cat holding a sign that says hello world", 2],
91
+ ["an anime illustration of a wiener schnitzel", 3],
92
  ]
93
 
94
+ # CSS styling for modern look
95
  css = """
96
  #col-container {
97
  margin: 0 auto;
98
+ max-width: 1000px;
99
  padding: 20px;
100
  background-color: #f9f9f9;
101
  border-radius: 10px;
 
126
 
127
  # Title and Description
128
  gr.Markdown(f"""
129
+ <h1 id="title">FLUX.1 [dev] - Feature-Rich Text-to-Image Generator</h1>
130
  <p style="text-align:center; color:#555;">
131
+ Unleash your creativity with this advanced text-to-image generator. Customize prompts, generate multiple images, apply filters, and more!
132
  </p>
133
  """)
134
 
 
141
  container=False,
142
  interactive=True
143
  )
144
+
145
+ num_outputs = gr.Slider(
146
+ label="Number of Variations",
147
+ minimum=1,
148
+ maximum=10,
149
+ step=1,
150
+ value=1,
151
+ interactive=True
152
+ )
153
+
154
  run_button = gr.Button("Generate", elem_id="generate-button")
155
 
156
+ # Output image gallery and settings
157
  with gr.Row(elem_id="output-container"):
158
+ gallery = gr.Gallery(label="Generated Images", show_label=False).style(grid=[4], height="auto")
159
  output_format = gr.Radio(
160
  label="Output Format",
161
  choices=["png", "jpeg", "bmp"],
 
211
  interactive=True
212
  )
213
 
214
+ # Image filter sliders
215
+ brightness = gr.Slider(
216
+ label="Brightness",
217
+ minimum=0.5,
218
+ maximum=2.0,
219
+ step=0.1,
220
+ value=1.0,
221
+ interactive=True
222
+ )
223
+ contrast = gr.Slider(
224
+ label="Contrast",
225
+ minimum=0.5,
226
+ maximum=2.0,
227
+ step=0.1,
228
+ value=1.0,
229
+ interactive=True
230
+ )
231
+ saturation = gr.Slider(
232
+ label="Saturation",
233
+ minimum=0.5,
234
+ maximum=2.0,
235
+ step=0.1,
236
+ value=1.0,
237
+ interactive=True
238
+ )
239
+
240
+ # Preset styles
241
+ style = gr.Dropdown(
242
+ label="Preset Styles",
243
+ choices=["None", "Black & White", "Sepia", "Vivid Colors"],
244
+ value="None",
245
+ interactive=True
246
+ )
247
+
248
  # Interactive Examples
249
  gr.Examples(
250
  examples=examples,
251
  fn=infer,
252
+ inputs=[prompt, num_outputs],
253
+ outputs=[gallery, seed],
254
  cache_examples="lazy",
255
  label="Try these examples:"
256
  )
257
 
258
+ # Download all images as a ZIP file
259
+ download_button = gr.Button("Download All Images")
260
+ zip_file = gr.File(label="Download", show_label=False)
261
+ download_button.click(
262
+ fn=create_zip,
263
+ inputs=[gallery],
264
+ outputs=[zip_file]
265
+ )
266
+
267
  # Link button to trigger inference
268
  run_button.click(
269
  fn=infer,
270
+ inputs=[prompt, num_outputs, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, output_format, brightness, contrast, saturation, style],
271
+ outputs=[gallery, seed, zip_file]
272
  )
273
 
274
  demo.launch()