Kabilash10 commited on
Commit
dee565e
·
verified ·
1 Parent(s): cf061d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +139 -65
app.py CHANGED
@@ -1,83 +1,157 @@
1
import gradio as gr
import numpy as np
import random
import os  # Needed to read the HF token stored as a Space secret
from diffusers import DiffusionPipeline
import torch

# Prefer GPU when available; everything below adapts to this choice.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Hugging Face token injected via the Space's secret settings.
access_token = os.getenv('HUGGINGFACE_TOKEN')

# Model repository to load.
model_repo_id = "KingNish/Realtime-FLUX"

# Half precision only helps on GPU; CPU kernels want float32.
if torch.cuda.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32

# FIX: `use_auth_token` is deprecated in recent diffusers/huggingface_hub;
# the supported keyword is `token` (same semantics, backward-compatible).
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id, torch_dtype=torch_dtype, token=access_token
).to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024


def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
          guidance_scale, num_inference_steps,
          progress=gr.Progress(track_tqdm=True)):
    """Run one text-to-image generation with the global pipeline.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text describing what to avoid.
        seed: RNG seed; ignored when randomize_seed is True.
        randomize_seed: Draw a fresh random seed when True.
        width, height: Output resolution in pixels.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Denoising step count.
        progress: Gradio progress tracker (tqdm-hooked).

    Returns:
        (image, seed): the generated PIL image and the seed actually used,
        so the UI can display a randomized seed back to the user.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator
    ).images[0]

    return image, seed
46
 
 
47
# Sample prompts surfaced beneath the prompt box.
examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

# Center the app and cap its width.
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Text-to-Image Gradio Template")
        with gr.Row():
            prompt = gr.Text(label="Prompt", show_label=False, max_lines=1, placeholder="Enter your prompt", container=False)
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt", visible=False)
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=7.5)
            num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=25)

        gr.Examples(examples=examples, inputs=[prompt])

    # Single-trigger registration: equivalent to gr.on(run_button.click, ...)
    # but reads as the conventional listener call.
    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed],
    )

demo.queue().launch()
 
 
 
 
1
import gradio as gr
import numpy as np
import random
import spaces
import torch
import time
from diffusers import DiffusionPipeline, AutoencoderTiny
from custom_pipeline import FLUXPipelineWithIntermediateOutputs

# Constants
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 1024
DEFAULT_INFERENCE_STEPS = 1

# Device and model setup for CPU inference.
dtype = torch.float32  # float16 offers no speedup on CPU; float32 is the safe choice
pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
).to("cpu")  # Running on CPU
# Swap in the tiny VAE for much faster latent decoding.
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float32).to("cpu")
# FIX: the original called torch.cuda.empty_cache() unconditionally while its
# own comment noted it is unneeded on CPU; only touch the CUDA allocator when
# CUDA actually exists.
if torch.cuda.is_available():
    torch.cuda.empty_cache()
25
# Inference function
# FIX: the `spaces` package exposes only `spaces.GPU`; `@spaces.CPU(duration=25)`
# does not exist and raised AttributeError at import time, preventing the app
# from starting. On a CPU-only Space no decorator is needed at all.
def generate_image(prompt, seed=24, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
    """Generate one image with the global FLUX pipeline and report latency.

    Args:
        prompt: Text prompt describing the image.
        seed: RNG seed; ignored when randomize_seed is True. Accepts numeric
            strings from the gr.Number widget (hence int(float(seed))).
        width, height: Output resolution in pixels.
        randomize_seed: Draw a fresh random seed when True.
        num_inference_steps: Denoising steps (schnell works in 1-4).
        progress: Gradio progress tracker (tqdm-hooked).

    Returns:
        (img, seed, latency): the generated image, the seed actually used,
        and a human-readable latency string for the UI.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(int(float(seed)))

    start_time = time.time()

    # Only generate the last image in the sequence.
    img = pipe.generate_images(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator
    )
    latency = f"Latency: {(time.time()-start_time):.2f} seconds"
    return img, seed, latency
44
 
45
# Example prompts — fed to gr.Examples in the UI below; clicking one fills
# the prompt box (results are lazily cached by the Examples component).
examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cute white cat holding a sign that says hello world",
    "an anime illustration of Steve Jobs",
    "Create image of Modern house in minecraft style",
    "photo of a woman on the beach, shot from above. She is facing the sea, while wearing a white dress. She has long blonde hair",
    "Selfie photo of a wizard with long beard and purple robes, he is apparently in the middle of Tokyo. Probably taken from a phone.",
    "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
]
55
 
56
# --- Gradio UI ---
with gr.Blocks() as demo:
    with gr.Column(elem_id="app-container"):
        gr.Markdown("# FLUX Image Generator Demo")
        gr.Markdown("Generate stunning images in real-time with Modified Flux.Schnell pipeline.")
        #gr.Markdown("<span style='color: red;'>Note: Sometimes it stucks or stops generating images (I don't know why). In that situation just refresh the site.</span>")

        with gr.Row():
            # FIX: gr.Column's `scale` is documented as an integer and Gradio
            # warns on floats; 5:2 preserves the intended 2.5:1 ratio.
            with gr.Column(scale=5):
                result = gr.Image(label="Generated Image", show_label=False, interactive=False)
            with gr.Column(scale=2):
                prompt = gr.Text(
                    label="Prompt",
                    placeholder="Describe the image you want to generate...",
                    lines=3,
                    show_label=False,
                    container=False,
                )
                generateBtn = gr.Button("🖼️ Generate Image")
                enhanceBtn = gr.Button("🚀 Enhance Image")

        # FIX: gr.Column takes no title — in the original,
        # gr.Column("Advanced Options") bound the string to `scale`. An
        # Accordion is the titled container this was meant to be; open=True
        # keeps the controls visible like a Column would.
        with gr.Accordion("Advanced Options", open=True):
            with gr.Row():
                realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more CPU resources but creates the image in real-time.", value=False)
                latency = gr.Text(label="Latency")
            with gr.Row():
                seed = gr.Number(label="Seed", value=42)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
                num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)

        with gr.Row():
            gr.Markdown("### 🌟 Inspiration Gallery")
        with gr.Row():
            gr.Examples(
                examples=examples,
                fn=generate_image,
                inputs=[prompt],
                outputs=[result, seed, latency],
                cache_examples="lazy"
            )

    # "Enhance" re-runs generation with the current seed (randomize_seed and
    # num_inference_steps fall back to generate_image's defaults).
    enhanceBtn.click(
        fn=generate_image,
        inputs=[prompt, seed, width, height],
        outputs=[result, seed, latency],
        show_progress="full",
        queue=False,
        concurrency_limit=None
    )

    generateBtn.click(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        api_name="RealtimeFlux",
        queue=False
    )

    def update_ui(realtime_enabled):
        """Hide the manual Generate button while realtime mode is on."""
        return {
            prompt: gr.update(interactive=True),
            generateBtn: gr.update(visible=not realtime_enabled)
        }

    realtime.change(
        fn=update_ui,
        inputs=[realtime],
        outputs=[prompt, generateBtn],
        queue=False,
        concurrency_limit=None
    )

    def realtime_generation(*args):
        """Regenerate on every input change, but only when realtime is on."""
        # FIX: generate_image is a plain function returning a tuple, not a
        # generator — the original's next(generate_image(...)) raised
        # TypeError on every realtime trigger.
        if args[0]:  # If realtime is enabled
            return generate_image(*args[1:])

    prompt.submit(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        queue=False,
        concurrency_limit=None
    )

    for component in [prompt, width, height, num_inference_steps]:
        component.input(
            fn=realtime_generation,
            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
            outputs=[result, seed, latency],
            show_progress="hidden",
            trigger_mode="always_last",
            queue=False,
            concurrency_limit=None
        )

# Launch the app
demo.launch()