ford442 commited on
Commit
d1930b9
·
verified ·
1 Parent(s): 85678a9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +725 -0
app.py ADDED
@@ -0,0 +1,725 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
#import spaces
#import subprocess
import os
#subprocess.run(['sh', './torch.sh'])

# Process-level tuning knobs. These must be set BEFORE torch / huggingface_hub
# are imported, which is why they sit at the very top of the file.
#
# Fix: the original used os.putenv() for some of these. os.putenv() changes the
# C-level environment only and does NOT update os.environ, so any library that
# reads os.environ (rather than calling getenv() in native code) would never see
# the value. Assigning through os.environ updates both (Python docs, os module).
os.environ['PYTORCH_NVML_BASED_CUDA_CHECK'] = '1'
os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'
alloc_conf_parts = [
    'expandable_segments:True',
    'pinned_use_background_threads:True',  # Specific to pinned memory.
]
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ','.join(alloc_conf_parts)
os.environ["SAFETENSORS_FAST_GPU"] = "1"
os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'
21
+
22
+ import gradio as gr
23
+ import numpy as np
24
+ from PIL import Image
25
+
26
+ DESCRIPTIONXX = """
27
+ ## ⚡⚡⚡⚡ REALVISXL V5.0 BF16 (Tester C) ⚡⚡⚡⚡
28
+ """
29
+
30
+ examples = [
31
+ "Many apples splashed with drops of water within a fancy bowl 4k, hdr --v 6.0 --style raw",
32
+ "A profile photo of a dog, brown background, shot on Leica M6 --ar 128:85 --v 6.0 --style raw",
33
+ ]
34
+
35
+ MODEL_OPTIONS = {
36
+ "REALVISXL V5.0 BF16": "ford442/RealVisXL_V5.0_BF16",
37
+ }
38
+
39
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
40
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
41
+
42
+ style_list = [
43
+ {
44
+ "name": "3840 x 2160",
45
+ "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
46
+ "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
47
+ },
48
+ {
49
+ "name": "2560 x 1440",
50
+ "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
51
+ "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
52
+ },
53
+ {
54
+ "name": "HD+",
55
+ "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
56
+ "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
57
+ },
58
+ {
59
+ "name": "Style Zero",
60
+ "prompt": "{prompt}",
61
+ "negative_prompt": "",
62
+ },
63
+ ]
64
+
65
+ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
66
+ DEFAULT_STYLE_NAME = "Style Zero"
67
+ STYLE_NAMES = list(styles.keys())
68
+
69
+ MAX_SEED = np.iinfo(np.int32).max
70
+
71
import os

import torch
import paramiko
import socket
import threading # NEW IMPORT
import queue # NEW IMPORT

# Start from the strictest numeric settings: TF32 and reduced-precision
# reductions disabled, cudnn autotuning off, highest fp32 matmul precision.
# scheduler_swap_callback (defined below) deliberately relaxes some of these
# for the middle portion of a diffusion run and restores them near the end.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
torch.backends.cuda.preferred_blas_library="cublas"
torch.backends.cuda.preferred_linalg_library="cusolver"
torch.set_float32_matmul_precision("highest")

# Credentials and upload locations all come from the environment
# (HF Space secrets). Any of these may be None if the secret is not set.
HF_TOKEN = os.getenv("HF_TOKEN")
FTP_HOST = os.getenv("FTP_HOST")
FTP_USER = os.getenv("FTP_USER")
FTP_PASS = os.getenv("FTP_PASS")
FTP_DIR = os.getenv("FTP_DIR")
FTP_HOST_FALLBACK = os.getenv("FTP_HOST_FALLBACK")
FTP_DIR_FALLBACK = os.getenv("FTP_DIR_FALLBACK")
96
+
97
def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
    """Diffusers step-end callback that toggles global torch numeric modes.

    At 10% of the run it relaxes precision (TF32 on, "high" matmul precision,
    cublasLt) to speed up the bulk of the denoising steps; at 90% it restores
    the strict settings so the final steps run at full precision.
    Returns callback_kwargs unchanged.
    """
    relax_at = int(pipeline.num_timesteps * 0.1)
    restore_at = int(pipeline.num_timesteps * 0.9)

    if step_index == relax_at:
        print("-- swapping torch modes --")
        torch.set_float32_matmul_precision("high")
        torch.backends.cudnn.allow_tf32 = True
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.deterministic = True
        torch.backends.cuda.preferred_blas_library = "cublaslt"

    if step_index == restore_at:
        torch.backends.cuda.preferred_blas_library = "cublas"
        torch.backends.cudnn.allow_tf32 = False
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.set_float32_matmul_precision("highest")
        torch.backends.cudnn.deterministic = False
        print("-- swapping torch modes --")

    return callback_kwargs
133
+
134
+
135
# --- WORKER FUNCTION FOR THREADING ---
# Each connection-race thread runs this function against one host; the first
# thread to authenticate wins by placing its transport on the shared queue.
def connect_worker(host, result_queue):
    """Tries to connect to a single host and puts the successful transport object into the queue."""
    transport = None
    try:
        transport = paramiko.Transport((host, 22))
        transport.start_client(timeout=5)  # 5-second handshake timeout
        transport.auth_password(username=FTP_USER, password=FTP_PASS)
    except (paramiko.SSHException, socket.timeout, EOFError) as e:
        # Expected failure mode (slow host, auth issue, dropped handshake).
        print(f"ℹ️ Connection to {host} failed or was too slow: {e}")
        if transport:
            transport.close()
    except Exception as e:
        # Anything else: report it and make sure the half-open transport dies.
        print(f"❌ Unexpected error connecting to {host}: {e}")
        if transport:
            transport.close()
    else:
        # Authenticated successfully — hand the live transport to the caller.
        print(f"✅ Connection to {host} succeeded first.")
        result_queue.put(transport)
162
+
163
def upload_to_ftp(filename):
    """
    Upload *filename* over SFTP, racing the primary and fallback hosts.

    Both hosts are dialed in parallel (see connect_worker); the first transport
    to authenticate wins, and the destination directory is chosen to match the
    winning host (FTP_DIR for the primary, FTP_DIR_FALLBACK for the fallback,
    defaulting back to FTP_DIR when no fallback directory is configured).

    Fixes over the original:
      * Hosts whose env vars are unset are skipped, so a missing FTP_HOST no
        longer crashes inside paramiko.Transport((None, 22)).
      * Bails out early (with a message) when no host is configured at all.
    """
    hosts = [h for h in (FTP_HOST, FTP_HOST_FALLBACK) if h]
    if not hosts:
        print("❌ Critical Error: no FTP host is configured; skipping upload.")
        return

    result_queue = queue.Queue()
    threads = []

    print(f"--> Racing connections to {hosts} for uploading (unknown)...")

    for host in hosts:
        thread = threading.Thread(target=connect_worker, args=(host, result_queue))
        thread.daemon = True  # never keep the process alive for a slow loser
        thread.start()
        threads.append(thread)

    try:
        # Wait up to 7s for the first successful transport
        # (each worker's own handshake timeout is 5s).
        winning_transport = result_queue.get(timeout=7)

        # Determine which host won so we can pick its matching directory.
        # NOTE(review): getpeername() returns an IP address; if FTP_HOST is a
        # DNS name this comparison falls through to the fallback directory —
        # confirm FTP_HOST is configured as an IP.
        winning_host = winning_transport.getpeername()[0]
        if winning_host == FTP_HOST:
            destination_directory = FTP_DIR
        else:
            destination_directory = FTP_DIR_FALLBACK if FTP_DIR_FALLBACK else FTP_DIR

        print(f"--> Proceeding with upload to {winning_host} in directory {destination_directory}...")

        # Build the remote path from the selected directory and upload.
        sftp = paramiko.SFTPClient.from_transport(winning_transport)
        destination_path = os.path.join(destination_directory, os.path.basename(filename))
        sftp.put(filename, destination_path)

        print(f"✅ Successfully uploaded (unknown).")

        sftp.close()
        winning_transport.close()

    except queue.Empty:
        print("❌ Critical Error: Neither FTP host responded in time.")
    except Exception as e:
        print(f"❌ An unexpected error occurred during SFTP operation: {e}")
213
+
214
+
215
def upload_to_ftp_old(filename):
    """Legacy single-host SFTP upload (superseded by upload_to_ftp).

    Connects directly to FTP_HOST:22, uploads *filename* to FTP_DIR + filename
    (plain string concatenation, as the original did), and swallows any error
    after printing it.
    """
    try:
        remote_path = FTP_DIR + filename
        transport = paramiko.Transport((FTP_HOST, 22))
        transport.connect(username=FTP_USER, password=FTP_PASS)
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp.put(filename, remote_path)
        sftp.close()
        transport.close()
        print(f"Uploaded (unknown) to FTP server")
    except Exception as e:
        # Best-effort: never let a failed upload break image generation.
        print(f"FTP upload error: {e}")
227
+
228
def uploadNote(prompt, num_inference_steps, guidance_scale, timestamp):
    """Write a small run-metadata text file and return its filename.

    The file (rv_C_<timestamp>.txt) records the prompt, step count, guidance
    scale and a fixed description of this Space's setup; the caller then ships
    it to the FTP server alongside the generated image.
    """
    note_path = f'rv_C_{timestamp}.txt'
    body = (
        f"Realvis 5.0 (Tester C) \n"
        f"Date/time: {timestamp} \n"
        f"Prompt: {prompt} \n"
        f"Steps: {num_inference_steps} \n"
        f"Guidance Scale: {guidance_scale} \n"
        f"SPACE SETUP: \n"
        f"Model VAE: sdxl-vae-bf16\n"
        f"To cuda and bfloat \n"
    )
    with open(note_path, "w") as f:
        f.write(body)
    return note_path
240
+
241
# Second import batch: heavyweight ML dependencies are pulled in only after
# the env vars / torch flags above are configured.
import spaces
import torch.nn.functional as F
from sageattention import sageattn
import random
import uuid
import gradio as gr
import numpy as np
from PIL import Image

#from accelerate import Accelerator

#import diffusers
from diffusers import AutoencoderKL, StableDiffusionXLPipeline
from diffusers import EulerAncestralDiscreteScheduler
#from typing import Tuple
#import paramiko
import datetime
#import cyper
from image_gen_aux import UpscaleWithModel
#import torch
import time
import gc

# Re-declared here (identical to the earlier definition): 2**31 - 1.
MAX_SEED = np.iinfo(np.int32).max

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#accelerator = Accelerator(mixed_precision="bf16") # Example

# 4x super-resolution model, loaded eagerly at import time onto the GPU.
# NOTE(review): pinned to cuda:0 unconditionally — this line will fail on a
# CPU-only machine even though `device` above falls back to CPU; confirm intended.
upscaler = UpscaleWithModel.from_pretrained("Kim2091/ClearRealityV1").to(torch.device("cuda:0"))
271
+
272
def load_and_prepare_model():
    """Build the RealVisXL V5.0 SDXL pipeline used by every generate_* handler.

    Loads the bf16 checkpoint from the Hub, swaps in the full-precision
    stabilityai/sdxl-vae (loaded on CPU in fp32 and attached after the pipe is
    moved to cuda/bf16), disables the watermarker and safety checker, and
    returns the ready pipeline. Requires network access and HF_TOKEN.
    """
    #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1 ,use_karras_sigmas=True)
    # Full SDXL VAE kept in fp32 on CPU (no .to() applied below) — presumably
    # for decode quality; TODO confirm the fp32/CPU VAE is intentional.
    vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
    #vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=False).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
    #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
    #txt_1 = CLIPTextModel.from_pretrained(device_map??)
    #txt_2 = CLIPTextModel.from_pretrained(vae too?)
    #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
    pipe = StableDiffusionXLPipeline.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16',
        use_safetensors=True,
        add_watermarker=False,
        token = HF_TOKEN,
    )
    #pipe.scheduler = sched
    #pipe.vae.do_resize = False
    pipe.vae.do_convert_rgb = True
    pipe.vae.set_default_attn_processor()
    print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
    # Disable the invisible watermark and the safety checker.
    pipe.watermark=None
    pipe.safety_checker=None
    # Move the pipeline to GPU/bf16 first, then attach the replacement VAE
    # (which therefore stays in its original precision/device).
    pipe.to(torch.device('cuda:0'), torch.bfloat16)
    pipe.vae = vaeXL #.to(torch.bfloat16)
    #pipe.to(torch.bfloat16)
    #pipe.to(accelerator.device)
    return pipe
298
+
299
# Leftover experiments with the hidet / torch.compile backends — all disabled.
#hidet.option.parallel_build(False)
#hidet.option.parallel_tune(2,2.0)
#torch._dynamo.config.suppress_errors = True
#torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)

# more search
#hidet.torch.dynamo_config.search_space(0)
#hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
# hidet.option.cache_dir("local_cache")
# automatically transform the model to use float16 data type
#hidet.torch.dynamo_config.use_fp16(True)
# use float16 data type as the accumulate data type in operators with reduction
#hidet.torch.dynamo_config.use_fp16_reduction(True)
# use tensorcore
#hidet.torch.dynamo_config.use_tensor_core()
#hidet.torch.dynamo_config.steal_weights(False)

# Preload and compile both models

# Module-level singleton pipeline shared by all three generate_* handlers.
pipe = load_and_prepare_model()

# Secondary negative prompt passed as `negative_prompt_2` to every generation
# (SDXL feeds it to the second text encoder).
neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
320
+
321
@spaces.GPU(duration=40)
def generate_30(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    style_selection: str = "",
    width: int = 768,
    height: int = 768,
    guidance_scale: float = 4,
    num_inference_steps: int = 125,
    sage: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True)
):
    """Generate one image on a ~40s GPU budget.

    Runs the shared SDXL pipe with a fresh random seed, saves the PNG, 4x
    upscales then quarter-downscales it, SFTP-uploads the metadata note and
    both images, and returns a one-element list holding a uuid-named symlink
    to the saved PNG (for the Gradio gallery).

    NOTE(review): style_selection and use_negative_prompt are accepted but
    never read here — confirm whether styles are meant to be applied.
    """
    # Optionally monkeypatch torch's SDPA with SageAttention.
    if sage==True:
        F.scaled_dot_product_attention = sageattn
    # NOTE(review): this branch is a no-op — it reassigns the attribute to
    # itself and cannot restore the original kernel once a previous call has
    # installed sageattn; confirm intended.
    if sage==False:
        F.scaled_dot_product_attention = F.scaled_dot_product_attention
    # Fresh random seed per call, applied to every RNG in sight.
    seed = random.randint(0, MAX_SEED)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    generator = torch.Generator(device='cpu').manual_seed(seed)
    options = {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "negative_prompt_2": [neg_prompt_2],
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
       # "callback_on_step_end": scheduler_swap_callback,
    }
    if use_resolution_binning:
        options["use_resolution_binning"] = True
    images = []  # NOTE(review): never used — leftover from a batching loop.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    # Ship the run metadata before the (slow) generation starts.
    filename = uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
    upload_to_ftp(filename)
    batch_options = options.copy()
    rv_image = pipe(**batch_options).images[0]
    sd_image_path = f"rv_C_{timestamp}.png"
    # Lossless PNG save (no palette optimization, no zlib compression).
    rv_image.save(sd_image_path,optimize=False,compress_level=0)
    upload_to_ftp(sd_image_path)
    torch.set_float32_matmul_precision("medium")
    with torch.no_grad():
        # Tiled 4x super-resolution of the generated image.
        upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
    # Downscale the 4x result back to the original size with Lanczos.
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale_path = f"rv50_upscale_{timestamp}.png"
    downscale1.save(downscale_path,optimize=False,compress_level=0)
    upload_to_ftp(downscale_path)
    # Return a uuid-named symlink so each gallery entry gets a unique URL.
    unique_name = str(uuid.uuid4()) + ".png"
    os.symlink(sd_image_path, unique_name)
    return [unique_name]
378
+
379
@spaces.GPU(duration=70)
def generate_60(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    style_selection: str = "",
    width: int = 768,
    height: int = 768,
    guidance_scale: float = 4,
    num_inference_steps: int = 125,
    sage: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True)
):
    """Generate one image on a ~70s GPU budget.

    Identical to generate_30 apart from the @spaces.GPU duration: seeds the
    RNGs, runs the shared SDXL pipe, saves/uploads the PNG and its
    upscaled-then-downscaled variant, and returns [uuid-symlink-to-png].
    style_selection and use_negative_prompt are currently unused.
    """
    # Optionally monkeypatch torch's SDPA with SageAttention; the False branch
    # is a self-assignment no-op (see note in generate_30's counterpart code).
    if sage==True:
        F.scaled_dot_product_attention = sageattn
    if sage==False:
        F.scaled_dot_product_attention = F.scaled_dot_product_attention
    # Fresh random seed per call, applied to every RNG in sight.
    seed = random.randint(0, MAX_SEED)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    generator = torch.Generator(device='cpu').manual_seed(seed)
    options = {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "negative_prompt_2": [neg_prompt_2],
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
       # "callback_on_step_end": scheduler_swap_callback,
    }
    if use_resolution_binning:
        options["use_resolution_binning"] = True
    images = []  # unused leftover
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    # Ship the run metadata before the (slow) generation starts.
    filename = uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
    upload_to_ftp(filename)
    batch_options = options.copy()
    rv_image = pipe(**batch_options).images[0]
    sd_image_path = f"rv_C_{timestamp}.png"
    rv_image.save(sd_image_path,optimize=False,compress_level=0)
    upload_to_ftp(sd_image_path)
    torch.set_float32_matmul_precision("medium")
    with torch.no_grad():
        # Tiled 4x super-resolution, then Lanczos back down to original size.
        upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale_path = f"rv50_upscale_{timestamp}.png"
    downscale1.save(downscale_path,optimize=False,compress_level=0)
    upload_to_ftp(downscale_path)
    # Unique symlink so each gallery entry gets its own URL.
    unique_name = str(uuid.uuid4()) + ".png"
    os.symlink(sd_image_path, unique_name)
    return [unique_name]
436
+
437
@spaces.GPU(duration=100)
def generate_90(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    style_selection: str = "",
    width: int = 768,
    height: int = 768,
    guidance_scale: float = 4,
    num_inference_steps: int = 125,
    sage: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True)
):
    """Generate one image on a ~100s GPU budget.

    Identical to generate_30 apart from the @spaces.GPU duration: seeds the
    RNGs, runs the shared SDXL pipe, saves/uploads the PNG and its
    upscaled-then-downscaled variant, and returns [uuid-symlink-to-png].
    style_selection and use_negative_prompt are currently unused.
    """
    # Optionally monkeypatch torch's SDPA with SageAttention; the False branch
    # is a self-assignment no-op and cannot undo an earlier patch.
    if sage==True:
        F.scaled_dot_product_attention = sageattn
    if sage==False:
        F.scaled_dot_product_attention = F.scaled_dot_product_attention
    # Fresh random seed per call, applied to every RNG in sight.
    seed = random.randint(0, MAX_SEED)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    generator = torch.Generator(device='cpu').manual_seed(seed)
    options = {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "negative_prompt_2": [neg_prompt_2],
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
       # "callback_on_step_end": scheduler_swap_callback,
    }
    if use_resolution_binning:
        options["use_resolution_binning"] = True
    images = []  # unused leftover
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    # Ship the run metadata before the (slow) generation starts.
    filename = uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
    upload_to_ftp(filename)
    batch_options = options.copy()
    rv_image = pipe(**batch_options).images[0]
    sd_image_path = f"rv_C_{timestamp}.png"
    rv_image.save(sd_image_path,optimize=False,compress_level=0)
    upload_to_ftp(sd_image_path)
    torch.set_float32_matmul_precision("medium")
    with torch.no_grad():
        # Tiled 4x super-resolution, then Lanczos back down to original size.
        upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale_path = f"rv50_upscale_{timestamp}.png"
    downscale1.save(downscale_path,optimize=False,compress_level=0)
    upload_to_ftp(downscale_path)
    # Unique symlink so each gallery entry gets its own URL.
    unique_name = str(uuid.uuid4()) + ".png"
    os.symlink(sd_image_path, unique_name)
    return [unique_name]
494
+
495
# NOTE(review): removed the stray fragment
#   infer_types=True, cdivision=True, language_level=3))
# It was the orphaned tail of a deleted cyper.compile(...) call (cf. the
# commented-out "#import cyper" above) and, as written, was a top-level
# SyntaxError that prevented this module from importing at all.
496
+
497
def load_predefined_images1():
    """Return the static showcase image paths for the gallery, in display order."""
    return [f"assets/{i}.png" for i in (7, 8, 9, 1, 2, 3, 4, 5, 6)]
510
+
511
# Custom CSS injected into gr.Blocks: centers the layout, hides the Gradio
# footer, and sets a green page background.
css = '''
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
h1{text-align:center}
footer {
    visibility: hidden
}
body {
    background-color: green;
}
'''
524
+
525
# --- Gradio UI definition ---
# Layout: prompt row with three run buttons (differing only in GPU budget),
# a result gallery, style/negative-prompt/size/guidance controls, examples,
# and a static showcase gallery with informational markdown.
with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
    gr.Markdown(DESCRIPTIONXX)
    with gr.Row():
        prompt = gr.Text(
            label="Prompt",
            show_label=False,
            max_lines=1,
            placeholder="Enter your prompt",
            container=False,
        )
        run_button_30 = gr.Button("Run 30 Seconds", scale=0)
        run_button_60 = gr.Button("Run 60 Seconds", scale=0)
        run_button_90 = gr.Button("Run 90 Seconds", scale=0)
    result = gr.Gallery(label="Result", columns=1, show_label=False)

    with gr.Row():
        # Quality style selector; NOTE(review): its value is passed to the
        # generate_* handlers but never applied there — confirm intended.
        style_selection = gr.Radio(
            show_label=True,
            container=True,
            interactive=True,
            choices=STYLE_NAMES,
            value=DEFAULT_STYLE_NAME,
            label="Quality Style",
        )
    with gr.Row():
        with gr.Column(scale=1):
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=5,
                lines=4,
                placeholder="Enter a negative prompt",
                value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
                visible=True,
            )
    with gr.Row():
        width = gr.Slider(
            label="Width",
            minimum=448,
            maximum=MAX_IMAGE_SIZE,
            step=64,
            value=768,
        )
        height = gr.Slider(
            label="Height",
            minimum=448,
            maximum=MAX_IMAGE_SIZE,
            step=64,
            value=768,
        )
    with gr.Row():
        guidance_scale = gr.Slider(
            label="Guidance Scale",
            minimum=0.1,
            maximum=30,
            step=0.1,
            value=3.8,
        )
        num_inference_steps = gr.Slider(
            label="Number of inference steps",
            minimum=10,
            maximum=1000,
            step=10,
            value=180,
        )
    # Toggle for monkeypatching SDPA with SageAttention inside the handlers.
    options = [True, False]
    sage = gr.Radio(
        show_label=True,
        container=True,
        interactive=True,
        choices=options,
        value=False,
        label="Use SageAttention: ",
    )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        cache_examples=False
    )

    # Show/hide the negative-prompt textbox together with its checkbox.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )

    # Three identical wirings; only the target handler (GPU budget) differs.
    gr.on(
        triggers=[
            run_button_30.click,
        ],
       # api_name="generate", # Add this line
        fn=generate_30,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            style_selection,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            sage,
        ],
        outputs=[result],
    )

    gr.on(
        triggers=[
            run_button_60.click,
        ],
       # api_name="generate", # Add this line
        fn=generate_60,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            style_selection,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            sage,
        ],
        outputs=[result],
    )

    gr.on(
        triggers=[
            run_button_90.click,
        ],
       # api_name="generate", # Add this line
        fn=generate_90,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            style_selection,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            sage,
        ],
        outputs=[result],
    )

    gr.Markdown("### REALVISXL V5.0")
    predefined_gallery = gr.Gallery(label="REALVISXL V5.0", columns=3, show_label=False, value=load_predefined_images1())

    #gr.Markdown("### LIGHTNING V5.0")
    #predefined_gallery = gr.Gallery(label="LIGHTNING V5.0", columns=3, show_label=False, value=load_predefined_images())

    gr.Markdown(
        """
        <div style="text-align: justify;">
        ⚡Models used in the playground <a href="https://huggingface.co/SG161222/RealVisXL_V5.0">[REALVISXL V5.0]</a>, <a href="https://huggingface.co/SG161222/RealVisXL_V5.0_Lightning">[REALVISXL V5.0 LIGHTNING]</a> for image generation. Stable Diffusion XL piped (SDXL) model HF. This is the demo space for generating images using the Stable Diffusion XL models, with multiple different variants available.
        </div>
        """)

    gr.Markdown(
        """
        <div style="text-align: justify;">
        ⚡This is the demo space for generating images using Stable Diffusion XL with quality styles, different models, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.
        <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.
        </div>
        """)

    gr.Markdown(
        """
        <div style="text-align: justify;">
        ⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
        </div>
        """)
701
+
702
def text_generation(input_text, seed):
    """Placeholder handler for the Text Generation tab.

    Ignores both arguments and returns a fixed banner string; kept so the
    second tab of the TabbedInterface has a callable to bind to.
    """
    return "Text Generator Application by ecarbo"
705
+
706
+ title = "Text Generator Demo GPT-Neo"
707
+ description = "Text Generator Application by ecarbo"
708
+
709
+ if __name__ == "__main__":
710
+
711
+ demo_interface = demo.queue(max_size=50) # Remove .launch() here
712
+
713
+ text_gen_interface = gr.Interface(
714
+ fn=text_generation,
715
+ inputs=[
716
+ gr.Textbox(lines=1, label="Expand the following prompt to be more detailed and descriptive for image generation: "),
717
+ gr.Number(value=10, label="Enter seed number")
718
+ ],
719
+ outputs=gr.Textbox(label="Text Generated"),
720
+ title=title,
721
+ description=description,
722
+ )
723
+
724
+ combined_interface = gr.TabbedInterface([demo_interface, text_gen_interface], ["Image Generation", "Text Generation"])
725
+ combined_interface.launch(show_api=False)