Kizi-Art committed on
Commit
d4a751e
·
verified ·
1 Parent(s): 7a1615c

Upload 10 files

Browse files
scripts/mainpaperspaceA1111.py ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from IPython.display import clear_output
3
+ from subprocess import call, getoutput, run
4
+ import time
5
+ import sys
6
+ import fileinput
7
+ import ipywidgets as widgets
8
+ from torch.hub import download_url_to_file
9
+ from urllib.parse import urlparse, parse_qs, unquote
10
+ import re
11
+ import requests
12
+ import six
13
+
14
+ from urllib.request import urlopen, Request
15
+ import tempfile
16
+ from tqdm import tqdm
17
+
18
+
19
+
20
def Deps(force_reinstall):
    """Install (or verify) the Paperspace Python 3.9 dependency stack.

    Fast path: when `force_reinstall` is False and the safetensors package is
    already present, only the notebooks are refreshed.  Otherwise the
    pre-built dependency archive is downloaded and unpacked over the system.
    The shell commands are order-dependent and destructive (rm -r over
    dist-packages) — do not reorder.
    """
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        # Dependencies already unpacked: refresh notebooks and env vars only.
        ntbk()
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        # Re-check safetensors: in this branch we may be here because of
        # force_reinstall; only purge the preinstalled packages when the
        # archive has never been unpacked.
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            # Remove the image's stock torch/vision/PIL/... so the archive's
            # pinned versions win.
            call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        # Relative path: cwd is /notebooks after ntbk(), so this is
        # /notebooks/cache.
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Apt packages listed in aptdeps.txt, installed straight from .deb.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Pre-built Python dependency tree, extracted over / (zstd tarball).
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        # Point the transformers cache at persistent storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        # Silence all Python warnings globally by patching warnings.py.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
61
+
62
+
63
+
64
def depsinst(url, dst):
    """Stream-download `url` to `dst` with an 'Installing dependencies' bar.

    The file is read in 8 KiB chunks.  `file_size` stays None when the server
    sends no Content-Length, in which case tqdm renders an unsized bar.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # getheaders() is the legacy http.client spelling; get_all() is the
    # email.message API used by modern urllib responses.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
        # The `with` block closes the file; the original's explicit
        # f.close() inside the context manager was redundant and removed.
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
86
+
87
+
88
+
89
def dwn(url, dst, msg):
    """Stream-download `url` to `dst`, showing `msg` as the progress-bar label.

    Same mechanics as depsinst(): 8 KiB chunks, Content-Length probed for the
    bar total (None → unsized bar).
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Legacy vs modern header-access API (http.client vs email.message).
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
        # The `with` block closes the file; the original's explicit
        # f.close() inside the context manager was redundant and removed.
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
111
+
112
+
113
+
114
def ntbk():
    """Recreate /notebooks/Latest_Notebooks and fetch the current notebook set.

    The notebook URLs come from the Notebooks.txt index, which is deleted
    after the download.  Leaves the cwd at /notebooks.
    """
    os.chdir('/notebooks')
    # Always start from an empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
126
+
127
+
128
+
129
def repo():
    """Install or update the AUTOMATIC1111 webui working tree under /notebooks/sd.

    Order matters: the pre-built sd_mrep archive seeds /notebooks/sd first,
    then the webui repo is cloned or fast-forwarded on top of it.
    """
    print('Installing/Updating the repo...')
    os.chdir('/notebooks')
    # Marker path: only extract the bundled repo archive once.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    # Discard any local edits before pulling so 'git pull' cannot conflict.
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    # Best-effort clone (stderr silenced): fails harmlessly when the assets
    # repository already exists.
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()
    done()
152
+
153
+
154
+
155
+
156
+
157
def mdls(Original_Model_Version, Path_to_MODEL, MODEL_LINK, Temporary_Storage):
    """Pick the Stable Diffusion checkpoint for the webui to load.

    Priority: an existing local path (Path_to_MODEL), then a download link
    (MODEL_LINK, dispatched on its hosting service), then one of the bundled
    original models selected by Original_Model_Version.  Temporary_Storage
    routes downloads to /models instead of the webui's model folder.

    Returns the checkpoint path, or the webui model *directory* as a
    fallback when nothing valid was selected.
    """
    import gdown

    # Fallback value: the webui's model directory.  Pre-binding it replaces
    # the original's `try: model / except:` dance, whose bare except papered
    # over the branches that never assigned `model`.
    model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion"

    src = getsrc(MODEL_LINK)

    # Best-effort symlinks to the Paperspace public datasets; output is
    # silenced because the datasets may not be mounted.
    call('ln -s /datasets/stable-diffusion-classic/SDv1.5.ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base/v2-1_512-nonema-pruned.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-v2-1/stable-diffusion-2-1/v2-1_768-nonema-pruned.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    if Path_to_MODEL != '':
        if os.path.exists(str(Path_to_MODEL)):
            print('Using the custom model.')
            model = Path_to_MODEL
        else:
            print('Wrong path, check that the path to the model is correct')

    elif MODEL_LINK != "":

        if src == 'civitai':
            modelname = get_name(MODEL_LINK, False)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                dwn(MODEL_LINK, model, 'Downloading the custom model')
                clear_output()
            else:
                print('Model already exists')
        elif src == 'gdrive':
            modelname = get_name(MODEL_LINK, True)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
                clear_output()
            else:
                print('Model already exists')
        else:
            modelname = os.path.basename(MODEL_LINK)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
                clear_output()
            else:
                print('Model already exists')

        # Size sanity check: a real SD checkpoint is larger than ~1.8 GB;
        # anything smaller is assumed to be an error page and deleted.
        if os.path.exists(model) and os.path.getsize(model) > 1810671599:
            print('Model downloaded, using the custom model.')
        else:
            call('rm '+model, shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
            print('Wrong link, check that the link is valid')

    else:
        if Original_Model_Version == "v1.5":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/SDv1.5.ckpt"
            print('Using the original V1.5 model')
        elif Original_Model_Version == "v2-512":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/v2-1_512-nonema-pruned.safetensors"
            print('Using the original V2-512 model')
        elif Original_Model_Version == "v2-768":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-nonema-pruned.safetensors"
            print('Using the original V2-768 model')
        elif Original_Model_Version == "SDXL":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors"
            print('Using the original SDXL model')
        else:
            # `model` keeps its pre-bound directory fallback.
            print('Wrong model version, try again')

    return model
241
+
242
+
243
+
244
+
245
def loradwn(LoRA_LINK):
    """Download a LoRA model into the webui's Lora folder.

    The link's hosting service decides the fetch mechanism: civitai links go
    through dwn() (name resolved via redirect headers), everything else
    through gdown.  An empty link is a no-op.
    """
    import gdown

    if LoRA_LINK == '':
        print('Nothing to do')
        return

    os.makedirs('/notebooks/sd/stable-diffusion-webui/models/Lora', exist_ok=True)

    src = getsrc(LoRA_LINK)

    # Resolve the destination filename per hosting service.
    if src == 'civitai':
        modelname = get_name(LoRA_LINK, False)
    elif src == 'gdrive':
        modelname = get_name(LoRA_LINK, True)
    else:
        modelname = os.path.basename(LoRA_LINK)

    loramodel = f'/notebooks/sd/stable-diffusion-webui/models/Lora/{modelname}'

    if os.path.exists(loramodel):
        print('Model already exists')
    elif src == 'civitai':
        dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
        clear_output()
    else:
        # gdrive and generic links both go through gdown.
        gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
        clear_output()

    if os.path.exists(loramodel):
        print('LoRA downloaded')
    else:
        print('Wrong link, check that the link is valid')
285
+
286
+
287
+
288
def CN(ControlNet_Model, ControlNet_XL_Model):
    """Install the sd-webui-controlnet extension and download ControlNet models.

    Each selector accepts "All"/"all", a 1-based numeric index into the
    model list, or "none"; ControlNet_Model additionally accepts "15".
    An invalid V1 choice suppresses the final clear_output()/done() so the
    error message stays visible.
    """

    def download(url, model_dir):
        # Download one model into model_dir, skipping files already present.
        filename = os.path.basename(urlparse(url).path)
        pth = os.path.abspath(os.path.join(model_dir, filename))
        if not os.path.exists(pth):
            print('Downloading: '+os.path.basename(url))
            download_url_to_file(url, pth, hash_prefix=None, progress=True)
        else:
            # NOTE(review): this f-string has no placeholder — "(unknown)"
            # looks like a mangled {filename}; confirm against upstream.
            print(f"The model (unknown) already exists")

    wrngv1=False
    os.chdir('/notebooks/sd/stable-diffusion-webui/extensions')
    if not os.path.exists("sd-webui-controlnet"):
        call('git clone https://github.com/Mikubill/sd-webui-controlnet.git', shell=True)
        os.chdir('/notebooks')
    else:
        # Extension already present: discard local edits, then update.
        os.chdir('sd-webui-controlnet')
        call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')

    mdldir="/notebooks/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/models"
    # Normalize legacy filenames (_sd14v1 suffix) to the -fp16 naming.
    for filename in os.listdir(mdldir):
        if "_sd14v1" in filename:
            renamed = re.sub("_sd14v1", "-fp16", filename)
            os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))

    # Fetch the model URL lists, read them, then remove the temp files.
    call('wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt', shell=True)
    call('wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt', shell=True)

    with open("CN_models.txt", 'r') as f:
        mdllnk = f.read().splitlines()
    with open("CN_models_XL.txt", 'r') as d:
        mdllnk_XL = d.read().splitlines()
    call('rm CN_models.txt CN_models_XL.txt', shell=True)

    os.chdir('/notebooks')

    if ControlNet_Model == "All" or ControlNet_Model == "all" :
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model == "15":
        # "15" selects only the t2i adapter links from the V1 list.
        mdllnk=list(filter(lambda x: 't2i' in x, mdllnk))
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model.isdigit() and int(ControlNet_Model)-1<14 and int(ControlNet_Model)>0:
        # Numeric choice: 1-based index into the V1 model list.
        download(mdllnk[int(ControlNet_Model)-1], mdldir)
        clear_output()

    elif ControlNet_Model == "none":
        pass
        clear_output()

    else:
        print('Wrong ControlNet V1 choice, try again')
        wrngv1=True

    if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all" :
        for lnk_XL in mdllnk_XL:
            download(lnk_XL, mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model)-1<5:
        download(mdllnk_XL[int(ControlNet_XL_Model)-1], mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model == "none":
        pass
        if not wrngv1:
            clear_output()
            done()

    else:
        print('Wrong ControlNet XL choice, try again')
375
+
376
+
377
+
378
def sdui(User, Password, model):
    """Assemble the command-line flags for launching the A1111 webui on Paperspace.

    Also patches gradio's blocks.py and several webui modules in place so the
    UI is reachable through the Paperspace tensorboard-* proxy URL.  Returns
    the flag string; does not start the webui itself.
    """
    # Optional gradio basic auth; disabled when either credential is empty.
    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    # Overwrite styles.py and gradio's blocks.py with patched copies.
    call('wget -q -O /notebooks/sd/stable-diffusion-webui/modules/styles.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/styles.py', shell=True)
    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    # Public URL of this machine through the Paperspace proxy.
    # NOTE(review): os.environ.get returns None when PAPERSPACE_FQDN is
    # unset, which would make this concatenation raise TypeError — assumes
    # the variable is always present on Paperspace.
    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # In-place edit of gradio's blocks.py: pin the server name to the
    # proxied URL and force the https protocol.
    # NOTE(review): the leading indentation inside the replacement string
    # literals may have been lost in transit; confirm it matches blocks.py.
    for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)


    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the bundled stablediffusion checkout and fix
    # relative repository paths.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    # Strip a PEP 604 return annotation Python 3.9 can't parse, and patch the
    # adetailer helper (best-effort — output silenced in case it's absent).
    call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)
    call("sed -i 's@|@or@' /notebooks/sd/stable-diffusion-webui/extensions/adetailer/aaaaaa/helper.py", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Default quicksettings shown at the top of the UI.
    call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint, sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()


    # --ckpt points at a checkpoint file, --ckpt-dir at a directory.
    if model=="":
        mdlpth=""
    else:
        if os.path.isfile(model):
            mdlpth="--ckpt "+model
        else:
            mdlpth="--ckpt-dir "+model


    configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt-dir /models "+auth+" "+mdlpth

    return configf
427
+
428
+
429
+
430
def getsrc(url):
    """Classify a download URL by hosting service.

    Returns 'civitai', 'gdrive', 'huggingface', or 'others' based solely on
    the URL's network location.
    """
    host = urlparse(url).netloc
    sources = {
        'civitai.com': 'civitai',
        'drive.google.com': 'gdrive',
        'huggingface.co': 'huggingface',
    }
    return sources.get(host, 'others')
441
+
442
+
443
+
444
def get_name(url, gdrive):
    """Resolve the server-side filename for a download link.

    `gdrive` False: follow the (civitai-style) redirect without downloading
    and parse the filename out of the response-content-disposition query
    parameter.  `gdrive` True: resolve a Google Drive share link via gdown's
    confirmation flow and parse the Content-Disposition header.
    """
    from gdown.download import get_url_from_gdrive_confirmation

    if not gdrive:
        # Only fetch the redirect, not the file body.
        response = requests.get(url, allow_redirects=False)
        if "Location" in response.headers:
            redirected_url = response.headers["Location"]
            quer = parse_qs(urlparse(redirected_url).query)
            if "response-content-disposition" in quer:
                disp_val = quer["response-content-disposition"][0].split(";")
                for vals in disp_val:
                    if vals.strip().startswith("filename="):
                        filenm=unquote(vals.split("=", 1)[1].strip())
                        return filenm.replace("\"","")
        # NOTE(review): falls through to an implicit None when the redirect
        # or disposition header is missing — callers use the result in an
        # f-string path, so that would yield a file named "None"; confirm.
    else:
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
        # Extract the file id from the /d/<id>/view share-link form.
        lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
        res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
        # Second request goes through gdown's virus-scan confirmation page.
        res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
        content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
        # RFC 5987 encoded filename; path separators sanitized to "_".
        filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
        return filenm
467
+
468
+
469
+
470
def done():
    """Render a disabled green 'Done!' button as a completion indicator."""
    # `display` is only a builtin inside IPython shells; import it explicitly
    # so this also works when the module is run as a plain script.
    from IPython.display import display

    # Renamed from `done` to avoid shadowing this function's own name.
    button = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(button)
scripts/mainpaperspaceA1111_311.py ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from IPython.display import clear_output
3
+ from subprocess import call, getoutput, run
4
+ import time
5
+ import sys
6
+ import fileinput
7
+ import ipywidgets as widgets
8
+ from torch.hub import download_url_to_file
9
+ from urllib.parse import urlparse, parse_qs, unquote
10
+ import re
11
+ import requests
12
+ import six
13
+
14
+ from urllib.request import urlopen, Request
15
+ import tempfile
16
+ from tqdm import tqdm
17
+
18
+
19
+
20
def Deps(force_reinstall):
    """Install (or verify) the Paperspace Python 3.11 dependency stack.

    Fast path: when `force_reinstall` is False and gradio is already present
    under python3.11, only the notebooks are refreshed.  Otherwise the
    pre-built _311 dependency archive is downloaded and unpacked over the
    system.  Commands are order-dependent — do not reorder.
    """
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        # Dependencies already unpacked: refresh notebooks and env vars only.
        ntbk()
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        # deepspeed conflicts with this stack; remove it if preinstalled.
        call("pip uninstall -qq deepspeed -y", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        # Relative path: cwd is /notebooks after ntbk(), so this is
        # /notebooks/cache.
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Apt packages from the _311 list, installed straight from .deb.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps_311.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Pre-built Python 3.11 dependency tree, extracted over / (zstd).
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # Silence all Python warnings globally by patching warnings.py.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
60
+
61
+
62
+
63
def depsinst(url, dst):
    """Stream-download `url` to `dst` with an 'Installing dependencies' bar.

    The file is read in 8 KiB chunks.  `file_size` stays None when the server
    sends no Content-Length, in which case tqdm renders an unsized bar.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # getheaders() is the legacy http.client spelling; get_all() is the
    # email.message API used by modern urllib responses.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
        # The `with` block closes the file; the original's explicit
        # f.close() inside the context manager was redundant and removed.
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
85
+
86
+
87
+
88
def dwn(url, dst, msg):
    """Stream-download `url` to `dst`, showing `msg` as the progress-bar label.

    Same mechanics as depsinst(): 8 KiB chunks, Content-Length probed for the
    bar total (None → unsized bar).
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Legacy vs modern header-access API (http.client vs email.message).
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
        # The `with` block closes the file; the original's explicit
        # f.close() inside the context manager was redundant and removed.
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
110
+
111
+
112
+
113
def ntbk():
    """Recreate /notebooks/Latest_Notebooks and fetch the current notebook set.

    The notebook URLs come from the Notebooks.txt index, which is deleted
    after the download.  Leaves the cwd at /notebooks.
    """
    os.chdir('/notebooks')
    # Always start from an empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
125
+
126
+
127
+
128
def repo():
    """Install or update the AUTOMATIC1111 webui working tree under /notebooks/sd.

    Order matters: the pre-built sd_mrep archive seeds /notebooks/sd first,
    then the webui repo is cloned or fast-forwarded on top of it.
    """
    print('Installing/Updating the repo...')
    os.chdir('/notebooks')
    # Marker path: only extract the bundled repo archive once.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    # Discard any local edits before pulling so 'git pull' cannot conflict.
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    # Best-effort clone (stderr silenced): fails harmlessly when the assets
    # repository already exists.
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()
    done()
151
+
152
+
153
+
154
+
155
+
156
def mdls(Original_Model_Version, Path_to_MODEL, MODEL_LINK, Temporary_Storage):
    """Pick the Stable Diffusion checkpoint for the webui to load.

    Priority: an existing local path (Path_to_MODEL), then a download link
    (MODEL_LINK, dispatched on its hosting service), then one of the bundled
    original models selected by Original_Model_Version.  Temporary_Storage
    routes downloads to /models instead of the webui's model folder.

    Returns the checkpoint path, or the webui model *directory* as a
    fallback when nothing valid was selected.
    """
    import gdown

    # Fallback value: the webui's model directory.  Pre-binding it replaces
    # the original's `try: model / except:` dance, whose bare except papered
    # over the branches that never assigned `model`.
    model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion"

    src = getsrc(MODEL_LINK)

    # Best-effort symlinks to the Paperspace public datasets; output is
    # silenced because the datasets may not be mounted.
    call('ln -s /datasets/stable-diffusion-classic/SDv1.5.ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base/v2-1_512-nonema-pruned.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-v2-1/stable-diffusion-2-1/v2-1_768-nonema-pruned.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    if Path_to_MODEL != '':
        if os.path.exists(str(Path_to_MODEL)):
            print('Using the custom model.')
            model = Path_to_MODEL
        else:
            print('Wrong path, check that the path to the model is correct')

    elif MODEL_LINK != "":

        if src == 'civitai':
            modelname = get_name(MODEL_LINK, False)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                dwn(MODEL_LINK, model, 'Downloading the custom model')
                clear_output()
            else:
                print('Model already exists')
        elif src == 'gdrive':
            modelname = get_name(MODEL_LINK, True)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
                clear_output()
            else:
                print('Model already exists')
        else:
            modelname = os.path.basename(MODEL_LINK)
            if Temporary_Storage:
                model = f'/models/{modelname}'
            else:
                model = f'/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}'
            if not os.path.exists(model):
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
                clear_output()
            else:
                print('Model already exists')

        # Size sanity check: a real SD checkpoint is larger than ~1.8 GB;
        # anything smaller is assumed to be an error page and deleted.
        if os.path.exists(model) and os.path.getsize(model) > 1810671599:
            print('Model downloaded, using the custom model.')
        else:
            call('rm '+model, shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
            print('Wrong link, check that the link is valid')

    else:
        if Original_Model_Version == "v1.5":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/SDv1.5.ckpt"
            print('Using the original V1.5 model')
        elif Original_Model_Version == "v2-512":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/v2-1_512-nonema-pruned.safetensors"
            print('Using the original V2-512 model')
        elif Original_Model_Version == "v2-768":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-nonema-pruned.safetensors"
            print('Using the original V2-768 model')
        elif Original_Model_Version == "SDXL":
            model = "/notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors"
            print('Using the original SDXL model')
        else:
            # `model` keeps its pre-bound directory fallback.
            print('Wrong model version, try again')

    return model
240
+
241
+
242
+
243
+
244
def loradwn(LoRA_LINK):
    """Download a LoRA model into the webui's Lora folder.

    The link's hosting service decides the fetch mechanism: civitai links go
    through dwn() (name resolved via redirect headers), everything else
    through gdown.  An empty link is a no-op.
    """
    import gdown

    if LoRA_LINK == '':
        print('Nothing to do')
        return

    os.makedirs('/notebooks/sd/stable-diffusion-webui/models/Lora', exist_ok=True)

    src = getsrc(LoRA_LINK)

    # Resolve the destination filename per hosting service.
    if src == 'civitai':
        modelname = get_name(LoRA_LINK, False)
    elif src == 'gdrive':
        modelname = get_name(LoRA_LINK, True)
    else:
        modelname = os.path.basename(LoRA_LINK)

    loramodel = f'/notebooks/sd/stable-diffusion-webui/models/Lora/{modelname}'

    if os.path.exists(loramodel):
        print('Model already exists')
    elif src == 'civitai':
        dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
        clear_output()
    else:
        # gdrive and generic links both go through gdown.
        gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
        clear_output()

    if os.path.exists(loramodel):
        print('LoRA downloaded')
    else:
        print('Wrong link, check that the link is valid')
284
+
285
+
286
+
287
def CN(ControlNet_Model, ControlNet_XL_Model):
    """Install/update the sd-webui-controlnet extension and download the selected models.

    ControlNet_Model: "all"/"All", "15" (t2i subset), a 1-based index as a string,
    or "none". ControlNet_XL_Model: "all"/"All", a 1-based index, or "none".
    """

    def download(url, model_dir):
        # Download `url` into `model_dir`, skipping files already present.
        filename = os.path.basename(urlparse(url).path)
        pth = os.path.abspath(os.path.join(model_dir, filename))
        if not os.path.exists(pth):
            print('Downloading: '+os.path.basename(url))
            download_url_to_file(url, pth, hash_prefix=None, progress=True)
        else:
            # Fix: the message had a dead "(unknown)" placeholder; report the real file.
            print(f"The model {filename} already exists")

    wrngv1=False
    os.chdir('/notebooks/sd/stable-diffusion-webui/extensions')
    if not os.path.exists("sd-webui-controlnet"):
        call('git clone https://github.com/Mikubill/sd-webui-controlnet.git', shell=True)
        os.chdir('/notebooks')
    else:
        # Extension already present: reset any local edits and pull the latest.
        os.chdir('sd-webui-controlnet')
        call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')

    mdldir="/notebooks/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/models"
    # Normalize legacy "_sd14v1" model filenames to the newer "-fp16" naming.
    for filename in os.listdir(mdldir):
        if "_sd14v1" in filename:
            renamed = re.sub("_sd14v1", "-fp16", filename)
            os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))

    call('wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt', shell=True)
    call('wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt', shell=True)

    with open("CN_models.txt", 'r') as f:
        mdllnk = f.read().splitlines()
    with open("CN_models_XL.txt", 'r') as d:
        mdllnk_XL = d.read().splitlines()
    call('rm CN_models.txt CN_models_XL.txt', shell=True)

    os.chdir('/notebooks')

    if ControlNet_Model == "All" or ControlNet_Model == "all" :
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model == "15":
        # "15" selects only the t2i adapter models.
        mdllnk=list(filter(lambda x: 't2i' in x, mdllnk))
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model.isdigit() and int(ControlNet_Model)-1<14 and int(ControlNet_Model)>0:
        download(mdllnk[int(ControlNet_Model)-1], mdldir)
        clear_output()

    elif ControlNet_Model == "none":
        clear_output()

    else:
        print('Wrong ControlNet V1 choice, try again')
        wrngv1=True

    if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all" :
        for lnk_XL in mdllnk_XL:
            download(lnk_XL, mdldir)
        if not wrngv1:
            # Keep any V1 error message visible; only clear on full success.
            clear_output()
            done()

    elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model)-1<5:
        download(mdllnk_XL[int(ControlNet_XL_Model)-1], mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model == "none":
        if not wrngv1:
            clear_output()
            done()

    else:
        print('Wrong ControlNet XL choice, try again')
374
+
375
+
376
+
377
def sdui(User, Password, model):
    """Patch gradio for the Paperspace FQDN and build the A1111 webui launch arguments."""

    # Gradio auth is only enabled when both credentials were supplied.
    auth = "" if User =="" or Password=="" else f"--gradio-auth {User}:{Password}"

    call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # Rewrite gradio's blocks.py in place so the served URL matches Paperspace.
    for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
        stripped = line.strip()
        if stripped.startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if stripped.startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if stripped.startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if stripped.startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui's source lookup paths at the Paperspace layout.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # A file path becomes --ckpt, a directory becomes --ckpt-dir, empty means neither.
    if model=="":
        mdlpth=""
    elif os.path.isfile(model):
        mdlpth="--ckpt "+model
    else:
        mdlpth="--ckpt-dir "+model

    launch_args="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt-dir /models "+auth+" "+mdlpth

    return launch_args
420
+
421
+
422
+
423
def getsrc(url):
    """Classify a download URL by its hosting service.

    Returns one of 'civitai', 'gdrive', 'huggingface' or 'others'.
    """
    known_hosts = {
        'civitai.com': 'civitai',
        'drive.google.com': 'gdrive',
        'huggingface.co': 'huggingface',
    }
    return known_hosts.get(urlparse(url).netloc, 'others')
434
+
435
+
436
+
437
def get_name(url, gdrive):
    """Resolve the real filename behind a civitai (gdrive=False) or Google Drive (gdrive=True) link."""

    from gdown.download import get_url_from_gdrive_confirmation

    if gdrive:
        # Follow the Drive confirmation page and read the name from Content-Disposition.
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
        file_id = url[url.find("/d/")+3:url.find("/view")]
        lnk="https://drive.google.com/uc?id={id}&export=download".format(id=file_id)
        res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
        res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
        content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
        return re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")

    # civitai: the name travels in the redirect's response-content-disposition query arg.
    response = requests.get(url, allow_redirects=False)
    if "Location" in response.headers:
        redirected_url = response.headers["Location"]
        query = parse_qs(urlparse(redirected_url).query)
        if "response-content-disposition" in query:
            for part in query["response-content-disposition"][0].split(";"):
                if part.strip().startswith("filename="):
                    return unquote(part.split("=", 1)[1].strip()).replace("\"","")
    # NOTE(review): implicitly returns None when no disposition is found — same as the original.
460
+
461
+
462
+
463
def done():
    """Display a disabled green 'Done!' confirmation button.

    Consistency fix: this script imports only `clear_output` from
    IPython.display at the top (the sibling script imports `display`
    explicitly), so import it locally to avoid relying on the notebook
    builtin.
    """
    from IPython.display import display

    done = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(done)
scripts/mainpaperspacev1.py ADDED
@@ -0,0 +1,1337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ from tqdm import tqdm
13
+ import gdown
14
+ import random
15
+ import sys
16
+ import cv2
17
+ from io import BytesIO
18
+ import requests
19
+ from collections import defaultdict
20
+ from math import log, sqrt
21
+ import numpy as np
22
+ import six
23
+ import re
24
+
25
+ from urllib.parse import urlparse, parse_qs, unquote
26
+ from urllib.request import urlopen, Request
27
+ import tempfile
28
+ from tqdm import tqdm
29
+
30
+
31
+
32
+
33
def Deps(force_reinstall):
    """Install (or verify) the Paperspace dependency stack and refresh the notebooks."""

    def _quiet(cmd):
        # Run a shell command with stdout suppressed.
        call(cmd, shell=True, stdout=open('/dev/null', 'w'))

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        # Fast path: dependencies are already in place, just refresh.
        ntbk()
        _quiet('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers')
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        print('Modules and notebooks updated, dependencies already installed')
    else:
        _quiet("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0")
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            # Drop the preinstalled wheels that conflict with the pinned stack.
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            _quiet("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*")
        ntbk()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # System packages, then the prebuilt python dependency archive.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        _quiet('dpkg -i *.deb')
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        _quiet('tar -C / --zstd -xf ppsdeps.tar.zst')
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        # Silence python warnings globally by patching the warnings formatter.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
75
+
76
+
77
+
78
def depsinst(url, dst):
    """Stream-download `url` to `dst` while showing an 'Installing dependencies' progress bar.

    url: HTTP(S) URL of the archive to fetch.
    dst: local file path to write the download to.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Header API compatibility: old-style getheaders vs email.message get_all.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
    # Fix: removed the redundant f.close() — the `with` block already closes the file.
100
+
101
+
102
def ntbk():
    """Refresh /notebooks/Latest_Notebooks with the newest notebook files."""

    os.chdir('/notebooks')
    # Start from a clean folder: drop any previous copy, then recreate it.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # The manifest lists one notebook URL per line; wget fetches them all.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
114
+
115
+
116
+
117
+
118
def downloadmodel_hf(Path_to_HuggingFace):
    """Sparse-clone a diffusers model repo from HuggingFace into /models/stable-diffusion-custom."""
    import wget

    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    # Use the saved HF token (if any) for private repositories.
    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    # Sparse checkout: pull only the diffusers components, skip safetensors blobs.
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)

    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        # Clone succeeded: strip git metadata and install a known-good model_index.json.
        call("rm -r .git", shell=True)
        call("rm model_index.json", shell=True)
        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
        os.chdir('/notebooks')
        clear_output()
        done()

    # Deliberate endless loop so a bad link cannot go unnoticed.
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
152
+
153
+
154
+
155
+
156
def downloadmodel_path(MODEL_PATH):
    """Convert a local .ckpt/.safetensors checkpoint into a diffusers folder."""

    # The converter needs --from_safetensors for safetensors checkpoints.
    sftnsr = "--from_safetensors" if os.path.basename(MODEL_PATH).split('.')[-1] == 'safetensors' else ""

    import wget
    os.chdir('/notebooks')
    clear_output()

    if not os.path.exists(str(MODEL_PATH)):
        # Deliberate endless loop so a bad path cannot go unnoticed.
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            time.sleep(5)
        return

    call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
    call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
    clear_output()
    call('rm config.yaml', shell=True)

    if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        clear_output()
        done()
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Conversion error')
        time.sleep(5)
182
+
183
+
184
+
185
+
186
def downloadmodel_link(MODEL_LINK):
    """Download a checkpoint from a civitai/gdrive/direct link and convert it to diffusers format."""

    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the link by hosting service.
        return {'civitai.com': 'civitai',
                'drive.google.com': 'gdrive',
                'huggingface.co': 'huggingface'}.get(urlparse(url).netloc, 'others')

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        # Resolve the real filename behind a civitai or Google Drive link.
        if gdrive:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            return re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
        response = requests.get(url, allow_redirects=False)
        if "Location" in response.headers:
            quer = parse_qs(urlparse(response.headers["Location"]).query)
            if "response-content-disposition" in quer:
                for vals in quer["response-content-disposition"][0].split(";"):
                    if vals.strip().startswith("filename="):
                        return unquote(vals.split("=", 1)[1].strip()).replace("\"","")

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    # Local download name and converter flag depend on the extension.
    if modelname.split('.')[-1]=='safetensors':
        modelnm="model.safetensors"
        sftnsr="--from_safetensors"
    else:
        modelnm="model.ckpt"
        sftnsr=""

    os.chdir('/notebooks')
    call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)

    if os.path.exists(modelnm):
        # A real SD checkpoint is > ~1.8 GB; anything smaller is an error page.
        if os.path.getsize(modelnm) > 1810671599:
            call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
            call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelnm+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
            clear_output()
            call('rm config.yaml', shell=True)
            if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm '+modelnm, shell=True)
                clear_output()
                done()
            else:
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    time.sleep(5)
        else:
            while os.path.getsize(modelnm) < 1810671599:
                print('Wrong link, check that the link is valid')
                time.sleep(5)
263
+
264
+
265
+
266
+
267
def dls(Path_to_HuggingFace, Model_Path, Model_Link):
    """Dispatch to the right model-download helper; return the model directory to train from."""

    custom = "/models/stable-diffusion-custom"

    # First non-empty source wins: HF repo, then local path, then link.
    if Path_to_HuggingFace != "":
        downloadmodel_hf(Path_to_HuggingFace)
        return custom
    if Model_Path != "":
        downloadmodel_path(Model_Path)
        return custom
    if Model_Link != "":
        downloadmodel_link(Model_Link)
        return custom

    print('Using the original V1.5 model')
    return "/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
283
+
284
+
285
+
286
def sess(Session_Name, Session_Link_optional, MODEL_NAME):
    """Create or resume a Dreambooth training session.

    Optionally downloads an existing session folder from a Drive link, lets the
    user promote an intermediary checkpoint, and converts a found .ckpt back to
    a diffusers folder.

    Returns (PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
    CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume).
    """
    import wget, gdown
    os.chdir('/notebooks')
    PT=""

    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    if Session_Link_optional != "":
        print('Downloading session...')
        if not os.path.exists(str(WORKSPACE+'/Sessions')):
            call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE+'/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        # Fix: these were bare (undefined) Python names causing a NameError at
        # runtime; the folders/archives are literal file names in the session.
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    CONCEPT_DIR=SESSION_DIR+"/concept_images"
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
    resume=False

    if os.path.exists(str(SESSION_DIR)):
        mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                # Promote the n-th intermediary checkpoint to the final model path.
                k=0
                for i in mdls:
                    if k==n:
                        call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
                    k=k+1

            k=0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
            for i in mdls:
                print(str(k)+'- '+i)
                k=k+1
            n=input()
            while int(n)>k-1:
                n=input()
            if n!="000":
                f(int(n))
                print('Using the model '+ mdls[int(n)]+" ...")
                time.sleep(4)
                clear_output()
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        # A trained .ckpt exists: convert it back to a diffusers folder to resume.
        print('Session found, loading the trained model ...')
        call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
        call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MDLPTH+' --dump_path '+OUTPUT_DIR+' --original_config_file config.yaml', shell=True)
        clear_output()
        call('rm config.yaml', shell=True)
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            resume=True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume
385
+
386
+
387
+
388
def done():
    """Show a disabled green 'Done!' button as a completion indicator."""
    button = widgets.Button(description='Done!',
                            disabled=True,
                            button_style='success',
                            tooltip='',
                            icon='check')
    display(button)
397
+
398
+
399
+
400
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
    """Show an upload widget for instance images, or ingest IMAGES_FOLDER_OPTIONAL directly."""

    # Jupyter checkpoint folders would otherwise be treated as instance images.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )
    out=widgets.Output()

    def up(Upload):
        # Click handler: tear down the widgets, then ingest the uploaded files.
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()

    if IMAGES_FOLDER_OPTIONAL=="":
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        # A source folder was given: ingest it immediately, no interaction needed.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()
430
+
431
+
432
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
    """Ingest instance images (and caption .txt sidecars) into the session folders.

    Sources either a local folder (IMAGES_FOLDER_OPTIONAL) or an ipywidgets
    FileUpload value; optionally center-crops to Crop_size and renames files.
    """

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":

        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Caption sidecars go to CAPTIONS_DIR before the images are processed.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Spaces in filenames break downstream shell commands.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file=file.convert("RGB")
                file=ImageOps.exif_transpose(file)
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    # Fix: `extension.upper()=="jpg"` could never be true;
                    # lower() covers jpg/JPG with one test (PIL needs "JPEG").
                    if extension.lower()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
                else:
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
        else:
            # Fix: the original re-copied the entire folder once per file
            # inside a loop; a single recursive copy has the same result.
            call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        # Widget upload path: write .txt captions first, then images.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                img=img.convert("RGB")
                img=ImageOps.exif_transpose(img)

                if extension.lower()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.lower()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
        else:
            for file in tqdm(uploader.value, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img=img.convert("RGB")
                extension = filename.split(".")[-1]

                if extension.lower()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    if ren:
        # Rename everything to a uniform conceptimagedb<N>.<ext> scheme.
        i=0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
            call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
            i=i+1

    # Final pass: no spaces may remain in image or caption filenames.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
540
+
541
+
542
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: select an instance image, preview it, edit and save its .txt caption."""

    clear_output()

    def Caption(path):
        # Build the preview + editor widgets for one selected image.
        if path=="Select an instance image to caption":
            return

        name = os.path.splitext(os.path.basename(path))[0]
        ext=os.path.splitext(os.path.basename(path))[-1][1:]
        # Fix: the original `if ext=="jpg" or "JPG":` was always true, forcing
        # every image to JPEG; only the jpg -> JPEG alias is needed for PIL.
        if ext.lower()=="jpg":
            ext="JPEG"

        if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
            with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                text = f.read()
        else:
            # Create an empty caption file for images captioned the first time.
            with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                f.write("")
            text = ""

        # Low-quality 420x420 preview keeps the widget payload small.
        img=Image.open(os.path.join(INSTANCE_DIR,path))
        img=img.convert("RGB")
        img=img.resize((420, 420))
        image_bytes = BytesIO()
        # Fix: 'qualiy' typo meant the quality option was silently ignored.
        img.save(image_bytes, format=ext, quality=10)
        image_bytes.seek(0)
        image = widgets.Image(
            value=image_bytes.read(),
            width=420,
            height=420
        )
        text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

        def update_text(text):
            # Persist the edited caption next to the image name.
            with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                f.write(text)

        button = widgets.Button(description='Save', button_style='success')
        button.on_click(lambda b: update_text(text_area.value))

        return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Re-render the editor whenever a different image is selected.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
604
+
605
+
606
+
607
def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
    """Run DreamBooth training (text encoder, optional concept, then UNet)
    and convert the result to a ``.ckpt`` in the session folder.

    Launches ``train_dreambooth_pps.py`` via ``accelerate`` once per enabled
    phase, then calls the diffusers->SD conversion script.  Returns the
    (possibly updated) ``resume`` flag so the caller can track whether a
    previous run exists.
    """

    # Jupyter drops .ipynb_checkpoints folders inside the data dirs; they
    # would be picked up as training images, so remove them first.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # A previous run exists but the user did not ask to resume: make them
    # choose explicitly before overwriting the trained model.
    if resume and not Resume_Training:
        print('Overwrite your previously trained model ?, answering "yes" will train a new model, answering "no" will resume the training of the previous model?  yes or no ?')
        while True:
            ansres=input('')
            if ansres=='no':
                Resume_Training = True
                resume= False
                break
            elif ansres=='yes':
                Resume_Training = False
                resume= False
                break

    # Block until a base model is present (the user can download one from
    # another cell while this loop polls).
    while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    MODELT_NAME=MODEL_NAME

    Seed=random.randint(1, 999999)

    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    # When resuming, continue from the previous session output instead of
    # the base model.
    resuming=""
    if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        MODELT_NAME=OUTPUT_DIR
        print('Resuming Training...')
        resuming="Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME=MODEL_NAME
        # NOTE(review): MODEL_NAME never changes inside this loop, so an
        # empty MODEL_NAME loops forever by design (waits for user action
        # in another cell) — confirm this is intended.
        while MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)

    # A step count of 0 disables the corresponding training phase.
    trnonltxt=""
    if UNet_Training_Steps==0:
        trnonltxt="--train_only_text_encoder"

    Enable_text_encoder_training= True
    Enable_Text_Encoder_Concept_Training= True

    if Text_Encoder_Training_Steps==0:
        Enable_text_encoder_training= False
    else:
        stptxt=Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps==0:
        Enable_Text_Encoder_Concept_Training= False
    else:
        stptxtc=Text_Encoder_Concept_Training_Steps

    # Intermediate-checkpoint settings: stp=0 disables periodic saving; a
    # starting step below 200 is snapped to the saving interval.
    if Save_Checkpoint_Every is None:
        Save_Checkpoint_Every=1
    stp=0
    if Start_saving_from_the_step is None:
        Start_saving_from_the_step=0
    if (Start_saving_from_the_step < 200):
        Start_saving_from_the_step=Save_Checkpoint_Every
    stpsv=Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp=Save_Checkpoint_Every

    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        # Train and dump only the text encoder (used for both the instance
        # images and, with CONCEPT_DIR, the concept images).
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
        # Train only the UNet, saving intermediate checkpoints per stp/stpsv.
        clear_output()
        if resuming=="Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    if Enable_text_encoder_training :
        print('Training the text encoder...')
        # Remove a stale text encoder dump from a previous run first.
        if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
            call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    if Enable_Text_Encoder_Concept_Training:
        if os.path.exists(CONCEPT_DIR):
            if os.listdir(CONCEPT_DIR)!=[]:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('Training the text encoder on the concept...')
                dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
            else:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('No concept images found, skipping concept training...')
                Text_Encoder_Concept_Training_Steps=0
                time.sleep(8)
        else:
            clear_output()
            if resuming=="Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps=0
            time.sleep(8)

    if UNet_Training_Steps!=0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):

            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
            clear_output()
            # Fixed: verify the file that was actually written above
            # (Session_Name.ckpt) — the original checked INSTANCE_NAME.ckpt,
            # which falsely reported failure whenever the two names differed.
            if os.path.exists(SESSION_DIR+"/"+Session_Name+'.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")

        else:
            print("Something went wrong")

    return resume
795
+
796
+
797
+
798
def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
    """Prepare the AUTOMATIC1111 web UI and return its launch flag string.

    Resolves the checkpoint to load (previous session > custom path > current
    session), fetches/updates the webui repo and patched support files,
    rewrites gradio's blocks.py so the UI is served through the Paperspace
    FQDN, and returns the command-line arguments for launching the UI.
    """

    # --- Resolve the checkpoint path; each branch polls until the file
    # exists so the user can fix the problem from another cell. ---
    if Previous_Session_Name!="":
        print("Loading a previous session model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path!="":
        print("Loading model from a custom path")
        path_to_trained_model=Custom_Path

        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    # Gradio basic-auth flag; disabled when either credential is empty.
    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    # --- Fetch the prepacked repositories once, then clone/update the
    # AUTOMATIC1111 webui itself. ---
    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    # Discard local edits (from previous patching) before pulling updates.
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    # Overwrite two files with Paperspace-patched versions.
    call('wget -q -O /notebooks/sd/stable-diffusion-webui/modules/styles.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/styles.py', shell=True)
    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    # Public hostname for this machine's tensorboard port; the UI is served
    # through it over https.
    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # Patch gradio in place: force server_name to the public FQDN, force the
    # https protocol, and drop the lines that would override it.
    for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the prepacked stablediffusion sources.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    # Strip Python-3.10+ syntax (PEP 604 unions) that the runtime here
    # cannot parse.
    call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)
    call("sed -i 's@|@or@' /notebooks/sd/stable-diffusion-webui/extensions/adetailer/aaaaaa/helper.py", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Preseed the quicksettings bar with commonly used options.
    call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint, sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Flag string consumed by the webui launcher (port 6006 is the
    # tensorboard port exposed by Paperspace).
    configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth

    return configf
887
+
888
+
889
+
890
def clean():
    """Display a widget UI to delete stored DreamBooth sessions.

    Lists the folders under /notebooks/Fast-Dreambooth/Sessions in a Select
    widget; the Remove button deletes the selected session folder (and its
    matching /notebooks/models entry, if any) and refreshes the list.
    """

    Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out=widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        # Fixed: tooltip typo "Removet" -> "Remove".
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the selected session and refresh the list.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")


            else:
                # Nothing selected (list is empty): tear down the UI.
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s,d,out)
    else:
        print("NOTHING TO REMOVE")
933
+
934
+
935
+
936
def hf(Name_of_your_concept, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
    """Upload the trained model (diffusers folders + .ckpt) to a new private
    Hugging Face repo.

    Restores the safety_checker/feature_extractor/model_index.json pieces
    from the runwayml/stable-diffusion-v1-5 repo via a sparse git checkout,
    then pushes README, the .ckpt at MDLPTH, and each diffusers subfolder of
    OUTPUT_DIR, printing a textual progress bar between uploads.
    """

    from slugify import slugify
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
    from huggingface_hub import create_repo
    from IPython.display import display_markdown


    # Default the repo name to the session name; slugs cannot contain spaces.
    if(Name_of_your_concept == ""):
        Name_of_your_concept = Session_Name
    Name_of_your_concept=Name_of_your_concept.replace(" ","-")



    # Prompt for a write token if none was supplied.
    if hf_token_write =="":
        print('Your Hugging Face write access token : ')
        hf_token_write=input()

    hf_token = hf_token_write

    api = HfApi()
    your_username = api.whoami(token=hf_token)["name"]

    repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
    # NOTE(review): output_dir is assigned but never used below — OUTPUT_DIR
    # (the parameter) is what the uploads read from.
    output_dir = f'/notebooks/models/'+INSTANCE_NAME

    def bar(prg):
        # Render a 25-slot progress bar for step `prg` (4% per slot).
        clear_output()
        br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
        return br

    print("Loading...")

    # Pull fresh safety_checker / feature_extractor / model_index.json from
    # the upstream SD 1.5 repo with a sparse checkout (safetensors excluded),
    # replacing whatever training left behind, then drop the .git metadata.
    os.chdir(OUTPUT_DIR)
    call('rm -r safety_checker feature_extractor .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('rm model_index.json', shell=True)
    call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git remote add -f origin https://huggingface.co/runwayml/stable-diffusion-v1-5', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')

    print(bar(1))

    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook

'''
    #Save the readme to a file
    readme_file = open("README.md", "w")
    readme_file.write(readme_text)
    readme_file.close()

    # First commit: README + the checkpoint file.
    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt",path_or_fileobj=MDLPTH)

    ]
    create_repo(repo_id,private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    # Upload each diffusers component folder, advancing the progress bar.
    api.upload_folder(
        folder_path=OUTPUT_DIR+"/feature_extractor",
        path_in_repo="feature_extractor",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(4))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/safety_checker",
        path_in_repo="safety_checker",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(8))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/scheduler",
        path_in_repo="scheduler",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(9))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/text_encoder",
        path_in_repo="text_encoder",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(12))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/tokenizer",
        path_in_repo="tokenizer",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(13))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/unet",
        path_in_repo="unet",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(21))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/vae",
        path_in_repo="vae",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(23))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR+"/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
    done()
1085
+
1086
+
1087
+
1088
def crop_image(im, size):
    """Smart-crop `im` to a size x size square and return the crop(s) in a list.

    Scales the image so its short side matches `size`, finds a focal point by
    blending face detections (Haar cascades), corner features, and a sliding
    entropy window, then crops a square centered on that point (clamped to
    the image bounds).
    """

    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        # Weighted blend of the three detectors' centroids; weights are
        # renormalized over the detectors that actually returned points.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point


    def image_face_points(im, settings):
        # Try a sequence of OpenCV Haar cascades (eyes, frontal/profile
        # faces, upper body); the first cascade with any hit wins.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except:
                continue

            if len(faces) > 0:
                # Convert each (x, y, w, h) rect to its center point.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []


    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        # Shi-Tomasi corner detection; each corner contributes equally.
        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points


    def image_entropy_points(im, settings):
        # Slide a crop-sized window along the long axis and keep the
        # position with maximum entropy; square images are skipped.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]


    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean position of a list of points.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))


    def poi_average(pois, settings):
        # Weighted mean position; `weight and x / weight` guards the
        # division when no points contributed (weight == 0).
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)


    def is_landscape(w, h):
        return w > h


    def is_portrait(w, h):
        return h > w


    def is_square(w, h):
        return w == h


    class PointOfInterest:
        # A candidate focal point: position, blend weight, and marker size.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            # Square box of side `size` centered on this point.
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Crop dimensions and per-detector blend weights.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale so the short side matches the crop size (so the crop window
    # spans the full short side).
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
scripts/mainpaperspacev1_311.py ADDED
@@ -0,0 +1,1330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ from tqdm import tqdm
13
+ import gdown
14
+ import random
15
+ import sys
16
+ import cv2
17
+ from io import BytesIO
18
+ import requests
19
+ from collections import defaultdict
20
+ from math import log, sqrt
21
+ import numpy as np
22
+ import six
23
+ import re
24
+
25
+ from urllib.parse import urlparse, parse_qs, unquote
26
+ from urllib.request import urlopen, Request
27
+ import tempfile
28
+ from tqdm import tqdm
29
+
30
+
31
+
32
+
33
def Deps(force_reinstall):
    """Install (or verify) the Python 3.11 Paperspace dependencies.

    If gradio is already present and `force_reinstall` is False, only updates
    the notebooks and environment variables.  Otherwise installs apt packages,
    unpacks the prebuilt site-packages archive, clones the patched diffusers
    repo, and links /models and /diffusers into /notebooks.
    """

    # Fast path: dependencies were installed previously (gradio present).
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        ntbk()
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        call("pip uninstall -qq deepspeed -y", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        # /models lives outside /notebooks so it persists; expose it via a
        # symlink.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Download and install the pinned apt packages, then unpack the
        # prebuilt pip environment straight into the filesystem root.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps_311.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # Silence warning text at the interpreter level.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

        done()
73
+
74
+
75
+
76
def depsinst(url, dst):
    """Download `url` to file `dst`, showing a tqdm progress bar.

    The bar total comes from the Content-Length header when the server sends
    one; otherwise the bar runs without a known total.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # FIX: the response was previously leaked; urlopen() results are context
    # managers, so close the connection deterministically.
    with urlopen(req) as u:
        meta = u.info()
        # getheaders() is the legacy (pre-email.message) API; keep the fallback.
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            # FIX: the redundant f.close() inside the with-block was removed;
            # the context manager already closes the file.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if not buffer:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
+
100
def ntbk():
    """Refresh /notebooks/Latest_Notebooks from the PPS notebook list."""
    os.chdir('/notebooks')
    # Start from a clean folder: remove any previous copy, then recreate it.
    # (Equivalent to the original if/else: both paths end with a fresh mkdir.)
    if not os.path.exists('Latest_Notebooks'):
        call('mkdir Latest_Notebooks', shell=True)
    else:
        call('rm -r Latest_Notebooks', shell=True)
        call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # Notebooks.txt is a URL list consumed by wget -i, then discarded.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
+
113
+
114
+
115
+
116
def downloadmodel_hf(Path_to_HuggingFace):
    """Sparse-clone a diffusers model repo from HuggingFace into
    /models/stable-diffusion-custom.

    Only scheduler/text_encoder/tokenizer/unet/vae/model_index.json are
    checked out (safetensors files are excluded). If the token file exists,
    it is embedded in the remote URL for private repos. On failure, loops
    forever printing an error so the user notices.
    """
    import wget

    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    # Optional HF token saved by the notebook; used as basic-auth in the URL.
    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    # Sparse checkout: pull only the diffusers sub-folders, skip *.safetensors.
    call("git config core.sparsecheckout true", shell=True)
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        call("rm -r .git", shell=True)
        # Replace the repo's model_index.json with the project's known-good one.
        call("rm model_index.json", shell=True)
        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
        os.chdir('/notebooks')
        clear_output()
        done()
    # Deliberate infinite retry-print loop when the unet weights never appeared.
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
+
151
+
152
+
153
+
154
def downloadmodel_path(MODEL_PATH):
    """Convert a local checkpoint (.ckpt/.safetensors) to a diffusers folder
    at /models/stable-diffusion-custom using the bundled converter script."""
    import wget

    # The converter needs --from_safetensors for .safetensors checkpoints.
    extension = os.path.basename(MODEL_PATH).split('.')[-1]
    converter_flag = "--from_safetensors" if extension == 'safetensors' else ""

    os.chdir('/notebooks')
    clear_output()

    converted_unet = '/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'

    if not os.path.exists(str(MODEL_PATH)):
        # Bad path: block forever printing so the user notices and fixes it.
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            time.sleep(5)
    else:
        call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
        call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+converter_flag, shell=True)
        clear_output()
        call('rm config.yaml', shell=True)
        if os.path.exists(converted_unet):
            clear_output()
            done()
        # If conversion failed, block forever printing the error.
        while not os.path.exists(converted_unet):
            print('Conversion error')
            time.sleep(5)
+
181
+
182
+
183
+
184
def downloadmodel_link(MODEL_LINK):
    """Download a checkpoint from a URL (civitai / gdrive / huggingface /
    other) with gdown, then convert it to a diffusers folder.

    The 1810671599-byte threshold is used as a sanity check that a full
    model (not an error page) was downloaded before conversion.
    """
    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation


    def getsrc(url):
        # Classify the link by hostname so the right filename probe is used.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        # Resolve the real filename behind a redirecting download link.
        # NOTE(review): the non-gdrive branch implicitly returns None when no
        # Location header / content-disposition is present — confirm callers
        # tolerate that.
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            # Google Drive: follow the confirmation page to get the real file,
            # then read the filename from Content-Disposition.
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    # Local download name + converter flag depend only on the extension.
    sftnsr=""
    if modelname.split('.')[-1]!='safetensors':
        modelnm="model.ckpt"
    else:
        modelnm="model.safetensors"
        sftnsr="--from_safetensors"

    os.chdir('/notebooks')
    call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)

    if os.path.exists(modelnm):
        if os.path.getsize(modelnm) > 1810671599:
            call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
            call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelnm+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
            clear_output()
            call('rm config.yaml', shell=True)
            if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm '+modelnm, shell=True)
                clear_output()
                done()
            else:
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    time.sleep(5)
        else:
            # Downloaded file too small to be a model: block forever printing.
            # NOTE(review): if gdown creates no file at all, this function
            # silently returns with nothing downloaded — confirm that is intended.
            while os.path.getsize(modelnm) < 1810671599:
                print('Wrong link, check that the link is valid')
                time.sleep(5)
+
262
+
263
+
264
+
265
def dls(Path_to_HuggingFace, Model_Path, Model_Link):
    """Dispatch to the appropriate model downloader and return the path of
    the model directory to train from.

    Priority: HuggingFace repo > local path > direct link > bundled v1.5.
    """
    custom_model_dir = "/models/stable-diffusion-custom"

    if Path_to_HuggingFace != "":
        downloadmodel_hf(Path_to_HuggingFace)
        return custom_model_dir
    if Model_Path != "":
        downloadmodel_path(Model_Path)
        return custom_model_dir
    if Model_Link != "":
        downloadmodel_link(Model_Link)
        return custom_model_dir

    # Nothing supplied: fall back to the pre-mounted original v1.5 model.
    print('Using the original V1.5 model')
    return "/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
+
282
+
283
+
284
def sess(Session_Name, Session_Link_optional, MODEL_NAME):
    """Create or load a Dreambooth training session.

    Optionally downloads a session folder from a Google Drive link, lets the
    user pick an intermediary checkpoint when no final one exists, converts a
    found .ckpt back to a diffusers folder for resuming, and returns the
    tuple of session paths/flags consumed by the rest of the notebook.
    """
    import wget, gdown
    os.chdir('/notebooks')
    PT=""

    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    if Session_Link_optional !="":
        print('Downloading session...')

    if Session_Link_optional != "":
        if not os.path.exists(str(WORKSPACE+'/Sessions')):
            call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE+'/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        # FIX: these were bare names (instance_images, instance_images.zip, ...)
        # which raised NameError at runtime; they are literal file/folder names
        # inside the downloaded session, so they must be quoted strings.
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    CONCEPT_DIR=SESSION_DIR+"/concept_images"
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
    resume=False

    if os.path.exists(str(SESSION_DIR)):
        mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                # Rename the n-th listed checkpoint to the canonical MDLPTH name.
                k=0
                for i in mdls:
                    if k==n:
                        call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
                    k=k+1

            k=0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')

            for i in mdls:
                print(str(k)+'- '+i)
                k=k+1
            n=input()
            # Re-prompt until the index is in range ("000" passes: int("000") == 0).
            while int(n)>k-1:
                n=input()
            if n!="000":
                f(int(n))
                print('Using the model '+ mdls[int(n)]+" ...")
                time.sleep(4)
                clear_output()
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        # A trained ckpt exists: convert it back to diffusers so training resumes.
        print('Session found, loading the trained model ...')
        call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
        call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MDLPTH+' --dump_path '+OUTPUT_DIR+' --original_config_file config.yaml', shell=True)
        clear_output()

        call('rm config.yaml', shell=True)
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            resume=True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume
+
384
+
385
+
386
def done():
    """Display a disabled green 'Done!' confirmation button."""
    button = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(button)
+
396
+
397
+
398
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
    """Collect instance images, either via a notebook upload widget (when no
    source folder is given) or directly from IMAGES_FOLDER_OPTIONAL."""
    # Jupyter drops .ipynb_checkpoints into watched folders; remove it so it
    # is not treated as an image.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
    upload_button = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )
    output_area = widgets.Output()

    def _on_upload_clicked(btn):
        # Close the widgets, then process the uploaded files and confirm.
        with output_area:
            uploader.close()
            btn.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()

    if IMAGES_FOLDER_OPTIONAL == "":
        # Interactive path: wait for the user to pick files and click Upload.
        upload_button.on_click(_on_upload_clicked)
        display(uploader, upload_button, output_area)
    else:
        # Folder path given: process it immediately, no widgets needed.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()
+
429
+
430
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
    """Populate INSTANCE_DIR (images) and CAPTIONS_DIR (.txt captions) from
    either a source folder or the upload widget, optionally square-cropping
    to Crop_size and optionally renaming files to a uniform scheme.

    NOTE(review): relies on a `crop_image(file, Crop_size)` helper defined
    elsewhere in this file; it appears to return a sequence whose first
    element is a PIL image — confirm against its definition.
    """

    # Optionally wipe previous instance images and captions.
    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":
        # --- Folder source -------------------------------------------------
        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Any .txt files alongside the images are captions: move them out first.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Replace spaces in filenames so later shell commands are safe.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file=file.convert("RGB")
                # Apply EXIF orientation so crops are computed on upright pixels.
                file=ImageOps.exif_transpose(file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    # NOTE(review): extension.upper()=="jpg" can never be true;
                    # the second operand is dead — only the "JPG" test matters.
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

                else:
                    # Already the right size: plain copy, no re-encode.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # No cropping: bulk-copy the whole folder (each iteration copies
            # everything; the loop only drives the progress bar).
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        # --- Upload-widget source ------------------------------------------
        up=""
        # First pass: write caption .txt files.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        # Remaining (non-.txt) uploads are the images.
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                img=img.convert("RGB")
                img=ImageOps.exif_transpose(img)

                # Save the original first, then re-open and crop in place.
                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img=img.convert("RGB")
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    # Optional uniform renaming: conceptimagedb<i>.<ext>
    if ren:
        i=0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            identifier=filename.split(".")[0]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
            call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
            i=i+1

    # Final sweep: dash-replace remaining spaces in both output folders.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
+
540
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: a Select widget lists the instance images;
    picking one shows a 420x420 preview with an editable caption textarea and
    a Save button writing CAPTIONS_DIR/<name>.txt."""
    paths=""
    out=""
    widgets_l=""
    clear_output()

    def Caption(path):
        # Build the preview+editor row for one image (the placeholder entry
        # returns None, which display() simply renders as nothing).
        if path!="Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext=os.path.splitext(os.path.basename(path))[-1][1:]
            # FIX: was `if ext=="jpg" or "JPG":` — always true, so every image
            # (png, webp, ...) was re-encoded as JPEG. Only map jpg -> JPEG,
            # since PIL's saver knows "JPEG" but not "jpg"/"JPG".
            if ext.lower()=="jpg":
                ext="JPEG"

            # Read the existing caption, creating an empty file on first visit.
            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()

            img=Image.open(os.path.join(INSTANCE_DIR,path))
            img=img.convert("RGB")
            img=img.resize((420, 420))
            image_bytes = BytesIO()
            # FIX: was `qualiy=10` (typo) so the low-quality preview setting
            # was silently ignored by PIL.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img= image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})


            def update_text(text):
                # Save button handler: overwrite the caption file.
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])


    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)


    out = widgets.Output()

    def click(change):
        # Re-render the editor pane whenever the selection changes.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
+
603
+
604
+
605
def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
    """Run Dreambooth training via `accelerate launch`: optional text-encoder
    training, optional concept text-encoder training, then UNet training, and
    finally convert the diffusers output to a .ckpt in the session folder.

    Returns the (possibly cleared) `resume` flag.
    """

    # Remove Jupyter checkpoint folders so they are not read as training data.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # A previous trained model exists but the user did not ask to resume:
    # confirm interactively before overwriting it.
    if resume and not Resume_Training:
        print('Overwrite your previously trained model ?, answering "yes" will train a new model, answering "no" will resume the training of the previous model? yes or no ?')
        while True:
            ansres=input('')
            if ansres=='no':
                Resume_Training = True
                resume= False
                break
            elif ansres=='yes':
                Resume_Training = False
                resume= False
                break

    while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    MODELT_NAME=MODEL_NAME

    Seed=random.randint(1, 999999)

    # Optional CLI flags forwarded to the training script.
    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"


    # When resuming, continue from the previous session's output directory.
    resuming=""
    if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        MODELT_NAME=OUTPUT_DIR
        print('Resuming Training...')
        resuming="Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME=MODEL_NAME
        while MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)


    # Zero UNet steps means "text encoder only".
    trnonltxt=""
    if UNet_Training_Steps==0:
        trnonltxt="--train_only_text_encoder"

    Enable_text_encoder_training= True
    Enable_Text_Encoder_Concept_Training= True


    if Text_Encoder_Training_Steps==0:
        Enable_text_encoder_training= False
    else:
        stptxt=Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps==0:
        Enable_Text_Encoder_Concept_Training= False
    else:
        stptxtc=Text_Encoder_Concept_Training_Steps


    # Checkpoint-saving schedule: stp == interval (0 disables), stpsv == first
    # step at which saving starts (forced to the interval when < 200).
    if Save_Checkpoint_Every==None:
        Save_Checkpoint_Every=1
    stp=0
    if Start_saving_from_the_step==None:
        Start_saving_from_the_step=0
    if (Start_saving_from_the_step < 200):
        Start_saving_from_the_step=Save_Checkpoint_Every
    stpsv=Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp=Save_Checkpoint_Every


    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        # Train/dump only the text encoder (also used for concept training,
        # with CONCEPT_DIR passed as INSTANCE_DIR).
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)


    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
        # Train only the UNet, with periodic checkpoint saving into SESSION_DIR.
        clear_output()
        if resuming=="Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    # Phase 1: text encoder on the instance images.
    if Enable_text_encoder_training :
        print('Training the text encoder...')
        if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
            call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    # Phase 2: text encoder on the concept images (skipped when none exist).
    if Enable_Text_Encoder_Concept_Training:
        if os.path.exists(CONCEPT_DIR):
            if os.listdir(CONCEPT_DIR)!=[]:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('Training the text encoder on the concept...')
                dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
            else:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('No concept images found, skipping concept training...')
                Text_Encoder_Concept_Training_Steps=0
                time.sleep(8)
        else:
            clear_output()
            if resuming=="Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps=0
            time.sleep(8)

    # Phase 3: UNet.
    if UNet_Training_Steps!=0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    # Final conversion of the diffusers output to a single .ckpt file.
    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):

            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
            clear_output()
            if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")

        else:
            print("Something went wrong")

    return resume
+
794
+
795
+
796
def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
    """Prepare the AUTOMATIC1111 webui for testing a trained model and return
    its command-line flag string.

    Resolves the .ckpt to load (previous session > custom path > current
    session), downloads/updates the webui checkout, patches gradio's
    blocks.py so the UI is reachable through the Paperspace tensorboard URL,
    and patches webui path constants.
    """


    # --- Resolve the checkpoint to serve; each branch blocks forever when
    # the expected file is missing so the user notices. -----------------------
    if Previous_Session_Name!="":
        print("Loading a previous session model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'


        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path!="":
        print("Loading model from a custom path")
        path_to_trained_model=Custom_Path


        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'


        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    # Gradio basic-auth flag; omitted when either credential is empty.
    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    # --- Fetch/update the webui checkout under /notebooks/sd. ---------------
    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    # Replace gradio's blocks.py with the project's patched copy, then rewrite
    # its server settings in place so the UI is served via the Paperspace FQDN.
    call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # NOTE(review): the replacement lines' leading whitespace must match the
    # indentation inside gradio's blocks.py — confirm against the patched file.
    for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)


    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the bundled stablediffusion sources.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Flags handed to the webui launcher; --ckpt selects the resolved model.
    configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth

    return configf
+
881
+
882
+
883
def clean():
    """Show an interactive widget to delete a saved DreamBooth session.

    Lists the folders under /notebooks/Fast-Dreambooth/Sessions in a Select
    widget with a Remove button; removing a session also deletes any converted
    diffusers model under /notebooks/models with the same name.
    """
    # Fix: `display` is not imported at this file's top level (unlike the v2
    # script, which imports it explicitly), so calling it would raise
    # NameError.  Import it locally here.
    from IPython.display import display

    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',  # typo fix: was "Removet"
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the selected session directory (plus its
        # model folder, if any) and refresh the widget's options list.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                # Nothing selected any more: tear the widgets down.
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")
926
+
927
+
928
+
929
def hf(Name_of_your_concept, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
    """Publish a trained DreamBooth model to a private Hugging Face repo.

    Creates `<username>/<slug(Name_of_your_concept)>`, commits a README and
    the trained .ckpt, then uploads the diffusers folders from OUTPUT_DIR.

    Args:
        Name_of_your_concept: Repo display name; falls back to Session_Name.
        hf_token_write: HF write token; prompted interactively when empty.
        INSTANCE_NAME: Kept for caller compatibility (no longer used here).
        OUTPUT_DIR: Diffusers model directory to upload.
        Session_Name: Used for the uploaded checkpoint's filename.
        MDLPTH: Path to the trained .ckpt file.
    """
    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd
    from huggingface_hub import create_repo

    if(Name_of_your_concept == ""):
        Name_of_your_concept = Session_Name
    Name_of_your_concept = Name_of_your_concept.replace(" ", "-")

    if hf_token_write == "":
        print('Your Hugging Face write access token : ')
        hf_token_write = input()

    hf_token = hf_token_write

    api = HfApi()
    your_username = api.whoami(token=hf_token)["name"]

    repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"

    def bar(prg):
        # 25-character progress bar; prg*4 is the percentage (25 steps = 100%).
        clear_output()
        br = "Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
        return br

    print("Loading...")

    # Restore safety_checker / feature_extractor / model_index.json from the
    # reference SD-1.5 repo via a throwaway sparse git checkout in OUTPUT_DIR.
    os.chdir(OUTPUT_DIR)
    call('rm -r safety_checker feature_extractor .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('rm model_index.json', shell=True)
    call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git remote add -f origin https://huggingface.co/runwayml/stable-diffusion-v1-5', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')

    print(bar(1))

    # Reuse the username fetched above instead of a second whoami() API call.
    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {your_username} with TheLastBen's fast-DreamBooth notebook

'''
    # Save the readme to a file (context manager guarantees it is closed).
    with open("README.md", "w") as readme_file:
        readme_file.write(readme_text)

    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
    ]
    create_repo(repo_id, private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/feature_extractor",
        path_in_repo="feature_extractor",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(4))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/safety_checker",
        path_in_repo="safety_checker",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(8))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/scheduler",
        path_in_repo="scheduler",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(9))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/text_encoder",
        path_in_repo="text_encoder",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(12))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/tokenizer",
        path_in_repo="tokenizer",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(13))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/unet",
        path_in_repo="unet",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(21))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/vae",
        path_in_repo="vae",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(23))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR+"/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
    done()
1078
+
1079
+
1080
+
1081
def crop_image(im, size):
    """Smart-crop a PIL image to a size x size square centred on a focal point.

    The focal point is a weighted blend of face detections (OpenCV Haar
    cascades), corner features, and the highest-entropy crop window.
    Returns a one-element list containing the cropped PIL image.

    NOTE(review): relies on np, cv2, and ImageDraw being imported at module
    level — those imports are not visible in this chunk; confirm they exist.
    """

    # Debug-overlay colours (kept from the upstream A1111 autocrop code).
    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        # Blend the three detectors into one weighted point of interest.
        # A detector contributes only if its weight is > 0 AND it found points.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        # Total weight of the detectors that actually produced points, used to
        # renormalise each centroid's weight so they sum to 1.
        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point

    def image_face_points(im, settings):
        # Try a sequence of Haar cascades (eyes, frontal faces, profile,
        # upper body) and return the first cascade's hits as weighted POIs.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except:
                # A cascade failing to load/run is non-fatal; try the next one.
                continue

            if len(faces) > 0:
                # Convert (x, y, w, h) boxes to corner rects, then emit each
                # rect's centre, sized by its width, with equal shared weight.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        # Shi-Tomasi corner detection on a grayscale copy of the image.
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        # Slide a crop window along the long axis and keep the window with
        # maximum entropy; square images have no long axis, so return nothing.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            # Advance the window 4 pixels per step along the long axis.
            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean position of a list of PointOfInterest.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))

    def poi_average(pois, settings):
        # Weighted mean position; `weight and x / weight` avoids a
        # ZeroDivisionError when no POIs were found (yields 0).
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        # A weighted 2-D point; `size` is only used for debug drawing.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            # Square bounding box of the given size centred on the point.
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Crop-window geometry plus the relative weight of each detector.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale the image so its SHORT side matches the crop size, keeping aspect.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
scripts/mainpaperspacev2.py ADDED
@@ -0,0 +1,1354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ from tqdm import tqdm
13
+ import gdown
14
+ import random
15
+ import sys
16
+ import cv2
17
+ from io import BytesIO
18
+ import requests
19
+ from collections import defaultdict
20
+ from math import log, sqrt
21
+ import numpy as np
22
+ from subprocess import check_output
23
+ import six
24
+ import re
25
+
26
+ from urllib.parse import urlparse, parse_qs, unquote
27
+ from urllib.request import urlopen, Request
28
+ import tempfile
29
+ from tqdm import tqdm
30
+
31
+
32
+
33
+
34
def Deps(force_reinstall):
    """Install (or refresh) the Paperspace environment for the v2 notebook.

    Fast path: if safetensors is already present and force_reinstall is
    False, only the notebooks and the local diffusers checkout are updated.
    Slow path: wipes the preinstalled torch/PIL/transformers packages, then
    downloads apt debs and a prebuilt dependency archive and unpacks them.
    Destructive: removes system site-packages and patches stdlib warnings.py.
    """

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        # Fast path: dependencies already installed on a previous run.
        ntbk()
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers', shell=True, stdout=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            # Remove the image's preinstalled packages; they are replaced by
            # the prebuilt archive below.
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            # Expose /models inside the persistent /notebooks tree.
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            # Relative path: created under the current working directory
            # (presumably /notebooks after ntbk() — confirm).
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Download and install the pinned apt packages, then the prebuilt
        # python-dependency archive.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        # Redirect the transformers cache into the persistent notebook storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        # Silence Python warnings globally by patching the stdlib formatter.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
76
+
77
+
78
+
79
def depsinst(url, dst):
    """Download *url* to *dst* in 8 KiB chunks with a tqdm progress bar.

    The Content-Length header (when present) sizes the bar; otherwise the
    bar is indeterminate.  The HTTP response is always closed.

    Args:
        url: HTTP(S) URL of the archive to download.
        dst: Destination file path (overwritten).
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    try:
        meta = u.info()
        # Python 3 message objects expose get_all(); the old getheaders()
        # branch was Python-2 legacy and has been removed.
        content_length = meta.get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            with open(dst, "wb") as f:
                # The with-block closes f; the old explicit f.close() was redundant.
                while buffer := u.read(8192):
                    f.write(buffer)
                    pbar.update(len(buffer))
    finally:
        # Fix: the response object was previously never closed (fd leak).
        u.close()
101
+
102
+
103
def ntbk():
    """Replace /notebooks/Latest_Notebooks with freshly downloaded notebooks.

    Downloads every file listed in the Notebooks.txt manifest from the
    TheLastBen/PPS dataset, then deletes the manifest itself.  Leaves the
    current working directory at /notebooks.
    """
    os.chdir('/notebooks')

    # Start from a clean folder: drop any previous copy, then recreate it.
    if not os.path.exists('Latest_Notebooks'):
        call('mkdir Latest_Notebooks', shell=True)
    else:
        call('rm -r Latest_Notebooks', shell=True)
        call('mkdir Latest_Notebooks', shell=True)

    os.chdir('/notebooks/Latest_Notebooks')

    # wget -i fetches every URL listed in the manifest file.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)

    os.chdir('/notebooks')
115
+
116
+
117
+
118
def downloadmodel_hfv2(Path_to_HuggingFace):
    """Download a diffusers-format SD v2 model from a Hugging Face repo.

    Uses a sparse git checkout into /models/stable-diffusion-custom so only
    the diffusers folders (and no .safetensors duplicates) are fetched.
    On failure, loops forever printing an error (intentional: keeps the
    notebook cell "busy" until the user fixes the link and restarts).
    """
    import wget

    # Start clean: any previous custom model is discarded.
    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    # Use the stored HF token (if any) for private-repo authentication.
    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Sparse-checkout pattern: diffusers folders only, exclude .safetensors.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        # Success: drop the .git metadata to save space.
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # Deliberate blocking loop when the UNet weights did not arrive.
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
150
+
151
+
152
+
153
+
154
+
155
def downloadmodel_path_v2(MODEL_PATH):
    """Convert a local SD v2 checkpoint (.ckpt/.safetensors) to diffusers.

    Detects the model variant (512px vs 768px) with the det.py helper
    script, then runs the matching convertodiffv2 script to produce
    /models/stable-diffusion-custom.  On failure, loops forever printing an
    error (intentional: blocks the notebook cell until the user intervenes).
    """

    modelname=os.path.basename(MODEL_PATH)
    sftnsr=""
    # Safetensors checkpoints need an extra flag for det.py / the converter.
    if modelname.split('.')[-1]=='safetensors':
        sftnsr="--from_safetensors"

    import wget
    os.chdir('/models')
    clear_output()
    if os.path.exists(str(MODEL_PATH)):

        # Fetch and run the version-detection helper.
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
        print('Detecting model version...')
        Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+MODEL_PATH, shell=True).decode('utf-8').replace('\n', '')
        clear_output()
        print(''+Custom_Model_Version+' Detected')
        call('rm det.py', shell=True)

        # Pick the converter matching the detected resolution/reference model.
        if Custom_Model_Version=='V2.1-512px':
            call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
            call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)

        elif Custom_Model_Version=='V2.1-768px':
            call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
            call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)

        call('rm convertodiffv2.py', shell=True)
        if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        # Deliberate blocking loop when conversion produced no UNet weights.
        while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/workspace')
            time.sleep(5)
    else:
        # Deliberate blocking loop when the given path does not exist.
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/workspace')
            time.sleep(5)
195
+
196
+
197
+
198
+
199
def downloadmodel_link_v2(MODEL_LINK):
    """Download an SD v2 checkpoint from a URL and convert it to diffusers.

    Supports CivitAI, Google Drive, Hugging Face, and generic direct links.
    The checkpoint is fetched with gdown, its version detected with det.py,
    and then converted to /models/stable-diffusion-custom.  On failure,
    loops forever printing an error (intentional blocking behaviour).
    """

    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the link by hostname so the right filename probe is used.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        # Resolve the real filename behind a CivitAI redirect or a Google
        # Drive share link.  NOTE(review): the non-gdrive branch implicitly
        # returns None when no Location header / content-disposition is
        # found — callers then hit AttributeError on .split; confirm intent.
        if not gdrive:
            # CivitAI: the 302 redirect carries the filename in the
            # response-content-disposition query parameter.
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            # Google Drive: follow the confirmation page, then read the
            # RFC 5987 filename*= from the Content-Disposition header.
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    # Save under a fixed local name; the extension decides the converter flag.
    sftnsr=""
    if modelname.split('.')[-1]!='safetensors':
        modelnm="model.ckpt"
    else:
        modelnm="model.safetensors"
        sftnsr="--from_safetensors"

    os.chdir('/models')
    call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)

    if os.path.exists(modelnm):
        # Size sanity check (~1.8 GB) to reject HTML error pages / partials.
        if os.path.getsize(modelnm) > 1810671599:

            wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
            print('Detecting model version...')
            Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+modelnm, shell=True).decode('utf-8').replace('\n', '')
            clear_output()
            print(''+Custom_Model_Version+' Detected')
            call('rm det.py', shell=True)

            if Custom_Model_Version=='V2.1-512px':
                call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
                call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)

            elif Custom_Model_Version=='V2.1-768px':
                call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
                call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
            call('rm convertodiffv2.py', shell=True)

            if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm '+modelnm, shell=True)
                os.chdir('/workspace')
                clear_output()
                done()
            else:
                # Deliberate blocking loop on conversion failure.
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/workspace')
                    time.sleep(5)
        else:
            # Deliberate blocking loop on a too-small (invalid) download.
            while os.path.getsize(modelnm) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/workspace')
                time.sleep(5)
291
+
292
+
293
+
294
+
295
def dlsv2(Path_to_HuggingFace, Model_Path, Model_Link, Model_Version):
    """Resolve which Stable Diffusion v2 base model to train from.

    Sources are checked in priority order: Hugging Face repo path, local
    checkpoint path, download link, and finally the stock v2.1 models
    selected by Model_Version ("512" or "768").  Returns the diffusers
    model directory, or "" when Model_Version is unrecognized.
    """
    custom_model_dir = "/models/stable-diffusion-custom"

    # Any custom source wins over the stock models, in this priority order.
    if Path_to_HuggingFace != "":
        downloadmodel_hfv2(Path_to_HuggingFace)
        return custom_model_dir
    if Model_Path != "":
        downloadmodel_path_v2(Model_Path)
        return custom_model_dir
    if Model_Link != "":
        downloadmodel_link_v2(Model_Link)
        return custom_model_dir

    # No custom source: fall back to the preloaded stock v2.1 datasets.
    if Model_Version == "512":
        print('Using the original V2-512 model')
        return "/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base"
    if Model_Version == "768":
        print('Using the original V2-768 model')
        return "/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"

    print('Wrong model version')
    return ""
318
+
319
+
320
+
321
+
322
def sessv2(Session_Name, Session_Link_optional, MODEL_NAMEv2):
    """Create or load a v2 Dreambooth training session.

    Optionally downloads a previously saved session from a Google Drive link,
    lets the user promote an intermediary checkpoint to the session's final
    CKPT, and converts a found CKPT back to diffusers format for resuming.

    Returns the tuple of session paths/flags consumed by the training cell.
    """
    import gdown
    import wget
    # BUG FIX: check_output was called below but never imported at file top
    # (only call/getoutput/run are); import it locally.
    from subprocess import check_output
    os.chdir('/notebooks')
    PT = ""

    while Session_Name == "":
        print('Input the Session Name:')
        Session_Name = input("")
    Session_Name = Session_Name.replace(" ", "_")

    WORKSPACE = '/notebooks/Fast-Dreambooth'

    if Session_Link_optional != "":
        print('Downloading session...')

    if Session_Link_optional != "":
        if not os.path.exists(str(WORKSPACE + '/Sessions')):
            call("mkdir -p " + WORKSPACE + "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE + '/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        # BUG FIX: these were bare identifiers (NameError at runtime); the
        # intent is to replace the downloaded folders with their zip contents.
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME = Session_Name
    OUTPUT_DIR = "/models/" + Session_Name
    SESSION_DIR = WORKSPACE + "/Sessions/" + Session_Name
    CONCEPT_DIR = SESSION_DIR + "/concept_images"
    INSTANCE_DIR = SESSION_DIR + "/instance_images"
    CAPTIONS_DIR = SESSION_DIR + '/captions'
    MDLPTH = str(SESSION_DIR + "/" + Session_Name + '.ckpt')
    resumev2 = False

    if os.path.exists(str(SESSION_DIR)):
        # BUG FIX: `listdir` was unqualified; only `import os` is visible at
        # file top, so qualify it.
        mdls = [ckpt for ckpt in os.listdir(SESSION_DIR) if ckpt.split(".")[-1] == "ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                # Rename the n-th listed checkpoint to the session's final CKPT path.
                k = 0
                for i in mdls:
                    if k == n:
                        call('mv ' + SESSION_DIR + '/' + i + ' ' + MDLPTH, shell=True)
                    k = k + 1

            k = 0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')

            for i in mdls:
                print(str(k) + '- ' + i)
                k = k + 1
            n = input()
            while int(n) > k - 1:
                n = input()
            if n != "000":
                f(int(n))
                print('Using the model ' + mdls[int(n)] + " ...")
                time.sleep(4)
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        print('Session found, loading the trained model ...')

        # det.py inspects the CKPT and prints its SD version string.
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
        print('Detecting model version...')
        Model_Version = check_output('python det.py --MODEL_PATH ' + MDLPTH, shell=True).decode('utf-8').replace('\n', '')
        clear_output()
        print('' + Model_Version + ' Detected')
        call('rm det.py', shell=True)

        # Convert the CKPT back to a diffusers folder so training can resume.
        if Model_Version == 'V2.1-512px':
            call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
            call('python convertodiff.py ' + MDLPTH + ' ' + OUTPUT_DIR + ' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
        elif Model_Version == 'V2.1-768px':
            call('wget -q -O convertodiff.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
            call('python convertodiff.py ' + MDLPTH + ' ' + OUTPUT_DIR + ' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
        clear_output()
        call('rm convertodiff.py', shell=True)
        if os.path.exists(OUTPUT_DIR + '/unet/diffusion_pytorch_model.bin'):
            resumev2 = True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p ' + INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
432
+
433
+
434
+
435
def done():
    """Display a disabled, green "Done!" button as a completion indicator."""
    confirmation = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(confirmation)
444
+
445
+
446
+
447
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
    """Collect instance images for the session.

    With no folder given, shows a FileUpload widget plus an Upload button and
    imports on click; with a folder path, imports it immediately. Either way
    the actual copy/crop work is delegated to upld() and finished with done().
    """
    checkpoints_dir = INSTANCE_DIR + "/.ipynb_checkpoints"
    if os.path.exists(checkpoints_dir):
        call('rm -r ' + checkpoints_dir, shell=True)

    uploader = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon='',
    )
    out = widgets.Output()

    def on_upload_clicked(btn):
        # Run inside the Output widget so progress bars render in place.
        with out:
            uploader.close()
            btn.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()

    if IMAGES_FOLDER_OPTIONAL == "":
        Upload.on_click(on_upload_clicked)
        display(uploader, Upload, out)
    else:
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()
476
+
477
+
478
def _save_instance_image(img, path, extension):
    # PIL's registered format name for .jpg files is "JPEG", not "JPG";
    # every other extension maps to its uppercased name.
    # (The original tested `extension.upper()=="JPG" or extension.upper()=="jpg"`,
    # whose second half can never be true — simplified here.)
    if extension.upper() == "JPG":
        img.save(path, format="JPEG", quality=100)
    else:
        img.save(path, format=extension.upper())


def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
    """Import instance images and caption .txt files into the session folders.

    Sources are either a server-side folder (IMAGES_FOLDER_OPTIONAL) or the
    ipywidgets FileUpload payload. Images are optionally cropped to
    Crop_size x Crop_size via crop_image(); captions go to CAPTIONS_DIR.
    When `ren` is set, images are renamed to conceptimagedbN.<ext>.
    """

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " + INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " + CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " + INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " + CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL != "":

        if os.path.exists(IMAGES_FOLDER_OPTIONAL + "/.ipynb_checkpoints"):
            call('rm -r ' + IMAGES_FOLDER_OPTIONAL + '/.ipynb_checkpoints', shell=True)

        # Move any caption files out of the image folder first.
        if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv ' + IMAGES_FOLDER_OPTIONAL + '/*.txt ' + CAPTIONS_DIR, shell=True)

        if Crop_images:
            # Replace spaces in filenames before processing.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL + "/" + filename)
                file = file.convert("RGB")
                file = ImageOps.exif_transpose(file)
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    _save_instance_image(image[0], new_path_with_file, extension)
                else:
                    # Already the right size: plain copy is cheaper than re-encoding.
                    call("cp \'" + IMAGES_FOLDER_OPTIONAL + "/" + filename + "\' " + INSTANCE_DIR, shell=True)

        else:
            # BUG FIX: the original ran `cp -r FOLDER/. INSTANCE_DIR` once per
            # file in the folder, re-copying the entire folder N times (O(n^2)
            # I/O). Copy each entry exactly once instead.
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call('cp -r "' + IMAGES_FOLDER_OPTIONAL + '/' + filename + '" ' + INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL == "":
        # Write caption files from the upload payload.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1] == "txt":
                with open(CAPTIONS_DIR + '/' + filename, 'w') as f:
                    f.write(bytes(file['content']).decode())

        up = [file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                img = img.convert("RGB")
                img = ImageOps.exif_transpose(img)
                _save_instance_image(img, INSTANCE_DIR + "/" + filename, extension)

                # Re-open from disk and crop if the saved image isn't square at Crop_size.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    _save_instance_image(image[0], new_path_with_file, extension)

        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img = img.convert("RGB")
                extension = filename.split(".")[-1]
                _save_instance_image(img, INSTANCE_DIR + "/" + filename, extension)

    if ren:
        i = 0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb" + str(i) + "." + extension)
            call('mv "' + os.path.join(INSTANCE_DIR, filename) + '" "' + new_path_with_file + '"', shell=True)
            i = i + 1

    # Final pass: strip spaces from any remaining file names.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
586
+
587
+
588
+
589
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: pick an instance image, view it, edit and
    save its caption text file in CAPTIONS_DIR."""

    paths = ""
    out = ""
    widgets_l = ""
    clear_output()

    def Caption(path):
        # Build the preview + editor pane for one selected image.
        if path != "Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext = os.path.splitext(os.path.basename(path))[-1][1:]
            # BUG FIX: original was `if ext=="jpg" or "JPG":`, which is always
            # truthy (non-empty string literal), so EVERY image was re-encoded
            # as JPEG. Only .jpg needs remapping to PIL's "JPEG" format name.
            if ext.lower() == "jpg":
                ext = "JPEG"

            # Load the existing caption, creating an empty one if missing.
            if os.path.exists(CAPTIONS_DIR + "/" + name + '.txt'):
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'w') as f:
                    f.write("")
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'r') as f:
                    text = f.read()

            img = Image.open(os.path.join(INSTANCE_DIR, path))
            img = img.convert("RGB")
            img = img.resize((420, 420))
            image_bytes = BytesIO()
            # BUG FIX: kwarg was misspelled `qualiy`, so PIL silently ignored
            # it; low quality is deliberate — it is only a 420px preview.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img = image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"] + paths, rows=25)

    out = widgets.Output()

    def click(change):
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
651
+
652
+
653
+
654
def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
    """Run the v2 Dreambooth training pipeline for the current session.

    Trains (via `accelerate launch` of train_dreambooth_pps.py) the text
    encoder, optionally the text encoder on concept images, then the UNet,
    and finally converts the resulting diffusers folder to a CKPT placed in
    SESSION_DIR. Returns the (possibly updated) `resumev2` flag.

    NOTE(review): relies on module-level `os`, `time`, `call`, `clear_output`
    and a `random` import not visible in this chunk — presumably imported
    elsewhere in the file; confirm.
    """

    # Jupyter checkpoint folders would be picked up as training images; drop them.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # A previous model exists but the user did not ask to resume: confirm
    # interactively whether to overwrite ("yes") or resume ("no").
    if resumev2 and not Resume_Training:
        print('Overwrite your previously trained model ?, answering "yes" will train a new model, answering "no" will resumev2 the training of the previous model?  yes or no ?')
        while True:
            ansres=input('')
            if ansres=='no':
                Resume_Training = True
                resumev2= False
                break
            elif ansres=='yes':
                Resume_Training = False
                resumev2= False
                break

    # Block (printing every 5 s) until a downloaded base model is present.
    while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    MODELT_NAME=MODEL_NAMEv2

    # Fresh random seed per run.
    Seed=random.randint(1, 999999)

    # Optional CLI flags forwarded to the training script.
    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    # Resume from the previous output dir when possible; otherwise fall back
    # to the downloaded base model.
    resuming=""
    if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        MODELT_NAME=OUTPUT_DIR
        print('Resuming Training...')
        resuming="Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME=MODEL_NAMEv2
        while MODEL_NAMEv2=="":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)

    # UNet steps == 0 means: train only the text encoder.
    trnonltxt=""
    if UNet_Training_Steps==0:
        trnonltxt="--train_only_text_encoder"

    Enable_text_encoder_training= True
    Enable_Text_Encoder_Concept_Training= True

    if Text_Encoder_Training_Steps==0:
        Enable_text_encoder_training= False
    else:
        stptxt=Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps==0:
        Enable_Text_Encoder_Concept_Training= False
    else:
        stptxtc=Text_Encoder_Concept_Training_Steps

    # Intermediary-checkpoint saving parameters (stp == 0 disables saving).
    if Save_Checkpoint_Every==None:
        Save_Checkpoint_Every=1
    stp=0
    if Start_saving_from_the_step==None:
        Start_saving_from_the_step=0
    if (Start_saving_from_the_step < 200):
        Start_saving_from_the_step=Save_Checkpoint_Every
    stpsv=Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp=Save_Checkpoint_Every

    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        # Text-encoder-only training pass. Reads extrnlcptn, ofstnse,
        # CAPTIONS_DIR, Resolution and Text_Encoder_Learning_Rate from the
        # enclosing scope.
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
        # UNet-only training pass; saves intermediary CKPTs from step stpsv
        # every stp steps. Reads resuming, ofstnse, CAPTIONS_DIR and
        # UNet_Learning_Rate from the enclosing scope.
        clear_output()
        if resuming=="Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    # Phase 1: text encoder on instance images.
    if Enable_text_encoder_training :
        print('Training the text encoder...')
        if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
            call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    # Phase 2: text encoder on concept (regularization) images, if any exist.
    if Enable_Text_Encoder_Concept_Training:
        if os.path.exists(CONCEPT_DIR):
            if os.listdir(CONCEPT_DIR)!=[]:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('Training the text encoder on the concept...')
                dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
            else:
                clear_output()
                if resuming=="Yes":
                    print('Resuming Training...')
                print('No concept images found, skipping concept training...')
                Text_Encoder_Concept_Training_Steps=0
                time.sleep(8)
        else:
            clear_output()
            if resuming=="Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps=0
            time.sleep(8)

    # Phase 3: UNet.
    if UNet_Training_Steps!=0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    # Final conversion: diffusers output -> single CKPT in the session folder.
    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):

            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
            clear_output()
            if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")

        else:
            print("Something went wrong")

    return resumev2
841
+
842
+
843
+
844
+
845
def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
    """Prepare and configure the AUTOMATIC1111 WebUI for testing a trained model.

    Locates the CKPT (previous session, custom path, or current session),
    installs/updates the WebUI checkout, patches gradio for Paperspace URLs,
    and returns the command-line flag string to launch the WebUI with.
    """

    if Previous_Session_Name!="":
        print("Loading a previous session model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'

        # Blocks (printing every 5 s) until the CKPT appears.
        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path!="":
        print("Loading model from a custom path")
        path_to_trained_model=Custom_Path

        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    # Gradio basic-auth flag; disabled when either credential is empty.
    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    os.chdir('/notebooks')
    # First-time setup: fetch the pre-packed SD repositories archive.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    # Reset and update the WebUI checkout.
    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    # Patched styles.py and gradio blocks.py for the Paperspace environment.
    call('wget -q -O /notebooks/sd/stable-diffusion-webui/modules/styles.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/styles.py', shell=True)
    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    # Public URL served through the Paperspace tensorboard proxy.
    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # Rewrite gradio's blocks.py in place so links use the proxy hostname
    # over https. NOTE(review): the leading whitespace inside these
    # replacement lines must match gradio's own indentation — confirm.
    for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the WebUI's module search paths at the pre-packed repositories.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    # Strip Python-3.10-only `X | None` syntax so the code runs on 3.9.
    call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)
    call("sed -i 's@|@or@' /notebooks/sd/stable-diffusion-webui/extensions/adetailer/aaaaaa/helper.py", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint,  sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Launch flags: serve on port 6006 (Paperspace proxy) with the trained CKPT.
    configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth

    return configf
934
+
935
+
936
+
937
+
938
def clean():
    """Interactive cleanup widget: select a saved Dreambooth session and delete it.

    Removes the session folder under /notebooks/Fast-Dreambooth/Sessions and,
    if present, the matching converted model under /notebooks/models.
    """

    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',  # BUG FIX: tooltip said "Removet"
        icon='warning'
    )

    def rem(d):
        # Delete the selected session and refresh the list; close the widgets
        # when nothing is selected.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION " + s.value + " HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/' + s.value, shell=True)
                if os.path.exists('/notebooks/models/' + s.value):
                    call('rm -r /notebooks/models/' + s.value, shell=True)
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")
981
+
982
+
983
+
984
def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
    """Upload the trained concept (CKPT + diffusers folders) to a private
    Hugging Face repo, with a simple text progress bar.

    Prompts for a write token when `hf_token_write` is empty. The repo id is
    <username>/<slugified concept name>.
    """

    from slugify import slugify
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
    from huggingface_hub import create_repo
    from IPython.display import display_markdown

    if(Name_of_your_concept == ""):
        Name_of_your_concept = Session_Name
    Name_of_your_concept = Name_of_your_concept.replace(" ", "-")

    if hf_token_write == "":
        print('Your Hugging Face write access token : ')
        hf_token_write = input()

    hf_token = hf_token_write

    api = HfApi()
    your_username = api.whoami(token=hf_token)["name"]

    repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
    # BUG FIX: removed unused local `output_dir` (built from a
    # placeholder-less f-string and never referenced).

    def bar(prg):
        # Render an ASCII progress bar at `prg`/25 ticks.
        clear_output()
        br = "Uploading to HuggingFace : " '|' + '█' * prg + ' ' * (25 - prg) + '| ' + str(prg * 4) + "%"
        return br

    print(bar(1))

    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook

'''
    # Save the readme to a file
    readme_file = open("README.md", "w")
    readme_file.write(readme_text)
    readme_file.close()

    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
    ]
    create_repo(repo_id, private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    print(bar(8))

    api.upload_folder(
        folder_path=OUTPUT_DIR + "/scheduler",
        path_in_repo="scheduler",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(9))

    api.upload_folder(
        folder_path=OUTPUT_DIR + "/text_encoder",
        path_in_repo="text_encoder",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(12))

    api.upload_folder(
        folder_path=OUTPUT_DIR + "/tokenizer",
        path_in_repo="tokenizer",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(13))

    api.upload_folder(
        folder_path=OUTPUT_DIR + "/unet",
        path_in_repo="unet",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(21))

    api.upload_folder(
        folder_path=OUTPUT_DIR + "/vae",
        path_in_repo="vae",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(23))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR + "/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/" + repo_id)
    done()
1102
+
1103
+
1104
+
1105
+ def crop_image(im, size):
1106
+
1107
+ GREEN = "#0F0"
1108
+ BLUE = "#00F"
1109
+ RED = "#F00"
1110
+
1111
+ def focal_point(im, settings):
1112
+ corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
1113
+ entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
1114
+ face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
1115
+
1116
+ pois = []
1117
+
1118
+ weight_pref_total = 0
1119
+ if len(corner_points) > 0:
1120
+ weight_pref_total += settings.corner_points_weight
1121
+ if len(entropy_points) > 0:
1122
+ weight_pref_total += settings.entropy_points_weight
1123
+ if len(face_points) > 0:
1124
+ weight_pref_total += settings.face_points_weight
1125
+
1126
+ corner_centroid = None
1127
+ if len(corner_points) > 0:
1128
+ corner_centroid = centroid(corner_points)
1129
+ corner_centroid.weight = settings.corner_points_weight / weight_pref_total
1130
+ pois.append(corner_centroid)
1131
+
1132
+ entropy_centroid = None
1133
+ if len(entropy_points) > 0:
1134
+ entropy_centroid = centroid(entropy_points)
1135
+ entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
1136
+ pois.append(entropy_centroid)
1137
+
1138
+ face_centroid = None
1139
+ if len(face_points) > 0:
1140
+ face_centroid = centroid(face_points)
1141
+ face_centroid.weight = settings.face_points_weight / weight_pref_total
1142
+ pois.append(face_centroid)
1143
+
1144
+ average_point = poi_average(pois, settings)
1145
+
1146
+ return average_point
1147
+
1148
+
1149
+ def image_face_points(im, settings):
1150
+
1151
+ np_im = np.array(im)
1152
+ gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
1153
+
1154
+ tries = [
1155
+ [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
1156
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
1157
+ [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
1158
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
1159
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
1160
+ [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
1161
+ [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
1162
+ [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
1163
+ ]
1164
+ for t in tries:
1165
+ classifier = cv2.CascadeClassifier(t[0])
1166
+ minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
1167
+ try:
1168
+ faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
1169
+ minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
1170
+ except:
1171
+ continue
1172
+
1173
+ if len(faces) > 0:
1174
+ rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
1175
+ return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
1176
+ return []
1177
+
1178
+
1179
def image_corner_points(im, settings):
    """Find up to 100 Shi-Tomasi corner features as focal-point candidates.

    The bottom 10% of the frame is painted over first so captions or
    watermarks there do not attract corners.  `settings` is unused; it is
    accepted for interface symmetry with the other detectors.
    """
    grayscale = im.convert("L")

    # Mask out the lower strip of the image before detection.
    painter = ImageDraw.Draw(grayscale)
    painter.rectangle([0, im.height * .9, im.width, im.height], fill="#999")

    np_im = np.array(grayscale)

    corners = cv2.goodFeaturesToTrack(
        np_im,
        maxCorners=100,
        qualityLevel=0.04,
        minDistance=min(grayscale.width, grayscale.height) * 0.06,
        useHarrisDetector=False,
    )

    if corners is None:
        return []

    shared_weight = 1 / len(corners)
    return [
        PointOfInterest(*corner.ravel(), size=4, weight=shared_weight)
        for corner in corners
    ]
1205
+
1206
+
1207
def image_entropy_points(im, settings):
    """Slide a crop window along the image's long axis and return the centre
    of the highest-entropy position as a single PointOfInterest.

    Square images return an empty list (there is nothing to slide over).
    """
    if im.height < im.width:        # landscape: slide horizontally
        move_idx = [0, 2]
        move_max = im.size[0]
    elif im.height > im.width:      # portrait: slide vertically
        move_idx = [1, 3]
        move_max = im.size[1]
    else:
        return []

    e_max = 0
    crop_current = [0, 0, settings.crop_width, settings.crop_height]
    crop_best = crop_current
    while crop_current[move_idx[1]] < move_max:
        e = image_entropy(im.crop(tuple(crop_current)))
        if e > e_max:
            e_max = e
            crop_best = list(crop_current)
        # Advance the window 4px along the sliding axis.
        crop_current[move_idx[0]] += 4
        crop_current[move_idx[1]] += 4

    x_mid = int(crop_best[0] + settings.crop_width / 2)
    y_mid = int(crop_best[1] + settings.crop_height / 2)

    return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
1237
+
1238
+
1239
def image_entropy(im):
    # Shannon entropy (base 2) of the image's intensity histogram.
    # greyscale image entropy
    # band = np.asarray(im.convert("L"))
    # NOTE(review): "1" converts to a bilevel (black/white) image, so only
    # two of the 255 histogram bins can ever be populated — presumably
    # intentional (matches the upstream implementation) rather than the
    # commented-out greyscale conversion above; confirm before changing.
    band = np.asarray(im.convert("1"), dtype=np.uint8)
    hist, _ = np.histogram(band, bins=range(0, 256))
    hist = hist[hist > 0]  # drop empty bins so log2 stays finite
    return -np.log2(hist / hist.sum()).sum()
1246
+
1247
def centroid(pois):
    """Return a PointOfInterest at the arithmetic mean position of `pois`.

    Raises ZeroDivisionError for an empty list (callers guard against that).
    """
    count = len(pois)
    mean_x = sum(p.x for p in pois) / count
    mean_y = sum(p.y for p in pois) / count
    return PointOfInterest(mean_x, mean_y)
1251
+
1252
+
1253
def poi_average(pois, settings):
    """Weighted average of the given points of interest.

    Coordinates are rounded to the nearest integer; with zero total weight
    the result is (0, 0).  `settings` is unused (kept for interface
    symmetry with the detector functions).
    """
    total_weight = 0.0
    weighted_x = 0.0
    weighted_y = 0.0
    for p in pois:
        total_weight += p.weight
        weighted_x += p.x * p.weight
        weighted_y += p.y * p.weight
    # `w and x/w` avoids dividing by a zero total weight.
    return PointOfInterest(round(total_weight and weighted_x / total_weight),
                           round(total_weight and weighted_y / total_weight))
1265
+
1266
+
1267
def is_landscape(w, h):
    """True when the width strictly exceeds the height."""
    return h < w
1269
+
1270
+
1271
def is_portrait(w, h):
    """True when the height strictly exceeds the width."""
    return w < h
1273
+
1274
+
1275
def is_square(w, h):
    """True when width and height are equal."""
    return not (w != h)
1277
+
1278
+
1279
class PointOfInterest:
    """A weighted focal point with an approximate pixel size."""

    def __init__(self, x, y, weight=1.0, size=10):
        # Position, relative importance, and approximate extent in pixels.
        self.x = x
        self.y = y
        self.weight = weight
        self.size = size

    def bounding(self, size):
        """Axis-aligned box of side `size` centred on this point,
        as [left, top, right, bottom]."""
        half = size // 2
        return [self.x - half, self.y - half, self.x + half, self.y + half]
1293
+
1294
class Settings:
    """Crop configuration: target crop size plus the relative weight given
    to each focal-point detector (faces, entropy, corners)."""

    def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
        self.face_points_weight = face_points_weight
        self.entropy_points_weight = entropy_points_weight
        self.corner_points_weight = corner_points_weight
        self.crop_height = crop_height
        self.crop_width = crop_width
1301
+
1302
+ settings = Settings(
1303
+ crop_width = size,
1304
+ crop_height = size,
1305
+ face_points_weight = 0.9,
1306
+ entropy_points_weight = 0.15,
1307
+ corner_points_weight = 0.5,
1308
+ )
1309
+
1310
+ scale_by = 1
1311
+ if is_landscape(im.width, im.height):
1312
+ scale_by = settings.crop_height / im.height
1313
+ elif is_portrait(im.width, im.height):
1314
+ scale_by = settings.crop_width / im.width
1315
+ elif is_square(im.width, im.height):
1316
+ if is_square(settings.crop_width, settings.crop_height):
1317
+ scale_by = settings.crop_width / im.width
1318
+ elif is_landscape(settings.crop_width, settings.crop_height):
1319
+ scale_by = settings.crop_width / im.width
1320
+ elif is_portrait(settings.crop_width, settings.crop_height):
1321
+ scale_by = settings.crop_height / im.height
1322
+
1323
+ im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
1324
+ im_debug = im.copy()
1325
+
1326
+ focus = focal_point(im_debug, settings)
1327
+
1328
+ # take the focal point and turn it into crop coordinates that try to center over the focal
1329
+ # point but then get adjusted back into the frame
1330
+ y_half = int(settings.crop_height / 2)
1331
+ x_half = int(settings.crop_width / 2)
1332
+
1333
+ x1 = focus.x - x_half
1334
+ if x1 < 0:
1335
+ x1 = 0
1336
+ elif x1 + settings.crop_width > im.width:
1337
+ x1 = im.width - settings.crop_width
1338
+
1339
+ y1 = focus.y - y_half
1340
+ if y1 < 0:
1341
+ y1 = 0
1342
+ elif y1 + settings.crop_height > im.height:
1343
+ y1 = im.height - settings.crop_height
1344
+
1345
+ x2 = x1 + settings.crop_width
1346
+ y2 = y1 + settings.crop_height
1347
+
1348
+ crop = [x1, y1, x2, y2]
1349
+
1350
+ results = []
1351
+
1352
+ results.append(im.crop(tuple(crop)))
1353
+
1354
+ return results
scripts/mainpaperspacev2_311.py ADDED
@@ -0,0 +1,1347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ from tqdm import tqdm
13
+ import gdown
14
+ import random
15
+ import sys
16
+ import cv2
17
+ from io import BytesIO
18
+ import requests
19
+ from collections import defaultdict
20
+ from math import log, sqrt
21
+ import numpy as np
22
+ from subprocess import check_output
23
+ import six
24
+ import re
25
+
26
+ from urllib.parse import urlparse, parse_qs, unquote
27
+ from urllib.request import urlopen, Request
28
+ import tempfile
29
+ from tqdm import tqdm
30
+
31
+
32
+
33
+
34
def Deps(force_reinstall):
    # Install (or verify) the notebook's system/Python dependencies.
    # Fast path: gradio already present system-wide and no reinstall forced —
    # just refresh the notebooks and environment variables.
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        ntbk()
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        # Full (re)install: pin accelerate, drop deepspeed, refresh notebooks.
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        call("pip uninstall -qq deepspeed -y", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Fetch and install apt packages listed in the upstream manifest.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps_311.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Pre-built Python dependency archive, extracted onto the root fs.
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # Silence Python warning formatting globally by patching warnings.py.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
74
+
75
+
76
+
77
def depsinst(url, dst):
    # Stream-download the archive at `url` into `dst`, showing a tqdm bar
    # labelled "Installing dependencies".
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Legacy (Python 2 style) header access kept for compatibility.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
        with open(dst, "wb") as f:
            # Copy in 8 KiB chunks until EOF, updating the bar as we go.
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
        f.close()  # NOTE(review): redundant — the `with` block already closed the file
99
+
100
+
101
def ntbk():
    """Refresh /notebooks/Latest_Notebooks with the current upstream notebooks."""
    os.chdir('/notebooks')
    # Start from an empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # The manifest lists the notebook URLs; fetch them all, then drop it.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
113
+
114
+
115
+
116
def downloadmodel_hfv2(Path_to_HuggingFace):
    # Download a diffusers-format SD v2 repo from HuggingFace via a sparse
    # git checkout into /models/stable-diffusion-custom.
    import wget

    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    # Use the saved HF token (if any) to authenticate the clone URL.
    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    # Sparse checkout: pull only the diffusers components, skip .safetensors.
    call("git config core.sparsecheckout true", shell=True)
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # NOTE(review): if the pull failed this loops forever printing until the
    # user interrupts and retries with a valid repo path.
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
148
+
149
+
150
+
151
+
152
+
153
def downloadmodel_path_v2(MODEL_PATH):
    # Convert a local .ckpt/.safetensors SD v2 checkpoint into diffusers
    # format under /models/stable-diffusion-custom.
    modelname=os.path.basename(MODEL_PATH)
    sftnsr=""
    if modelname.split('.')[-1]=='safetensors':
        sftnsr="--from_safetensors"

    import wget
    os.chdir('/models')
    clear_output()
    if os.path.exists(str(MODEL_PATH)):

        # det.py inspects the checkpoint and prints its SD version string.
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
        print('Detecting model version...')
        Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+MODEL_PATH, shell=True).decode('utf-8').replace('\n', '')
        clear_output()
        print(''+Custom_Model_Version+' Detected')
        call('rm det.py', shell=True)

        # Pick the converter matching the detected resolution variant.
        if Custom_Model_Version=='V2.1-512px':
            call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
            call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)

        elif Custom_Model_Version=='V2.1-768px':
            call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
            call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)

        call('rm convertodiffv2.py', shell=True)
        if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        # NOTE(review): on conversion failure this loops forever until the
        # user interrupts; success falls through because the file exists.
        while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/workspace')
            time.sleep(5)
    else:
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/workspace')
            time.sleep(5)
193
+
194
+
195
+
196
+
197
def downloadmodel_link_v2(MODEL_LINK):
    # Download an SD v2 checkpoint from CivitAI / Google Drive / HuggingFace /
    # any direct link, then convert it to diffusers format.
    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the link by hostname.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        # Resolve the remote file name: from the redirect's
        # content-disposition (non-gdrive) or via gdown's confirmation page.
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    # Download target name and converter flag depend on the file extension.
    sftnsr=""
    if modelname.split('.')[-1]!='safetensors':
        modelnm="model.ckpt"
    else:
        modelnm="model.safetensors"
        sftnsr="--from_safetensors"

    os.chdir('/models')
    call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)

    if os.path.exists(modelnm):
        # ~1.8 GB threshold: smaller files are assumed to be error pages or
        # truncated downloads rather than real checkpoints.
        if os.path.getsize(modelnm) > 1810671599:

            wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
            print('Detecting model version...')
            Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+modelnm, shell=True).decode('utf-8').replace('\n', '')
            clear_output()
            print(''+Custom_Model_Version+' Detected')
            call('rm det.py', shell=True)

            if Custom_Model_Version=='V2.1-512px':
                call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
                call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)

            elif Custom_Model_Version=='V2.1-768px':
                call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
                call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
            call('rm convertodiffv2.py', shell=True)

            if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm '+modelnm, shell=True)
                os.chdir('/workspace')
                clear_output()
                done()
            else:
                # NOTE(review): loops forever on conversion failure until the
                # user interrupts the cell.
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/workspace')
                    time.sleep(5)
        else:
            while os.path.getsize(modelnm) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/workspace')
                time.sleep(5)
289
+
290
+
291
+
292
+
293
def dlsv2(Path_to_HuggingFace, Model_Path, Model_Link, Model_Version):
    """Resolve which SD v2 model to use, downloading/converting if needed.

    Priority: HuggingFace repo path, then local path, then link; otherwise
    fall back to the stock v2-1 model matching `Model_Version` ("512"/"768").
    Returns the model directory, or "" for an unknown version.
    """
    custom = "/models/stable-diffusion-custom"

    if Path_to_HuggingFace != "":
        downloadmodel_hfv2(Path_to_HuggingFace)
        return custom
    if Model_Path != "":
        downloadmodel_path_v2(Model_Path)
        return custom
    if Model_Link != "":
        downloadmodel_link_v2(Model_Link)
        return custom

    # No custom model requested: use a pre-mounted original checkpoint.
    if Model_Version == "512":
        print('Using the original V2-512 model')
        return "/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base"
    if Model_Version == "768":
        print('Using the original V2-768 model')
        return "/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"

    print('Wrong model version')
    return ""
316
+
317
+
318
+
319
+
320
def sessv2(Session_Name, Session_Link_optional, MODEL_NAMEv2):
    """Create or load a Dreambooth training session.

    Optionally downloads a session folder from Google Drive, offers to adopt
    an intermediary checkpoint as the session model, and converts a found
    .ckpt to diffusers format.  Returns the tuple of session paths/state
    consumed by the training cells.
    """
    import gdown
    import wget
    os.chdir('/notebooks')
    PT=""

    # A session must have a name; prompt until one is given.
    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    if Session_Link_optional !="":
        print('Downloading session...')

    if Session_Link_optional != "":
        if not os.path.exists(str(WORKSPACE+'/Sessions')):
            call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE+'/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        # BUGFIX: these folder/archive names were written as bare Python
        # identifiers (e.g. `call("rm -r " +instance_images, ...)`), which
        # raised NameError whenever a session link was used; they are
        # literal file names inside the downloaded session folder.
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    CONCEPT_DIR=SESSION_DIR+"/concept_images"
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
    resumev2=False

    if os.path.exists(str(SESSION_DIR)):
        mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                # Rename the n-th intermediary checkpoint to the session's
                # final checkpoint name.
                k=0
                for i in mdls:
                    if k==n:
                        call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
                    k=k+1

            k=0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')

            for i in mdls:
                print(str(k)+'- '+i)
                k=k+1
            n=input()
            while int(n)>k-1:
                n=input()
            if n!="000":
                f(int(n))
                print('Using the model '+ mdls[int(n)]+" ...")
                time.sleep(4)
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAMEv2=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        print('Session found, loading the trained model ...')

        # Detect the checkpoint's SD version and convert it to diffusers.
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
        print('Detecting model version...')
        Model_Version=check_output('python det.py --MODEL_PATH '+MDLPTH, shell=True).decode('utf-8').replace('\n', '')
        clear_output()
        print(''+Model_Version+' Detected')
        call('rm det.py', shell=True)

        if Model_Version=='V2.1-512px':
            call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
            call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
        elif Model_Version=='V2.1-768px':
            call('wget -q -O convertodiff.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
            call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
        clear_output()
        call('rm convertodiff.py', shell=True)
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            resumev2=True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMEv2=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
430
+
431
+
432
+
433
def done():
    """Display a disabled green 'Done!' button as a completion indicator."""
    # Renamed the local (the original shadowed the function name with `done`).
    button = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(button)
442
+
443
+
444
+
445
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
    # Collect instance images: either show an upload widget (when no folder
    # is given) or process an existing server-side folder immediately.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    def up(Upload):
        # Button callback: close the widgets, process uploads, show 'Done'.
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()
    out=widgets.Output()

    if IMAGES_FOLDER_OPTIONAL=="":
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        # Folder mode: no widget interaction needed, process right away.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()
474
+
475
+
476
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
    # Move uploaded/provided instance images (and .txt captions) into the
    # session folders, optionally cropping each image to Crop_size squares
    # and optionally renaming everything to sequential identifiers.

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":
        # Server-side folder mode.
        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Caption sidecar files go to CAPTIONS_DIR.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Replace spaces in file names so shell commands below stay safe.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file=file.convert("RGB")
                file=ImageOps.exif_transpose(file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    # NOTE(review): `extension.upper()=="jpg"` can never be
                    # true (upper-cased text is never lower case) — the
                    # second comparison is dead code; same pattern below.
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

                else:
                    # Already the right size: plain copy.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # NOTE(review): the cp -r copies the whole folder every loop
            # iteration; one call would suffice — behaviour preserved.
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        # Widget upload mode: captions first, then images.
        up=""
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                img=img.convert("RGB")
                img=ImageOps.exif_transpose(img)

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                # Re-open the saved image and crop it if it isn't already square.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img=img.convert("RGB")
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    if ren:
        # Rename all instance images to conceptimagedb<N>.<ext>.
        i=0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            identifier=filename.split(".")[0]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
            call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
            i=i+1

    # Final pass: strip remaining spaces from file names in both folders.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
584
+
585
+
586
+
587
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor.

    Shows a selector over the instance images; picking one displays a 420px
    preview next to an editable text area bound to the matching .txt caption
    file in CAPTIONS_DIR, with a Save button.
    """
    paths=""
    out=""
    widgets_l=""
    clear_output()
    def Caption(path):
        # Build the preview + editor widgets for one image.
        if path!="Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext=os.path.splitext(os.path.basename(path))[-1][1:]
            # BUGFIX: was `if ext=="jpg" or "JPG":` — the literal "JPG" is
            # always truthy, so EVERY preview was re-encoded as JPEG.  PIL
            # has no "JPG" writer, so only map the jpg extension to the
            # JPEG format name.
            if ext.lower() == "jpg":
                ext="JPEG"

            # Ensure a caption file exists, then read its current text.
            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()

            img=Image.open(os.path.join(INSTANCE_DIR,path))
            img=img.convert("RGB")
            img=img.resize((420, 420))
            image_bytes = BytesIO()
            # BUGFIX: keyword was misspelled `qualiy`, so the low-quality
            # preview setting was silently ignored by PIL.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img= image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Persist the edited caption back to its file.
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Selector callback: render the editor for the chosen image.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
649
+
650
+
651
+
652
def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
    """Run SD v2 DreamBooth training: text encoder, optional concept pass, then UNet.

    Dispatches to the diffusers example script train_dreambooth_pps.py via
    ``accelerate launch``, then converts the diffusers output folder into a
    single ``.ckpt`` inside the session folder.

    Returns the (possibly reset) ``resumev2`` flag so the caller can persist it.
    """

    # Jupyter's hidden .ipynb_checkpoints folders would otherwise be picked
    # up as training images / captions — remove them first.
    for folder in (INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR):
        if os.path.exists(folder + "/.ipynb_checkpoints"):
            call('rm -r ' + folder + '/.ipynb_checkpoints', shell=True)

    # A previously trained model exists but the user did not ask to resume:
    # confirm before overwriting it.
    if resumev2 and not Resume_Training:
        print('Overwrite your previously trained model ?, answering "yes" will train a new model, answering "no" will resumev2 the training of the previous model? yes or no ?')
        while True:
            ansres = input('')
            if ansres == 'no':
                Resume_Training = True
                resumev2 = False
                break
            elif ansres == 'yes':
                Resume_Training = False
                resumev2 = False
                break

    # Nag loop: block until a base model has been downloaded.
    while not Resume_Training and not os.path.exists(MODEL_NAMEv2 + '/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    MODELT_NAME = MODEL_NAMEv2

    Seed = random.randint(1, 999999)

    ofstnse = ""
    if Offset_Noise:
        ofstnse = "--offset_noise"

    extrnlcptn = ""
    if External_Captions:
        extrnlcptn = "--external_captions"

    precision = "fp16"

    resuming = ""
    if Resume_Training and os.path.exists(OUTPUT_DIR + '/unet/diffusion_pytorch_model.bin'):
        # Resume from the previous session output instead of the base model.
        MODELT_NAME = OUTPUT_DIR
        print('Resuming Training...')
        resuming = "Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR + '/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME = MODEL_NAMEv2
        while MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)

    # With zero UNet steps the script should only train the text encoder.
    trnonltxt = ""
    if UNet_Training_Steps == 0:
        trnonltxt = "--train_only_text_encoder"

    Enable_text_encoder_training = True
    Enable_Text_Encoder_Concept_Training = True

    if Text_Encoder_Training_Steps == 0:
        Enable_text_encoder_training = False
    else:
        stptxt = Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps == 0:
        Enable_Text_Encoder_Concept_Training = False
    else:
        stptxtc = Text_Encoder_Concept_Training_Steps

    # Intermediate-checkpoint settings.
    # (Fix: use `is None` instead of the original `== None` comparisons.)
    if Save_Checkpoint_Every is None:
        Save_Checkpoint_Every = 1
    stp = 0
    if Start_saving_from_the_step is None:
        Start_saving_from_the_step = 0
    if Start_saving_from_the_step < 200:
        # Saving before ~200 steps is pointless; align with the interval.
        Start_saving_from_the_step = Save_Checkpoint_Every
    stpsv = Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp = Save_Checkpoint_Every

    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        # Text-encoder-only pass: --dump_only_text_encoder leaves the UNet
        # untouched and writes the trained encoder under OUTPUT_DIR.
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
        # UNet-only pass; stpsv/stp control intermediate checkpoint saving.
        clear_output()
        if resuming == "Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+extrnlcptn+' \
        '+ofstnse+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="linear" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    if Enable_text_encoder_training:
        print('Training the text encoder...')
        # Drop any stale encoder dump from an earlier run.
        if os.path.exists(OUTPUT_DIR + '/' + 'text_encoder_trained'):
            call('rm -r ' + OUTPUT_DIR + '/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    if Enable_Text_Encoder_Concept_Training:
        # (The original duplicated the "no concept images" branch for the
        # missing-dir and empty-dir cases; they are merged here.)
        if os.path.exists(CONCEPT_DIR) and os.listdir(CONCEPT_DIR) != []:
            clear_output()
            if resuming == "Yes":
                print('Resuming Training...')
            print('Training the text encoder on the concept...')
            dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
        else:
            clear_output()
            if resuming == "Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps = 0
            time.sleep(8)

    if UNet_Training_Steps != 0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    if UNet_Training_Steps == 0 and Text_Encoder_Concept_Training_Steps == 0 and Text_Encoder_Training_Steps == 0:
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR + '/unet/diffusion_pytorch_model.bin'):

            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 ' + OUTPUT_DIR + ' ' + SESSION_DIR + '/' + Session_Name + '.ckpt', shell=True)
            clear_output()
            # Check the path that was actually written above.
            # (Bug fix: the original tested INSTANCE_NAME.ckpt, which reports
            # a spurious failure whenever it differs from Session_Name.)
            if os.path.exists(SESSION_DIR + '/' + Session_Name + '.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")

        else:
            print("Something went wrong")

    return resumev2
839
+
840
+
841
+
842
+
843
def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
    """Prepare the AUTOMATIC1111 WebUI and return its launch flags.

    Resolves which trained .ckpt to load (priority: previous session >
    custom path > current session), installs/updates the WebUI repo, patches
    gradio for the Paperspace reverse proxy, and returns the command-line
    flags string for launching the UI.
    """

    if Previous_Session_Name!="":
        print("Loading a previous session model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'

        # Nag loop: blocks until the checkpoint file appears.
        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path!="":
        print("Loading model from a custom path")
        path_to_trained_model=Custom_Path

        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    # Gradio basic-auth flag; disabled when either credential is empty.
    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    # Unpack the pre-built repositories bundle on first run.
    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    # Discard local edits, then fast-forward the WebUI to latest master.
    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    # Replace gradio's blocks.py with a patched copy, then rewrite it so the
    # UI is reachable through the Paperspace reverse proxy (forces the FQDN
    # server name and the https protocol).
    call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # fileinput with inplace=True redirects stdout into the file, so every
    # line must be re-emitted via sys.stdout.write.
    for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the WebUI's repository lookups at the unpacked bundle layout.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Launch flags for the WebUI, including the resolved checkpoint path.
    configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth

    return configf
927
+
928
+
929
+
930
+
931
def clean():
    """Interactive session cleaner.

    Lists the saved sessions under /notebooks/Fast-Dreambooth/Sessions in a
    Select widget; the Remove button deletes the chosen session folder (and
    its /notebooks/models counterpart if present) and refreshes the list.
    """

    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        # Fixed user-facing typo: was "Removet the selected session".
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                # Refresh the list so the deleted session disappears.
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                # Nothing left to delete: tear the widgets down.
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")
974
+
975
+
976
+
977
def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
    """Upload the trained concept to a private Hugging Face repository.

    Creates the repo, commits the README model card and the .ckpt (MDLPTH),
    then uploads each diffusers component folder from OUTPUT_DIR, printing a
    text progress bar between steps.
    """

    from slugify import slugify
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
    from huggingface_hub import create_repo
    from IPython.display import display_markdown

    if Name_of_your_concept == "":
        Name_of_your_concept = Session_Name
    Name_of_your_concept = Name_of_your_concept.replace(" ", "-")

    if hf_token_write == "":
        print('Your Hugging Face write access token : ')
        hf_token_write = input()

    hf_token = hf_token_write

    api = HfApi()
    # Fetched once and reused (the original issued a second whoami call
    # just to build the README).
    your_username = api.whoami(token=hf_token)["name"]

    repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"

    def bar(prg):
        # 25-slot text progress bar; each slot represents 4%.
        clear_output()
        br = "Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
        return br

    print(bar(1))

    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {your_username} with TheLastBen's fast-DreamBooth notebook

'''
    # Write the model card (context manager replaces the manual close).
    with open("README.md", "w") as readme_file:
        readme_file.write(readme_text)

    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
    ]
    create_repo(repo_id, private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    print(bar(8))

    # Upload each diffusers component folder, advancing the bar to the same
    # percentages the original printed after each step.
    for folder, prg in (("scheduler", 9), ("text_encoder", 12), ("tokenizer", 13), ("unet", 21), ("vae", 23)):
        api.upload_folder(
            folder_path=OUTPUT_DIR + "/" + folder,
            path_in_repo=folder,
            repo_id=repo_id,
            token=hf_token
        )
        print(bar(prg))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR + "/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/" + repo_id)
    done()
1095
+
1096
+
1097
+
1098
def crop_image(im, size):
    """Smart-crop PIL image *im* to a size x size square.

    Scales the image so the target square fits, finds a focal point from a
    weighted blend of face detection (OpenCV Haar cascades), corner features
    and entropy, then crops a square centred on that point, clamped to the
    image bounds.  Returns a one-element list containing the cropped image.
    (Adapted from AUTOMATIC1111's autocrop module.)
    """

    # Debug-drawing colours; unused in this build.
    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        # Collect candidate points from each detector whose weight is > 0.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        # Normalise weights over the detectors that actually found points.
        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        # Weighted average of the detector centroids; (0, 0) if none found.
        average_point = poi_average(pois, settings)

        return average_point


    def image_face_points(im, settings):
        # NOTE(review): np.array(PIL image) is RGB but is converted with
        # COLOR_BGR2GRAY, so the channel weights are swapped — appears
        # harmless for cascade detection; confirm intent.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # Cascades tried in order; second element is the minimum feature
        # size as a fraction of the smallest image side.
        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except:
                # A cascade can fail to load or run; fall through to the next.
                continue

            if len(faces) > 0:
                # Convert (x, y, w, h) to corner rects, then return the rect
                # centres, each weighted equally.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []


    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # Mask out the bottom 10% so watermarks/captions don't attract corners.
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points


    def image_entropy_points(im, settings):
        # Slide a crop window along the long axis and keep the position with
        # the highest entropy; square images have no slack, so return [].
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            # Advance the window 4px at a time.
            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        # Return the centre of the best window as a single weighted point.
        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]


    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean position of a list of points.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))


    def poi_average(pois, settings):
        # Weighted mean; `weight and x / weight` avoids ZeroDivisionError
        # when pois is empty (yields 0).
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)


    def is_landscape(w, h):
        return w > h


    def is_portrait(w, h):
        return h > w


    def is_square(w, h):
        return w == h


    class PointOfInterest:
        # A weighted 2D point; `size` is only used for debug drawing.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            # Square bounding box of the given size centred on the point.
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Crop target dimensions and the relative weight of each detector.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale so the crop square fits: match the short side to the square.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
scripts/mainppsComfy.py ADDED
@@ -0,0 +1,415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from IPython.display import clear_output
3
+ from subprocess import call, getoutput, Popen, run
4
+ import time
5
+ import ipywidgets as widgets
6
+ import requests
7
+ import sys
8
+ import fileinput
9
+ from torch.hub import download_url_to_file
10
+ from urllib.parse import urlparse, parse_qs, unquote
11
+ import re
12
+ import six
13
+
14
+ from urllib.request import urlopen, Request
15
+ import tempfile
16
+ from tqdm import tqdm
17
+
18
+
19
+
20
+
21
def Deps(force_reinstall):
    """Install (or verify) the notebook's system and Python dependencies.

    Fast path: if safetensors is already importable and force_reinstall is
    False, only refresh the notebooks, pin diffusers and set env vars.
    Slow path: wipe the preinstalled torch stack, unpack the pre-built
    dependency archive, and clone the patched diffusers repo.
    """

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        ntbk()
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        # Remove the image's preinstalled torch stack so the pinned archive
        # versions take precedence.
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        # /models lives outside /notebooks and is symlinked in so large model
        # files don't count against the persistent-storage quota.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # aptdeps.txt lists .deb URLs; install them all.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Pre-built Python dependency tree, extracted directly over /.
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        # Redirect the transformers cache into persistent storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        # Silence all Python warnings by neutering warnings formatting.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
64
+
65
+
66
def depsinst(url, dst):
    """Download the pre-built dependency archive from *url* to *dst*.

    Thin wrapper around :func:`dwn` with a fixed progress-bar label
    ('Installing dependencies'); the two functions previously duplicated
    the same download loop line for line.
    """
    dwn(url, dst, 'Installing dependencies')
88
+
89
+
90
+
91
def dwn(url, dst, msg):
    """Download *url* to file *dst*, showing a tqdm progress bar labelled *msg*.

    The bar total comes from the Content-Length header when the server
    provides one; otherwise tqdm shows an indeterminate count.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Older urllib responses expose getheaders(); newer ones get_all().
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    # Stream in 8 KiB chunks; both context managers close their resources,
    # so the original's redundant f.close() after the with-block is gone.
    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg + ' |{bar:20}| {percentage:3.0f}%') as pbar:
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
113
+
114
+
115
+
116
+
117
def ntbk():
    """Refresh /notebooks/Latest_Notebooks with the latest notebook files.

    Recreates the folder from scratch (so stale notebooks are dropped) and
    downloads every URL listed in the Notebooks.txt manifest.
    """
    os.chdir('/notebooks')
    # The original duplicated `mkdir` in both branches of an if/else;
    # remove-if-present followed by a single mkdir is equivalent.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # Notebooks.txt lists one notebook URL per line, consumed by wget -i.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
129
+
130
+
131
+
132
+
133
def repo():
    """Clone or update the ComfyUI repository under /notebooks."""

    # (The original called os.chdir('/notebooks') twice back to back.)
    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    # Discard any local edits so git pull always fast-forwards.
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)
    os.chdir('/notebooks')
    clear_output()
    done()
149
+
150
+
151
+
152
def mdls(Original_Model_Version, Path_to_MODEL, MODEL_LINK, Temporary_Storage=False):
    """Pick the checkpoint to use and download it when needed.

    Args:
        Original_Model_Version: "v1.5", "v2-512", "v2-768" or "SDXL"; used only
            when no custom path/link is given.
        Path_to_MODEL: local path to an existing checkpoint (highest priority).
        MODEL_LINK: civitai / Google Drive / direct URL to download from.
        Temporary_Storage: when True, downloads go to /models instead of the
            ComfyUI checkpoints folder.

    Returns:
        Path to the selected checkpoint file, or the checkpoints directory when
        nothing valid was selected.
    """
    import gdown
    import shlex

    src = getsrc(MODEL_LINK)

    # Expose the read-only Gradient datasets inside the checkpoints folder
    # (output is discarded: the symlinks may already exist).
    for ds in ('/datasets/stable-diffusion-classic/SDv1.5.ckpt',
               '/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base/v2-1_512-nonema-pruned.safetensors',
               '/datasets/stable-diffusion-v2-1/stable-diffusion-2-1/v2-1_768-nonema-pruned.safetensors',
               '/datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors'):
        call('ln -s ' + ds + ' /notebooks/ComfyUI/models/checkpoints', shell=True,
             stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    model = None  # fix: explicit sentinel replaces the bare try/except around an unbound name

    if Path_to_MODEL != '':
        if os.path.exists(str(Path_to_MODEL)):
            print('Using the custom model.')
            model = Path_to_MODEL
        else:
            print('Wrong path, check that the path to the model is correct')

    elif MODEL_LINK != "":
        # Resolve the target filename according to the link's source.
        if src == 'civitai':
            modelname = get_name(MODEL_LINK, False)
        elif src == 'gdrive':
            modelname = get_name(MODEL_LINK, True)
        else:
            modelname = os.path.basename(MODEL_LINK)
        if Temporary_Storage:
            model = f'/models/{modelname}'
        else:
            model = f'/notebooks/ComfyUI/models/checkpoints/{modelname}'

        if not os.path.exists(model):
            if src == 'civitai':
                dwn(MODEL_LINK, model, 'Downloading the custom model')
            else:
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
            clear_output()
        else:
            print('Model already exists')

        # A real checkpoint is larger than ~1.8 GB; anything smaller is a
        # failed or partial download and gets removed.
        if os.path.exists(model) and os.path.getsize(model) > 1810671599:
            print('Model downloaded, using the custom model.')
        else:
            # fix: shlex.quote keeps filenames with spaces/metacharacters from
            # breaking (or abusing) the shell command; was 'rm ' + model.
            call('rm ' + shlex.quote(model), shell=True,
                 stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
            print('Wrong link, check that the link is valid')

    else:
        # No custom model: fall back to one of the bundled originals.
        originals = {
            "v1.5": ("/notebooks/ComfyUI/models/checkpoints/SDv1.5.ckpt",
                     'Using the original V1.5 model'),
            "v2-512": ("/notebooks/ComfyUI/models/checkpoints/v2-1_512-nonema-pruned.safetensors",
                       'Using the original V2-512 model'),
            "v2-768": ("/notebooks/ComfyUI/models/checkpoints/v2-1_768-nonema-pruned.safetensors",
                       'Using the original V2-768 model'),
            "SDXL": ("/notebooks/ComfyUI/models/checkpoints/sd_xl_base_1.0.safetensors",
                     'Using the original SDXL model'),
        }
        if Original_Model_Version in originals:
            model, msg = originals[Original_Model_Version]
            print(msg)
        else:
            model = "/notebooks/ComfyUI/models/checkpoints"
            print('Wrong model version, try again')

    if model is None:
        # Nothing valid was selected; return the folder so callers still get a path.
        model = "/notebooks/ComfyUI/models/checkpoints"

    return model
235
+
236
+
237
+
238
def loradwn(LoRA_LINK):
    """Download a LoRA file into /notebooks/ComfyUI/models/loras."""
    import gdown

    if LoRA_LINK == '':
        print('Nothing to do')
        return

    link_src = getsrc(LoRA_LINK)

    # Resolve the destination filename from the link's source.
    if link_src == 'civitai':
        fname = get_name(LoRA_LINK, False)
    elif link_src == 'gdrive':
        fname = get_name(LoRA_LINK, True)
    else:
        fname = os.path.basename(LoRA_LINK)
    loramodel = f'/notebooks/ComfyUI/models/loras/{fname}'

    if os.path.exists(loramodel):
        print('Model already exists')
    else:
        # civitai links go through the streaming downloader, everything else
        # (gdrive/direct) through gdown.
        if link_src == 'civitai':
            dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
        else:
            gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
        clear_output()

    if os.path.exists(loramodel):
        print('LoRA downloaded')
    else:
        print('Wrong link, check that the link is valid')
276
+
277
+
278
+
279
def CNet(ControlNet_Model, ControlNet_XL_Model):
    """Download the chosen ControlNet v1 and ControlNet-XL models.

    Args:
        ControlNet_Model: "all"/"All", "15" (t2i adapters only), "none", or a
            1-based index (as a string) into the v1 model list.
        ControlNet_XL_Model: "all"/"All", "none", or a 1-based index (string)
            into the XL model list.
    """

    def download(url, model_dir):
        # Download one model file, skipping it when already present.
        filename = os.path.basename(urlparse(url).path)
        pth = os.path.abspath(os.path.join(model_dir, filename))
        if not os.path.exists(pth):
            print('Downloading: ' + os.path.basename(url))
            download_url_to_file(url, pth, hash_prefix=None, progress=True)
        else:
            # fix: the f-string had no placeholder, so every message was identical
            print(f"The model {filename} already exists")

    wrngv1 = False
    mdldir = "/notebooks/ComfyUI/models/controlnet"
    # Migrate old "_sd14v1" filenames to the current "-fp16" naming scheme.
    for filename in os.listdir(mdldir):
        if "_sd14v1" in filename:
            renamed = re.sub("_sd14v1", "-fp16", filename)
            os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))

    # Fetch the current lists of downloadable model URLs (one per line).
    call('wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt', shell=True)
    call('wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt', shell=True)

    with open("CN_models.txt", 'r') as f:
        mdllnk = f.read().splitlines()
    with open("CN_models_XL.txt", 'r') as d:
        mdllnk_XL = d.read().splitlines()
    call('rm CN_models.txt CN_models_XL.txt', shell=True)

    os.chdir('/notebooks')
    if ControlNet_Model == "All" or ControlNet_Model == "all":
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model == "15":
        # "15" selects only the t2i adapter models.
        mdllnk = list(filter(lambda x: 't2i' in x, mdllnk))
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model.isdigit() and int(ControlNet_Model) - 1 < 14 and int(ControlNet_Model) > 0:
        download(mdllnk[int(ControlNet_Model) - 1], mdldir)
        clear_output()

    elif ControlNet_Model == "none":
        clear_output()

    else:
        print('Wrong ControlNet V1 choice, try again')
        wrngv1 = True  # keep the error message visible: skip clear_output() below

    if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all":
        for lnk_XL in mdllnk_XL:
            download(lnk_XL, mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model) - 1 < 5:
        download(mdllnk_XL[int(ControlNet_XL_Model) - 1], mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model == "none":
        if not wrngv1:
            clear_output()
            done()

    else:
        print('Wrong ControlNet V2 choice, try again')
354
+
355
+
356
+
357
def sd():
    """Patch ComfyUI's startup banner to print the Paperspace tensorboard URL."""
    # fix: .get with a default avoids "str + None" TypeError when the script
    # runs outside Paperspace and PAPERSPACE_FQDN is unset.
    localurl = "https://tensorboard-" + os.environ.get('PAPERSPACE_FQDN', '')
    # Rewrite the server's "To see the GUI..." log line into a print of the
    # public URL (sed pattern must match ComfyUI's server.py exactly).
    call("sed -i 's@logging.info(\"To see the GUI go to: {}://{}:{}\".format(scheme, address, port))@print(\"\u2714 Connected\")\\n print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
362
+
363
+
364
+
365
+
366
def getsrc(url):
    """Classify a download URL by hosting service.

    Returns 'civitai', 'gdrive', 'huggingface', or 'others'.
    """
    host_map = {
        'civitai.com': 'civitai',
        'drive.google.com': 'gdrive',
        'huggingface.co': 'huggingface',
    }
    return host_map.get(urlparse(url).netloc, 'others')
377
+
378
+
379
+
380
def get_name(url, gdrive):
    """Return the filename a download link resolves to.

    Args:
        url: direct/civitai link, or a Google Drive share link.
        gdrive: True when url is a Google Drive link.

    Returns:
        The decoded filename, or None when it cannot be determined (e.g. the
        direct link does not redirect with a content-disposition parameter).
    """
    from gdown.download import get_url_from_gdrive_confirmation

    if not gdrive:
        # Follow one redirect manually and read the filename out of the
        # response-content-disposition query parameter.
        response = requests.get(url, allow_redirects=False)
        if "Location" in response.headers:
            redirected_url = response.headers["Location"]
            quer = parse_qs(urlparse(redirected_url).query)
            if "response-content-disposition" in quer:
                disp_val = quer["response-content-disposition"][0].split(";")
                for vals in disp_val:
                    if vals.strip().startswith("filename="):
                        filenm = unquote(vals.split("=", 1)[1].strip())
                        return filenm.replace("\"", "")
    else:
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
        lnk = "https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
        res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
        res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
        # fix: six.moves.urllib_parse.unquote is exactly urllib.parse.unquote,
        # which this module already imports — drops the six dependency here.
        content_disposition = unquote(res.headers["Content-Disposition"])
        filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
        return filenm
403
+
404
+
405
+
406
+
407
def done():
    """Render a disabled green "Done!" button as a visual completion marker."""
    marker = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(marker)
scripts/mainppsComfy_311.py ADDED
@@ -0,0 +1,415 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from IPython.display import clear_output
3
+ from subprocess import call, getoutput, Popen, run
4
+ import time
5
+ import ipywidgets as widgets
6
+ import requests
7
+ import sys
8
+ import fileinput
9
+ from torch.hub import download_url_to_file
10
+ from urllib.parse import urlparse, parse_qs, unquote
11
+ import re
12
+ import six
13
+
14
+ from urllib.request import urlopen, Request
15
+ import tempfile
16
+ from tqdm import tqdm
17
+
18
+
19
+
20
+
21
def Deps(force_reinstall):
    """Install apt/pip dependencies for the Python 3.11 PPS image, or just
    refresh the notebooks when everything is already in place.

    Args:
        force_reinstall: when True, reinstall even if gradio is already present.
    """
    env = {'TORCH_HOME': '/notebooks/cache/torch',
           'PYTHONWARNINGS': 'ignore',
           'TF_CPP_MIN_LOG_LEVEL': '3'}

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        # Fast path: dependencies already installed.
        ntbk()
        os.environ.update(env)
        print('Modules and notebooks updated, dependencies already installed')
    else:
        def sh(cmd):
            # Run a shell command with stdout silenced.
            call(cmd, shell=True, stdout=open('/dev/null', 'w'))

        sh("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0")
        sh("pip uninstall -qq deepspeed -y")
        ntbk()
        # Persistent storage layout: /models symlinked into /notebooks.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # System packages, then the prebuilt python dependency archive.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps_311.txt', shell=True)
        sh('dpkg -i *.deb')
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        sh('tar -C / --zstd -xf ppsdeps_311.tar.zst')
        sh("pip install --root-user-action=ignore -q transformers==4.30.2")
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ.update(env)
        # Silence Python warnings globally by blanking the formatted message.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
62
+
63
+
64
+
65
+
66
def depsinst(url, dst):
    """Download the dependency archive at ``url`` to ``dst`` with a progress bar.

    Args:
        url: HTTP(S) URL of the archive.
        dst: destination file path; overwritten if it exists.

    Raises whatever ``urlopen``/file I/O raises on failure.
    """
    from contextlib import closing

    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # fix: close the response when done (it previously leaked the connection).
    with closing(urlopen(req)) as u:
        # u.info() is an email.message.Message on Python 3; get_all() is the
        # supported accessor (the getheaders() branch was Python 2 only).
        content_length = u.info().get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            # fix: removed the redundant f.close() inside the with block.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
88
+
89
+
90
+
91
def dwn(url, dst, msg):
    """Stream ``url`` to ``dst`` showing a progress bar labelled with ``msg``.

    Args:
        url: HTTP(S) URL to download.
        dst: destination file path; overwritten if it exists.
        msg: label shown on the left of the tqdm bar.

    Raises whatever ``urlopen``/file I/O raises on failure.
    """
    from contextlib import closing

    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # fix: close the response when done (it previously leaked the connection).
    with closing(urlopen(req)) as u:
        # u.info() is an email.message.Message on Python 3; get_all() is the
        # supported accessor (the getheaders() branch was Python 2 only).
        content_length = u.info().get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format=msg + ' |{bar:20}| {percentage:3.0f}%') as pbar:
            # fix: removed the redundant f.close() inside the with block.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
113
+
114
+
115
+
116
+
117
def ntbk():
    """Refresh /notebooks/Latest_Notebooks with the newest PPS notebook files."""
    os.chdir('/notebooks')
    # Always start from a clean folder so stale notebooks never linger.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # The .txt file lists one notebook URL per line; wget -i fetches them all.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
129
+
130
+
131
+
132
+
133
def repo():
    """Clone ComfyUI on first use, otherwise hard-reset and pull the latest revision."""
    os.chdir('/notebooks')
    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        # Shallow clone keeps the first download small.
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)
    os.chdir('ComfyUI')
    call('git reset --hard', shell=True)  # discard local edits so the pull cannot conflict
    print('')
    call('git pull', shell=True)
    os.chdir('/notebooks')
    clear_output()
    done()
149
+
150
+
151
+
152
def mdls(Original_Model_Version, Path_to_MODEL, MODEL_LINK, Temporary_Storage=False):
    """Pick the checkpoint to use and download it when needed.

    Args:
        Original_Model_Version: "v1.5", "v2-512", "v2-768" or "SDXL"; used only
            when no custom path/link is given.
        Path_to_MODEL: local path to an existing checkpoint (highest priority).
        MODEL_LINK: civitai / Google Drive / direct URL to download from.
        Temporary_Storage: when True, downloads go to /models instead of the
            ComfyUI checkpoints folder.

    Returns:
        Path to the selected checkpoint file, or the checkpoints directory when
        nothing valid was selected.
    """
    import gdown
    import shlex

    src = getsrc(MODEL_LINK)

    # Expose the read-only Gradient datasets inside the checkpoints folder
    # (output is discarded: the symlinks may already exist).
    for ds in ('/datasets/stable-diffusion-classic/SDv1.5.ckpt',
               '/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base/v2-1_512-nonema-pruned.safetensors',
               '/datasets/stable-diffusion-v2-1/stable-diffusion-2-1/v2-1_768-nonema-pruned.safetensors',
               '/datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors'):
        call('ln -s ' + ds + ' /notebooks/ComfyUI/models/checkpoints', shell=True,
             stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    model = None  # fix: explicit sentinel replaces the bare try/except around an unbound name

    if Path_to_MODEL != '':
        if os.path.exists(str(Path_to_MODEL)):
            print('Using the custom model.')
            model = Path_to_MODEL
        else:
            print('Wrong path, check that the path to the model is correct')

    elif MODEL_LINK != "":
        # Resolve the target filename according to the link's source.
        if src == 'civitai':
            modelname = get_name(MODEL_LINK, False)
        elif src == 'gdrive':
            modelname = get_name(MODEL_LINK, True)
        else:
            modelname = os.path.basename(MODEL_LINK)
        if Temporary_Storage:
            model = f'/models/{modelname}'
        else:
            model = f'/notebooks/ComfyUI/models/checkpoints/{modelname}'

        if not os.path.exists(model):
            if src == 'civitai':
                dwn(MODEL_LINK, model, 'Downloading the custom model')
            else:
                gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
            clear_output()
        else:
            print('Model already exists')

        # A real checkpoint is larger than ~1.8 GB; anything smaller is a
        # failed or partial download and gets removed.
        if os.path.exists(model) and os.path.getsize(model) > 1810671599:
            print('Model downloaded, using the custom model.')
        else:
            # fix: shlex.quote keeps filenames with spaces/metacharacters from
            # breaking (or abusing) the shell command; was 'rm ' + model.
            call('rm ' + shlex.quote(model), shell=True,
                 stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
            print('Wrong link, check that the link is valid')

    else:
        # No custom model: fall back to one of the bundled originals.
        originals = {
            "v1.5": ("/notebooks/ComfyUI/models/checkpoints/SDv1.5.ckpt",
                     'Using the original V1.5 model'),
            "v2-512": ("/notebooks/ComfyUI/models/checkpoints/v2-1_512-nonema-pruned.safetensors",
                       'Using the original V2-512 model'),
            "v2-768": ("/notebooks/ComfyUI/models/checkpoints/v2-1_768-nonema-pruned.safetensors",
                       'Using the original V2-768 model'),
            "SDXL": ("/notebooks/ComfyUI/models/checkpoints/sd_xl_base_1.0.safetensors",
                     'Using the original SDXL model'),
        }
        if Original_Model_Version in originals:
            model, msg = originals[Original_Model_Version]
            print(msg)
        else:
            model = "/notebooks/ComfyUI/models/checkpoints"
            print('Wrong model version, try again')

    if model is None:
        # Nothing valid was selected; return the folder so callers still get a path.
        model = "/notebooks/ComfyUI/models/checkpoints"

    return model
235
+
236
+
237
+
238
def loradwn(LoRA_LINK):
    """Download a LoRA file into /notebooks/ComfyUI/models/loras."""
    import gdown

    if LoRA_LINK == '':
        print('Nothing to do')
        return

    link_src = getsrc(LoRA_LINK)

    # Resolve the destination filename from the link's source.
    if link_src == 'civitai':
        fname = get_name(LoRA_LINK, False)
    elif link_src == 'gdrive':
        fname = get_name(LoRA_LINK, True)
    else:
        fname = os.path.basename(LoRA_LINK)
    loramodel = f'/notebooks/ComfyUI/models/loras/{fname}'

    if os.path.exists(loramodel):
        print('Model already exists')
    else:
        # civitai links go through the streaming downloader, everything else
        # (gdrive/direct) through gdown.
        if link_src == 'civitai':
            dwn(LoRA_LINK, loramodel, 'Downloading the LoRA model')
        else:
            gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
        clear_output()

    if os.path.exists(loramodel):
        print('LoRA downloaded')
    else:
        print('Wrong link, check that the link is valid')
276
+
277
+
278
+
279
def CNet(ControlNet_Model, ControlNet_XL_Model):
    """Download the chosen ControlNet v1 and ControlNet-XL models.

    Args:
        ControlNet_Model: "all"/"All", "15" (t2i adapters only), "none", or a
            1-based index (as a string) into the v1 model list.
        ControlNet_XL_Model: "all"/"All", "none", or a 1-based index (string)
            into the XL model list.
    """

    def download(url, model_dir):
        # Download one model file, skipping it when already present.
        filename = os.path.basename(urlparse(url).path)
        pth = os.path.abspath(os.path.join(model_dir, filename))
        if not os.path.exists(pth):
            print('Downloading: ' + os.path.basename(url))
            download_url_to_file(url, pth, hash_prefix=None, progress=True)
        else:
            # fix: the f-string had no placeholder, so every message was identical
            print(f"The model {filename} already exists")

    wrngv1 = False
    mdldir = "/notebooks/ComfyUI/models/controlnet"
    # Migrate old "_sd14v1" filenames to the current "-fp16" naming scheme.
    for filename in os.listdir(mdldir):
        if "_sd14v1" in filename:
            renamed = re.sub("_sd14v1", "-fp16", filename)
            os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))

    # Fetch the current lists of downloadable model URLs (one per line).
    call('wget -q -O CN_models.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models.txt', shell=True)
    call('wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt', shell=True)

    with open("CN_models.txt", 'r') as f:
        mdllnk = f.read().splitlines()
    with open("CN_models_XL.txt", 'r') as d:
        mdllnk_XL = d.read().splitlines()
    call('rm CN_models.txt CN_models_XL.txt', shell=True)

    os.chdir('/notebooks')
    if ControlNet_Model == "All" or ControlNet_Model == "all":
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model == "15":
        # "15" selects only the t2i adapter models.
        mdllnk = list(filter(lambda x: 't2i' in x, mdllnk))
        for lnk in mdllnk:
            download(lnk, mdldir)
        clear_output()

    elif ControlNet_Model.isdigit() and int(ControlNet_Model) - 1 < 14 and int(ControlNet_Model) > 0:
        download(mdllnk[int(ControlNet_Model) - 1], mdldir)
        clear_output()

    elif ControlNet_Model == "none":
        clear_output()

    else:
        print('Wrong ControlNet V1 choice, try again')
        wrngv1 = True  # keep the error message visible: skip clear_output() below

    if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all":
        for lnk_XL in mdllnk_XL:
            download(lnk_XL, mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model) - 1 < 5:
        download(mdllnk_XL[int(ControlNet_XL_Model) - 1], mdldir)
        if not wrngv1:
            clear_output()
            done()

    elif ControlNet_XL_Model == "none":
        if not wrngv1:
            clear_output()
            done()

    else:
        print('Wrong ControlNet V2 choice, try again')
354
+
355
+
356
+
357
def sd():
    """Patch ComfyUI's startup banner to print the Paperspace tensorboard URL."""
    # fix: .get with a default avoids "str + None" TypeError when the script
    # runs outside Paperspace and PAPERSPACE_FQDN is unset.
    localurl = "https://tensorboard-" + os.environ.get('PAPERSPACE_FQDN', '')
    # Rewrite the server's "To see the GUI..." log line into a print of the
    # public URL (sed pattern must match ComfyUI's server.py exactly).
    call("sed -i 's@logging.info(\"To see the GUI go to: {}://{}:{}\".format(scheme, address, port))@print(\"\u2714 Connected\")\\n print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
362
+
363
+
364
+
365
+
366
def getsrc(url):
    """Classify a download URL by hosting service.

    Returns 'civitai', 'gdrive', 'huggingface', or 'others'.
    """
    host_map = {
        'civitai.com': 'civitai',
        'drive.google.com': 'gdrive',
        'huggingface.co': 'huggingface',
    }
    return host_map.get(urlparse(url).netloc, 'others')
377
+
378
+
379
+
380
def get_name(url, gdrive):
    """Return the filename a download link resolves to.

    Args:
        url: direct/civitai link, or a Google Drive share link.
        gdrive: True when url is a Google Drive link.

    Returns:
        The decoded filename, or None when it cannot be determined (e.g. the
        direct link does not redirect with a content-disposition parameter).
    """
    from gdown.download import get_url_from_gdrive_confirmation

    if not gdrive:
        # Follow one redirect manually and read the filename out of the
        # response-content-disposition query parameter.
        response = requests.get(url, allow_redirects=False)
        if "Location" in response.headers:
            redirected_url = response.headers["Location"]
            quer = parse_qs(urlparse(redirected_url).query)
            if "response-content-disposition" in quer:
                disp_val = quer["response-content-disposition"][0].split(";")
                for vals in disp_val:
                    if vals.strip().startswith("filename="):
                        filenm = unquote(vals.split("=", 1)[1].strip())
                        return filenm.replace("\"", "")
    else:
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
        lnk = "https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
        res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
        res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
        # fix: six.moves.urllib_parse.unquote is exactly urllib.parse.unquote,
        # which this module already imports — drops the six dependency here.
        content_disposition = unquote(res.headers["Content-Disposition"])
        filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
        return filenm
403
+
404
+
405
+
406
+
407
def done():
    """Render a disabled green "Done!" button as a visual completion marker."""
    marker = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(marker)
scripts/sdxllorapps.py ADDED
@@ -0,0 +1,1152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput, Popen
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ import random
13
+ import sys
14
+ from io import BytesIO
15
+ import requests
16
+ from collections import defaultdict
17
+ from math import log, sqrt
18
+ import numpy as np
19
+ import sys
20
+ import fileinput
21
+ import six
22
+ import base64
23
+ import re
24
+ import cv2
25
+
26
+ from urllib.parse import urlparse, parse_qs, unquote
27
+ import urllib.request
28
+ from urllib.request import urlopen, Request
29
+
30
+ import tempfile
31
+ from tqdm import tqdm
32
+
33
+
34
+
35
+
36
def Deps(force_reinstall):
    """Install apt/pip dependencies for the Python 3.9 PPS image, or just
    refresh the notebooks when everything is already in place.

    Args:
        force_reinstall: when True, reinstall even if safetensors is already present.
    """
    env = {'TORCH_HOME': '/notebooks/cache/torch',
           'PYTHONWARNINGS': 'ignore'}

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        # Fast path: dependencies already installed.
        ntbk()
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        os.environ.update(env)
        print('Modules and notebooks updated, dependencies already installed')
    else:
        def sh(cmd):
            # Run a shell command with stdout silenced.
            call(cmd, shell=True, stdout=open('/dev/null', 'w'))

        sh("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0")
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            # Remove the preinstalled torch stack so the bundled archive wins.
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            sh("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*")
        ntbk()
        # Persistent storage layout: /models symlinked into /notebooks.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # System packages, then the prebuilt python dependency archive.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        sh('dpkg -i *.deb')
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        sh('tar -C / --zstd -xf ppsdeps.tar.zst')
        # Point the transformers cache at persistent storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        sh('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1')
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ.update(env)
        # Silence Python warnings globally by blanking the formatted message.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
+
80
+
81
def depsinst(url, dst):
    """Download the dependency archive at ``url`` to ``dst`` with a progress bar.

    Args:
        url: HTTP(S) URL of the archive.
        dst: destination file path; overwritten if it exists.

    Raises whatever ``urlopen``/file I/O raises on failure.
    """
    from contextlib import closing

    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # fix: close the response when done (it previously leaked the connection).
    with closing(urlopen(req)) as u:
        # u.info() is an email.message.Message on Python 3; get_all() is the
        # supported accessor (the getheaders() branch was Python 2 only).
        content_length = u.info().get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            # fix: removed the redundant f.close() inside the with block.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
+
104
+
105
+
106
def dwn(url, dst, msg):
    """Stream ``url`` to ``dst`` showing a progress bar labelled with ``msg``.

    Args:
        url: HTTP(S) URL to download.
        dst: destination file path; overwritten if it exists.
        msg: label shown on the left of the tqdm bar.

    Raises whatever ``urlopen``/file I/O raises on failure.
    """
    from contextlib import closing

    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # fix: close the response when done (it previously leaked the connection).
    with closing(urlopen(req)) as u:
        # u.info() is an email.message.Message on Python 3; get_all() is the
        # supported accessor (the getheaders() branch was Python 2 only).
        content_length = u.info().get_all("Content-Length")
        if content_length is not None and len(content_length) > 0:
            file_size = int(content_length[0])

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format=msg + ' |{bar:20}| {percentage:3.0f}%') as pbar:
            # fix: removed the redundant f.close() inside the with block.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if len(buffer) == 0:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
+
129
+
130
+
131
+
132
def ntbk():
    """Refresh /notebooks/Latest_Notebooks with the current PPS notebooks."""
    os.chdir('/notebooks')
    # Always start from an empty folder: drop any stale copy, then recreate.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
145
+
146
+
147
+
148
def ntbks():
    """Refresh /notebooks/Latest_Notebooks with the current RNPD notebooks."""
    os.chdir('/notebooks')
    # Recreate the folder from scratch on every call.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
160
+
161
def done():
    """Display a disabled green 'Done!' confirmation button in the notebook."""
    # Use a distinct local name (the original shadowed the function itself).
    btn = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(btn)
170
+
171
+
172
+
173
def mdlvxl():
    """Ensure the SDXL base model exists at /notebooks/stable-diffusion-XL.

    Sparse-clones the repo skeleton (configs/tokenizers) from HuggingFace,
    then downloads the four large safetensors weights with a progress bar.
    If the UNet weights never appear, loops forever printing a hint —
    deliberate: it keeps the notebook cell busy instead of proceeding with a
    broken model.
    """
    os.chdir('/notebooks')

    # A folder missing the UNet weights is a broken partial download: redo it.
    if os.path.exists('stable-diffusion-XL') and not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
        call('rm -r stable-diffusion-XL', shell=True)
    if not os.path.exists('stable-diffusion-XL'):
        print('Downloading SDXL model...')
        call('mkdir stable-diffusion-XL', shell=True)
        os.chdir('stable-diffusion-XL')
        call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # Sparse checkout: config/tokenizer files only; big weights excluded
        # and fetched individually below with a progress bar.
        call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.bin\n!*.onnx*\n!*.xml\n!*.msgpack" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.safetensors', 'text_encoder/model.safetensors', '1/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.safetensors', 'text_encoder_2/model.safetensors', '2/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae/diffusion_pytorch_model.safetensors', 'vae/diffusion_pytorch_model.safetensors', '3/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.safetensors', 'unet/diffusion_pytorch_model.safetensors', '4/4')
        call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()
        while not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
            print('Invalid HF token, make sure you have access to the model')
            time.sleep(8)
        # The original had an if/else on this same condition that printed the
        # identical message in both branches; collapsed to a single print.
        print('Using SDXL model')
    else:
        print('Using SDXL model')

    # Strip the force_upcast entry from the VAE config (not understood by the
    # pinned diffusers version).
    call("sed -i 's@\"force_upcast.*@@' /notebooks/stable-diffusion-XL/vae/config.json", shell=True)
206
+
207
+
208
def downloadmodel_hfxl(Path_to_HuggingFace):
    """Sparse-clone a custom diffusers-format model repo from HuggingFace into
    /notebooks/stable-diffusion-custom.

    Uses the token stored by the notebook (Fast-Dreambooth/token.txt) for
    authenticated access when present. On failure, loops forever printing a
    hint — deliberate stall so the user fixes the link instead of training on
    a broken model.
    """
    os.chdir('/notebooks')
    if os.path.exists('stable-diffusion-custom'):
        call("rm -r stable-diffusion-custom", shell=True)
    clear_output()

    if os.path.exists('Fast-Dreambooth/token.txt'):
        with open("Fast-Dreambooth/token.txt") as f:
            # strip() guards against a trailing newline in the saved token,
            # which would corrupt the https://USER:token@ URL below.
            token = f.read().strip()
        authe = f'https://USER:{token}@'
    else:
        authe = "https://"

    clear_output()
    call("mkdir stable-diffusion-custom", shell=True)
    os.chdir("stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Pull only the diffusers layout; exclude safetensors duplicates and
    # fp16 binaries.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.safetensors'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.safetensors'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
241
+
242
+
243
def downloadmodel_link_xl(MODEL_LINK):
    """Download a checkpoint from a direct link (civitai / gdrive / HF / other)
    and convert it to a diffusers folder at ./stable-diffusion-custom.

    On bad links or failed conversion, loops forever printing a hint
    (deliberate stall, consistent with the other download helpers).
    """
    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the host so we know how to resolve the filename/download.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src = 'civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src = 'gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src = 'huggingface'
        else:
            src = 'others'
        return src

    src = getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        """Resolve the served filename from redirect headers (civitai) or from
        Google Drive's confirmation flow.

        NOTE(review): the non-gdrive path can fall through and return None when
        the server sends no Location header / content-disposition — TODO confirm
        upstream behaviour for such links.
        """
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm = unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"", "")
        else:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk = "https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src == 'civitai':
        modelname = get_name(MODEL_LINK, False)
    elif src == 'gdrive':
        modelname = get_name(MODEL_LINK, True)
    else:
        modelname = os.path.basename(MODEL_LINK)

    os.chdir('/notebooks')
    if src == 'huggingface':
        dwn(MODEL_LINK, modelname, 'Downloading the Model')
    else:
        call("gdown --fuzzy " + MODEL_LINK + " -O " + modelname, shell=True)

    # 1810671599 ~ minimum plausible size of an SDXL checkpoint; anything
    # smaller is an error page or truncated download.
    if os.path.exists(modelname):
        if os.path.getsize(modelname) > 1810671599:
            print('Converting to diffusers...')
            call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
        else:
            while os.path.getsize(modelname) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/notebooks')
                time.sleep(5)
    else:
        # BUGFIX: the original called os.path.getsize() here even though the
        # file does not exist, raising FileNotFoundError instead of showing
        # the intended retry message.
        while not os.path.exists(modelname):
            print('Wrong link, check that the link is valid')
            os.chdir('/notebooks')
            time.sleep(5)
318
+
319
+
320
+
321
def downloadmodel_path_xl(MODEL_PATH):
    """Convert a local .safetensors checkpoint at MODEL_PATH into a diffusers
    folder at ./stable-diffusion-custom.

    Stalls forever with a hint when the path is wrong or the conversion fails,
    matching the behaviour of the other download helpers.
    """
    import wget
    os.chdir('/notebooks')
    clear_output()

    if os.path.exists(str(MODEL_PATH)):
        print('Converting to diffusers...')
        call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '
             + MODEL_PATH
             + ' --dump_path stable-diffusion-custom --from_safetensors',
             shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

        converted = 'stable-diffusion-custom/unet/diffusion_pytorch_model.bin'
        if os.path.exists(converted):
            clear_output()
            done()
        # Exits immediately on success; otherwise loops as a deliberate stall.
        while not os.path.exists(converted):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)
    else:
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)
344
+
345
+
346
+
347
def dls_xl(Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
    """Dispatch model acquisition by whichever source argument is non-empty.

    Priority: HuggingFace repo path, then local file path, then direct link;
    with all three empty, falls back to the stock SDXL base model.
    Returns the directory of the ready-to-use model.
    """
    os.chdir('/notebooks')
    custom_dir = "/notebooks/stable-diffusion-custom"

    if Path_to_HuggingFace != "":
        downloadmodel_hfxl(Path_to_HuggingFace)
        return custom_dir

    if MODEL_PATH != "":
        downloadmodel_path_xl(MODEL_PATH)
        return custom_dir

    if MODEL_LINK != "":
        downloadmodel_link_xl(MODEL_LINK)
        return custom_dir

    mdlvxl()
    return "/notebooks/stable-diffusion-XL"
371
+
372
+
373
def sess_xl(Session_Name, MODEL_NAMExl):
    """Create or load a Dreambooth training session and return its paths.

    Prompts interactively until a session name is given. Returns the tuple
    (WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
    INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl).
    """
    import gdown
    import wget
    os.chdir('/notebooks')
    PT = ""  # unused here; presumably kept for parity with sibling scripts

    # Prompt until a non-empty name is entered; spaces would break paths.
    while Session_Name == "":
        print('Input the Session Name:')
        Session_Name = input("")
    Session_Name = Session_Name.replace(" ", "_")

    WORKSPACE = '/notebooks/Fast-Dreambooth'

    INSTANCE_NAME = Session_Name
    OUTPUT_DIR = "/notebooks/models/" + Session_Name
    SESSION_DIR = WORKSPACE + "/Sessions/" + Session_Name
    INSTANCE_DIR = SESSION_DIR + "/instance_images"
    CAPTIONS_DIR = SESSION_DIR + '/captions'
    # Expected location of a previously trained LoRa for this session.
    MDLPTH = str(SESSION_DIR + "/" + Session_Name + '.safetensors')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous LoRa model')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p ' + INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')
        # NOTE(review): this repeated check prints the "No model found"
        # warning a second time when no model is set — looks redundant;
        # confirm against the upstream script before removing.
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')

    else:
        # SESSION_DIR exists and a previous LoRa model is present.
        print('Session Loaded, proceed')

    return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
417
+
418
+
419
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
    """Show an upload widget for instance images/captions, or ingest directly
    from IMAGES_FOLDER_OPTIONAL when that path is non-empty.

    The actual copying/cropping is delegated to upld(); this function only
    wires the ipywidgets UI around it.
    """
    # Jupyter checkpoint folders would otherwise be treated as images later.
    if os.path.exists(INSTANCE_DIR + "/.ipynb_checkpoints"):
        call('rm -r ' + INSTANCE_DIR + '/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    def up(Upload):
        # Button callback: tear down the widgets, then process the uploads.
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
            done()
    out = widgets.Output()

    if IMAGES_FOLDER_OPTIONAL == "":
        # No folder given: show the interactive uploader.
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        # Folder source: skip the UI and ingest immediately.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
        done()
449
+
450
+
451
def upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
    """Ingest instance images (and .txt caption sidecars) into INSTANCE_DIR /
    CAPTIONS_DIR, from either a local folder or the FileUpload widget.

    Optionally crops each image to Crop_size via crop_image() or resizes to
    1024 keeping aspect ratio via resize_keep_aspect() (both defined
    elsewhere in this file). Requires PIL's Image/ImageOps and io to be
    imported at module level.
    """
    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " + INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " + CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " + INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " + CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL != "":
        # ---- source: local folder ----
        if os.path.exists(IMAGES_FOLDER_OPTIONAL + "/.ipynb_checkpoints"):
            call('rm -r ' + IMAGES_FOLDER_OPTIONAL + '/.ipynb_checkpoints', shell=True)

        # Move caption sidecar files out of the image folder first.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv ' + IMAGES_FOLDER_OPTIONAL + '/*.txt ' + CAPTIONS_DIR, shell=True)
        if Crop_images:
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            # Replace spaces in filenames with dashes (training scripts
            # dislike spaces in paths).
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier = filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL + "/" + filename)
                file = file.convert("RGB")
                file = ImageOps.exif_transpose(file)
                width, height = file.size
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    # NOTE(review): `extension.upper()=="jpg"` can never be
                    # true; only the first comparison does anything.
                    if extension.upper() == "JPG" or extension.upper() == "jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
                else:
                    # Already the right size: plain copy, no re-encode.
                    call("cp \'" + IMAGES_FOLDER_OPTIONAL + "/" + filename + "\' " + INSTANCE_DIR, shell=True)
        else:
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " + IMAGES_FOLDER_OPTIONAL + "/. " + INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL == "":
        # ---- source: FileUpload widget ----
        up = ""
        # First pass: write caption files.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1] == "txt":
                with open(CAPTIONS_DIR + '/' + filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        # Keep only the images for the second pass.
        up = [file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier = filename.split(".")[0]
                img = img.convert("RGB")
                img = ImageOps.exif_transpose(img)

                if extension.upper() == "JPG" or extension.upper() == "jpg":
                    img.save(INSTANCE_DIR + "/" + filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR + "/" + filename, format=extension.upper())

                # Re-open the saved file and crop if it isn't square Crop_size.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    if extension.upper() == "JPG" or extension.upper() == "jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img = img.convert("RGB")
                extension = filename.split(".")[-1]
                identifier = filename.split(".")[0]

                if extension.upper() == "JPG" or extension.upper() == "jpg":
                    img.save(INSTANCE_DIR + "/" + filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR + "/" + filename, format=extension.upper())

    # Normalize any remaining spaces in file names in both directories.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')

    if Resize_to_1024_and_keep_aspect_ratio and not Crop_images:
        resize_keep_aspect(INSTANCE_DIR)
554
+
555
+
556
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: select an instance image from a list, view
    a small preview, and edit/save its caption .txt file in CAPTIONS_DIR."""
    paths = ""
    out = ""
    widgets_l = ""
    clear_output()

    def Caption(path):
        # Build the editor row (preview image + textarea + save button) for
        # the selected image; returns None for the placeholder entry.
        if path != "Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext = os.path.splitext(os.path.basename(path))[-1][1:]
            # BUGFIX: the original test `ext=="jpg" or "JPG"` was always true,
            # so every preview was forced to JPEG; now only jpg files are
            # remapped (PIL upper-cases the format name for its lookup).
            if ext.lower() == "jpg":
                ext = "JPEG"

            # Load the existing caption, creating an empty file if missing.
            if os.path.exists(CAPTIONS_DIR + "/" + name + '.txt'):
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'w') as f:
                    f.write("")
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'r') as f:
                    text = f.read()

            img = Image.open(os.path.join(INSTANCE_DIR, path))
            img = img.convert("RGB")
            img = img.resize((420, 420))
            image_bytes = BytesIO()
            # BUGFIX: keyword was misspelled `qualiy`, so the low-quality
            # preview setting was silently ignored by PIL.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img = image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Overwrite the caption file with the textarea contents.
                with open(CAPTIONS_DIR + "/" + name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"] + paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Re-render the editor whenever the selection changes.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
619
+
620
+
621
def dbtrainxl(Unet_Training_Epochs, Text_Encoder_Training_Epochs, Unet_Learning_Rate, Text_Encoder_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM, Intermediary_Save_Epoch):
    """Run SDXL LoRa Dreambooth training: text encoder first (optional), then
    the UNet.

    Builds `accelerate launch` command lines for the two training scripts in
    /notebooks/diffusers/examples/dreambooth. A <Session_Name>.safetensors
    file appearing in SESSION_DIR afterwards signals success.
    """
    # Jupyter checkpoint dirs inside the datasets would be read as images.
    if os.path.exists(INSTANCE_DIR + "/.ipynb_checkpoints"):
        call('rm -r ' + INSTANCE_DIR + '/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR + "/.ipynb_checkpoints"):
        call('rm -r ' + CAPTIONS_DIR + '/.ipynb_checkpoints', shell=True)

    # NOTE(review): relies on `random` being imported at module level —
    # not visible in this chunk; confirm.
    Seed = random.randint(1, 999999)

    # Optional flags become empty strings when disabled, so they vanish
    # from the command line below.
    ofstnse = ""
    if Offset_Noise:
        ofstnse = "--offset_noise"

    GC = ''
    if Save_VRAM:
        GC = '--gradient_checkpointing'

    extrnlcptn = ""
    if External_Captions:
        extrnlcptn = "--external_captions"

    precision = "fp16"

    def train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Launch the text-encoder (TI) training script.
        print('Training the Text Encoder...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_TI.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Launch the UNet LoRa training script (saves intermediaries per
        # Intermediary_Save_Epoch).
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_lora.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --saves='+Intermediary_Save_Epoch+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Unet_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    # Text encoder training only happens when the UNet is also trained.
    if Unet_Training_Epochs != 0:
        if Text_Encoder_Training_Epochs != 0:
            train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Text_Encoder_Training_Epochs)
            clear_output()
        train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Unet_Training_Epochs)
    else :
        print('Nothing to do')

    if os.path.exists(SESSION_DIR + '/' + Session_Name + '.safetensors'):
        clear_output()
        print("DONE, the LoRa model is in the session's folder")
    else:
        print("Something went wrong")
717
+
718
+
719
def sdcmf(MDLPTH):
    """Install/update ComfyUI and wire it up to the trained LoRa + SDXL base.

    Symlinks the session's LoRa folder into models/loras and the shared SDXL
    checkpoint into models/checkpoints, then patches server.py to print the
    Paperspace proxy URL on startup.
    """
    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd, create_repo

    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)

    if os.path.exists(MDLPTH):
        call('ln -s ' + os.path.dirname(MDLPTH) + ' models/loras', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Drop dangling symlinks from removed sessions (helper defined elsewhere
    # in this file).
    clean_symlinks('models/loras')

    if not os.path.exists('models/checkpoints/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/checkpoints', shell=True)

    localurl = "https://tensorboard-" + os.environ.get('PAPERSPACE_FQDN')
    # Replace ComfyUI's startup log line with a print of the proxy URL.
    # NOTE(review): the whitespace after \\n inside this sed replacement must
    # match server.py's indentation — may have been mangled in transit; verify.
    call("sed -i 's@logging.info(\"To see the GUI go to: {}://{}:{}\".format(scheme, address, port))@print(\"\u2714 Connected\")\\n            print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
748
+
749
def test(MDLPTH, User, Password):
    """Set up the AUTOMATIC1111 webui for SDXL inference with the trained LoRa.

    Clones/updates the webui, symlinks the base checkpoint and LoRa folder,
    and patches gradio so the UI is reachable through the Paperspace proxy.
    Returns the command-line flag string (configf) to launch the webui with,
    including optional gradio auth when both User and Password are given.
    """
    auth = f"--gradio-auth {User}:{Password}"
    if User == "" or Password == "":
        auth = ""

    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusiond'):  # reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    clear_output()

    if not os.path.exists('models/Stable-diffusion/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/Stable-diffusion', shell=True)

    if os.path.exists(MDLPTH):
        call('ln -s ' + os.path.dirname(MDLPTH) + ' models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Remove dangling LoRa symlinks (helper defined elsewhere in this file).
    clean_symlinks('models/Lora')

    call('wget -q -O /notebooks/sd/stable-diffusion-webui/modules/styles.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/styles.py', shell=True)
    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl = "tensorboard-" + os.environ.get('PAPERSPACE_FQDN')

    # Patch gradio in place so the webui is served through the Paperspace
    # proxy: force server name/protocol and drop the local-url checks.
    # NOTE(review): the leading spaces inside the replacement strings must
    # match blocks.py's indentation — may have been mangled in transit; verify.
    for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f'            self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = '            self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the pre-extracted stablediffusion sources.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    # Strip Python-3.10+ syntax not supported by the 3.9 runtime.
    call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)
    call("sed -i 's@|@or@' /notebooks/sd/stable-diffusion-webui/extensions/adetailer/aaaaaa/helper.py", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint,  sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    configf = "--disable-console-progressbars --no-gradio-queue --upcast-sampling --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors " + auth

    return configf
819
+
820
+
821
+
822
def clean():
    """Widget to delete a saved Dreambooth session (and its trained model).

    Lists sessions from /notebooks/Fast-Dreambooth/Sessions in a Select
    widget with a Remove button; removing also deletes the matching
    /notebooks/models/<session> folder when present.
    """
    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        # Fixed user-facing typo: was 'Removet the selected session'.
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the selected session, or tear down the UI
        # when nothing is selected.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION " + s.value + " HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/' + s.value, shell=True)
                if os.path.exists('/notebooks/models/' + s.value):
                    call('rm -r /notebooks/models/' + s.value, shell=True)
                # Refresh the list so the removed entry disappears.
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")
866
+
867
+
868
def crop_image(im, size):
    """Smart-crop the PIL image `im` to a `size` x `size` square.

    The crop is centred on a "focal point" computed from a weighted blend of
    three detectors: Haar-cascade face/eye/body detection, Shi-Tomasi corner
    features, and a sliding-window entropy scan. Returns a one-element list
    containing the cropped PIL image.
    """

    import cv2

    # Debug-overlay colours (unused in this code path).
    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        # Run each detector only if its weight is enabled.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        # Normalise weights over the detectors that actually found something.
        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        # One weighted centroid per detector family.
        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point

    def image_face_points(im, settings):
        """Return points of interest from the first Haar cascade that matches."""
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # (cascade file, minimum feature size as a fraction of the short side)
        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except:
                # NOTE(review): bare except silently skips a cascade that fails to load/run.
                continue

            if len(faces) > 0:
                # Convert (x, y, w, h) boxes to centre points weighted equally.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        """Shi-Tomasi corner features on a grayscale copy of the image."""
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        """Slide a crop window along the long axis; return the highest-entropy centre."""
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]      # slide the x coordinates of the crop box
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]      # slide the y coordinates of the crop box
            move_max = im.size[1]
        else:
            # Square image: no axis to scan.
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            # Step the window 4 px at a time.
            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        """Unweighted mean position of a list of PointOfInterest."""
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))

    def poi_average(pois, settings):
        """Weighted mean of the points; `x and y/x` guards against zero weight."""
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        # Simple value object: a weighted point with a nominal feature size.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            """Square bounding box of side `size` centred on the point."""
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Detector weights and target crop dimensions.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale the image so the crop square fits along the short side.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
1120
+
1121
+
1122
+
1123
def resize_keep_aspect(DIR):
    """Resize every image in `DIR` in place so its shorter side is 1024 px.

    Aspect ratio is preserved; only .png/.jpg/.jpeg/.webp files are touched.
    Files are overwritten at the same path using Lanczos interpolation.
    """
    # FIX: cv2 is not imported at this file's top level (crop_image imports it
    # locally), so this function previously raised NameError when called.
    import cv2

    min_dimension = 1024

    for filename in os.listdir(DIR):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
            image = cv2.imread(os.path.join(DIR, filename))

            org_height, org_width = image.shape[0], image.shape[1]

            # Scale so the *smaller* dimension becomes min_dimension.
            if org_width < org_height:
                new_width = min_dimension
                new_height = int(org_height * (min_dimension / org_width))
            else:
                new_height = min_dimension
                new_width = int(org_width * (min_dimension / org_height))

            resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)

            # PNG compression flag is ignored by non-PNG encoders.
            cv2.imwrite(os.path.join(DIR, filename), resized_image, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
1143
+
1144
+
1145
+
1146
def clean_symlinks(path):
    """Delete broken symbolic links found directly inside `path`.

    Regular files, directories, and symlinks whose targets exist are left
    untouched.
    """
    for item in os.listdir(path):
        lnk = os.path.join(path, item)
        # FIX: the original used os.path.exists(os.readlink(lnk)), which
        # resolves a *relative* link target against the current working
        # directory rather than the link's own directory, mis-classifying
        # valid relative links as broken. os.path.exists() already follows
        # symlinks and returns False when the target is missing.
        if os.path.islink(lnk) and not os.path.exists(lnk):
            os.remove(lnk)
1151
+
1152
+
scripts/sdxllorapps_311.py ADDED
@@ -0,0 +1,1147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput, Popen
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ import random
13
+ import sys
14
+ from io import BytesIO
15
+ import requests
16
+ from collections import defaultdict
17
+ from math import log, sqrt
18
+ import numpy as np
19
+ import sys
20
+ import fileinput
21
+ import six
22
+ import base64
23
+ import re
24
+ import cv2
25
+
26
+ from urllib.parse import urlparse, parse_qs, unquote
27
+ import urllib.request
28
+ from urllib.request import urlopen, Request
29
+
30
+ import tempfile
31
+ from tqdm import tqdm
32
+
33
+
34
+
35
+
36
def Deps(force_reinstall):
    """Install (or verify) the notebook's system and Python dependencies.

    Fast path: when `force_reinstall` is False and gradio is already present
    under Python 3.11's dist-packages, only refresh notebooks and env vars.
    Slow path: download apt packages, a prebuilt zstd dependency archive,
    pinned pip packages, and the patched diffusers repo. Ends with a "Done!"
    widget either way.
    """

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        # Dependencies already present — refresh notebooks and pin diffusers.
        ntbk()
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        call("pip uninstall -qq deepspeed -y", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        # Shared /models directory symlinked into the notebook workspace.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        # Fresh scratch dir for downloaded dependency artifacts.
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # apt packages listed remotely, then a prebuilt zstd archive of
        # Python deps extracted directly over the filesystem root.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps_311.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        call("pip install --root-user-action=ignore -q transformers==4.30.2", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # HACK: patches the stdlib warnings module to silence all warning text.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
79
+
80
+
81
+
82
+
83
def depsinst(url, dst):
    """Download the dependency archive at `url` to the path `dst`,
    rendering a fixed-caption tqdm progress bar while streaming."""
    request = Request(url, headers={"User-Agent": "torch.hub"})
    response = urlopen(request)
    meta = response.info()

    # Content-Length lookup works on both legacy and modern message objects.
    if hasattr(meta, 'getheaders'):
        lengths = meta.getheaders("Content-Length")
    else:
        lengths = meta.get_all("Content-Length")

    total = None
    if lengths is not None and len(lengths) > 0:
        total = int(lengths[0])

    with tqdm(total=total, disable=False, mininterval=0.5,
              bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as progress:
        with open(dst, "wb") as sink:
            while True:
                chunk = response.read(8192)
                if len(chunk) == 0:
                    break
                sink.write(chunk)
                progress.update(len(chunk))
105
+
106
+
107
+
108
def dwn(url, dst, msg):
    """Stream `url` into the file `dst`, showing a tqdm bar captioned `msg`.

    The Content-Length header (when present) sizes the bar; otherwise the
    bar is indeterminate. Raises whatever `urlopen` raises on network errors.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # getheaders() exists on legacy httplib messages; get_all() on email.message.
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg + ' |{bar:20}| {percentage:3.0f}%') as pbar:
        with open(dst, "wb") as f:
            # FIX (idiom): the original had a manual sentinel loop plus a
            # redundant f.close() inside the `with` block; the context manager
            # already closes the file, and the walrus loop is clearer.
            while buffer := u.read(8192):
                f.write(buffer)
                pbar.update(len(buffer))
130
+
131
+
132
+
133
+
134
def ntbk():
    """Refresh /notebooks/Latest_Notebooks from the PPS dataset listing,
    replacing any previous copy."""
    os.chdir('/notebooks')

    # Start from an empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)

    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
146
+
147
+
148
+
149
+
150
def ntbks():
    """Refresh /notebooks/Latest_Notebooks from the RNPD dataset listing,
    replacing any previous copy."""
    os.chdir('/notebooks')

    # Start from an empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)

    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
162
+
163
def done():
    """Display a disabled green "Done!" confirmation button."""
    # Renamed the local so it no longer shadows the function name.
    btn = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(btn)
172
+
173
+
174
+
175
def mdlvxl():
    """Download the SDXL base 1.0 model into /notebooks/stable-diffusion-XL.

    Uses a git sparse-checkout of the repo skeleton (configs/tokenizers) and
    then downloads the four large safetensors weights directly. Skips the
    download entirely when a complete copy already exists.
    """

    os.chdir('/notebooks')

    # A directory without the unet weights is an incomplete download — redo it.
    if os.path.exists('stable-diffusion-XL') and not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
        call('rm -r stable-diffusion-XL', shell=True)
    if not os.path.exists('stable-diffusion-XL'):
        print('Downloading SDXL model...')
        call('mkdir stable-diffusion-XL', shell=True)
        os.chdir('stable-diffusion-XL')
        # Sparse checkout: fetch repo structure but exclude all weight files.
        call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.bin\n!*.onnx*\n!*.xml\n!*.msgpack" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # The four weight files, fetched with a progress bar each.
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.safetensors', 'text_encoder/model.safetensors', '1/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.safetensors', 'text_encoder_2/model.safetensors', '2/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae/diffusion_pytorch_model.safetensors', 'vae/diffusion_pytorch_model.safetensors', '3/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.safetensors', 'unet/diffusion_pytorch_model.safetensors', '4/4')
        call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()
        # NOTE(review): this loop never re-attempts the download, so on
        # failure it prints forever; left as-is to preserve behavior.
        while not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
            print('Invalid HF token, make sure you have access to the model')
            time.sleep(8)
        # NOTE(review): both branches print the same message.
        if os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
            print('Using SDXL model')
        else:
            print('Using SDXL model')

    # Strip the force_upcast key from the VAE config.
    call("sed -i 's@\"force_upcast.*@@' /notebooks/stable-diffusion-XL/vae/config.json", shell=True)
207
+
208
+
209
+
210
def downloadmodel_hfxl(Path_to_HuggingFace):
    """Clone a diffusers-format model repo from Hugging Face into
    /notebooks/stable-diffusion-custom via git sparse-checkout.

    A saved token (Fast-Dreambooth/token.txt) is embedded into the clone URL
    for gated/private repos.
    """

    os.chdir('/notebooks')
    if os.path.exists('stable-diffusion-custom'):
        call("rm -r stable-diffusion-custom", shell=True)
    clear_output()

    # Build an authenticated URL prefix when a saved HF token exists.
    if os.path.exists('Fast-Dreambooth/token.txt'):
        with open("Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir stable-diffusion-custom", shell=True)
    os.chdir("stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Pull configs/tokenizers; exclude safetensors and fp16 binaries.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.safetensors'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # NOTE(review): on failure this prints forever without retrying.
    while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.safetensors'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
242
+
243
+
244
+
245
def downloadmodel_link_xl(MODEL_LINK):
    """Download a checkpoint from a direct link (CivitAI, Google Drive,
    Hugging Face, or generic URL) and convert it to diffusers format in
    /notebooks/stable-diffusion-custom.
    """

    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        """Classify the link's host to pick a download strategy."""
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        """Resolve the served filename without downloading the payload."""
        if not gdrive:
            # CivitAI: filename comes from the redirect's
            # response-content-disposition query parameter.
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            # Google Drive: follow the virus-scan confirmation page, then read
            # the UTF-8 filename from Content-Disposition.
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    os.chdir('/notebooks')
    if src=='huggingface':
        dwn(MODEL_LINK, modelname,'Downloading the Model')
    else:
        # gdown handles both Drive confirmation pages and plain URLs.
        call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelname, shell=True)

    if os.path.exists(modelname):
        # ~1.8 GB sanity threshold: anything smaller is assumed to be an
        # error page rather than a real SDXL checkpoint.
        if os.path.getsize(modelname) > 1810671599:

            print('Converting to diffusers...')
            call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                # NOTE(review): prints forever without retrying.
                while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
        else:
            while os.path.getsize(modelname) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/notebooks')
                time.sleep(5)
320
+
321
+
322
+
323
def downloadmodel_path_xl(MODEL_PATH):
    """Convert a local checkpoint file at `MODEL_PATH` to diffusers format
    in /notebooks/stable-diffusion-custom."""

    import wget
    os.chdir('/notebooks')
    clear_output()
    if os.path.exists(str(MODEL_PATH)):

        print('Converting to diffusers...')
        call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

        if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        # NOTE(review): on conversion failure this prints forever.
        while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)
    else:
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)
345
+
346
+
347
+
348
+
349
def dls_xl(Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
    """Dispatch to the right model-download helper and return the resulting
    model directory. Priority: HF repo path, then local path, then direct
    link; falls back to the stock SDXL base model."""

    os.chdir('/notebooks')

    if Path_to_HuggingFace != "":
        downloadmodel_hfxl(Path_to_HuggingFace)
        return "/notebooks/stable-diffusion-custom"

    if MODEL_PATH != "":
        downloadmodel_path_xl(MODEL_PATH)
        return "/notebooks/stable-diffusion-custom"

    if MODEL_LINK != "":
        downloadmodel_link_xl(MODEL_LINK)
        return "/notebooks/stable-diffusion-custom"

    mdlvxl()
    return "/notebooks/stable-diffusion-XL"
372
+
373
+
374
+
375
def sess_xl(Session_Name, MODEL_NAMExl):
    """Create or load a Dreambooth training session and return its paths.

    Prompts interactively until a non-empty session name is given. Returns
    (WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
    INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl).
    """
    import gdown  # NOTE(review): imported but unused in this function
    import wget   # NOTE(review): imported but unused in this function
    os.chdir('/notebooks')
    PT=""

    # Keep asking until the user supplies a name; spaces become underscores.
    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/notebooks/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.safetensors')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        # Existing session directory but no previously trained LoRA weights.
        print('Loading session with no previous LoRa model')
        if MODEL_NAMExl=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed')

    elif not os.path.exists(str(SESSION_DIR)):
        # Brand-new session: create the instance-images directory tree.
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMExl=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')
        # NOTE(review): duplicate warning — the missing-model message can be
        # printed a second time here; preserved as-is.
        if MODEL_NAMExl=="":
            print('No model found, use the "Model Download" cell to download a model.')

    else:
        # Session directory and trained weights both exist.
        print('Session Loaded, proceed')

    return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
418
+
419
+
420
+
421
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
    """Show an upload widget for instance images, or ingest directly from
    `IMAGES_FOLDER_OPTIONAL` when a folder path is given.

    Actual file processing is delegated to `upld`.
    """

    # Jupyter checkpoint folders would otherwise be ingested as images.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    def up(Upload):
        # Button callback: hide the widgets, then process the uploaded files.
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
            done()
    out=widgets.Output()

    if IMAGES_FOLDER_OPTIONAL=="":
        # Interactive path: wait for the user to pick files and click Upload.
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        # Folder path supplied: process immediately, no widgets needed.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
        done()
450
+
451
+
452
+
453
def upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
    """Ingest instance images (and .txt captions) into the session folders.

    Sources: a local folder (`IMAGES_FOLDER_OPTIONAL`) or the widget
    `uploader`. Optionally smart-crops each image to `Crop_size` square, or
    resizes to a 1024-px short side while keeping the aspect ratio.
    """

    # Optionally wipe previous instance images and captions.
    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":
        # --- Source: a folder already on disk ---
        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Caption sidecar files move to the captions directory.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Replace spaces in filenames so shell commands stay safe.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file=file.convert("RGB")
                # Honour EXIF rotation before measuring/cropping.
                file=ImageOps.exif_transpose(file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

                else:
                    # Already the right size: plain copy.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # No cropping: bulk-copy the whole folder.
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        # --- Source: the FileUpload widget ---
        up=""
        # Write caption files first, then keep only images in `up`.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                img=img.convert("RGB")
                img=ImageOps.exif_transpose(img)

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                # Re-open the saved file and crop if it isn't already square.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img=img.convert("RGB")
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    # Normalise remaining filenames with spaces in both directories.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')

    if Resize_to_1024_and_keep_aspect_ratio and not Crop_images:
        resize_keep_aspect(INSTANCE_DIR)
555
+
556
+
557
+
558
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: a selectable list of instance images with
    a 420x420 preview, an editable caption textarea, and a Save button that
    writes the caption to `CAPTIONS_DIR/<name>.txt`.
    """
    paths=""
    out=""
    widgets_l=""
    clear_output()

    def Caption(path):
        """Build the preview+editor widget row for one selected image."""
        if path!="Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext=os.path.splitext(os.path.basename(path))[-1][1:]
            # FIX: the original tested `ext=="jpg" or "JPG"`, which is always
            # truthy, so every preview was forced to JPEG regardless of the
            # file's real type. Map jpg/jpeg to Pillow's "JPEG" name and use
            # the file's own format otherwise.
            if ext.lower() in ("jpg", "jpeg"):
                ext = "JPEG"
            else:
                ext = ext.upper()

            # Load the existing caption, creating an empty file on first use.
            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                text = ""

            img=Image.open(os.path.join(INSTANCE_DIR,path))
            img=img.convert("RGB")
            img=img.resize((420, 420))
            image_bytes = BytesIO()
            # FIX: the original passed `qualiy=10` (typo), which Pillow
            # silently ignored. Low quality is intentional — this is only a
            # preview thumbnail. Non-JPEG encoders ignore the option.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img= image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Save button callback: persist the edited caption.
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Selection callback: replace the editor pane with the new image's row.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
620
+
621
+
622
+
623
def dbtrainxl(Unet_Training_Epochs, Text_Encoder_Training_Epochs, Unet_Learning_Rate, Text_Encoder_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM, Intermediary_Save_Epoch):
    """Run SDXL DreamBooth LoRA training by shelling out (via `accelerate
    launch`) to the diffusers example scripts: optionally the text encoder
    first, then the UNet.  Success is detected by the appearance of
    SESSION_DIR/<Session_Name>.safetensors, which the training scripts write.
    """

    # Jupyter checkpoint folders inside the data dirs would otherwise be
    # picked up as training images/captions; remove them first.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # Fresh random seed for every run.
    Seed=random.randint(1, 999999)

    # Translate the boolean options into CLI flags (empty string = off).
    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    GC=''
    if Save_VRAM:
        GC='--gradient_checkpointing'

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    def train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Text-encoder-only pass (train_dreambooth_sdxl_TI.py).
        # dim, ofstnselvl, GC, CAPTIONS_DIR and Text_Encoder_Learning_Rate are
        # closed over from the enclosing scope; the trailing backslashes keep
        # the command readable while producing a single shell line.
        print('Training the Text Encoder...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_TI.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # UNet LoRA pass (train_dreambooth_sdxl_lora.py); same closures as
        # above plus Intermediary_Save_Epoch (epochs at which to save LoRAs)
        # and Unet_Learning_Rate.
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_lora.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --saves='+Intermediary_Save_Epoch+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Unet_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    # Text-encoder training only runs as a prelude to UNet training; with
    # Unet_Training_Epochs == 0 nothing is trained at all.
    # NOTE(review): nesting reconstructed from the upstream script — the diff
    # view stripped the indentation; confirm against the original file.
    if Unet_Training_Epochs!=0:
        if Text_Encoder_Training_Epochs!=0:
            train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Text_Encoder_Training_Epochs)
            clear_output()
        train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Unet_Training_Epochs)
    else :
        print('Nothing to do')

    # The training scripts save the merged LoRA under the session folder.
    if os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        clear_output()
        print("DONE, the LoRa model is in the session's folder")
    else:
        print("Something went wrong")
717
+
718
+
719
+
720
+
721
def sdcmf(MDLPTH):
    """Install or update ComfyUI under /notebooks, link the trained LoRA
    folder and the shared SDXL base checkpoint into it, and patch its server
    banner to print the Paperspace reverse-proxy URL.

    MDLPTH: path to the trained LoRA file; its parent directory is symlinked
    into ComfyUI's models/loras.
    """

    # Imported here (not used below in this function body) — presumably kept
    # for interactive use in the notebook; verify before removing.
    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd, create_repo

    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    # Discard local edits (including the banner patch below) before pulling,
    # so `git pull` always applies cleanly.
    os.chdir('ComfyUI')
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)

    # Expose the session's LoRA directory to ComfyUI via a symlink
    # (stdout/stderr silenced: the link may already exist).
    if os.path.exists(MDLPTH):
        call('ln -s '+os.path.dirname(MDLPTH)+' models/loras', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Drop any symlinks whose targets have been deleted.
    clean_symlinks('models/loras')

    # Link the shared (read-only dataset) SDXL base checkpoint instead of
    # copying multiple GB into the container.
    if not os.path.exists('models/checkpoints/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/checkpoints', shell=True)

    # Rewrite ComfyUI's startup log line so it prints the externally
    # reachable Paperspace URL instead of the internal address.
    # NOTE(review): the indentation inside the sed replacement must match the
    # surrounding code in ComfyUI/server.py — confirm after a ComfyUI update.
    localurl="https://tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
    call("sed -i 's@logging.info(\"To see the GUI go to: {}://{}:{}\".format(scheme, address, port))@print(\"\u2714 Connected\")\\n        print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
749
+
750
+
751
def test(MDLPTH, User, Password):
    """Install/update AUTOMATIC1111 stable-diffusion-webui, wire in the SDXL
    base checkpoint and the trained LoRA, patch gradio for the Paperspace
    reverse proxy, and return the CLI flag string to launch the webui with.

    MDLPTH: path to the trained LoRA file (its folder is linked into
    models/Lora).  User/Password: optional gradio basic-auth credentials;
    auth is disabled when either is empty.
    """

    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    os.chdir('/notebooks')
    # One-time download of the pre-packaged source repositories archive.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    # Discard local edits so the pull applies cleanly, then fetch updates.
    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    clear_output()

    # Symlink the shared SDXL base checkpoint instead of copying it.
    if not os.path.exists('models/Stable-diffusion/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/Stable-diffusion', shell=True)

    # Expose the trained LoRA folder to the webui, then prune dead links.
    if os.path.exists(MDLPTH):
        call('ln -s '+os.path.dirname(MDLPTH)+' models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    clean_symlinks('models/Lora')

    # Replace gradio's blocks.py with a patched copy, then hard-code the
    # externally reachable server name/protocol so the UI works behind the
    # Paperspace tensorboard reverse proxy.
    call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # In-place edit: rewrite the server_name/protocol assignments and drop
    # the local-url scheme detection lines.
    # NOTE(review): the leading spaces inside the replacement strings must
    # match blocks.py's own indentation — confirm against the gradio version
    # actually installed.
    for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f'            self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = '            self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the bundled source repositories.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Launch flags: port 6006 is what the tensorboard proxy forwards.
    configf="--disable-console-progressbars --no-gradio-queue --upcast-sampling --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors "+auth

    return configf
815
+
816
+
817
+
818
+
819
def clean():
    """Interactive session remover.

    Lists the saved DreamBooth sessions and, on button click, deletes the
    selected session folder (plus its converted model folder under
    /notebooks/models, if present) and refreshes the list.
    """

    Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out=widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',  # fixed typo: was "Removet"
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the selected session (and its models/
        # counterpart), then refresh the options; with nothing selected,
        # tear the widgets down.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s,d,out)
    else:
        print("NOTHING TO REMOVE")
862
+
863
+
864
+
865
def crop_image(im, size):
    """Smart-crop *im* to a size x size square centred on the most
    interesting region of the picture.

    Port of AUTOMATIC1111's "autocrop": candidate points of interest come
    from Haar-cascade face/eye/body detection, corner detection and an
    entropy scan; their weighted centroid decides where the crop window
    lands.  Returns a one-element list containing the cropped PIL image.
    """

    import cv2  # local import: cv2 is not imported at module level

    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        # Gather POIs from each enabled detector, then average the
        # per-detector centroids, weighted by their configured weights
        # normalised over the detectors that actually fired.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point

    def image_face_points(im, settings):
        # Try a sequence of Haar cascades (eyes, frontal/profile faces,
        # upper body); the first cascade with any hits wins.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                # was a bare `except:`, which also swallowed KeyboardInterrupt
                # and SystemExit; a failing cascade just moves on to the next
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        # Slide a crop-sized window along the long axis in 4-px steps and
        # keep the position with the highest entropy; square images have no
        # slack to slide in, so they contribute nothing.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # Shannon entropy of the 1-bit (dithered) version of the crop.
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean position of a POI list.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))

    def poi_average(pois, settings):
        # Weighted mean of the POIs; `weight and x / weight` short-circuits
        # to 0 when no POIs contributed, avoiding a division by zero.
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        # A weighted 2-D point; `size` is only used for debug drawing in the
        # upstream implementation.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale so the crop square fits flush along the image's short side.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))

    # (the upstream debug copy `im_debug` was unused here and is dropped)
    focus = focal_point(im, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
1117
+
1118
+
1119
+
1120
def resize_keep_aspect(DIR):
    """Resize every image in DIR so its smallest side is 1024 px, keeping
    the aspect ratio, and overwrite the file in place (max-quality PNG
    compression settings passed to imwrite)."""

    # cv2 is not imported at module level (crop_image imports it locally
    # too); without this local import the Resize path raised NameError.
    import cv2

    min_dimension=1024

    for filename in os.listdir(DIR):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
            image = cv2.imread(os.path.join(DIR, filename))

            org_height, org_width = image.shape[0], image.shape[1]

            # Scale the short side up/down to min_dimension, long side
            # proportionally.
            if org_width < org_height:
                new_width = min_dimension
                new_height = int(org_height * (min_dimension / org_width))
            else:
                new_height = min_dimension
                new_width = int(org_width * (min_dimension / org_height))

            resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)

            cv2.imwrite(os.path.join(DIR, filename), resized_image, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
1140
+
1141
+
1142
+
1143
def clean_symlinks(path):
    """Remove broken symlinks directly under *path*.

    os.path.exists(lnk) follows the link and returns False when its target
    is missing.  The original used os.path.exists(os.readlink(lnk)), but
    readlink may return a path relative to the link's own directory, which
    was then resolved against the CWD — misclassifying valid relative links
    as broken (and broken ones as valid).
    """
    for item in os.listdir(path):
        lnk = os.path.join(path, item)
        # islink guards against deleting real files; exists follows the link.
        if os.path.islink(lnk) and not os.path.exists(lnk):
            os.remove(lnk)