Bobby committed on
Commit
74087f7
·
2 Parent(s): cb5005b cb3c878

Merge remote-tracking branch 'origin/main'

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +1 -0
  2. app.py +450 -450
  3. controlnet_aux/canny/__pycache__/__init__.cpython-310.pyc +0 -0
  4. controlnet_aux/dwpose/__pycache__/__init__.cpython-310.pyc +0 -0
  5. controlnet_aux/dwpose/__pycache__/util.cpython-310.pyc +0 -0
  6. controlnet_aux/dwpose/__pycache__/wholebody.cpython-310.pyc +0 -0
  7. controlnet_aux/hed/__pycache__/__init__.cpython-310.pyc +0 -0
  8. controlnet_aux/leres/__pycache__/__init__.cpython-310.pyc +0 -0
  9. controlnet_aux/leres/leres/__pycache__/Resnet.cpython-310.pyc +0 -0
  10. controlnet_aux/leres/leres/__pycache__/Resnext_torch.cpython-310.pyc +0 -0
  11. controlnet_aux/leres/leres/__pycache__/__init__.cpython-310.pyc +0 -0
  12. controlnet_aux/leres/leres/__pycache__/depthmap.cpython-310.pyc +0 -0
  13. controlnet_aux/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-310.pyc +0 -0
  14. controlnet_aux/leres/leres/__pycache__/net_tools.cpython-310.pyc +0 -0
  15. controlnet_aux/leres/leres/__pycache__/network_auxi.cpython-310.pyc +0 -0
  16. controlnet_aux/leres/pix2pix/__pycache__/__init__.cpython-310.pyc +0 -0
  17. controlnet_aux/leres/pix2pix/models/__pycache__/__init__.cpython-310.pyc +0 -0
  18. controlnet_aux/leres/pix2pix/models/__pycache__/base_model.cpython-310.pyc +0 -0
  19. controlnet_aux/leres/pix2pix/models/__pycache__/base_model_hg.cpython-310.pyc +0 -0
  20. controlnet_aux/leres/pix2pix/models/__pycache__/networks.cpython-310.pyc +0 -0
  21. controlnet_aux/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-310.pyc +0 -0
  22. controlnet_aux/leres/pix2pix/options/__pycache__/__init__.cpython-310.pyc +0 -0
  23. controlnet_aux/leres/pix2pix/options/__pycache__/base_options.cpython-310.pyc +0 -0
  24. controlnet_aux/leres/pix2pix/options/__pycache__/test_options.cpython-310.pyc +0 -0
  25. controlnet_aux/leres/pix2pix/util/__pycache__/__init__.cpython-310.pyc +0 -0
  26. controlnet_aux/leres/pix2pix/util/__pycache__/util.cpython-310.pyc +0 -0
  27. controlnet_aux/lineart/__pycache__/__init__.cpython-310.pyc +0 -0
  28. controlnet_aux/lineart_anime/__pycache__/__init__.cpython-310.pyc +0 -0
  29. controlnet_aux/mediapipe_face/__pycache__/__init__.cpython-310.pyc +0 -0
  30. controlnet_aux/mediapipe_face/__pycache__/mediapipe_face_common.cpython-310.pyc +0 -0
  31. controlnet_aux/midas/__pycache__/__init__.cpython-310.pyc +0 -0
  32. controlnet_aux/midas/__pycache__/api.cpython-310.pyc +0 -0
  33. controlnet_aux/midas/__pycache__/utils.cpython-310.pyc +0 -0
  34. controlnet_aux/midas/midas/__pycache__/__init__.cpython-310.pyc +0 -0
  35. controlnet_aux/midas/midas/__pycache__/base_model.cpython-310.pyc +0 -0
  36. controlnet_aux/midas/midas/__pycache__/blocks.cpython-310.pyc +0 -0
  37. controlnet_aux/midas/midas/__pycache__/dpt_depth.cpython-310.pyc +0 -0
  38. controlnet_aux/midas/midas/__pycache__/midas_net.cpython-310.pyc +0 -0
  39. controlnet_aux/midas/midas/__pycache__/midas_net_custom.cpython-310.pyc +0 -0
  40. controlnet_aux/midas/midas/__pycache__/transforms.cpython-310.pyc +0 -0
  41. controlnet_aux/midas/midas/__pycache__/vit.cpython-310.pyc +0 -0
  42. controlnet_aux/mlsd/__pycache__/__init__.cpython-310.pyc +0 -0
  43. controlnet_aux/mlsd/__pycache__/utils.cpython-310.pyc +0 -0
  44. controlnet_aux/mlsd/models/__pycache__/__init__.cpython-310.pyc +0 -0
  45. controlnet_aux/mlsd/models/__pycache__/mbv2_mlsd_large.cpython-310.pyc +0 -0
  46. controlnet_aux/mlsd/models/__pycache__/mbv2_mlsd_tiny.cpython-310.pyc +0 -0
  47. controlnet_aux/normalbae/__pycache__/__init__.cpython-310.pyc +0 -0
  48. controlnet_aux/normalbae/nets/__pycache__/NNET.cpython-310.pyc +0 -0
  49. controlnet_aux/normalbae/nets/__pycache__/__init__.cpython-310.pyc +0 -0
  50. controlnet_aux/normalbae/nets/__pycache__/baseline.cpython-310.pyc +0 -0
.gitignore CHANGED
@@ -1,4 +1,5 @@
1
  venv/*
 
2
  __pycache__/*
3
  anime_app_local.py
4
  *__/pycache__/*
 
1
  venv/*
2
+ venv2/*
3
  __pycache__/*
4
  anime_app_local.py
5
  *__/pycache__/*
app.py CHANGED
@@ -1,451 +1,451 @@
1
- prod = False
2
- port = 8080
3
- show_options = False
4
- if prod:
5
- port = 8081
6
- # show_options = False
7
-
8
- import os
9
- import gc
10
- import random
11
- import time
12
- import gradio as gr
13
- import numpy as np
14
- # import imageio
15
- import torch
16
- from PIL import Image
17
- from diffusers import (
18
- ControlNetModel,
19
- DPMSolverMultistepScheduler,
20
- StableDiffusionControlNetPipeline,
21
- AutoencoderKL,
22
- )
23
- from diffusers.models.attention_processor import AttnProcessor2_0
24
- from preprocess import Preprocessor
25
- MAX_SEED = np.iinfo(np.int32).max
26
- API_KEY = os.environ.get("API_KEY", None)
27
-
28
- print("CUDA version:", torch.version.cuda)
29
- print("loading pipe")
30
- compiled = False
31
- # api = HfApi()
32
-
33
- import spaces
34
-
35
- preprocessor = Preprocessor()
36
- preprocessor.load("NormalBae")
37
-
38
- if gr.NO_RELOAD:
39
- torch.cuda.max_memory_allocated(device="cuda")
40
-
41
- # Controlnet Normal
42
- model_id = "lllyasviel/control_v11p_sd15_normalbae"
43
- print("initializing controlnet")
44
- controlnet = ControlNetModel.from_pretrained(
45
- model_id,
46
- torch_dtype=torch.float16,
47
- attn_implementation="flash_attention_2",
48
- ).to("cuda")
49
-
50
- # Scheduler
51
- scheduler = DPMSolverMultistepScheduler.from_pretrained(
52
- "runwayml/stable-diffusion-v1-5",
53
- solver_order=2,
54
- subfolder="scheduler",
55
- use_karras_sigmas=True,
56
- final_sigmas_type="sigma_min",
57
- algorithm_type="sde-dpmsolver++",
58
- prediction_type="epsilon",
59
- thresholding=False,
60
- denoise_final=True,
61
- device_map="cuda",
62
- torch_dtype=torch.float16,
63
- )
64
-
65
- # Stable Diffusion Pipeline URL
66
- # base_model_url = "https://huggingface.co/broyang/hentaidigitalart_v20/blob/main/realcartoon3d_v15.safetensors"
67
- base_model_url = "https://huggingface.co/Lykon/AbsoluteReality/blob/main/AbsoluteReality_1.8.1_pruned.safetensors"
68
- vae_url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
69
-
70
- vae = AutoencoderKL.from_single_file(vae_url, torch_dtype=torch.float16).to("cuda")
71
- vae.to(memory_format=torch.channels_last)
72
-
73
- pipe = StableDiffusionControlNetPipeline.from_single_file(
74
- base_model_url,
75
- # safety_checker=None,
76
- # load_safety_checker=True,
77
- controlnet=controlnet,
78
- scheduler=scheduler,
79
- vae=vae,
80
- torch_dtype=torch.float16,
81
- )
82
-
83
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2",)
84
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
85
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
86
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Ahegao.pt", token="HDA_Ahegao")
87
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Bondage.pt", token="HDA_Bondage")
88
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_pet_play.pt", token="HDA_pet_play")
89
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_unconventional maid.pt", token="HDA_unconventional_maid")
90
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NakedHoodie.pt", token="HDA_NakedHoodie")
91
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NunDress.pt", token="HDA_NunDress")
92
- pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
93
- pipe.to("cuda")
94
-
95
- # experimental speedup?
96
- # pipe.compile()
97
- # torch.cuda.empty_cache()
98
- # gc.collect()
99
- print("---------------Loaded controlnet pipeline---------------")
100
-
101
- @spaces.GPU(duration=12)
102
- def init(pipe):
103
- pipe.enable_xformers_memory_efficient_attention()
104
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
105
- pipe.unet.set_attn_processor(AttnProcessor2_0())
106
- print("Model Compiled!")
107
- init(pipe)
108
-
109
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
110
- if randomize_seed:
111
- seed = random.randint(0, MAX_SEED)
112
- return seed
113
-
114
- def get_additional_prompt():
115
- prompt = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
116
- top = ["tank top", "blouse", "button up shirt", "sweater", "corset top"]
117
- bottom = ["short skirt", "athletic shorts", "jean shorts", "pleated skirt", "short skirt", "leggings", "high-waisted shorts"]
118
- accessory = ["knee-high boots", "gloves", "Thigh-high stockings", "Garter belt", "choker", "necklace", "headband", "headphones"]
119
- return f"{prompt}, {random.choice(top)}, {random.choice(bottom)}, {random.choice(accessory)}, score_9"
120
- # outfit = ["schoolgirl outfit", "playboy outfit", "red dress", "gala dress", "cheerleader outfit", "nurse outfit", "Kimono"]
121
-
122
- def get_prompt(prompt, additional_prompt):
123
- interior = "design-style interior designed (interior space), captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length, tungsten white balance, (sharp focus), professional photography, high-resolution, 8k, Pulitzer Prize-winning"
124
- default = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
125
- default2 = f"professional 3d model {prompt},octane render,highly detailed,volumetric,dramatic lighting,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
126
- randomize = get_additional_prompt()
127
- # nude = "NSFW,((nude)),medium bare breasts,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
128
- # bodypaint = "((fully naked with no clothes)),nude naked seethroughxray,invisiblebodypaint,rating_newd,NSFW"
129
- lab_girl = "hyperrealistic photography, extremely detailed, shy assistant wearing minidress boots and gloves, laboratory background, score_9, 1girl"
130
- pet_play = "hyperrealistic photography, extremely detailed, playful, blush, glasses, collar, score_9, HDA_pet_play"
131
- bondage = "hyperrealistic photography, extremely detailed, submissive, glasses, score_9, HDA_Bondage"
132
- # ahegao = "((invisible clothing)), hyperrealistic photography,exposed vagina,sexy,nsfw,HDA_Ahegao"
133
- ahegao2 = "(invisiblebodypaint),rating_newd,HDA_Ahegao"
134
- athleisure = "hyperrealistic photography, extremely detailed, 1girl athlete, exhausted embarrassed sweaty,outdoors, ((athleisure clothing)), score_9"
135
- atompunk = "((atompunk world)), hyperrealistic photography, extremely detailed, short hair, bodysuit, glasses, neon cyberpunk background, score_9"
136
- maid = "hyperrealistic photography, extremely detailed, shy, blushing, score_9, pastel background, HDA_unconventional_maid"
137
- nundress = "hyperrealistic photography, extremely detailed, shy, blushing, fantasy background, score_9, HDA_NunDress"
138
- naked_hoodie = "hyperrealistic photography, extremely detailed, medium hair, cityscape, (neon lights), score_9, HDA_NakedHoodie"
139
- abg = "(1girl, asian body covered in words, words on body, tattoos of (words) on body),(masterpiece, best quality),medium breasts,(intricate details),unity 8k wallpaper,ultra detailed,(pastel colors),beautiful and aesthetic,see-through (clothes),detailed,solo"
140
- # shibari = "extremely detailed, hyperrealistic photography, earrings, blushing, lace choker, tattoo, medium hair, score_9, HDA_Shibari"
141
- shibari2 = "octane render, highly detailed, volumetric, HDA_Shibari"
142
-
143
- if prompt == "":
144
- girls = [randomize, pet_play, bondage, lab_girl, athleisure, atompunk, maid, nundress, naked_hoodie, abg, shibari2, ahegao2]
145
- prompts_nsfw = [abg, shibari2, ahegao2]
146
- prompt = f"{random.choice(girls)}"
147
- prompt = f"boho chic"
148
- # print(f"-------------{preset}-------------")
149
- else:
150
- prompt = f"Photo from Pinterest of {prompt} {interior}"
151
- # prompt = default2
152
- return f"{prompt} f{additional_prompt}"
153
-
154
- style_list = [
155
- {
156
- "name": "None",
157
- "prompt": ""
158
- },
159
- {
160
- "name": "Minimalistic",
161
- "prompt": "Minimalistic"
162
- },
163
- {
164
- "name": "Boho Chic",
165
- "prompt": "boho chic"
166
- },
167
- {
168
- "name": "Saudi Prince Gold",
169
- "prompt": "saudi prince gold",
170
- },
171
- {
172
- "name": "Modern Farmhouse",
173
- "prompt": "modern farmhouse",
174
- },
175
- {
176
- "name": "Neoclassical",
177
- "prompt": "Neoclassical",
178
- },
179
- {
180
- "name": "Eclectic",
181
- "prompt": "Eclectic",
182
- },
183
- {
184
- "name": "Parisian White",
185
- "prompt": "Parisian White",
186
- },
187
- {
188
- "name": "Hollywood Glam",
189
- "prompt": "Hollywood Glam",
190
- },
191
- {
192
- "name": "Scandinavian",
193
- "prompt": "Scandinavian",
194
- },
195
- {
196
- "name": "Japanese",
197
- "prompt": "Japanese",
198
- },
199
- {
200
- "name": "Texas Cowboy",
201
- "prompt": "Texas Cowboy",
202
- },
203
- {
204
- "name": "Midcentury Modern",
205
- "prompt": "Midcentury Modern",
206
- },
207
- {
208
- "name": "Beach",
209
- "prompt": "Beach",
210
- },
211
- ]
212
-
213
- styles = {k["name"]: (k["prompt"]) for k in style_list}
214
- STYLE_NAMES = list(styles.keys())
215
-
216
- def apply_style(style_name):
217
- if style_name in styles:
218
- p = styles.get(style_name, "boho chic")
219
- return p
220
-
221
-
222
- css = """
223
- h1 {
224
- text-align: center;
225
- display:block;
226
- }
227
- h2 {
228
- text-align: center;
229
- display:block;
230
- }
231
- h3 {
232
- text-align: center;
233
- display:block;
234
- }
235
- .gradio-container{max-width: 1200px !important}
236
- footer {visibility: hidden}
237
- """
238
- with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
239
- #############################################################################
240
- with gr.Row():
241
- with gr.Accordion("Advanced options", open=show_options, visible=show_options):
242
- num_images = gr.Slider(
243
- label="Images", minimum=1, maximum=4, value=1, step=1
244
- )
245
- image_resolution = gr.Slider(
246
- label="Image resolution",
247
- minimum=256,
248
- maximum=1024,
249
- value=512,
250
- step=256,
251
- )
252
- preprocess_resolution = gr.Slider(
253
- label="Preprocess resolution",
254
- minimum=128,
255
- maximum=1024,
256
- value=512,
257
- step=1,
258
- )
259
- num_steps = gr.Slider(
260
- label="Number of steps", minimum=1, maximum=100, value=15, step=1
261
- ) # 20/4.5 or 12 without lora, 4 with lora
262
- guidance_scale = gr.Slider(
263
- label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
264
- ) # 5 without lora, 2 with lora
265
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
266
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
267
- a_prompt = gr.Textbox(
268
- label="Additional prompt",
269
- value = "design-style interior designed (interior space), captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length, tungsten white balance, (sharp focus), professional photography, high-resolution, 8k, Pulitzer Prize-winning"
270
- )
271
- n_prompt = gr.Textbox(
272
- label="Negative prompt",
273
- value="EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)",
274
- )
275
- #############################################################################
276
- # input text
277
- with gr.Row():
278
- gr.Text(label="Interior Design Style Examples", value="Eclectic, Maximalist, Bohemian, Scandinavian, Minimalist, Rustic, Modern Farmhouse, Contemporary, Luxury, Airbnb, Boho Chic, Midcentury Modern, Art Deco, Zen, Beach, Neoclassical, Industrial, Biophilic, Eco-friendly, Hollywood Glam, Parisian White, Saudi Prince Gold, French Country, Monster Energy Drink, Cyberpunk, Vaporwave, Baroque, etc.\n\nPro tip: add a color to customize it! You can also describe the furniture type.")
279
- with gr.Column():
280
- prompt = gr.Textbox(
281
- label="Custom Prompt",
282
- placeholder="boho chic",
283
- )
284
- with gr.Row(visible=True):
285
- style_selection = gr.Radio(
286
- show_label=True,
287
- container=True,
288
- interactive=True,
289
- choices=STYLE_NAMES,
290
- value="None",
291
- label="Design Styles",
292
- )
293
- # input image
294
- with gr.Row():
295
- with gr.Column():
296
- image = gr.Image(
297
- label="Input",
298
- sources=["upload"],
299
- show_label=True,
300
- mirror_webcam=True,
301
- format="webp",
302
- )
303
- # run button
304
- with gr.Column():
305
- run_button = gr.Button(value="Use this one", size=["lg"], visible=False)
306
- # output image
307
- with gr.Column():
308
- result = gr.Image(
309
- label="Output",
310
- interactive=False,
311
- format="webp",
312
- show_share_button= False,
313
- )
314
- # Use this image button
315
- with gr.Column():
316
- use_ai_button = gr.Button(value="Use this one", size=["lg"], visible=False)
317
- config = [
318
- image,
319
- style_selection,
320
- prompt,
321
- a_prompt,
322
- n_prompt,
323
- num_images,
324
- image_resolution,
325
- preprocess_resolution,
326
- num_steps,
327
- guidance_scale,
328
- seed,
329
- ]
330
-
331
- with gr.Row():
332
- helper_text = gr.Markdown("## Tap and hold (on mobile) to save the image.", visible=True)
333
-
334
- # image processing
335
- @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config, outputs=result, show_progress="minimal")
336
- def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
337
- return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
338
-
339
- # AI Image Processing
340
- @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
341
- def submit(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
342
- return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
343
-
344
- # Change input to result
345
- @gr.on(triggers=[use_ai_button.click], inputs=None, outputs=image, show_progress="hidden")
346
- def update_input():
347
- try:
348
- print("Updating image to AI Temp Image")
349
- ai_temp_image = Image.open("temp_image.jpg")
350
- return ai_temp_image
351
- except FileNotFoundError:
352
- print("No AI Image Available")
353
- return None
354
-
355
- # Turn off buttons when processing
356
- @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None, outputs=[run_button, use_ai_button], show_progress="hidden")
357
- def turn_buttons_off():
358
- return gr.update(visible=False), gr.update(visible=False)
359
-
360
- # Turn on buttons when processing is complete
361
- @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button], show_progress="hidden")
362
- def turn_buttons_on():
363
- return gr.update(visible=True), gr.update(visible=True)
364
-
365
- @spaces.GPU(duration=10)
366
- @torch.inference_mode()
367
- def process_image(
368
- image,
369
- style_selection,
370
- prompt,
371
- a_prompt,
372
- n_prompt,
373
- num_images,
374
- image_resolution,
375
- preprocess_resolution,
376
- num_steps,
377
- guidance_scale,
378
- seed,
379
- progress=gr.Progress(track_tqdm=True)
380
- ):
381
- torch.cuda.synchronize()
382
- preprocess_start = time.time()
383
- print("processing image")
384
- preprocessor.load("NormalBae")
385
- # preprocessor.load("Canny") #20 steps, 9 guidance, 512, 512
386
-
387
- global compiled
388
- if not compiled:
389
- print("Not Compiled")
390
- compiled = True
391
-
392
- seed = random.randint(0, MAX_SEED)
393
- generator = torch.cuda.manual_seed(seed)
394
- control_image = preprocessor(
395
- image=image,
396
- image_resolution=image_resolution,
397
- detect_resolution=preprocess_resolution,
398
- )
399
- preprocess_time = time.time() - preprocess_start
400
- if style_selection is not None or style_selection != "None":
401
- prompt = "Photo from Pinterest of " + apply_style(style_selection) + " " + prompt + " " + a_prompt
402
- else:
403
- prompt=str(get_prompt(prompt, a_prompt))
404
- negative_prompt=str(n_prompt)
405
- print(prompt)
406
- start = time.time()
407
- results = pipe(
408
- prompt=prompt,
409
- negative_prompt=negative_prompt,
410
- guidance_scale=guidance_scale,
411
- num_images_per_prompt=num_images,
412
- num_inference_steps=num_steps,
413
- generator=generator,
414
- image=control_image,
415
- ).images[0]
416
- torch.cuda.synchronize()
417
- torch.cuda.empty_cache()
418
- print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
419
- print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
420
-
421
- # timestamp = int(time.time())
422
- #if not os.path.exists("./outputs"):
423
- # os.makedirs("./outputs")
424
- # img_path = f"./{timestamp}.jpg"
425
- # results_path = f"./{timestamp}_out_{prompt}.jpg"
426
- # imageio.imsave(img_path, image)
427
- # results.save(results_path)
428
- results.save("temp_image.jpg")
429
-
430
- # api.upload_file(
431
- # path_or_fileobj=img_path,
432
- # path_in_repo=img_path,
433
- # repo_id="broyang/anime-ai-outputs",
434
- # repo_type="dataset",
435
- # token=API_KEY,
436
- # run_as_future=True,
437
- # )
438
- # api.upload_file(
439
- # path_or_fileobj=results_path,
440
- # path_in_repo=results_path,
441
- # repo_id="broyang/anime-ai-outputs",
442
- # repo_type="dataset",
443
- # token=API_KEY,
444
- # run_as_future=True,
445
- # )
446
-
447
- return results
448
- if prod:
449
- demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
450
- else:
451
  demo.queue(api_open=False).launch(show_api=False)
 
1
+ prod = False
2
+ port = 8080
3
+ show_options = False
4
+ if prod:
5
+ port = 8081
6
+ # show_options = False
7
+
8
+ import os
9
+ import gc
10
+ import random
11
+ import time
12
+ import gradio as gr
13
+ import numpy as np
14
+ # import imageio
15
+ import torch
16
+ from PIL import Image
17
+ from diffusers import (
18
+ ControlNetModel,
19
+ DPMSolverMultistepScheduler,
20
+ StableDiffusionControlNetPipeline,
21
+ AutoencoderKL,
22
+ )
23
+ from diffusers.models.attention_processor import AttnProcessor2_0
24
+ from preprocess import Preprocessor
25
+ MAX_SEED = np.iinfo(np.int32).max
26
+ API_KEY = os.environ.get("API_KEY", None)
27
+
28
+ print("CUDA version:", torch.version.cuda)
29
+ print("loading pipe")
30
+ compiled = False
31
+ # api = HfApi()
32
+
33
+ import spaces
34
+
35
+ preprocessor = Preprocessor()
36
+ preprocessor.load("NormalBae")
37
+
38
+ if gr.NO_RELOAD:
39
+ torch.cuda.max_memory_allocated(device="cuda")
40
+
41
+ # Controlnet Normal
42
+ model_id = "lllyasviel/control_v11p_sd15_normalbae"
43
+ print("initializing controlnet")
44
+ controlnet = ControlNetModel.from_pretrained(
45
+ model_id,
46
+ torch_dtype=torch.float16,
47
+ attn_implementation="flash_attention_2",
48
+ ).to("cuda")
49
+
50
+ # Scheduler
51
+ scheduler = DPMSolverMultistepScheduler.from_pretrained(
52
+ "runwayml/stable-diffusion-v1-5",
53
+ solver_order=2,
54
+ subfolder="scheduler",
55
+ use_karras_sigmas=True,
56
+ final_sigmas_type="sigma_min",
57
+ algorithm_type="sde-dpmsolver++",
58
+ prediction_type="epsilon",
59
+ thresholding=False,
60
+ denoise_final=True,
61
+ device_map="cuda",
62
+ torch_dtype=torch.float16,
63
+ )
64
+
65
+ # Stable Diffusion Pipeline URL
66
+ # base_model_url = "https://huggingface.co/broyang/hentaidigitalart_v20/blob/main/realcartoon3d_v15.safetensors"
67
+ base_model_url = "https://huggingface.co/Lykon/AbsoluteReality/blob/main/AbsoluteReality_1.8.1_pruned.safetensors"
68
+ vae_url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
69
+
70
+ vae = AutoencoderKL.from_single_file(vae_url, torch_dtype=torch.float16).to("cuda")
71
+ vae.to(memory_format=torch.channels_last)
72
+
73
+ pipe = StableDiffusionControlNetPipeline.from_single_file(
74
+ base_model_url,
75
+ # safety_checker=None,
76
+ # load_safety_checker=True,
77
+ controlnet=controlnet,
78
+ scheduler=scheduler,
79
+ vae=vae,
80
+ torch_dtype=torch.float16,
81
+ )
82
+
83
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="EasyNegativeV2.safetensors", token="EasyNegativeV2",)
84
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="badhandv4.pt", token="badhandv4")
85
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="fcNeg-neg.pt", token="fcNeg-neg")
86
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Ahegao.pt", token="HDA_Ahegao")
87
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Bondage.pt", token="HDA_Bondage")
88
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_pet_play.pt", token="HDA_pet_play")
89
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_unconventional maid.pt", token="HDA_unconventional_maid")
90
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NakedHoodie.pt", token="HDA_NakedHoodie")
91
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_NunDress.pt", token="HDA_NunDress")
92
+ pipe.load_textual_inversion("broyang/hentaidigitalart_v20", weight_name="HDA_Shibari.pt", token="HDA_Shibari")
93
+ pipe.to("cuda")
94
+
95
+ # experimental speedup?
96
+ # pipe.compile()
97
+ # torch.cuda.empty_cache()
98
+ # gc.collect()
99
+ print("---------------Loaded controlnet pipeline---------------")
100
+
101
+ @spaces.GPU(duration=12)
102
+ def init(pipe):
103
+ pipe.enable_xformers_memory_efficient_attention()
104
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
105
+ pipe.unet.set_attn_processor(AttnProcessor2_0())
106
+ print("Model Compiled!")
107
+ init(pipe)
108
+
109
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
110
+ if randomize_seed:
111
+ seed = random.randint(0, MAX_SEED)
112
+ return seed
113
+
114
+ def get_additional_prompt():
115
+ prompt = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
116
+ top = ["tank top", "blouse", "button up shirt", "sweater", "corset top"]
117
+ bottom = ["short skirt", "athletic shorts", "jean shorts", "pleated skirt", "short skirt", "leggings", "high-waisted shorts"]
118
+ accessory = ["knee-high boots", "gloves", "Thigh-high stockings", "Garter belt", "choker", "necklace", "headband", "headphones"]
119
+ return f"{prompt}, {random.choice(top)}, {random.choice(bottom)}, {random.choice(accessory)}, score_9"
120
+ # outfit = ["schoolgirl outfit", "playboy outfit", "red dress", "gala dress", "cheerleader outfit", "nurse outfit", "Kimono"]
121
+
122
+ def get_prompt(prompt, additional_prompt):
123
+ interior = "design-style interior designed (interior space), captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length, tungsten white balance, (sharp focus), professional photography, high-resolution, 8k, Pulitzer Prize-winning"
124
+ default = "hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
125
+ default2 = f"professional 3d model {prompt},octane render,highly detailed,volumetric,dramatic lighting,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
126
+ randomize = get_additional_prompt()
127
+ # nude = "NSFW,((nude)),medium bare breasts,hyperrealistic photography,extremely detailed,(intricate details),unity 8k wallpaper,ultra detailed"
128
+ # bodypaint = "((fully naked with no clothes)),nude naked seethroughxray,invisiblebodypaint,rating_newd,NSFW"
129
+ lab_girl = "hyperrealistic photography, extremely detailed, shy assistant wearing minidress boots and gloves, laboratory background, score_9, 1girl"
130
+ pet_play = "hyperrealistic photography, extremely detailed, playful, blush, glasses, collar, score_9, HDA_pet_play"
131
+ bondage = "hyperrealistic photography, extremely detailed, submissive, glasses, score_9, HDA_Bondage"
132
+ # ahegao = "((invisible clothing)), hyperrealistic photography,exposed vagina,sexy,nsfw,HDA_Ahegao"
133
+ ahegao2 = "(invisiblebodypaint),rating_newd,HDA_Ahegao"
134
+ athleisure = "hyperrealistic photography, extremely detailed, 1girl athlete, exhausted embarrassed sweaty,outdoors, ((athleisure clothing)), score_9"
135
+ atompunk = "((atompunk world)), hyperrealistic photography, extremely detailed, short hair, bodysuit, glasses, neon cyberpunk background, score_9"
136
+ maid = "hyperrealistic photography, extremely detailed, shy, blushing, score_9, pastel background, HDA_unconventional_maid"
137
+ nundress = "hyperrealistic photography, extremely detailed, shy, blushing, fantasy background, score_9, HDA_NunDress"
138
+ naked_hoodie = "hyperrealistic photography, extremely detailed, medium hair, cityscape, (neon lights), score_9, HDA_NakedHoodie"
139
+ abg = "(1girl, asian body covered in words, words on body, tattoos of (words) on body),(masterpiece, best quality),medium breasts,(intricate details),unity 8k wallpaper,ultra detailed,(pastel colors),beautiful and aesthetic,see-through (clothes),detailed,solo"
140
+ # shibari = "extremely detailed, hyperrealistic photography, earrings, blushing, lace choker, tattoo, medium hair, score_9, HDA_Shibari"
141
+ shibari2 = "octane render, highly detailed, volumetric, HDA_Shibari"
142
+
143
+ if prompt == "":
144
+ girls = [randomize, pet_play, bondage, lab_girl, athleisure, atompunk, maid, nundress, naked_hoodie, abg, shibari2, ahegao2]
145
+ prompts_nsfw = [abg, shibari2, ahegao2]
146
+ prompt = f"{random.choice(girls)}"
147
+ prompt = f"boho chic"
148
+ # print(f"-------------{preset}-------------")
149
+ else:
150
+ prompt = f"Photo from Pinterest of {prompt} {interior}"
151
+ # prompt = default2
152
+ return f"{prompt} f{additional_prompt}"
153
+
154
# (name, prompt) pairs for the interior-design style picker. "None" maps to
# an empty prompt so the user's own text is used unmodified.
_STYLE_PAIRS = [
    ("None", ""),
    ("Minimalistic", "Minimalistic"),
    ("Boho Chic", "boho chic"),
    ("Saudi Prince Gold", "saudi prince gold"),
    ("Modern Farmhouse", "modern farmhouse"),
    ("Neoclassical", "Neoclassical"),
    ("Eclectic", "Eclectic"),
    ("Parisian White", "Parisian White"),
    ("Hollywood Glam", "Hollywood Glam"),
    ("Scandinavian", "Scandinavian"),
    ("Japanese", "Japanese"),
    ("Texas Cowboy", "Texas Cowboy"),
    ("Midcentury Modern", "Midcentury Modern"),
    ("Beach", "Beach"),
]
style_list = [{"name": name, "prompt": prompt} for name, prompt in _STYLE_PAIRS]

# Name -> prompt lookup, plus the ordered list of names shown in the UI radio.
styles = {entry["name"]: entry["prompt"] for entry in style_list}
STYLE_NAMES = list(styles.keys())
215
+
216
def apply_style(style_name):
    """Return the prompt text registered for *style_name*.

    FIX: the original guarded the lookup with ``if style_name in styles``,
    which made the ``.get`` default dead code and let unknown names fall
    through to an implicit ``None`` return — the caller concatenates the
    result into a string, so ``None`` would raise a TypeError. Unknown
    names now fall back to the "boho chic" default instead.
    """
    return styles.get(style_name, "boho chic")
220
+
221
+
222
# App-wide CSS injected into gr.Blocks below: center every heading level,
# cap the container width at 1200px, and hide the default Gradio footer.
css = """
h1 {
text-align: center;
display:block;
}
h2 {
text-align: center;
display:block;
}
h3 {
text-align: center;
display:block;
}
.gradio-container{max-width: 1200px !important}
footer {visibility: hidden}
"""
238
# UI layout: advanced options accordion, prompt/style inputs, input/output
# image slots, and the `config` list that feeds every event handler below.
with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
    #############################################################################
    # Advanced generation controls; hidden unless `show_options` is truthy.
    with gr.Row():
        with gr.Accordion("Advanced options", open=show_options, visible=show_options):
            num_images = gr.Slider(
                label="Images", minimum=1, maximum=4, value=1, step=1
            )
            image_resolution = gr.Slider(
                label="Image resolution",
                minimum=256,
                maximum=1024,
                value=512,
                step=256,
            )
            preprocess_resolution = gr.Slider(
                label="Preprocess resolution",
                minimum=128,
                maximum=1024,
                value=512,
                step=1,
            )
            num_steps = gr.Slider(
                label="Number of steps", minimum=1, maximum=100, value=15, step=1
            ) # 20/4.5 or 12 without lora, 4 with lora
            guidance_scale = gr.Slider(
                label="Guidance scale", minimum=0.1, maximum=30.0, value=5.5, step=0.1
            ) # 5 without lora, 2 with lora
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            # NOTE(review): randomize_seed is never wired into `config`, so the
            # checkbox currently has no effect on generation — confirm intended.
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            a_prompt = gr.Textbox(
                label="Additional prompt",
                value = "design-style interior designed (interior space), captured with a DSLR camera using f/10 aperture, 1/60 sec shutter speed, ISO 400, 20mm focal length, tungsten white balance, (sharp focus), professional photography, high-resolution, 8k, Pulitzer Prize-winning"
            )
            n_prompt = gr.Textbox(
                label="Negative prompt",
                value="EasyNegativeV2, fcNeg, (badhandv4:1.4), (worst quality, low quality, bad quality, normal quality:2.0), (bad hands, missing fingers, extra fingers:2.0)",
            )
    #############################################################################
    # input text
    with gr.Row():
        gr.Text(label="Interior Design Style Examples", value="Eclectic, Maximalist, Bohemian, Scandinavian, Minimalist, Rustic, Modern Farmhouse, Contemporary, Luxury, Airbnb, Boho Chic, Midcentury Modern, Art Deco, Zen, Beach, Neoclassical, Industrial, Biophilic, Eco-friendly, Hollywood Glam, Parisian White, Saudi Prince Gold, French Country, Monster Energy Drink, Cyberpunk, Vaporwave, Baroque, etc.\n\nPro tip: add a color to customize it! You can also describe the furniture type.")
        with gr.Column():
            prompt = gr.Textbox(
                label="Custom Prompt",
                placeholder="boho chic",
            )
    # Style radio; "None" routes generation through the get_prompt() fallback.
    with gr.Row(visible=True):
        style_selection = gr.Radio(
            show_label=True,
            container=True,
            interactive=True,
            choices=STYLE_NAMES,
            value="None",
            label="Design Styles",
        )
    # input image
    with gr.Row():
        with gr.Column():
            image = gr.Image(
                label="Input",
                sources=["upload"],
                show_label=True,
                mirror_webcam=True,
                format="webp",
            )
        # run button
        with gr.Column():
            # NOTE(review): gr.Button `size` is documented as a string ("sm"/"lg"),
            # not a list — confirm ["lg"] works on the pinned Gradio version.
            run_button = gr.Button(value="Use this one", size=["lg"], visible=False)
        # output image
        with gr.Column():
            result = gr.Image(
                label="Output",
                interactive=False,
                format="webp",
                show_share_button= False,
            )
        # Use this image button
        with gr.Column():
            use_ai_button = gr.Button(value="Use this one", size=["lg"], visible=False)
    # Shared input list for every event handler; order must match the
    # parameter order of process_image().
    config = [
        image,
        style_selection,
        prompt,
        a_prompt,
        n_prompt,
        num_images,
        image_resolution,
        preprocess_resolution,
        num_steps,
        guidance_scale,
        seed,
    ]

    with gr.Row():
        helper_text = gr.Markdown("## Tap and hold (on mobile) to save the image.", visible=True)
334
+ # image processing
335
+ @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config, outputs=result, show_progress="minimal")
336
+ def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
337
+ return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
338
+
339
+ # AI Image Processing
340
+ @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
341
+ def submit(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
342
+ return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
343
+
344
+ # Change input to result
345
+ @gr.on(triggers=[use_ai_button.click], inputs=None, outputs=image, show_progress="hidden")
346
+ def update_input():
347
+ try:
348
+ print("Updating image to AI Temp Image")
349
+ ai_temp_image = Image.open("temp_image.jpg")
350
+ return ai_temp_image
351
+ except FileNotFoundError:
352
+ print("No AI Image Available")
353
+ return None
354
+
355
+ # Turn off buttons when processing
356
+ @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None, outputs=[run_button, use_ai_button], show_progress="hidden")
357
+ def turn_buttons_off():
358
+ return gr.update(visible=False), gr.update(visible=False)
359
+
360
+ # Turn on buttons when processing is complete
361
+ @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button], show_progress="hidden")
362
+ def turn_buttons_on():
363
+ return gr.update(visible=True), gr.update(visible=True)
364
+
365
+ @spaces.GPU(duration=10)
366
+ @torch.inference_mode()
367
+ def process_image(
368
+ image,
369
+ style_selection,
370
+ prompt,
371
+ a_prompt,
372
+ n_prompt,
373
+ num_images,
374
+ image_resolution,
375
+ preprocess_resolution,
376
+ num_steps,
377
+ guidance_scale,
378
+ seed,
379
+ progress=gr.Progress(track_tqdm=True)
380
+ ):
381
+ torch.cuda.synchronize()
382
+ preprocess_start = time.time()
383
+ print("processing image")
384
+ preprocessor.load("NormalBae")
385
+ # preprocessor.load("Canny") #20 steps, 9 guidance, 512, 512
386
+
387
+ global compiled
388
+ if not compiled:
389
+ print("Not Compiled")
390
+ compiled = True
391
+
392
+ seed = random.randint(0, MAX_SEED)
393
+ generator = torch.cuda.manual_seed(seed)
394
+ control_image = preprocessor(
395
+ image=image,
396
+ image_resolution=image_resolution,
397
+ detect_resolution=preprocess_resolution,
398
+ )
399
+ preprocess_time = time.time() - preprocess_start
400
+ if style_selection is not None or style_selection != "None":
401
+ prompt = "Photo from Pinterest of " + apply_style(style_selection) + " " + prompt + " " + a_prompt
402
+ else:
403
+ prompt=str(get_prompt(prompt, a_prompt))
404
+ negative_prompt=str(n_prompt)
405
+ print(prompt)
406
+ start = time.time()
407
+ results = pipe(
408
+ prompt=prompt,
409
+ negative_prompt=negative_prompt,
410
+ guidance_scale=guidance_scale,
411
+ num_images_per_prompt=num_images,
412
+ num_inference_steps=num_steps,
413
+ generator=generator,
414
+ image=control_image,
415
+ ).images[0]
416
+ torch.cuda.synchronize()
417
+ torch.cuda.empty_cache()
418
+ print(f"\n-------------------------Preprocess done in: {preprocess_time:.2f} seconds-------------------------")
419
+ print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
420
+
421
+ # timestamp = int(time.time())
422
+ #if not os.path.exists("./outputs"):
423
+ # os.makedirs("./outputs")
424
+ # img_path = f"./{timestamp}.jpg"
425
+ # results_path = f"./{timestamp}_out_{prompt}.jpg"
426
+ # imageio.imsave(img_path, image)
427
+ # results.save(results_path)
428
+ results.save("temp_image.jpg")
429
+
430
+ # api.upload_file(
431
+ # path_or_fileobj=img_path,
432
+ # path_in_repo=img_path,
433
+ # repo_id="broyang/anime-ai-outputs",
434
+ # repo_type="dataset",
435
+ # token=API_KEY,
436
+ # run_as_future=True,
437
+ # )
438
+ # api.upload_file(
439
+ # path_or_fileobj=results_path,
440
+ # path_in_repo=results_path,
441
+ # repo_id="broyang/anime-ai-outputs",
442
+ # repo_type="dataset",
443
+ # token=API_KEY,
444
+ # run_as_future=True,
445
+ # )
446
+
447
+ return results
448
# Launch: in production, bind to localhost on the configured port with a
# bounded queue; otherwise use Gradio defaults with the public API disabled.
if prod:
    demo.queue(max_size=20).launch(server_name="localhost", server_port=port)
else:
    demo.queue(api_open=False).launch(show_api=False)
controlnet_aux/canny/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (1.29 kB)
 
controlnet_aux/dwpose/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (2.61 kB)
 
controlnet_aux/dwpose/__pycache__/util.cpython-310.pyc DELETED
Binary file (7.78 kB)
 
controlnet_aux/dwpose/__pycache__/wholebody.cpython-310.pyc DELETED
Binary file (3.59 kB)
 
controlnet_aux/hed/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (5.05 kB)
 
controlnet_aux/leres/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (3.32 kB)
 
controlnet_aux/leres/leres/__pycache__/Resnet.cpython-310.pyc DELETED
Binary file (5.55 kB)
 
controlnet_aux/leres/leres/__pycache__/Resnext_torch.cpython-310.pyc DELETED
Binary file (5.83 kB)
 
controlnet_aux/leres/leres/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (178 Bytes)
 
controlnet_aux/leres/leres/__pycache__/depthmap.cpython-310.pyc DELETED
Binary file (11.7 kB)
 
controlnet_aux/leres/leres/__pycache__/multi_depth_model_woauxi.cpython-310.pyc DELETED
Binary file (1.71 kB)
 
controlnet_aux/leres/leres/__pycache__/net_tools.cpython-310.pyc DELETED
Binary file (1.89 kB)
 
controlnet_aux/leres/leres/__pycache__/network_auxi.cpython-310.pyc DELETED
Binary file (9.82 kB)
 
controlnet_aux/leres/pix2pix/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (180 Bytes)
 
controlnet_aux/leres/pix2pix/models/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (3.3 kB)
 
controlnet_aux/leres/pix2pix/models/__pycache__/base_model.cpython-310.pyc DELETED
Binary file (10.3 kB)
 
controlnet_aux/leres/pix2pix/models/__pycache__/base_model_hg.cpython-310.pyc DELETED
Binary file (2.66 kB)
 
controlnet_aux/leres/pix2pix/models/__pycache__/networks.cpython-310.pyc DELETED
Binary file (23.5 kB)
 
controlnet_aux/leres/pix2pix/models/__pycache__/pix2pix4depth_model.cpython-310.pyc DELETED
Binary file (5.56 kB)
 
controlnet_aux/leres/pix2pix/options/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (329 Bytes)
 
controlnet_aux/leres/pix2pix/options/__pycache__/base_options.cpython-310.pyc DELETED
Binary file (7.2 kB)
 
controlnet_aux/leres/pix2pix/options/__pycache__/test_options.cpython-310.pyc DELETED
Binary file (1.14 kB)
 
controlnet_aux/leres/pix2pix/util/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (273 Bytes)
 
controlnet_aux/leres/pix2pix/util/__pycache__/util.cpython-310.pyc DELETED
Binary file (3.02 kB)
 
controlnet_aux/lineart/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (4.75 kB)
 
controlnet_aux/lineart_anime/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (6.7 kB)
 
controlnet_aux/mediapipe_face/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (1.82 kB)
 
controlnet_aux/mediapipe_face/__pycache__/mediapipe_face_common.cpython-310.pyc DELETED
Binary file (4.66 kB)
 
controlnet_aux/midas/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (2.94 kB)
 
controlnet_aux/midas/__pycache__/api.cpython-310.pyc DELETED
Binary file (3.72 kB)
 
controlnet_aux/midas/__pycache__/utils.cpython-310.pyc DELETED
Binary file (4.11 kB)
 
controlnet_aux/midas/midas/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (178 Bytes)
 
controlnet_aux/midas/midas/__pycache__/base_model.cpython-310.pyc DELETED
Binary file (706 Bytes)
 
controlnet_aux/midas/midas/__pycache__/blocks.cpython-310.pyc DELETED
Binary file (7.23 kB)
 
controlnet_aux/midas/midas/__pycache__/dpt_depth.cpython-310.pyc DELETED
Binary file (2.93 kB)
 
controlnet_aux/midas/midas/__pycache__/midas_net.cpython-310.pyc DELETED
Binary file (2.61 kB)
 
controlnet_aux/midas/midas/__pycache__/midas_net_custom.cpython-310.pyc DELETED
Binary file (3.73 kB)
 
controlnet_aux/midas/midas/__pycache__/transforms.cpython-310.pyc DELETED
Binary file (5.69 kB)
 
controlnet_aux/midas/midas/__pycache__/vit.cpython-310.pyc DELETED
Binary file (9.38 kB)
 
controlnet_aux/mlsd/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (2.91 kB)
 
controlnet_aux/mlsd/__pycache__/utils.cpython-310.pyc DELETED
Binary file (12.3 kB)
 
controlnet_aux/mlsd/models/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (178 Bytes)
 
controlnet_aux/mlsd/models/__pycache__/mbv2_mlsd_large.cpython-310.pyc DELETED
Binary file (8.44 kB)
 
controlnet_aux/mlsd/models/__pycache__/mbv2_mlsd_tiny.cpython-310.pyc DELETED
Binary file (8.15 kB)
 
controlnet_aux/normalbae/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (3.23 kB)
 
controlnet_aux/normalbae/nets/__pycache__/NNET.cpython-310.pyc DELETED
Binary file (1.22 kB)
 
controlnet_aux/normalbae/nets/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (181 Bytes)
 
controlnet_aux/normalbae/nets/__pycache__/baseline.cpython-310.pyc DELETED
Binary file (3.33 kB)