doczues committed on
Commit
4fa7fc8
·
verified ·
1 Parent(s): 557a7eb

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,35 +1,20 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ models/newdream-sdxl-20/text_encoder/model.safetensors filter=lfs diff=lfs merge=lfs -text
2
+ models/newdream-sdxl-20/text_encoder/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
3
+ models/newdream-sdxl-20/text_encoder_2/model.safetensors filter=lfs diff=lfs merge=lfs -text
4
+ models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
5
+ models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
6
+ models/newdream-sdxl-20/unet/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
7
+ models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
8
+ models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
9
+ models/sdxl-lcmlora-1024-100k-3000steps/checkpoint-3000/optimizer.bin filter=lfs diff=lfs merge=lfs -text
10
+ models/sdxl-lcmlora-1024-100k-3000steps/checkpoint-3000/pytorch_lora_weights.safetensors filter=lfs diff=lfs merge=lfs -text
11
+ models/sdxl-lcmlora-1024-100k-3000steps/checkpoint-3000/random_states_0.pkl filter=lfs diff=lfs merge=lfs -text
12
+ models/sdxl-lcmlora-1024-100k-3000steps/checkpoint-3000/scaler.pt filter=lfs diff=lfs merge=lfs -text
13
+ models/sdxl-lcmlora-1024-100k-3000steps/checkpoint-3000/scheduler.bin filter=lfs diff=lfs merge=lfs -text
14
+ models/sdxl-lcmlora-1024-100k-3000steps/pytorch_lora_weights.safetensors filter=lfs diff=lfs merge=lfs -text
15
+ models/model/checkpoint-3000/optimizer.bin filter=lfs diff=lfs merge=lfs -text
16
+ models/model/checkpoint-3000/pytorch_lora_weights.safetensors filter=lfs diff=lfs merge=lfs -text
17
+ models/model/checkpoint-3000/random_states_0.pkl filter=lfs diff=lfs merge=lfs -text
18
+ models/model/checkpoint-3000/scaler.pt filter=lfs diff=lfs merge=lfs -text
19
+ models/model/checkpoint-3000/scheduler.bin filter=lfs diff=lfs merge=lfs -text
20
+ models/model/pytorch_lora_weights.safetensors filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitmodules CHANGED
@@ -2,3 +2,7 @@
2
  path = models/newdream-sdxl-20
3
  url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
4
  branch = main
 
 
 
 
 
2
  path = models/newdream-sdxl-20
3
  url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
4
  branch = main
5
+ [submodule "sdxl-lcmlora-1024-100k-3000steps"]
6
+ path = models/sdxl-lcmlora-1024-100k-3000steps
7
+ url = https://huggingface.co/mhussainahmad/sdxl-lcmlora-1024-100k-3000steps
8
+ branch = main
models/model/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/model/checkpoint-3000/optimizer.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b099d29a8a515e5af7217f2980fe7d44567645ea870941372c1b491bfd543063
3
+ size 396043572
models/model/checkpoint-3000/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d0d20f48219b393264e82870b4e86ec51f8fc4f9d100bd572af7061449eb216
3
+ size 787239704
models/model/checkpoint-3000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ca7b6960a4d6a4fb2924e02d79df68ee241e05b4880cee568dae88be0007c2a
3
+ size 14540
models/model/checkpoint-3000/scaler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21aba8ed0f38ed1c04994c10a9ca7e9925e55ef2ed51283c43ff8e2cce78585f
3
+ size 988
models/model/checkpoint-3000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3099c70cdd005d6ef37edca83824e231a4c490e87e02d339ef8bf738464b46ed
3
+ size 1000
models/model/pytorch_lora_weights.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d0d20f48219b393264e82870b4e86ec51f8fc4f9d100bd572af7061449eb216
3
+ size 787239704
requirements.txt CHANGED
@@ -1 +1,2 @@
1
  # Specify any extra options here, like --find-links, --pre, etc. Avoid specifying dependencies here and specify them in pyproject.toml instead
 
 
1
  # Specify any extra options here, like --find-links, --pre, etc. Avoid specifying dependencies here and specify them in pyproject.toml instead
2
+ peft
src/main.py CHANGED
@@ -1,7 +1,7 @@
1
  from io import BytesIO
2
  from multiprocessing.connection import Listener
3
- from os import chmod, remove
4
- from os.path import abspath, exists
5
  from pathlib import Path
6
 
7
  from PIL.JpegImagePlugin import JpegImageFile
@@ -16,11 +16,9 @@ def main():
16
  print(f"Loading pipeline")
17
  pipeline = load_pipeline()
18
 
19
- print(f"Pipeline loaded, creating socket at '{SOCKET}'")
20
-
21
- if exists(SOCKET):
22
- remove(SOCKET)
23
 
 
24
  with Listener(SOCKET) as listener:
25
  chmod(SOCKET, 0o777)
26
 
 
1
  from io import BytesIO
2
  from multiprocessing.connection import Listener
3
+ from os import chmod
4
+ from os.path import abspath
5
  from pathlib import Path
6
 
7
  from PIL.JpegImagePlugin import JpegImageFile
 
16
  print(f"Loading pipeline")
17
  pipeline = load_pipeline()
18
 
19
+ print(f"Pipeline loaded")
 
 
 
20
 
21
+ print(f"Creating socket at '{SOCKET}'")
22
  with Listener(SOCKET) as listener:
23
  chmod(SOCKET, 0o777)
24
 
src/pipeline.py CHANGED
@@ -1,48 +1,33 @@
1
  import torch
2
  from PIL.Image import Image
3
- from diffusers import StableDiffusionXLPipeline
4
  from pipelines.models import TextToImageRequest
5
  from torch import Generator
6
- import gc
7
 
8
 
9
- # Efficiently load the pipeline and apply mixed precision if possible.
10
  def load_pipeline() -> StableDiffusionXLPipeline:
11
- print("Loading model pipeline...")
 
 
 
 
 
 
12
 
13
- # Enable mixed precision for better memory usage
14
- with torch.cuda.amp.autocast():
15
- pipeline = StableDiffusionXLPipeline.from_pretrained(
16
- "./models/newdream-sdxl-20",
17
- torch_dtype=torch.float16, # Mixed precision to speed up inference
18
- local_files_only=True,
19
- ).to("cuda")
20
-
21
- # Prime the pipeline to reduce initial inference time by running a dummy prompt
22
  pipeline(prompt="")
23
 
24
  return pipeline
25
 
26
 
27
- # Inference function with better resource management and memory handling
28
  def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
29
- # Use the provided seed for reproducibility, if available
30
  generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
31
 
32
- print(f"Generating image with prompt: {request.prompt}")
33
-
34
- # Inference with mixed precision
35
- with torch.cuda.amp.autocast():
36
- image = pipeline(
37
- prompt=request.prompt,
38
- negative_prompt=request.negative_prompt,
39
- width=request.width,
40
- height=request.height,
41
- generator=generator,
42
- ).images[0]
43
-
44
- # Clear memory after inference to avoid CUDA OOM errors
45
- torch.cuda.empty_cache()
46
- gc.collect()
47
-
48
- return image
 
1
  import torch
2
  from PIL.Image import Image
3
+ from diffusers import StableDiffusionXLPipeline, LCMScheduler
4
  from pipelines.models import TextToImageRequest
5
  from torch import Generator
 
6
 
7
 
 
8
def load_pipeline() -> StableDiffusionXLPipeline:
    """Load the newdream-SDXL pipeline configured for few-step LCM sampling.

    Returns:
        A CUDA, fp16 ``StableDiffusionXLPipeline`` with the LCM scheduler
        installed and the local LCM-LoRA weights loaded, warmed up once so
        the first real request does not pay one-time allocation costs.
    """
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,  # fp16 halves memory and speeds up inference
        local_files_only=True,
    ).to("cuda")

    # Swap in the LCM scheduler and the distilled LoRA weights; together
    # they make ~4-step sampling produce usable images.
    pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
    pipeline.load_lora_weights("./models/model")

    # Warm up with the same settings used at inference time (see infer()).
    # A bare pipeline(prompt="") would run the scheduler's default ~50 steps,
    # making startup roughly an order of magnitude slower for no benefit.
    pipeline(prompt="", num_inference_steps=4, guidance_scale=1.5)

    return pipeline
20
 
21
 
 
22
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    """Generate one image for *request* using the preloaded LCM pipeline.

    Args:
        request: Prompt, negative prompt, dimensions, and optional seed.
        pipeline: Pipeline produced by load_pipeline().

    Returns:
        The first generated PIL image.
    """
    # Seed the generator whenever a seed was supplied. NOTE: the check must
    # be `is not None`, not truthiness — seed 0 is a valid, reproducible seed
    # and a bare `if request.seed` would silently discard it.
    generator = (
        Generator(pipeline.device).manual_seed(request.seed)
        if request.seed is not None
        else None
    )

    # 4 steps at low guidance matches the LCM scheduler + LCM-LoRA setup.
    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=4,
        guidance_scale=1.5,
    ).images[0]