AnithaRathnamKVFoundry committed on
Commit
b7cfff7
·
1 Parent(s): 64dd662

Git modules and attributes

Browse files
Files changed (36) hide show
  1. models/newdream-sdxl-20/.gitattributes → .gitattributes +1 -0
  2. .gitmodules +4 -0
  3. README.md +0 -12
  4. models/newdream-sdxl-20/.gitignore +0 -2
  5. models/newdream-sdxl-20/README.md +0 -66
  6. models/newdream-sdxl-20/model_index.json +0 -33
  7. models/newdream-sdxl-20/scheduler/scheduler_config.json +0 -18
  8. models/newdream-sdxl-20/text_encoder/config.json +0 -24
  9. models/newdream-sdxl-20/text_encoder/model.safetensors +0 -3
  10. models/newdream-sdxl-20/text_encoder/pytorch_model.bin +0 -3
  11. models/newdream-sdxl-20/text_encoder_2/config.json +0 -24
  12. models/newdream-sdxl-20/text_encoder_2/model.safetensors +0 -3
  13. models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin +0 -3
  14. models/newdream-sdxl-20/tokenizer/merges.txt +0 -0
  15. models/newdream-sdxl-20/tokenizer/special_tokens_map.json +0 -24
  16. models/newdream-sdxl-20/tokenizer/tokenizer_config.json +0 -33
  17. models/newdream-sdxl-20/tokenizer/vocab.json +0 -0
  18. models/newdream-sdxl-20/tokenizer_2/merges.txt +0 -0
  19. models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json +0 -24
  20. models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json +0 -33
  21. models/newdream-sdxl-20/tokenizer_2/vocab.json +0 -0
  22. models/newdream-sdxl-20/unet/config.json +0 -71
  23. models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin +0 -3
  24. models/newdream-sdxl-20/vae/config.json +0 -31
  25. models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin +0 -3
  26. models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors +0 -3
  27. pyproject.toml +0 -21
  28. requirements.txt +0 -1
  29. src/edge_maxxing_4090_newdream.egg-info/PKG-INFO +0 -12
  30. src/edge_maxxing_4090_newdream.egg-info/SOURCES.txt +0 -10
  31. src/edge_maxxing_4090_newdream.egg-info/dependency_links.txt +0 -1
  32. src/edge_maxxing_4090_newdream.egg-info/entry_points.txt +0 -2
  33. src/edge_maxxing_4090_newdream.egg-info/requires.txt +0 -7
  34. src/edge_maxxing_4090_newdream.egg-info/top_level.txt +0 -2
  35. src/main.py +0 -59
  36. src/pipeline.py +0 -41
models/newdream-sdxl-20/.gitattributes → .gitattributes RENAMED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+
.gitmodules ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ [submodule "newdream-sdxl-20"]
2
+ path = models/newdream-sdxl-20
3
+ url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
4
+ branch = main
README.md DELETED
@@ -1,12 +0,0 @@
1
- # edge-maxxing-newdream-sdxl
2
-
3
- This holds the baseline for the SDXL Nvidia GeForce RTX 4090 contest, which can be forked freely and optimized
4
-
5
- Some recommendations are as follows:
6
- - Installing dependencies should be done in pyproject.toml, including git dependencies
7
- - Compiled models should be included directly in the repository(rather than compiling during loading), loading time matters far more than file sizes
8
- - Avoid changing `src/main.py`, as that includes mostly protocol logic. Most changes should be in `models` and `src/pipeline.py`
9
- - Change `requirements.txt` to add extra arguments to be used when installing the package
10
-
11
- For testing, you need a docker container with pytorch and ubuntu 22.04,
12
- you can download your listed dependencies with `pip install -r requirements.txt -e .`, and then running `start_inference`
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/.gitignore DELETED
@@ -1,2 +0,0 @@
1
- step_*
2
- epoch_*
 
 
 
models/newdream-sdxl-20/README.md DELETED
@@ -1,66 +0,0 @@
1
- ---
2
- license: creativeml-openrail-m
3
- tags:
4
- - stablediffusionapi.com
5
- - stable-diffusion-api
6
- - text-to-image
7
- - ultra-realistic
8
- pinned: true
9
- ---
10
-
11
- # NewDream-SDXL 2.0 API Inference
12
-
13
- ![generated from stablediffusionapi.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/8478583971702167737.png)
14
- ## Get API Key
15
-
16
- Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed.
17
-
18
- Replace Key in below code, change **model_id** to "newdream-sdxl-20"
19
-
20
- Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
21
-
22
- Try model for free: [Generate Images](https://stablediffusionapi.com/models/newdream-sdxl-20)
23
-
24
- Model link: [View model](https://stablediffusionapi.com/models/newdream-sdxl-20)
25
-
26
- Credits: [View credits](https://civitai.com/?query=NewDream-SDXL%202.0)
27
-
28
- View all models: [View Models](https://stablediffusionapi.com/models)
29
-
30
- import requests
31
- import json
32
-
33
- url = "https://stablediffusionapi.com/api/v4/dreambooth"
34
-
35
- payload = json.dumps({
36
- "key": "your_api_key",
37
- "model_id": "newdream-sdxl-20",
38
- "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
39
- "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
40
- "width": "512",
41
- "height": "512",
42
- "samples": "1",
43
- "num_inference_steps": "30",
44
- "safety_checker": "no",
45
- "enhance_prompt": "yes",
46
- "seed": None,
47
- "guidance_scale": 7.5,
48
- "multi_lingual": "no",
49
- "panorama": "no",
50
- "self_attention": "no",
51
- "upscale": "no",
52
- "embeddings": "embeddings_model_id",
53
- "lora": "lora_model_id",
54
- "webhook": None,
55
- "track_id": None
56
- })
57
-
58
- headers = {
59
- 'Content-Type': 'application/json'
60
- }
61
-
62
- response = requests.request("POST", url, headers=headers, data=payload)
63
-
64
- print(response.text)
65
-
66
- > Use this coupon code to get 25% off **DMGG0RBN**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/model_index.json DELETED
@@ -1,33 +0,0 @@
1
- {
2
- "_class_name": "StableDiffusionXLPipeline",
3
- "_diffusers_version": "0.21.2",
4
- "force_zeros_for_empty_prompt": true,
5
- "scheduler": [
6
- "diffusers",
7
- "EulerDiscreteScheduler"
8
- ],
9
- "text_encoder": [
10
- "transformers",
11
- "CLIPTextModel"
12
- ],
13
- "text_encoder_2": [
14
- "transformers",
15
- "CLIPTextModelWithProjection"
16
- ],
17
- "tokenizer": [
18
- "transformers",
19
- "CLIPTokenizer"
20
- ],
21
- "tokenizer_2": [
22
- "transformers",
23
- "CLIPTokenizer"
24
- ],
25
- "unet": [
26
- "diffusers",
27
- "UNet2DConditionModel"
28
- ],
29
- "vae": [
30
- "diffusers",
31
- "AutoencoderKL"
32
- ]
33
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/scheduler/scheduler_config.json DELETED
@@ -1,18 +0,0 @@
1
- {
2
- "_class_name": "EulerDiscreteScheduler",
3
- "_diffusers_version": "0.21.2",
4
- "beta_end": 0.012,
5
- "beta_schedule": "scaled_linear",
6
- "beta_start": 0.00085,
7
- "clip_sample": false,
8
- "interpolation_type": "linear",
9
- "num_train_timesteps": 1000,
10
- "prediction_type": "epsilon",
11
- "sample_max_value": 1.0,
12
- "set_alpha_to_one": false,
13
- "skip_prk_steps": true,
14
- "steps_offset": 1,
15
- "timestep_spacing": "leading",
16
- "trained_betas": null,
17
- "use_karras_sigmas": false
18
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/text_encoder/config.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "architectures": [
3
- "CLIPTextModel"
4
- ],
5
- "attention_dropout": 0.0,
6
- "bos_token_id": 0,
7
- "dropout": 0.0,
8
- "eos_token_id": 2,
9
- "hidden_act": "quick_gelu",
10
- "hidden_size": 768,
11
- "initializer_factor": 1.0,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 3072,
14
- "layer_norm_eps": 1e-05,
15
- "max_position_embeddings": 77,
16
- "model_type": "clip_text_model",
17
- "num_attention_heads": 12,
18
- "num_hidden_layers": 12,
19
- "pad_token_id": 1,
20
- "projection_dim": 768,
21
- "torch_dtype": "float16",
22
- "transformers_version": "4.33.1",
23
- "vocab_size": 49408
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/text_encoder/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:57f094c002b6f50986b68a714285e72a23c17d9e1b146b078a2219397c51e37a
3
- size 246144152
 
 
 
 
models/newdream-sdxl-20/text_encoder/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:192a06f4ef7ece4acb33fc3c717790ee37b1f5d85e48e0dcac54dfea93e584a2
3
- size 246185562
 
 
 
 
models/newdream-sdxl-20/text_encoder_2/config.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "architectures": [
3
- "CLIPTextModelWithProjection"
4
- ],
5
- "attention_dropout": 0.0,
6
- "bos_token_id": 0,
7
- "dropout": 0.0,
8
- "eos_token_id": 2,
9
- "hidden_act": "gelu",
10
- "hidden_size": 1280,
11
- "initializer_factor": 1.0,
12
- "initializer_range": 0.02,
13
- "intermediate_size": 5120,
14
- "layer_norm_eps": 1e-05,
15
- "max_position_embeddings": 77,
16
- "model_type": "clip_text_model",
17
- "num_attention_heads": 20,
18
- "num_hidden_layers": 32,
19
- "pad_token_id": 1,
20
- "projection_dim": 1280,
21
- "torch_dtype": "float16",
22
- "transformers_version": "4.33.1",
23
- "vocab_size": 49408
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/text_encoder_2/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8a0d55a94e8508c869f35163fb6fcf34e02ea1b614d9259b47f97c562cff9575
3
- size 1389382176
 
 
 
 
models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8841d5e4c05ce74941eb536ec3835f600cc82d36763fc5f30c69d09a886158c9
3
- size 1389490462
 
 
 
 
models/newdream-sdxl-20/tokenizer/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "bos_token": {
3
- "content": "<|startoftext|>",
4
- "lstrip": false,
5
- "normalized": true,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "<|endoftext|>",
11
- "lstrip": false,
12
- "normalized": true,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": "<|endoftext|>",
17
- "unk_token": {
18
- "content": "<|endoftext|>",
19
- "lstrip": false,
20
- "normalized": true,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/tokenizer/tokenizer_config.json DELETED
@@ -1,33 +0,0 @@
1
- {
2
- "add_prefix_space": false,
3
- "bos_token": {
4
- "__type": "AddedToken",
5
- "content": "<|startoftext|>",
6
- "lstrip": false,
7
- "normalized": true,
8
- "rstrip": false,
9
- "single_word": false
10
- },
11
- "clean_up_tokenization_spaces": true,
12
- "do_lower_case": true,
13
- "eos_token": {
14
- "__type": "AddedToken",
15
- "content": "<|endoftext|>",
16
- "lstrip": false,
17
- "normalized": true,
18
- "rstrip": false,
19
- "single_word": false
20
- },
21
- "errors": "replace",
22
- "model_max_length": 77,
23
- "pad_token": "<|endoftext|>",
24
- "tokenizer_class": "CLIPTokenizer",
25
- "unk_token": {
26
- "__type": "AddedToken",
27
- "content": "<|endoftext|>",
28
- "lstrip": false,
29
- "normalized": true,
30
- "rstrip": false,
31
- "single_word": false
32
- }
33
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/tokenizer/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json DELETED
@@ -1,24 +0,0 @@
1
- {
2
- "bos_token": {
3
- "content": "<|startoftext|>",
4
- "lstrip": false,
5
- "normalized": true,
6
- "rstrip": false,
7
- "single_word": false
8
- },
9
- "eos_token": {
10
- "content": "<|endoftext|>",
11
- "lstrip": false,
12
- "normalized": true,
13
- "rstrip": false,
14
- "single_word": false
15
- },
16
- "pad_token": "!",
17
- "unk_token": {
18
- "content": "<|endoftext|>",
19
- "lstrip": false,
20
- "normalized": true,
21
- "rstrip": false,
22
- "single_word": false
23
- }
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json DELETED
@@ -1,33 +0,0 @@
1
- {
2
- "add_prefix_space": false,
3
- "bos_token": {
4
- "__type": "AddedToken",
5
- "content": "<|startoftext|>",
6
- "lstrip": false,
7
- "normalized": true,
8
- "rstrip": false,
9
- "single_word": false
10
- },
11
- "clean_up_tokenization_spaces": true,
12
- "do_lower_case": true,
13
- "eos_token": {
14
- "__type": "AddedToken",
15
- "content": "<|endoftext|>",
16
- "lstrip": false,
17
- "normalized": true,
18
- "rstrip": false,
19
- "single_word": false
20
- },
21
- "errors": "replace",
22
- "model_max_length": 77,
23
- "pad_token": "!",
24
- "tokenizer_class": "CLIPTokenizer",
25
- "unk_token": {
26
- "__type": "AddedToken",
27
- "content": "<|endoftext|>",
28
- "lstrip": false,
29
- "normalized": true,
30
- "rstrip": false,
31
- "single_word": false
32
- }
33
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/tokenizer_2/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/unet/config.json DELETED
@@ -1,71 +0,0 @@
1
- {
2
- "_class_name": "UNet2DConditionModel",
3
- "_diffusers_version": "0.21.2",
4
- "act_fn": "silu",
5
- "addition_embed_type": "text_time",
6
- "addition_embed_type_num_heads": 64,
7
- "addition_time_embed_dim": 256,
8
- "attention_head_dim": [
9
- 5,
10
- 10,
11
- 20
12
- ],
13
- "attention_type": "default",
14
- "block_out_channels": [
15
- 320,
16
- 640,
17
- 1280
18
- ],
19
- "center_input_sample": false,
20
- "class_embed_type": null,
21
- "class_embeddings_concat": false,
22
- "conv_in_kernel": 3,
23
- "conv_out_kernel": 3,
24
- "cross_attention_dim": 2048,
25
- "cross_attention_norm": null,
26
- "down_block_types": [
27
- "DownBlock2D",
28
- "CrossAttnDownBlock2D",
29
- "CrossAttnDownBlock2D"
30
- ],
31
- "downsample_padding": 1,
32
- "dropout": 0.0,
33
- "dual_cross_attention": false,
34
- "encoder_hid_dim": null,
35
- "encoder_hid_dim_type": null,
36
- "flip_sin_to_cos": true,
37
- "freq_shift": 0,
38
- "in_channels": 4,
39
- "layers_per_block": 2,
40
- "mid_block_only_cross_attention": null,
41
- "mid_block_scale_factor": 1,
42
- "mid_block_type": "UNetMidBlock2DCrossAttn",
43
- "norm_eps": 1e-05,
44
- "norm_num_groups": 32,
45
- "num_attention_heads": null,
46
- "num_class_embeds": null,
47
- "only_cross_attention": false,
48
- "out_channels": 4,
49
- "projection_class_embeddings_input_dim": 2816,
50
- "resnet_out_scale_factor": 1.0,
51
- "resnet_skip_time_act": false,
52
- "resnet_time_scale_shift": "default",
53
- "sample_size": 128,
54
- "time_cond_proj_dim": null,
55
- "time_embedding_act_fn": null,
56
- "time_embedding_dim": null,
57
- "time_embedding_type": "positional",
58
- "timestep_post_act": null,
59
- "transformer_layers_per_block": [
60
- 1,
61
- 2,
62
- 10
63
- ],
64
- "up_block_types": [
65
- "CrossAttnUpBlock2D",
66
- "CrossAttnUpBlock2D",
67
- "UpBlock2D"
68
- ],
69
- "upcast_attention": false,
70
- "use_linear_projection": true
71
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:9777699db732e3077d9249990a51f49ff9f57a9bec38d5fae69a5ad3e11e3f3d
3
- size 3885969408
 
 
 
 
models/newdream-sdxl-20/vae/config.json DELETED
@@ -1,31 +0,0 @@
1
- {
2
- "_class_name": "AutoencoderKL",
3
- "_diffusers_version": "0.21.2",
4
- "act_fn": "silu",
5
- "block_out_channels": [
6
- 128,
7
- 256,
8
- 512,
9
- 512
10
- ],
11
- "down_block_types": [
12
- "DownEncoderBlock2D",
13
- "DownEncoderBlock2D",
14
- "DownEncoderBlock2D",
15
- "DownEncoderBlock2D"
16
- ],
17
- "force_upcast": true,
18
- "in_channels": 3,
19
- "latent_channels": 4,
20
- "layers_per_block": 2,
21
- "norm_num_groups": 32,
22
- "out_channels": 3,
23
- "sample_size": 1024,
24
- "scaling_factor": 0.13025,
25
- "up_block_types": [
26
- "UpDecoderBlock2D",
27
- "UpDecoderBlock2D",
28
- "UpDecoderBlock2D",
29
- "UpDecoderBlock2D"
30
- ]
31
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:33c40ff3dc7adeb21dce76cd411d65828037efa0aa54432e3592418401cf8467
3
- size 167404866
 
 
 
 
models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:915b909d0eeef5985462226b2c9950ca9da42b5a6ec8c296c2e73f6419ae465c
3
- size 167335342
 
 
 
 
pyproject.toml DELETED
@@ -1,21 +0,0 @@
1
- [build-system]
2
- requires = ["setuptools >= 61.0"]
3
- build-backend = "setuptools.build_meta"
4
-
5
- [project]
6
- name = "edge-maxxing-4090-newdream"
7
- description = "An edge-maxxing model submission for the 4090 newdream contest"
8
- requires-python = ">=3.10,<3.11"
9
- version = "1.0.0"
10
- dependencies = [
11
- "diffusers==0.30.2",
12
- "transformers==4.41.2",
13
- "accelerate==0.31.0",
14
- "omegaconf==2.3.0",
15
- "torch==2.4.1",
16
- "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
17
- "DeepCache",
18
- ]
19
-
20
- [project.scripts]
21
- start_inference = "main:main"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt DELETED
@@ -1 +0,0 @@
1
- # Specify any extra options here, like --find-links, --pre, etc. Avoid specifying dependencies here and specify them in pyproject.toml instead
 
 
src/edge_maxxing_4090_newdream.egg-info/PKG-INFO DELETED
@@ -1,12 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: edge-maxxing-4090-newdream
3
- Version: 1.0.0
4
- Summary: An edge-maxxing model submission for the 4090 newdream contest
5
- Requires-Python: <3.11,>=3.10
6
- Requires-Dist: diffusers==0.30.2
7
- Requires-Dist: transformers==4.41.2
8
- Requires-Dist: accelerate==0.31.0
9
- Requires-Dist: omegaconf==2.3.0
10
- Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines
11
- Requires-Dist: nltk
12
- Requires-Dist: stable-fast@ git+https://github.com/chengzeyi/stable-fast.git@main#egg=stable-fast
 
 
 
 
 
 
 
 
 
 
 
 
 
src/edge_maxxing_4090_newdream.egg-info/SOURCES.txt DELETED
@@ -1,10 +0,0 @@
1
- README.md
2
- pyproject.toml
3
- src/main.py
4
- src/pipeline.py
5
- src/edge_maxxing_4090_newdream.egg-info/PKG-INFO
6
- src/edge_maxxing_4090_newdream.egg-info/SOURCES.txt
7
- src/edge_maxxing_4090_newdream.egg-info/dependency_links.txt
8
- src/edge_maxxing_4090_newdream.egg-info/entry_points.txt
9
- src/edge_maxxing_4090_newdream.egg-info/requires.txt
10
- src/edge_maxxing_4090_newdream.egg-info/top_level.txt
 
 
 
 
 
 
 
 
 
 
 
src/edge_maxxing_4090_newdream.egg-info/dependency_links.txt DELETED
@@ -1 +0,0 @@
1
-
 
 
src/edge_maxxing_4090_newdream.egg-info/entry_points.txt DELETED
@@ -1,2 +0,0 @@
1
- [console_scripts]
2
- start_inference = main:main
 
 
 
src/edge_maxxing_4090_newdream.egg-info/requires.txt DELETED
@@ -1,7 +0,0 @@
1
- diffusers==0.30.2
2
- transformers==4.41.2
3
- accelerate==0.31.0
4
- omegaconf==2.3.0
5
- edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines
6
- nltk
7
- stable-fast@ git+https://github.com/chengzeyi/stable-fast.git@main#egg=stable-fast
 
 
 
 
 
 
 
 
src/edge_maxxing_4090_newdream.egg-info/top_level.txt DELETED
@@ -1,2 +0,0 @@
1
- main
2
- pipeline
 
 
 
src/main.py DELETED
@@ -1,59 +0,0 @@
1
- import atexit
2
- from io import BytesIO
3
- from multiprocessing.connection import Listener
4
- from os import chmod, remove
5
- from os.path import abspath, exists
6
- from pathlib import Path
7
-
8
- import torch
9
-
10
- from PIL.JpegImagePlugin import JpegImageFile
11
- from pipelines.models import TextToImageRequest
12
-
13
- from pipeline import load_pipeline, infer
14
-
15
- SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
16
-
17
-
18
- def at_exit():
19
- torch.cuda.empty_cache()
20
-
21
-
22
- def main():
23
- atexit.register(at_exit)
24
-
25
- print(f"Loading pipeline")
26
- pipeline = load_pipeline()
27
-
28
- print(f"Pipeline loaded, creating socket at '{SOCKET}'")
29
-
30
- if exists(SOCKET):
31
- remove(SOCKET)
32
-
33
- with Listener(SOCKET) as listener:
34
- chmod(SOCKET, 0o777)
35
-
36
- print(f"Awaiting connections")
37
- with listener.accept() as connection:
38
- print(f"Connected")
39
-
40
- while True:
41
- try:
42
- request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
43
- except EOFError:
44
- print(f"Inference socket exiting")
45
-
46
- return
47
-
48
- image = infer(request, pipeline)
49
-
50
- data = BytesIO()
51
- image.save(data, format=JpegImageFile.format)
52
-
53
- packet = data.getvalue()
54
-
55
- connection.send_bytes(packet)
56
-
57
-
58
- if __name__ == '__main__':
59
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/pipeline.py DELETED
@@ -1,41 +0,0 @@
1
- import torch
2
- from PIL.Image import Image
3
- from diffusers import StableDiffusionXLPipeline
4
- from pipelines.models import TextToImageRequest
5
- from torch import Generator
6
- from DeepCache import DeepCacheSDHelper
7
-
8
-
9
- def load_pipeline() -> StableDiffusionXLPipeline:
10
- pipeline = StableDiffusionXLPipeline.from_pretrained(
11
- "./models/newdream-sdxl-20",
12
- torch_dtype=torch.float16,
13
- #local_files_only=True,
14
- use_safetensors=True,
15
- variant='fp16',
16
- ).to("cuda")
17
-
18
- helper = DeepCacheSDHelper(pipe=pipe)
19
- helper.set_params(cache_interval=3, cache_branch_id=0)
20
- helper.enable()
21
-
22
- for _ in range(5):
23
- pipeline(prompt="")
24
-
25
- return pipeline
26
-
27
-
28
- def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
29
- if request.seed is None:
30
- generator = None
31
- else:
32
- generator = Generator(pipeline.device).manual_seed(request.seed)
33
-
34
- return pipeline(
35
- prompt=request.prompt,
36
- negative_prompt=request.negative_prompt,
37
- width=request.width,
38
- height=request.height,
39
- generator=generator,
40
- num_inference_steps=20,
41
- ).images[0]