jeqin committed on
Commit
0d65fdc
·
1 Parent(s): 6d90379

update scripts

Browse files
Files changed (6) hide show
  1. .gitignore +14 -0
  2. download_from_hf.py +0 -20
  3. download_models.py +19 -0
  4. models.py +107 -0
  5. print_model.py +0 -21
  6. requirements.txt +5 -0
.gitignore ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .idea
2
+ __pycache__
3
+ .vscode
4
+ venv
5
+ .DS_Store
6
+ .env
7
+ *cache
8
+ .python-version
9
+
10
+ build
11
+ dist
12
+ .eggs
13
+
14
+ snippets/*
download_from_hf.py DELETED
@@ -1,20 +0,0 @@
"""One-off download of selected files from a single Hugging Face repo.

Run directly (optionally through a mirror):
    HF_ENDPOINT=https://hf-mirror.com python download_from_hf.py
"""
from pathlib import Path
from huggingface_hub import snapshot_download

repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
# repo_id = "stabilityai/stable-diffusion-xl-base-1.0"

# Destination mirrors the repo id under a fixed local models root.
local_path = str(Path("/Users/jeqin/work/code/sd/models") / repo_id)

# Fetch only the text-encoder weights; everything matching ignore_patterns
# is skipped, and local_dir_use_symlinks=False writes real files instead of
# symlinks into the HF cache.
snapshot_download(
    repo_id,
    local_dir=local_path,
    local_dir_use_symlinks=False,
    ignore_patterns=[
        ".gitattributes", "*.bin", "*.onnx", "*.ckpt", "*.onnx_data", "*.png", "*.jpg", "*.md"
    ],
    allow_patterns=["text_encoder/model.safetensors"],
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
download_models.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Download the model files listed in models.MODELS from the Hugging Face Hub.

Usage (optionally through a mirror):
    HF_ENDPOINT=https://hf-mirror.com python download_models.py

For every repository in MODELS, only the files named in its value dict are
fetched (passed to snapshot_download as allow_patterns); everything else in
the repo is skipped.
"""
from huggingface_hub import snapshot_download

from models import MODELS

for repo_id, files in MODELS.items():
    # Mirror the repo layout locally under ../models/<org>/<name>.
    # NOTE: local_dir_use_symlinks is deprecated and ignored since
    # huggingface-hub 0.23 (the version pinned in requirements.txt):
    # local_dir now always receives real files, so the argument is dropped.
    snapshot_download(
        repo_id,
        local_dir=f'../models/{repo_id}',
        allow_patterns=list(files.values()),
    )
models.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Catalog of the diffusers-format model files this project downloads.

MODELS maps a Hugging Face repo id to a dict of
``{component name: file path inside the repo}``. download_models.py feeds
the path values to ``huggingface_hub.snapshot_download`` as allow_patterns,
so only these files are fetched for each repository.
"""


def _sd_files():
    """Return the file map shared by every SD 1.x / 2.x style repo."""
    return {
        "unet_config": "unet/config.json",
        "unet": "unet/diffusion_pytorch_model.safetensors",
        "text_encoder_config": "text_encoder/config.json",
        "text_encoder": "text_encoder/model.safetensors",
        "vae_config": "vae/config.json",
        "vae": "vae/diffusion_pytorch_model.safetensors",
        "diffusion_config": "scheduler/scheduler_config.json",
        "tokenizer_vocab": "tokenizer/vocab.json",
        "tokenizer_merges": "tokenizer/merges.txt",
    }


def _sdxl_files():
    """SDXL repos carry a second text encoder and tokenizer on top of the base set."""
    return {
        **_sd_files(),
        "text_encoder_2_config": "text_encoder_2/config.json",
        "text_encoder_2": "text_encoder_2/model.safetensors",
        "tokenizer_2_vocab": "tokenizer_2/vocab.json",
        "tokenizer_2_merges": "tokenizer_2/merges.txt",
    }


MODELS = {
    # See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
    "stabilityai/sdxl-turbo": _sdxl_files(),
    "stabilityai/stable-diffusion-xl-base-1.0": _sdxl_files(),
    # See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
    "stabilityai/stable-diffusion-2-1-base": _sd_files(),
    "runwayml/stable-diffusion-v1-5": _sd_files(),
    "SG161222/Realistic_Vision_V3.0_VAE": _sd_files(),
}

_AVAILABLE_MODELS = list(MODELS)
print_model.py DELETED
@@ -1,21 +0,0 @@
"""Inspect a .safetensors checkpoint: print every tensor key and its shape."""
from safetensors import safe_open


def print_keys(model_path):
    """Print the key count of *model_path*, then each key with its tensor shape.

    Entries whose tensor has an empty shape (scalars) are printed with their
    value instead of the shape.
    """
    with safe_open(model_path, framework="pt") as f:
        print("key length: ", len(f.keys()))
        for k in f.keys():
            v = f.get_tensor(k)
            if v.shape:
                print(f"{k}: {type(v)}, {v.shape}")
            else:
                print(f"{k}: {v}")
            # Fix: the original also stashed every tensor in a local dict that
            # was never read, keeping the whole checkpoint resident in memory
            # for nothing — the accumulator is removed.


if __name__ == '__main__':
    model = "../models/lora_1.5/ColoringBook-sd15.safetensors"
    # model = "../models/lora_2.1/pytorch_lora_weights-sd21-comfyui.safetensors"
    print(model)
    print_keys(model)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ accelerate==0.30.1
2
+ diffusers==0.29.0
3
+ huggingface-hub==0.23.3
4
+ peft==0.11.1
5
+ torch==2.3.0