Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- llamagen-siglip-sb-block-causal/SelftokPipeline/.gitignore +1 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/README.md +163 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/renderer-AB66-data-eval.yml +180 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/selftok_sd35_E31-512_renderer_8b.yml +167 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/selftok_sd3_E31-512_renderer.yml +167 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/configs/res256/256_AB66-eval.yml +194 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/configs/res512/selftok_sd3_E31-512_modified.yml +166 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/__init__.py +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/__pycache__/__init__.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/__init__.py +4 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/base_dataset.py +161 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/build.py +11 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/selftok_dataset.py +685 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/slim_zipfile.py +812 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/slim_zipfile2.py +810 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/t2i_dataset.py +464 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/transforms.py +604 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/__init__.py +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/__version__.py +1 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/checker.py +136 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/dist.py +54 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/log.py +63 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/ordered_yaml.py +26 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/__init__.py +29 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/__pycache__/__init__.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/trainer_selftok_enc.py +419 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__init__.py +72 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/checkpoint.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/cloud_copy.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/context_utils.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/selftok_hook.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/train_loop.cpython-39.pyc +0 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/checkpoint.py +836 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/cloud_copy.py +450 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/context_utils.py +39 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/ema.py +90 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/lion.py +85 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/optimizer.py +206 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/parameter.py +231 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profile.py +71 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profile_npu.py +57 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profiler_npu/profile_utils.py +85 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profiler_npu/timeline_analysis.py +129 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/record.py +321 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/scheduler.py +33 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok/lr_scheduler.py +53 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok/threshold_scheduler.py +64 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok_hook.py +302 -0
- llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok_validation.py +238 -0
llamagen-siglip-sb-block-causal/SelftokPipeline/.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
llamagen-siglip-sb-block-causal/SelftokPipeline/README.md
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SelftokPipeline
|
| 2 |
+
|
| 3 |
+
This repository provides an inference pipeline for **Selftok**, a tokenizer and renderer framework for image generation. It consists of three primary components:
|
| 4 |
+
|
| 5 |
+
1. **Tokenization**: Convert images into discrete tokens.
|
| 6 |
+
2. **Diffusion Decoding**: Reconstruct images from tokens using a 50-step diffusion model.
|
| 7 |
+
3. **One-step Decoding**: Reconstruct images using a fast, one-step renderer.
|
| 8 |
+
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
```
|
| 12 |
+
git clone ssh://git@kwe-y.codehub.huawei.com:2222/p84402465/SelftokPipeline.git
|
| 13 |
+
cd ./SelftokPipeline
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
## Model Config and checkpoint address
|
| 17 |
+
|
| 18 |
+
### AB66
|
| 19 |
+
|
| 20 |
+
**Base:**
|
| 21 |
+
|
| 22 |
+
- yml: /configs/res256/256_AB66-eval.yml
|
| 23 |
+
- checkpoint: s3://bucket-5125-guiyang/outputs/l50043800/selftok/256-32768_AB66/2025-03-05_time_1942/output/ckpt/iter_182999.pth
|
| 24 |
+
|
| 25 |
+
**Renderer:**
|
| 26 |
+
|
| 27 |
+
- yml: /configs/renderer/renderer-AB66-data-eval.yml
|
| 28 |
+
- checkpoint: s3://bucket-5125-guiyang/outputs/l50043800/selftok/selftok256-renderer-GAN-AB66-512tokens_rFID/2025-04-16_time_2129/output/ckpt/iter_318999.pth
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
### E31
|
| 32 |
+
|
| 33 |
+
**Base:**
|
| 34 |
+
|
| 35 |
+
- yml: /configs/res512/selftok_sd3_E31-512_modified.yml
|
| 36 |
+
- checkpoint: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-06_time_2230/output/ckpt/iter_237999.pth
|
| 37 |
+
|
| 38 |
+
**Renderer:**
|
| 39 |
+
- yml: /configs/renderer/selftok_sd3_E31-512_renderer.yml (2B)
|
| 40 |
+
- yml: /configs/renderer/selftok_sd35_E31-512_renderer_8b.yml (8B)
|
| 41 |
+
- checkpoint: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31-renderer-GAN/2025-05-13_time_1708/output/ckpt/iter_331999.pth (2B)
|
| 42 |
+
- checkpoint: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31-renderer-GAN-large/2025-05-19_time_2056/output/ckpt/iter_347999.pth
|
| 43 |
+
|
| 44 |
+
## 1. Tokenization
|
| 45 |
+
|
| 46 |
+
This script demonstrates how to convert images into token sequences using a pretrained Selftok model.
|
| 47 |
+
|
| 48 |
+
```python
|
| 49 |
+
import argparse
|
| 50 |
+
from mimogpt.engine.utils import parse_args_from_yaml
|
| 51 |
+
from torchvision import transforms
|
| 52 |
+
from PIL import Image
|
| 53 |
+
import torch
|
| 54 |
+
import numpy as np
|
| 55 |
+
from mimogpt.infer.SelftokPipeline import SelftokPipeline
|
| 56 |
+
from mimogpt.infer.SelftokPipeline import NormalizeToTensor
|
| 57 |
+
from torchvision.utils import save_image
|
| 58 |
+
|
| 59 |
+
parser = argparse.ArgumentParser()
|
| 60 |
+
# parser.add_argument("--yml-path", type=str, default="path/to/your/config.yml")
|
| 61 |
+
# parser.add_argument("--pretrained", type=str, default="path/to/your/ckpt.pth")
|
| 62 |
+
parser.add_argument("--yml-path", type=str, default="./configs/res512/selftok_sd3_E31-512_modified.yml")
|
| 63 |
+
parser.add_argument("--pretrained", type=str, default="s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-06_time_2230/output/ckpt/iter_237999.pth")
|
| 64 |
+
parser.add_argument("--data_size", type=int, default=512)
|
| 65 |
+
|
| 66 |
+
cfg = parse_args_from_yaml(args.yml_path)
|
| 67 |
+
model = SelftokPipeline(cfg=cfg, ckpt_path=args.pretrained, datasize=args.data_size, device='cuda')
|
| 68 |
+
|
| 69 |
+
img_transform = transforms.Compose([
|
| 70 |
+
transforms.Resize(args.data_size),
|
| 71 |
+
transforms.CenterCrop(args.data_size),
|
| 72 |
+
NormalizeToTensor(),
|
| 73 |
+
])
|
| 74 |
+
|
| 75 |
+
image_paths = ['path/to/image1.png', 'path/to/image2.png']
|
| 76 |
+
images = [img_transform(Image.open(p)) for p in image_paths]
|
| 77 |
+
images = torch.stack(images).to('cuda')
|
| 78 |
+
|
| 79 |
+
tokens = model.encoding(images, device='cuda')
|
| 80 |
+
np.save('token.npy', tokens.detach().cpu().numpy())
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
---
|
| 84 |
+
|
| 85 |
+
## 2. Diffusion Decoding
|
| 86 |
+
|
| 87 |
+
Reconstruct images from token sequences using the full diffusion model (50 steps):
|
| 88 |
+
|
| 89 |
+
```python
|
| 90 |
+
import argparse
|
| 91 |
+
from mimogpt.engine.utils import parse_args_from_yaml
|
| 92 |
+
from torchvision import transforms
|
| 93 |
+
from PIL import Image
|
| 94 |
+
import torch
|
| 95 |
+
import numpy as np
|
| 96 |
+
from mimogpt.infer.SelftokPipeline import SelftokPipeline
|
| 97 |
+
from mimogpt.infer.SelftokPipeline import NormalizeToTensor
|
| 98 |
+
from torchvision.utils import save_image
|
| 99 |
+
|
| 100 |
+
parser = argparse.ArgumentParser()
|
| 101 |
+
# parser.add_argument("--yml-path", type=str, default="path/to/your/config.yml")
|
| 102 |
+
# parser.add_argument("--pretrained", type=str, default="path/to/your/ckpt.pth")
|
| 103 |
+
parser.add_argument("--yml-path", type=str, default="./configs/res512/selftok_sd3_E31-512_modified.yml")
|
| 104 |
+
parser.add_argument("--pretrained", type=str, default="s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-06_time_2230/output/ckpt/iter_237999.pth")
|
| 105 |
+
parser.add_argument("--data_size", type=int, default=512)
|
| 106 |
+
|
| 107 |
+
cfg = parse_args_from_yaml(args.yml_path)
|
| 108 |
+
model = SelftokPipeline(cfg=cfg, ckpt_path=args.pretrained, datasize=args.data_size, device='cuda')
|
| 109 |
+
|
| 110 |
+
tokens = np.load('token.npy')
|
| 111 |
+
images = model.decoding(tokens, device='cuda')
|
| 112 |
+
|
| 113 |
+
for b, img in enumerate(images):
|
| 114 |
+
save_image(img, f"re_{b}.png")
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
---
|
| 118 |
+
|
| 119 |
+
## 3. One-step Renderer Decoding
|
| 120 |
+
|
| 121 |
+
Reconstruct images using a fast, one-step renderer:
|
| 122 |
+
|
| 123 |
+
```python
|
| 124 |
+
import argparse
|
| 125 |
+
from mimogpt.engine.utils import parse_args_from_yaml
|
| 126 |
+
from torchvision import transforms
|
| 127 |
+
from PIL import Image
|
| 128 |
+
import torch
|
| 129 |
+
import numpy as np
|
| 130 |
+
from mimogpt.infer.SelftokPipeline import SelftokPipeline
|
| 131 |
+
from mimogpt.infer.SelftokPipeline import NormalizeToTensor
|
| 132 |
+
from torchvision.utils import save_image
|
| 133 |
+
|
| 134 |
+
parser = argparse.ArgumentParser()
|
| 135 |
+
## yml and checkpoint path for 2B model
|
| 136 |
+
parser.add_argument("--yml-path", type=str, default="./configs/renderer/selftok_sd3_E31-512_renderer.yml")
|
| 137 |
+
parser.add_argument("--pretrained", type=str, default="s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31-renderer-GAN/2025-05-13_time_1708/output/ckpt/iter_301999.pth")
|
| 138 |
+
|
| 139 |
+
## yml and checkpoint path for 8B model, NPU memory cost is around 45G for bs=2
|
| 140 |
+
|
| 141 |
+
# parser.add_argument("--yml-path", type=str, default="/home/ma-user/work/p84402465/SelftokPipeline_old_v2/configs/renderer/selftok_sd35_E31-512_renderer_8b.yml")
|
| 142 |
+
# parser.add_argument("--pretrained", type=str, default="s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31-renderer-GAN-large/2025-05-19_time_2056/output/ckpt/iter_347999.pth")
|
| 143 |
+
|
| 144 |
+
parser.add_argument("--data_size", type=int, default=512)
|
| 145 |
+
|
| 146 |
+
cfg = parse_args_from_yaml(args.yml_path)
|
| 147 |
+
model = SelftokPipeline(cfg=cfg, ckpt_path=args.pretrained, datasize=args.data_size, device='cuda')
|
| 148 |
+
|
| 149 |
+
tokens = np.load('token.npy')
|
| 150 |
+
images = model.decoding_with_renderer(tokens, device='cuda')
|
| 151 |
+
|
| 152 |
+
for b, img in enumerate(images):
|
| 153 |
+
save_image(img, f"render_{b}.png")
|
| 154 |
+
```
|
| 155 |
+
|
| 156 |
+
---
|
| 157 |
+
|
| 158 |
+
## Notes
|
| 159 |
+
|
| 160 |
+
* Replace all `path/to/...` with actual paths on your system or object storage.
|
| 161 |
+
* The scripts assume CUDA is available; modify `device='cuda'` to `'cpu'` if running on CPU.
|
| 162 |
+
|
| 163 |
+
|
llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/renderer-AB66-data-eval.yml
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
common:
|
| 2 |
+
output_path: 'output'
|
| 3 |
+
log_path: '/cache/logs'
|
| 4 |
+
tb_path: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 5 |
+
val_url: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 6 |
+
alex_path: '/cache/data/alexnet-owt-7be5be79.pth'
|
| 7 |
+
save_per_epochs: 1.0
|
| 8 |
+
eval_per_epochs: 1.0
|
| 9 |
+
eval_first: 0
|
| 10 |
+
use_fp16: 0
|
| 11 |
+
use_bf16: 1
|
| 12 |
+
use_zero: 0
|
| 13 |
+
use_fsdp: 0
|
| 14 |
+
use_2d_rope: 0
|
| 15 |
+
use_deepspeed: 0
|
| 16 |
+
random_seed: 123
|
| 17 |
+
log_interval: 50
|
| 18 |
+
machines: 1
|
| 19 |
+
task: 'selftokenc'
|
| 20 |
+
experiment_index: 0
|
| 21 |
+
delete_after_upload: True
|
| 22 |
+
log_recon_interval: 100
|
| 23 |
+
val_interval: 0
|
| 24 |
+
ckpt_interval: 1000
|
| 25 |
+
vae_path: '/cache/data/sd3_medium.ckpt'
|
| 26 |
+
resume_exclude_opt: True
|
| 27 |
+
pre_encode: False
|
| 28 |
+
resume_from_steps: 0
|
| 29 |
+
is_eval: True
|
| 30 |
+
|
| 31 |
+
model:
|
| 32 |
+
pretrain_model: '/cache/model/iter_149999.pth'
|
| 33 |
+
fix_encoder: True
|
| 34 |
+
full_tokens: True
|
| 35 |
+
fix_decoder: False
|
| 36 |
+
|
| 37 |
+
optimize:
|
| 38 |
+
max_epochs: 10000
|
| 39 |
+
warmup_epochs: 0.01
|
| 40 |
+
ema_in_cpu: False
|
| 41 |
+
grad_norm: 0.0
|
| 42 |
+
set_lpips_loss: True
|
| 43 |
+
perceptual_weight: 0.1
|
| 44 |
+
lr_scheduler:
|
| 45 |
+
dit_lr: 5.0e-5
|
| 46 |
+
token_lr: 5.0e-5
|
| 47 |
+
init_lr: 1.0e-4
|
| 48 |
+
init_step1: 5000
|
| 49 |
+
init_step2: 50000
|
| 50 |
+
max_step: 100000
|
| 51 |
+
min_lr1: 1.0e-4
|
| 52 |
+
min_lr2: 1.0e-4
|
| 53 |
+
|
| 54 |
+
tokenizer:
|
| 55 |
+
is_text_tokenized: False
|
| 56 |
+
pretrained_dit_path: '/cache/data/sd3_medium.ckpt'
|
| 57 |
+
params:
|
| 58 |
+
image_size: 256
|
| 59 |
+
k: 512
|
| 60 |
+
# stages: '600,800,1000'
|
| 61 |
+
# k_per_stage: '450,50,12'
|
| 62 |
+
# stages: '100,400,600,1000'
|
| 63 |
+
# k_per_stage: '120,300,48,12'
|
| 64 |
+
# stages: '100,400,600,1000'
|
| 65 |
+
# k_per_stage: '128,320,51,13'
|
| 66 |
+
stages: '1000'
|
| 67 |
+
k_per_stage: '512'
|
| 68 |
+
# k_m: 0.0
|
| 69 |
+
# k_s: 1.0
|
| 70 |
+
gradient_checkpointing: False
|
| 71 |
+
in_channels: 16
|
| 72 |
+
encoder_hidden_size: 16
|
| 73 |
+
ema_enc: False
|
| 74 |
+
enc_decay: 0.99
|
| 75 |
+
L2_lr: 0.
|
| 76 |
+
two_part_losses: False
|
| 77 |
+
|
| 78 |
+
diffusion_type: 'flow'
|
| 79 |
+
noise_schedule_config:
|
| 80 |
+
schedule: 'log_norm'
|
| 81 |
+
parameterization: 'velocity'
|
| 82 |
+
force_recon: False
|
| 83 |
+
m: 0.0
|
| 84 |
+
s: 1.0
|
| 85 |
+
|
| 86 |
+
enc: 'Enc-Qformer-Uni-XL/2'
|
| 87 |
+
enable_enc_variable_size: True
|
| 88 |
+
encoder_config:
|
| 89 |
+
time_adaln: True
|
| 90 |
+
qformer_mode: 'dual'
|
| 91 |
+
pre_norm: False
|
| 92 |
+
post_norm: True
|
| 93 |
+
xavier_init: False
|
| 94 |
+
qk_norm: False
|
| 95 |
+
attn_mask: False
|
| 96 |
+
|
| 97 |
+
quantizer_config:
|
| 98 |
+
codebook_size: 32768
|
| 99 |
+
code_dim: 16
|
| 100 |
+
w_diversity: 1.0
|
| 101 |
+
ema_entropy_ratio: 0.8
|
| 102 |
+
w_commit: 1.0
|
| 103 |
+
decay: 0.99
|
| 104 |
+
dead_code_threshold: 0.2
|
| 105 |
+
reset_cluster_size: 0.2
|
| 106 |
+
smart_react: True
|
| 107 |
+
continuous: False
|
| 108 |
+
reg: [0.1, 0.3]
|
| 109 |
+
K: 512
|
| 110 |
+
|
| 111 |
+
model: 'MMDiT_XL_Renderer'
|
| 112 |
+
decoder_config:
|
| 113 |
+
repeat: True
|
| 114 |
+
sd3_cond_pooling: None
|
| 115 |
+
class_dropout_prob: 0.
|
| 116 |
+
train_filter: 'all'
|
| 117 |
+
freeze_filter: ''
|
| 118 |
+
# qk_norm: 'rms'
|
| 119 |
+
init_method: None
|
| 120 |
+
#register_length: 8
|
| 121 |
+
time_adaln: 'pos_emb'
|
| 122 |
+
|
| 123 |
+
data: # except those with comments, other fields are useless for tokenizer training
|
| 124 |
+
batch_size: 12
|
| 125 |
+
num_workers: 8
|
| 126 |
+
checker:
|
| 127 |
+
status: disable
|
| 128 |
+
anchor_path: checkings/A100_bla
|
| 129 |
+
check_iter: 20
|
| 130 |
+
save_check: False
|
| 131 |
+
master_only: False
|
| 132 |
+
load_keys:
|
| 133 |
+
- image
|
| 134 |
+
- prompt
|
| 135 |
+
- timesteps
|
| 136 |
+
- noise
|
| 137 |
+
|
| 138 |
+
data_checker:
|
| 139 |
+
status: disable
|
| 140 |
+
save_prob: 0.1
|
| 141 |
+
save_amount: 10
|
| 142 |
+
save_path: data/precision/result
|
| 143 |
+
master_only: False
|
| 144 |
+
|
| 145 |
+
dataset:
|
| 146 |
+
target: base.Base
|
| 147 |
+
# meta_info: configs_t2i/data/selftok_ocr_debug.yml
|
| 148 |
+
# meta_info: configs_t2i/data/selftok_hucai_debug.yml
|
| 149 |
+
# meta_info: configs_t2i/data/selftok_256_debug.yml
|
| 150 |
+
meta_info: configs_t2i/data/selftok_imagenet_hucai_ocr.yml
|
| 151 |
+
# meta_info: configs_t2i/data/selftok_imagenet.yml
|
| 152 |
+
enlarge_ratio: 1 # N epochs
|
| 153 |
+
max_size: 256 # max resolution
|
| 154 |
+
resize_f: 16
|
| 155 |
+
filter_max_token_len: 128
|
| 156 |
+
load_t5_cache: False
|
| 157 |
+
prompt_augmentation: True
|
| 158 |
+
preprocess_func: center_crop # augmentation
|
| 159 |
+
sync_wait_time: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 160 |
+
sync_wait_ratio: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
cloud_copy:
|
| 164 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/RAW_DATA/imagenet_untar/
|
| 165 |
+
# dst: /cache/data/imagenet/
|
| 166 |
+
- url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA/D2/INIT_PARQUET/imagenet_untar/
|
| 167 |
+
dst: /cache/data/imagenet_untar/imagenet_untar_parquet/
|
| 168 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/ocr-height_width_ratio_0.9_1.1/parquet_filter_ratio_512_20250303182106/
|
| 169 |
+
dst: /cache/data/ocr/ocr_parquet/
|
| 170 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/hucai-face_filter/parquet_part_center_crop_contain_full_face_1p2_20250304195907/
|
| 171 |
+
dst: /cache/data/hucai/hucai_parquet/
|
| 172 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/mimo/DiT/DiT-XL-2-256x256.pt
|
| 173 |
+
dst: /cache/data/DiT-XL-2-256x256.pt
|
| 174 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/selftok/alexnet-owt-7be5be79.pth
|
| 175 |
+
dst: /cache/data/alexnet-owt-7be5be79.pth
|
| 176 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/sd3/transformer/sd3_medium.ckpt
|
| 177 |
+
dst: /cache/data/sd3_medium.ckpt
|
| 178 |
+
- url: s3://bucket-5125-guiyang/outputs/l50043800/selftok/256-32768_AB66/2025-03-05_time_1942/output/ckpt/iter_149999.pth
|
| 179 |
+
dst: /cache/model/iter_149999.pth
|
| 180 |
+
|
llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/selftok_sd35_E31-512_renderer_8b.yml
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
common:
|
| 2 |
+
output_path: 'output'
|
| 3 |
+
log_path: '/cache/logs'
|
| 4 |
+
tb_path: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 5 |
+
val_url: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 6 |
+
alex_path: '/cache/data/alexnet-owt-7be5be79.pth'
|
| 7 |
+
save_per_epochs: 1.0
|
| 8 |
+
eval_per_epochs: 1.0
|
| 9 |
+
eval_first: 0
|
| 10 |
+
use_fp16: 0
|
| 11 |
+
use_bf16: 1
|
| 12 |
+
use_zero: 0
|
| 13 |
+
use_fsdp: 1
|
| 14 |
+
use_2d_rope: 0
|
| 15 |
+
use_deepspeed: 0
|
| 16 |
+
random_seed: 123
|
| 17 |
+
log_interval: 50
|
| 18 |
+
machines: 1
|
| 19 |
+
task: 'selftokenc'
|
| 20 |
+
experiment_index: 0
|
| 21 |
+
delete_after_upload: True
|
| 22 |
+
log_recon_interval: 100
|
| 23 |
+
val_interval: 0
|
| 24 |
+
ckpt_interval: 2000
|
| 25 |
+
vae_path: '/cache/data/sd3_medium.ckpt'
|
| 26 |
+
# vae_path: '/cache/model/sd3.5_large.ckpt'
|
| 27 |
+
resume_exclude_opt: True
|
| 28 |
+
pre_encode: False
|
| 29 |
+
resume_from_steps: 0
|
| 30 |
+
is_eval: True
|
| 31 |
+
|
| 32 |
+
model:
|
| 33 |
+
pretrain_model: '/cache/model/iter_237999.pth'
|
| 34 |
+
fix_encoder: True
|
| 35 |
+
full_tokens: True
|
| 36 |
+
fix_decoder: False
|
| 37 |
+
|
| 38 |
+
optimize:
|
| 39 |
+
max_epochs: 10000
|
| 40 |
+
warmup_epochs: 0.01
|
| 41 |
+
ema_in_cpu: False
|
| 42 |
+
grad_norm: 0.0
|
| 43 |
+
set_lpips_loss: True
|
| 44 |
+
perceptual_weight: 0.1
|
| 45 |
+
lr_scheduler:
|
| 46 |
+
dit_lr: 5.0e-5
|
| 47 |
+
token_lr: 5.0e-5
|
| 48 |
+
init_lr: 1.0e-4
|
| 49 |
+
init_step1: 5000
|
| 50 |
+
init_step2: 50000
|
| 51 |
+
max_step: 100000
|
| 52 |
+
min_lr1: 1.0e-4
|
| 53 |
+
min_lr2: 1.0e-4
|
| 54 |
+
|
| 55 |
+
tokenizer:
|
| 56 |
+
is_text_tokenized: False
|
| 57 |
+
pretrained_dit_path: '/cache/data/sd3.5_large.ckpt'
|
| 58 |
+
params:
|
| 59 |
+
image_size: 512
|
| 60 |
+
k: 1536
|
| 61 |
+
stages: '200,400,600,800,1000'
|
| 62 |
+
k_per_stage: '512,512,256,192,64'
|
| 63 |
+
# k_m: 0.0
|
| 64 |
+
# k_s: 1.0
|
| 65 |
+
gradient_checkpointing: False
|
| 66 |
+
in_channels: 16
|
| 67 |
+
encoder_hidden_size: 16
|
| 68 |
+
ema_enc: False
|
| 69 |
+
enc_decay: 0.99
|
| 70 |
+
L2_lr: 0.
|
| 71 |
+
two_part_losses: False
|
| 72 |
+
|
| 73 |
+
diffusion_type: 'flow'
|
| 74 |
+
noise_schedule_config:
|
| 75 |
+
schedule: 'log_norm'
|
| 76 |
+
parameterization: 'velocity'
|
| 77 |
+
force_recon: False
|
| 78 |
+
m: 0.0
|
| 79 |
+
s: 1.0
|
| 80 |
+
|
| 81 |
+
enc: 'Enc-Qformer-Uni-XL/2'
|
| 82 |
+
enable_enc_variable_size: True
|
| 83 |
+
encoder_config:
|
| 84 |
+
time_adaln: True
|
| 85 |
+
qformer_mode: 'dual'
|
| 86 |
+
pre_norm: False
|
| 87 |
+
post_norm: True
|
| 88 |
+
xavier_init: False
|
| 89 |
+
qk_norm: False
|
| 90 |
+
attn_mask: False
|
| 91 |
+
|
| 92 |
+
quantizer_config:
|
| 93 |
+
codebook_size: 16384
|
| 94 |
+
code_dim: 32
|
| 95 |
+
w_diversity: 1.0
|
| 96 |
+
ema_entropy_ratio: 0.8
|
| 97 |
+
w_commit: 1.0
|
| 98 |
+
decay: 0.99
|
| 99 |
+
dead_code_threshold: 0.2
|
| 100 |
+
reset_cluster_size: 0.2
|
| 101 |
+
smart_react: True
|
| 102 |
+
continuous: False
|
| 103 |
+
reg: [0.1, 0.3]
|
| 104 |
+
K: 1536
|
| 105 |
+
|
| 106 |
+
model: 'MMDiT_XXL_Renderer'
|
| 107 |
+
decoder_config:
|
| 108 |
+
sd3_cond_pooling: None
|
| 109 |
+
class_dropout_prob: 0.1
|
| 110 |
+
train_filter: 'all'
|
| 111 |
+
freeze_filter: ''
|
| 112 |
+
qk_norm: 'rms'
|
| 113 |
+
init_method: None
|
| 114 |
+
#register_length: 8
|
| 115 |
+
time_adaln: 'pos_emb'
|
| 116 |
+
|
| 117 |
+
data: # except those with comments, other fields are useless for tokenizer training
|
| 118 |
+
batch_size: 4
|
| 119 |
+
num_workers: 8
|
| 120 |
+
checker:
|
| 121 |
+
status: disable
|
| 122 |
+
anchor_path: checkings/A100_bla
|
| 123 |
+
check_iter: 20
|
| 124 |
+
save_check: False
|
| 125 |
+
master_only: False
|
| 126 |
+
load_keys:
|
| 127 |
+
- image
|
| 128 |
+
- prompt
|
| 129 |
+
- timesteps
|
| 130 |
+
- noise
|
| 131 |
+
|
| 132 |
+
data_checker:
|
| 133 |
+
status: disable
|
| 134 |
+
save_prob: 0.1
|
| 135 |
+
save_amount: 10
|
| 136 |
+
save_path: data/precision/result
|
| 137 |
+
master_only: False
|
| 138 |
+
|
| 139 |
+
dataset:
|
| 140 |
+
target: base.Base
|
| 141 |
+
meta_info: configs_t2i/data/selftok_512_renderer.yml
|
| 142 |
+
enlarge_ratio: 1000 # N epochs
|
| 143 |
+
max_size: 512 # max resolution
|
| 144 |
+
resize_f: 16
|
| 145 |
+
filter_max_token_len: 128
|
| 146 |
+
load_t5_cache: False
|
| 147 |
+
prompt_augmentation: True
|
| 148 |
+
preprocess_func: center_crop # augmentation
|
| 149 |
+
sync_wait_time: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 150 |
+
sync_wait_ratio: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
cloud_copy:
|
| 154 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/selftok/alexnet-owt-7be5be79.pth
|
| 155 |
+
dst: /cache/data/alexnet-owt-7be5be79.pth
|
| 156 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/model/sd3.5_large.ckpt
|
| 157 |
+
dst: /cache/data/sd3.5_large.ckpt
|
| 158 |
+
- url: s3://bucket-5125-guiyang/code/z84378256/rFID/fid_ckpt/inception-2015-12-05.pt
|
| 159 |
+
dst: /cache/model/fid_ckpt/inception-2015-12-05.pt
|
| 160 |
+
- url: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-06_time_2230/output/ckpt/iter_237999.pth
|
| 161 |
+
dst: /cache/model/iter_237999.pth
|
| 162 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/OpenImage/parquet/parquet_part0-512_20241024171323/
|
| 163 |
+
dst: /cache/data/openimage/openimage_parquet/
|
| 164 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/ocr-height_width_ratio_0.9_1.1/parquet_filter_ratio_512_20250303182106/
|
| 165 |
+
dst: /cache/data/ocr/ocr_parquet/
|
| 166 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/hucai-face_filter/parquet_part_center_crop_contain_full_face_1p2_20250304195907/
|
| 167 |
+
dst: /cache/data/hucai/hucai_parquet/
|
llamagen-siglip-sb-block-causal/SelftokPipeline/configs/renderer/selftok_sd3_E31-512_renderer.yml
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
common:
|
| 2 |
+
output_path: 'output'
|
| 3 |
+
log_path: '/cache/logs'
|
| 4 |
+
tb_path: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 5 |
+
val_url: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 6 |
+
alex_path: '/cache/data/alexnet-owt-7be5be79.pth'
|
| 7 |
+
save_per_epochs: 1.0
|
| 8 |
+
eval_per_epochs: 1.0
|
| 9 |
+
eval_first: 0
|
| 10 |
+
use_fp16: 0
|
| 11 |
+
use_bf16: 1
|
| 12 |
+
use_zero: 0
|
| 13 |
+
use_fsdp: 0
|
| 14 |
+
use_2d_rope: 0
|
| 15 |
+
use_deepspeed: 0
|
| 16 |
+
random_seed: 123
|
| 17 |
+
log_interval: 50
|
| 18 |
+
machines: 1
|
| 19 |
+
task: 'selftokenc'
|
| 20 |
+
experiment_index: 0
|
| 21 |
+
delete_after_upload: True
|
| 22 |
+
log_recon_interval: 100
|
| 23 |
+
val_interval: 0
|
| 24 |
+
ckpt_interval: 2000
|
| 25 |
+
vae_path: '/cache/data/sd3_medium.ckpt'
|
| 26 |
+
resume_exclude_opt: True
|
| 27 |
+
pre_encode: False
|
| 28 |
+
resume_from_steps: 0
|
| 29 |
+
is_eval: True
|
| 30 |
+
|
| 31 |
+
model:
|
| 32 |
+
pretrain_model: '/cache/data/iter_69999.pth'
|
| 33 |
+
|
| 34 |
+
optimize:
|
| 35 |
+
max_epochs: 1000
|
| 36 |
+
warmup_epochs: 0.01
|
| 37 |
+
ema_in_cpu: False
|
| 38 |
+
grad_norm: 0.0
|
| 39 |
+
lr_scheduler:
|
| 40 |
+
dit_lr: 2.0e-5
|
| 41 |
+
token_lr: 1.0e-4
|
| 42 |
+
init_lr: 1.0e-4
|
| 43 |
+
init_step1: 5000
|
| 44 |
+
init_step2: 50000
|
| 45 |
+
max_step: 100000
|
| 46 |
+
min_lr1: 1.0e-4
|
| 47 |
+
min_lr2: 1.0e-4
|
| 48 |
+
|
| 49 |
+
tokenizer:
|
| 50 |
+
is_text_tokenized: False
|
| 51 |
+
pretrained_dit_path: '/cache/data/sd3_medium.ckpt'
|
| 52 |
+
params:
|
| 53 |
+
image_size: 512
|
| 54 |
+
k: 1536
|
| 55 |
+
stages: '200,400,600,800,1000'
|
| 56 |
+
k_per_stage: '512,512,256,192,64'
|
| 57 |
+
# k_m: 0.0
|
| 58 |
+
# k_s: 1.0
|
| 59 |
+
gradient_checkpointing: False
|
| 60 |
+
in_channels: 16
|
| 61 |
+
encoder_hidden_size: 16
|
| 62 |
+
ema_enc: False
|
| 63 |
+
enc_decay: 0.99
|
| 64 |
+
L2_lr: 0.
|
| 65 |
+
two_part_losses: False
|
| 66 |
+
|
| 67 |
+
diffusion_type: 'flow'
|
| 68 |
+
noise_schedule_config:
|
| 69 |
+
schedule: 'log_norm'
|
| 70 |
+
parameterization: 'velocity'
|
| 71 |
+
force_recon: False
|
| 72 |
+
m: 0.0
|
| 73 |
+
s: 1.0
|
| 74 |
+
|
| 75 |
+
enc: 'Enc-Qformer-Uni-XL/2'
|
| 76 |
+
enable_enc_variable_size: True
|
| 77 |
+
encoder_config:
|
| 78 |
+
time_adaln: True
|
| 79 |
+
qformer_mode: 'dual'
|
| 80 |
+
pre_norm: False
|
| 81 |
+
post_norm: True
|
| 82 |
+
xavier_init: False
|
| 83 |
+
qk_norm: False
|
| 84 |
+
attn_mask: False
|
| 85 |
+
|
| 86 |
+
quantizer_config:
|
| 87 |
+
codebook_size: 16384
|
| 88 |
+
code_dim: 32
|
| 89 |
+
w_diversity: 1.0
|
| 90 |
+
ema_entropy_ratio: 0.8
|
| 91 |
+
w_commit: 1.0
|
| 92 |
+
decay: 0.99
|
| 93 |
+
dead_code_threshold: 0.2
|
| 94 |
+
reset_cluster_size: 0.2
|
| 95 |
+
smart_react: True
|
| 96 |
+
# redistribute: False
|
| 97 |
+
continuous: False
|
| 98 |
+
reg: [0.1, 0.3]
|
| 99 |
+
K: 1536
|
| 100 |
+
# share: True
|
| 101 |
+
|
| 102 |
+
model: 'MMDiT_XL_Renderer'
|
| 103 |
+
context_see_xt: True
|
| 104 |
+
decoder_config:
|
| 105 |
+
repeat: False
|
| 106 |
+
sd3_cond_pooling: None
|
| 107 |
+
class_dropout_prob: 0.1
|
| 108 |
+
train_filter: 'all'
|
| 109 |
+
freeze_filter: ''
|
| 110 |
+
# qk_norm: 'rms'
|
| 111 |
+
init_method: None
|
| 112 |
+
#register_length: 8
|
| 113 |
+
time_adaln: 'pos_emb'
|
| 114 |
+
|
| 115 |
+
data: # only the fields annotated with comments matter for tokenizer training; the rest are unused
|
| 116 |
+
batch_size: 8
|
| 117 |
+
num_workers: 8
|
| 118 |
+
checker:
|
| 119 |
+
status: disable
|
| 120 |
+
anchor_path: checkings/A100_bla
|
| 121 |
+
check_iter: 20
|
| 122 |
+
save_check: False
|
| 123 |
+
master_only: False
|
| 124 |
+
load_keys:
|
| 125 |
+
- image
|
| 126 |
+
- prompt
|
| 127 |
+
- timesteps
|
| 128 |
+
- noise
|
| 129 |
+
|
| 130 |
+
data_checker:
|
| 131 |
+
status: disable
|
| 132 |
+
save_prob: 0.1
|
| 133 |
+
save_amount: 10
|
| 134 |
+
save_path: data/precision/result
|
| 135 |
+
master_only: False
|
| 136 |
+
|
| 137 |
+
dataset:
|
| 138 |
+
target: base.Base
|
| 139 |
+
meta_info: configs_t2i/data/selftok_openimage.yml
|
| 140 |
+
enlarge_ratio: 1000 # N epochs
|
| 141 |
+
max_size: 512 # max resolution
|
| 142 |
+
resize_f: 16
|
| 143 |
+
filter_max_token_len: 128
|
| 144 |
+
load_t5_cache: False
|
| 145 |
+
prompt_augmentation: True
|
| 146 |
+
preprocess_func: center_crop # augmentation
|
| 147 |
+
sync_wait_time: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 148 |
+
sync_wait_ratio: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
cloud_copy:
|
| 152 |
+
# - url: s3://bucket-5125-guiyang/misc/MGM/models/mimo/DiT/DiT-XL-2-256x256.pt
|
| 153 |
+
# dst: /cache/data/DiT-XL-2-256x256.pt
|
| 154 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/selftok/alexnet-owt-7be5be79.pth
|
| 155 |
+
dst: /cache/data/alexnet-owt-7be5be79.pth
|
| 156 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/sd3/transformer/sd3_medium.ckpt
|
| 157 |
+
dst: /cache/data/sd3_medium.ckpt
|
| 158 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/OpenImage/parquet/parquet_part0-512_20241024171323/
|
| 159 |
+
dst: /cache/data/openimage/openimage_parquet/
|
| 160 |
+
# - url: s3://bucket-5125-guiyang/code/z84397892/datasets/tokenizer_pretrain/parquet_imagenet21k_w_hw_480/
|
| 161 |
+
# dst: /cache/data/imagenet480/imagenet480_parquet/
|
| 162 |
+
# - url: s3://bucket-5125-guiyang/data/z84399568/ocr-height_width_ratio_0.9_1.1/parquet_filter_ratio_512_20250303182106/
|
| 163 |
+
# dst: /cache/data/ocr/ocr_parquet/
|
| 164 |
+
# - url: s3://bucket-5125-guiyang/data/z84399568/hucai-face_filter/parquet_part_center_crop_contain_full_face_1p2_20250304195907/
|
| 165 |
+
# dst: /cache/data/hucai/hucai_parquet/
|
| 166 |
+
- url: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-03_time_2230/output/ckpt/iter_69999.pth
|
| 167 |
+
dst: /cache/data/iter_69999.pth
|
llamagen-siglip-sb-block-causal/SelftokPipeline/configs/res256/256_AB66-eval.yml
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
common:
|
| 2 |
+
output_path: 'output'
|
| 3 |
+
log_path: '/cache/logs'
|
| 4 |
+
tb_path: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 5 |
+
val_url: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 6 |
+
alex_path: '/cache/data/alexnet-owt-7be5be79.pth'
|
| 7 |
+
save_per_epochs: 1.0
|
| 8 |
+
eval_per_epochs: 1.0
|
| 9 |
+
eval_first: 0
|
| 10 |
+
use_fp16: 0
|
| 11 |
+
use_bf16: 1
|
| 12 |
+
use_zero: 0
|
| 13 |
+
use_fsdp: 0
|
| 14 |
+
use_2d_rope: 0
|
| 15 |
+
use_deepspeed: 0
|
| 16 |
+
random_seed: 123
|
| 17 |
+
log_interval: 50
|
| 18 |
+
machines: 1
|
| 19 |
+
task: 'selftokenc'
|
| 20 |
+
experiment_index: 0
|
| 21 |
+
delete_after_upload: True
|
| 22 |
+
log_recon_interval: 100
|
| 23 |
+
val_interval: 0
|
| 24 |
+
ckpt_interval: 1000
|
| 25 |
+
vae_path: '/cache/data/sd3_medium.ckpt'
|
| 26 |
+
resume_exclude_opt: False
|
| 27 |
+
pre_encode: False
|
| 28 |
+
resume_from_steps: 0
|
| 29 |
+
is_eval: True
|
| 30 |
+
|
| 31 |
+
model:
|
| 32 |
+
pretrain_model: ''
|
| 33 |
+
|
| 34 |
+
optimize:
|
| 35 |
+
max_epochs: 1000
|
| 36 |
+
warmup_epochs: 0.01
|
| 37 |
+
ema_in_cpu: False
|
| 38 |
+
grad_norm: 0.0
|
| 39 |
+
lr_scheduler:
|
| 40 |
+
dit_lr: 1.0e-5
|
| 41 |
+
token_lr: 5.0e-5
|
| 42 |
+
init_lr: 5.0e-5
|
| 43 |
+
init_step1: 5000
|
| 44 |
+
init_step2: 50000
|
| 45 |
+
max_step: 100000
|
| 46 |
+
min_lr1: 5.0e-5
|
| 47 |
+
min_lr2: 5.0e-5
|
| 48 |
+
|
| 49 |
+
tokenizer:
|
| 50 |
+
is_text_tokenized: False
|
| 51 |
+
pretrained_dit_path: '/cache/data/sd3_medium.ckpt'
|
| 52 |
+
params:
|
| 53 |
+
image_size: 256
|
| 54 |
+
k: 512
|
| 55 |
+
# stages: '600,800,1000'
|
| 56 |
+
# k_per_stage: '450,50,12'
|
| 57 |
+
# stages: '100,400,600,1000'
|
| 58 |
+
# k_per_stage: '120,300,48,12'
|
| 59 |
+
# stages: '100,400,600,1000'
|
| 60 |
+
# k_per_stage: '128,320,51,13'
|
| 61 |
+
stages: '200,400,600,800,1000'
|
| 62 |
+
k_per_stage: '192,184,72,48,16'
|
| 63 |
+
# k_m: 0.0
|
| 64 |
+
# k_s: 1.0
|
| 65 |
+
gradient_checkpointing: False
|
| 66 |
+
in_channels: 16
|
| 67 |
+
encoder_hidden_size: 16
|
| 68 |
+
ema_enc: False
|
| 69 |
+
enc_decay: 0.99
|
| 70 |
+
L2_lr: 0.
|
| 71 |
+
two_part_losses: False
|
| 72 |
+
|
| 73 |
+
diffusion_type: 'flow'
|
| 74 |
+
noise_schedule_config:
|
| 75 |
+
schedule: 'log_norm'
|
| 76 |
+
parameterization: 'velocity'
|
| 77 |
+
force_recon: False
|
| 78 |
+
m: 0.0
|
| 79 |
+
s: 1.0
|
| 80 |
+
|
| 81 |
+
enc: 'Enc-Qformer-Uni-XL/2'
|
| 82 |
+
enable_enc_variable_size: True
|
| 83 |
+
encoder_config:
|
| 84 |
+
time_adaln: True
|
| 85 |
+
qformer_mode: 'dual'
|
| 86 |
+
pre_norm: False
|
| 87 |
+
post_norm: True
|
| 88 |
+
xavier_init: False
|
| 89 |
+
qk_norm: False
|
| 90 |
+
attn_mask: False
|
| 91 |
+
|
| 92 |
+
quantizer_config:
|
| 93 |
+
codebook_size: 32768
|
| 94 |
+
code_dim: 16
|
| 95 |
+
# w_diversity: 0.5
|
| 96 |
+
w_diversity: 1.0
|
| 97 |
+
ema_entropy_ratio: 0.8
|
| 98 |
+
w_commit: 1.0
|
| 99 |
+
decay: 0.99
|
| 100 |
+
dead_code_threshold: 0.2
|
| 101 |
+
reset_cluster_size: 0.2
|
| 102 |
+
smart_react: True
|
| 103 |
+
continuous: False
|
| 104 |
+
reg: [0.1, 0.3]
|
| 105 |
+
K: 512
|
| 106 |
+
|
| 107 |
+
model: 'MMDiT_XL'
|
| 108 |
+
context_see_xt: True
|
| 109 |
+
decoder_config:
|
| 110 |
+
sd3_cond_pooling: None
|
| 111 |
+
class_dropout_prob: 0.1
|
| 112 |
+
train_filter: 'all'
|
| 113 |
+
freeze_filter: ''
|
| 114 |
+
# qk_norm: 'rms'
|
| 115 |
+
init_method: None
|
| 116 |
+
#register_length: 8
|
| 117 |
+
time_adaln: 'pos_emb'
|
| 118 |
+
|
| 119 |
+
data: # only the fields annotated with comments matter for tokenizer training; the rest are unused
|
| 120 |
+
batch_size: 12
|
| 121 |
+
num_workers: 8
|
| 122 |
+
checker:
|
| 123 |
+
status: disable
|
| 124 |
+
anchor_path: checkings/A100_bla
|
| 125 |
+
check_iter: 20
|
| 126 |
+
save_check: False
|
| 127 |
+
master_only: False
|
| 128 |
+
load_keys:
|
| 129 |
+
- image
|
| 130 |
+
- prompt
|
| 131 |
+
- timesteps
|
| 132 |
+
- noise
|
| 133 |
+
|
| 134 |
+
data_checker:
|
| 135 |
+
status: disable
|
| 136 |
+
save_prob: 0.1
|
| 137 |
+
save_amount: 10
|
| 138 |
+
save_path: data/precision/result
|
| 139 |
+
master_only: False
|
| 140 |
+
|
| 141 |
+
dataset:
|
| 142 |
+
target: base.Base
|
| 143 |
+
# meta_info: configs_t2i/data/selftok_ocr_debug.yml
|
| 144 |
+
# meta_info: configs_t2i/data/selftok_hucai_debug.yml
|
| 145 |
+
# meta_info: configs_t2i/data/selftok_256_debug.yml
|
| 146 |
+
meta_info: configs_t2i/data/selftok_imagenet_hucai_ocr.yml
|
| 147 |
+
enlarge_ratio: 1 # N epochs
|
| 148 |
+
max_size: 256 # max resolution
|
| 149 |
+
resize_f: 16
|
| 150 |
+
filter_max_token_len: 128
|
| 151 |
+
load_t5_cache: False
|
| 152 |
+
prompt_augmentation: True
|
| 153 |
+
preprocess_func: center_crop # augmentation
|
| 154 |
+
sync_wait_time: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 155 |
+
sync_wait_ratio: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
cloud_copy:
|
| 159 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/RAW_DATA/imagenet_untar/
|
| 160 |
+
# dst: /cache/data/imagenet/
|
| 161 |
+
- url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA/D2/INIT_PARQUET/imagenet_untar/
|
| 162 |
+
dst: /cache/data/imagenet_untar/imagenet_untar_parquet/
|
| 163 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/ocr-height_width_ratio_0.9_1.1/parquet_filter_ratio_512_20250303182106/
|
| 164 |
+
dst: /cache/data/ocr/ocr_parquet/
|
| 165 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/hucai-face_filter/parquet_part_center_crop_contain_full_face_1p2_20250304195907/
|
| 166 |
+
dst: /cache/data/hucai/hucai_parquet/
|
| 167 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/mimo/DiT/DiT-XL-2-256x256.pt
|
| 168 |
+
dst: /cache/data/DiT-XL-2-256x256.pt
|
| 169 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/selftok/alexnet-owt-7be5be79.pth
|
| 170 |
+
dst: /cache/data/alexnet-owt-7be5be79.pth
|
| 171 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/sd3/transformer/sd3_medium.ckpt
|
| 172 |
+
dst: /cache/data/sd3_medium.ckpt
|
| 173 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/flickr/parquet_part1007/
|
| 174 |
+
# dst: /cache/data/flickr/parquet_part1007/
|
| 175 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/flickr/parquet_1006_3/
|
| 176 |
+
# dst: /cache/data/flickr/parquet_1006_3/
|
| 177 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/visual_china/parquet_10111055/
|
| 178 |
+
# dst: /cache/data/visual_china/parquet_10111055/
|
| 179 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA/V0601/parquet1_internvl2/
|
| 180 |
+
# dst: /cache/data/V0601/parquet1_internvl2/
|
| 181 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/laion2B-multi/parquet_part10091027-512_20241014102423/
|
| 182 |
+
# dst: /cache/data/laion2B-multi/parquet_part10091027-512_20241014102423/
|
| 183 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/laion2B-multi/parquet_part4-512_20241014103020/
|
| 184 |
+
# dst: /cache/data/laion2B-multi/parquet_part4-512_20241014103020/
|
| 185 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/laion2B-multi/parquet_1007_2/
|
| 186 |
+
# dst: /cache/data/laion2B-multi/parquet_1007_2/
|
| 187 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/laion2B-multi/parquet_1009_2/
|
| 188 |
+
# dst: /cache/data/laion2B-multi/parquet_1009_2/
|
| 189 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/datacomp/parquet_part1009-512_20241014120509/
|
| 190 |
+
# dst: /cache/data/datacomp/parquet_part1009-512_20241014120509/
|
| 191 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/datacomp/parquet_part10101630-512_20241014215832/
|
| 192 |
+
# dst: /cache/data/datacomp/parquet_part10101630-512_20241014215832/
|
| 193 |
+
# - url: s3://bucket-5125-guiyang/data/AIGC/TRAIN_DATA_WHOLE_PACK/D2_data/EDD.D2.PT256.B002-1B/datacomp/parquet_part10111115-512_20241014125233/
|
| 194 |
+
# dst: /cache/data/datacomp/parquet_part10111115-512_20241014125233/
|
llamagen-siglip-sb-block-causal/SelftokPipeline/configs/res512/selftok_sd3_E31-512_modified.yml
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
common:
|
| 2 |
+
output_path: 'output'
|
| 3 |
+
log_path: '/cache/logs'
|
| 4 |
+
tb_path: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 5 |
+
val_url: 's3://bucket-5125-guiyang/outputs/selftok_enc_tb/v4'
|
| 6 |
+
alex_path: '/cache/data/alexnet-owt-7be5be79.pth'
|
| 7 |
+
save_per_epochs: 1.0
|
| 8 |
+
eval_per_epochs: 1.0
|
| 9 |
+
eval_first: 0
|
| 10 |
+
use_fp16: 0
|
| 11 |
+
use_bf16: 1
|
| 12 |
+
use_zero: 0
|
| 13 |
+
use_fsdp: 0
|
| 14 |
+
use_2d_rope: 0
|
| 15 |
+
use_deepspeed: 0
|
| 16 |
+
random_seed: 123
|
| 17 |
+
log_interval: 50
|
| 18 |
+
machines: 1
|
| 19 |
+
task: 'selftokenc'
|
| 20 |
+
experiment_index: 0
|
| 21 |
+
delete_after_upload: True
|
| 22 |
+
log_recon_interval: 100
|
| 23 |
+
val_interval: 0
|
| 24 |
+
ckpt_interval: 2000
|
| 25 |
+
vae_path: '/cache/data/sd3_medium.ckpt'
|
| 26 |
+
resume_exclude_opt: True
|
| 27 |
+
pre_encode: False
|
| 28 |
+
resume_from_steps: 0
|
| 29 |
+
is_eval: True
|
| 30 |
+
|
| 31 |
+
model:
|
| 32 |
+
pretrain_model: '/cache/data/iter_69999.pth'
|
| 33 |
+
|
| 34 |
+
optimize:
|
| 35 |
+
max_epochs: 1000
|
| 36 |
+
warmup_epochs: 0.01
|
| 37 |
+
ema_in_cpu: False
|
| 38 |
+
grad_norm: 0.0
|
| 39 |
+
lr_scheduler:
|
| 40 |
+
dit_lr: 2.0e-5
|
| 41 |
+
token_lr: 1.0e-4
|
| 42 |
+
init_lr: 1.0e-4
|
| 43 |
+
init_step1: 5000
|
| 44 |
+
init_step2: 50000
|
| 45 |
+
max_step: 100000
|
| 46 |
+
min_lr1: 1.0e-4
|
| 47 |
+
min_lr2: 1.0e-4
|
| 48 |
+
|
| 49 |
+
tokenizer:
|
| 50 |
+
is_text_tokenized: False
|
| 51 |
+
pretrained_dit_path: '/cache/data/sd3_medium.ckpt'
|
| 52 |
+
params:
|
| 53 |
+
image_size: 512
|
| 54 |
+
k: 1536
|
| 55 |
+
stages: '200,400,600,800,1000'
|
| 56 |
+
k_per_stage: '512,512,256,192,64'
|
| 57 |
+
# k_m: 0.0
|
| 58 |
+
# k_s: 1.0
|
| 59 |
+
gradient_checkpointing: False
|
| 60 |
+
in_channels: 16
|
| 61 |
+
encoder_hidden_size: 16
|
| 62 |
+
ema_enc: False
|
| 63 |
+
enc_decay: 0.99
|
| 64 |
+
L2_lr: 0.
|
| 65 |
+
two_part_losses: False
|
| 66 |
+
|
| 67 |
+
diffusion_type: 'flow'
|
| 68 |
+
noise_schedule_config:
|
| 69 |
+
schedule: 'log_norm'
|
| 70 |
+
parameterization: 'velocity'
|
| 71 |
+
force_recon: False
|
| 72 |
+
m: 0.0
|
| 73 |
+
s: 1.0
|
| 74 |
+
|
| 75 |
+
enc: 'Enc-Qformer-Uni-XL/2'
|
| 76 |
+
enable_enc_variable_size: True
|
| 77 |
+
encoder_config:
|
| 78 |
+
time_adaln: True
|
| 79 |
+
qformer_mode: 'dual'
|
| 80 |
+
pre_norm: False
|
| 81 |
+
post_norm: True
|
| 82 |
+
xavier_init: False
|
| 83 |
+
qk_norm: False
|
| 84 |
+
attn_mask: False
|
| 85 |
+
|
| 86 |
+
quantizer_config:
|
| 87 |
+
codebook_size: 16384
|
| 88 |
+
code_dim: 32
|
| 89 |
+
w_diversity: 1.0
|
| 90 |
+
ema_entropy_ratio: 0.8
|
| 91 |
+
w_commit: 1.0
|
| 92 |
+
decay: 0.99
|
| 93 |
+
dead_code_threshold: 0.2
|
| 94 |
+
reset_cluster_size: 0.2
|
| 95 |
+
smart_react: True
|
| 96 |
+
# redistribute: False
|
| 97 |
+
continuous: False
|
| 98 |
+
reg: [0.1, 0.3]
|
| 99 |
+
K: 1536
|
| 100 |
+
# share: True
|
| 101 |
+
|
| 102 |
+
model: 'MMDiT_XL'
|
| 103 |
+
context_see_xt: True
|
| 104 |
+
decoder_config:
|
| 105 |
+
sd3_cond_pooling: None
|
| 106 |
+
class_dropout_prob: 0.1
|
| 107 |
+
train_filter: 'all'
|
| 108 |
+
freeze_filter: ''
|
| 109 |
+
# qk_norm: 'rms'
|
| 110 |
+
init_method: None
|
| 111 |
+
#register_length: 8
|
| 112 |
+
time_adaln: 'pos_emb'
|
| 113 |
+
|
| 114 |
+
data: # only the fields annotated with comments matter for tokenizer training; the rest are unused
|
| 115 |
+
batch_size: 8
|
| 116 |
+
num_workers: 8
|
| 117 |
+
checker:
|
| 118 |
+
status: disable
|
| 119 |
+
anchor_path: checkings/A100_bla
|
| 120 |
+
check_iter: 20
|
| 121 |
+
save_check: False
|
| 122 |
+
master_only: False
|
| 123 |
+
load_keys:
|
| 124 |
+
- image
|
| 125 |
+
- prompt
|
| 126 |
+
- timesteps
|
| 127 |
+
- noise
|
| 128 |
+
|
| 129 |
+
data_checker:
|
| 130 |
+
status: disable
|
| 131 |
+
save_prob: 0.1
|
| 132 |
+
save_amount: 10
|
| 133 |
+
save_path: data/precision/result
|
| 134 |
+
master_only: False
|
| 135 |
+
|
| 136 |
+
dataset:
|
| 137 |
+
target: base.Base
|
| 138 |
+
meta_info: configs_t2i/data/selftok_openimage.yml
|
| 139 |
+
enlarge_ratio: 1000 # N epochs
|
| 140 |
+
max_size: 512 # max resolution
|
| 141 |
+
resize_f: 16
|
| 142 |
+
filter_max_token_len: 128
|
| 143 |
+
load_t5_cache: False
|
| 144 |
+
prompt_augmentation: True
|
| 145 |
+
preprocess_func: center_crop # augmentation
|
| 146 |
+
sync_wait_time: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 147 |
+
sync_wait_ratio: 1 # if ROMA bandwidth limit reached, set ratio as >=2, wait>=1
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
cloud_copy:
|
| 151 |
+
# - url: s3://bucket-5125-guiyang/misc/MGM/models/mimo/DiT/DiT-XL-2-256x256.pt
|
| 152 |
+
# dst: /cache/data/DiT-XL-2-256x256.pt
|
| 153 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/selftok/alexnet-owt-7be5be79.pth
|
| 154 |
+
dst: /cache/data/alexnet-owt-7be5be79.pth
|
| 155 |
+
- url: s3://bucket-5125-guiyang/misc/MGM/models/sd3/transformer/sd3_medium.ckpt
|
| 156 |
+
dst: /cache/data/sd3_medium.ckpt
|
| 157 |
+
- url: s3://bucket-5125-guiyang/data/z84399568/OpenImage/parquet/parquet_part0-512_20241024171323/
|
| 158 |
+
dst: /cache/data/openimage/openimage_parquet/
|
| 159 |
+
# - url: s3://bucket-5125-guiyang/code/z84397892/datasets/tokenizer_pretrain/parquet_imagenet21k_w_hw_480/
|
| 160 |
+
# dst: /cache/data/imagenet480/imagenet480_parquet/
|
| 161 |
+
# - url: s3://bucket-5125-guiyang/data/z84399568/ocr-height_width_ratio_0.9_1.1/parquet_filter_ratio_512_20250303182106/
|
| 162 |
+
# dst: /cache/data/ocr/ocr_parquet/
|
| 163 |
+
# - url: s3://bucket-5125-guiyang/data/z84399568/hucai-face_filter/parquet_part_center_crop_contain_full_face_1p2_20250304195907/
|
| 164 |
+
# dst: /cache/data/hucai/hucai_parquet/
|
| 165 |
+
- url: s3://bucket-5125-guiyang/outputs/l50043800/selftok/E31/2025-05-03_time_2230/output/ckpt/iter_69999.pth
|
| 166 |
+
dst: /cache/data/iter_69999.pth
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/__init__.py
ADDED
|
File without changes
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (195 Bytes). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from .build import build_dataloader
|
| 4 |
+
from .selftok_dataset import *
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/base_dataset.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import traceback
|
| 5 |
+
import warnings
|
| 6 |
+
from PIL import Image, ImageFile
|
| 7 |
+
from torchvision import transforms
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
from mimogpt.tokenizer import get_text_tokenizer
|
| 10 |
+
from mimogpt.datasets.build import DATALOADER_REGISTRY
|
| 11 |
+
from mimogpt.datasets.transforms import get_zip_idx, make_zip_fns, make_zip_dataset
|
| 12 |
+
import torch
|
| 13 |
+
#from .slim_zipfile import SlimZipFile as ZipFile
|
| 14 |
+
from zipfile import ZipFile
|
| 15 |
+
|
| 16 |
+
# "moxing" is an optional cloud-storage/IO helper (presumably only available on
# the training cluster); its absence is tolerated so the code also runs locally.
try:
    import moxing as mox
except ImportError:
    print("no moxing !!!")

# Silence library warnings globally for this module's consumers.
warnings.filterwarnings("ignore")
# Let PIL decode truncated/partially-written images instead of raising,
# which matters when reading images streamed out of large zip shards.
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class DatasetBase(Dataset):
    """Base dataset that serves image samples stored in sharded zip archives.

    Sample metadata (one row per image) comes from index files in pickle /
    pandas / parquet format; the images themselves are read either directly
    from per-shard zip files or, when ``do_unzip`` is set, from directories
    where the shards were already extracted.

    NOTE(review): this block was reconstructed from a diff-viewer dump where
    indentation was lost; statement nesting was restored from context and
    should be confirmed against the original file.
    """

    def __init__(
        self,
        zip_root,
        pkl_root,
        data_list,
        columns,
        local_shuffle_type,
        text_tokenizer_path=None,
        tokenizer_type="mimo_chinese",
        text_seq_length=128,
        image_seq_length=256,
        total_seq_length=640,
        is_text_tokenized=True,
        split_range=(0, 1024),
        do_unzip=False,
        pkl_format="pickle",
        train_task="mimo",
        resize_size=256,
        txt_zip_root=None,
        txt_zip_name=None,
        transform=None,
        target_transform=None,
    ):
        assert pkl_format in ("pkl", "pickle", "pandas", "parquet")

        # shuffle types 4/5 use a task-specific shard-index layout; otherwise the
        # shard indices handled by this worker ("card") are a plain contiguous range
        if local_shuffle_type in (4, 5):
            self.this_card_all_idx = get_zip_idx(split_range, train_task)
        else:
            self.this_card_all_idx = list(range(int(split_range[0]), int(split_range[1])))

        # metadata table (one row per sample) and the matching list of zip shard paths
        self.samples = make_zip_dataset(os.path.join(pkl_root, data_list), self.this_card_all_idx, columns, pkl_format)
        self.zip_files = make_zip_fns(os.path.join(zip_root, data_list), self.this_card_all_idx)
        # lazily-opened ZipFile handles, one slot per shard (opened per worker in
        # init_zip_fns to avoid sharing handles across DataLoader workers)
        self.zip_fns = [None] * len(self.this_card_all_idx)
        if txt_zip_root:
            # optional parallel set of shards holding text data
            self.txt_zip_files = make_zip_fns(os.path.join(txt_zip_root, txt_zip_name), self.this_card_all_idx)
            self.txt_zip_fns = [None] * len(self.this_card_all_idx)

        self.do_unzip = do_unzip
        self.columns = columns
        self.zip_root = zip_root
        self.text_seq_length = text_seq_length
        self.image_seq_length = image_seq_length
        self.total_seq_length = total_seq_length
        self.max_size = len(self.samples)
        self.transform = transform
        self.target_transform = target_transform
        self.resize_size = resize_size

        # init tokenizer
        self.is_text_tokenized = is_text_tokenized
        if tokenizer_type == "PanguTokenizer":
            # legacy alias for the unified interleaved Pangu tokenizer
            tokenizer_type = "unify_interleaved_pangu"
        if is_text_tokenized:
            self.tokenizer = get_text_tokenizer(text_tokenizer_path, tokenizer_type=tokenizer_type)
        # stored unconditionally since truncate() reads it
        # NOTE(review): nesting reconstructed — confirm this is outside the `if`
        self.tokenizer_type = tokenizer_type

    def set_max_size(self, max_size=None):
        """Override the reported dataset length (no-op when max_size is None)."""
        if max_size is not None:
            self.max_size = max_size

    def init_zip_fns(self, zip_idx):
        """Lazily open the zip handle for shard ``zip_idx`` in this worker."""
        # for more details, https://discuss.pytorch.org/t/dataloader-with-zipfile-failed/42795
        if self.zip_fns[zip_idx] is None:
            self.zip_fns[zip_idx] = ZipFile(self.zip_files[zip_idx])

    def get_img_desc(self, index, ratio_filter=False):
        """Load the sample at ``index`` and return its metadata dict plus the image.

        Retries with a nearby random index when loading fails (missing/corrupt
        image), so this loops until some sample loads successfully.

        Returns a dict with one entry per configured column plus "image"
        (RGB PIL image), "zip_fn" and "zip_idx".
        """
        while True:
            ann = self.samples.loc[index]
            res = {col: ann.get(col) for col in self.columns}

            # first configured column holds the member name/key inside the shard
            key = ann.get(self.columns[0])
            zip_idx = ann.get("zip_idx")

            try:
                if self.do_unzip:
                    # shards already extracted to directories named by real shard index
                    real_zip_idx = self.this_card_all_idx[zip_idx]
                    with open(os.path.join(self.zip_root, str(real_zip_idx), str(key)), "rb") as f:
                        sample = Image.open(f).convert("RGB")
                else:
                    self.init_zip_fns(zip_idx)
                    with self.zip_fns[zip_idx].open(str(key), "r") as f:
                        sample = Image.open(f).convert("RGB")
                    # best-effort per-rank audit log of every loaded sample;
                    # failures (e.g. dist not initialized) only print a notice
                    # NOTE(review): this appends one line per sample to /cache —
                    # potentially a large file over a long run
                    try:
                        rank = torch.distributed.get_rank()
                        with open(f'/cache/load_record_{rank}.txt',"a",encoding='utf-8') as file:
                            data_name = str(ann.get('dataset'))
                            file_name = str(ann.get('relative_img_path'))
                            file.write(f'{data_name}/{file_name}'+"\n")
                    except Exception as e:
                        data_name = str(ann.get('dataset'))
                        file_name = str(ann.get('relative_img_path'))
                        print(f'load {data_name}/{file_name} failed'+"\n")

                if ratio_filter:
                    # clamp extreme aspect ratios by center-cropping to at most 1:2 / 2:1
                    w, h = sample.width, sample.height
                    if (w * 1.0 / h) < 0.48:  # portrait image: crop height to 2*w
                        # image = transforms.functional.crop(image, top, left, h, w)
                        sample = transforms.functional.crop(sample, int((h - 2 * w) / 2.0), 0, int(2 * w), w)
                    if (w * 1.0 / h) > 2.1:  # landscape image: crop width to 2*h
                        sample = transforms.functional.crop(sample, 0, int((w - 2 * h) / 2.0), h, int(2 * h))

                res["image"] = sample
                res["zip_fn"] = self.zip_fns[zip_idx]
                res["zip_idx"] = zip_idx
                return res
            except Exception as e:
                # solve missing image case.
                old_index = index
                # jump to a nearby sample and retry
                index = index + random.randint(1, 9)
                index = max(0, index)
                if index >= len(self.samples) - 1:
                    # for last index fail case.
                    index = 0
                print(
                    "Warning: load zip_idx={}-{} fail, change index {}->{} ".format(
                        self.zip_files[zip_idx], old_index, old_index, index
                    )
                )

    def __len__(self):
        return self.max_size

    def __del__(self):
        # close any zip handles this worker opened
        for zip_fn in self.zip_fns:
            if zip_fn is not None:
                zip_fn.close()

    def truncate(self, text, max_len=256):
        """Truncate ``text`` to at most ``max_len`` tokens and decode it back to a string."""
        if self.tokenizer_type == "unify_interleaved_pangu":
            text_enc = self.tokenizer.tokenizer.tokenize(text)
        else:
            text_enc = self.tokenizer.tokenizer.encode(text, add_special_tokens=False)
        if len(text_enc) > max_len:
            text_enc = text_enc[:max_len]
        text_truncated = self.tokenizer.decode_text(text_enc)
        return text_truncated
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/build.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
from ..utils import Registry
|
| 4 |
+
|
| 5 |
+
DATALOADER_REGISTRY = Registry("DATALOADER")
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def build_dataloader(cfg, name=None):
    """Instantiate a dataloader from the registry.

    When *name* is omitted, the loader named by
    ``cfg.dataloader.train.dataloader`` is used.
    """
    chosen = cfg.dataloader.train.dataloader if name is None else name
    return DATALOADER_REGISTRY.get(chosen)(cfg)
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/selftok_dataset.py
ADDED
|
@@ -0,0 +1,685 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
import traceback
|
| 4 |
+
import warnings
|
| 5 |
+
from mimogpt.utils import hf_logger
|
| 6 |
+
from torchvision.datasets import ImageFolder
|
| 7 |
+
from PIL import Image, ImageFile
|
| 8 |
+
from torchvision import transforms
|
| 9 |
+
import torch
|
| 10 |
+
from torch.utils.data import Dataset, DataLoader, ConcatDataset
|
| 11 |
+
from mimogpt.datasets.build import DATALOADER_REGISTRY
|
| 12 |
+
from .slim_zipfile import SlimZipFile as ZipFile
|
| 13 |
+
import numpy as np
|
| 14 |
+
import pandas as pd
|
| 15 |
+
import glob
|
| 16 |
+
import torch.distributed as dist
|
| 17 |
+
import torch.utils.data as data
|
| 18 |
+
import operator
|
| 19 |
+
import itertools
|
| 20 |
+
from mimogpt.datasets.base_dataset import ZipFile, DatasetBase
|
| 21 |
+
from mimogpt.datasets.transforms import set_visual_transforms,NormalizeToTensor, RandomResize
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
import moxing as mox
|
| 25 |
+
except ImportError:
|
| 26 |
+
print("no moxing !!!")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Aspect-ratio buckets for multi-resolution training, listed as (height, width)
# pairs.  Each list keeps a roughly constant pixel budget while sweeping the
# aspect ratio from very wide to very tall; ``cal_resize_crop`` picks the
# bucket whose h/w ratio is closest to a given image.
# ~256x256 pixel budget (25 buckets).
RES_LIST_256 = [(128, 512), (128, 480), (128, 448), (128, 416),
                (160, 416), (160, 384), (160, 352), (192, 352),
                (192, 320), (224, 320), (224, 288), (224, 256),
                (256, 256), (256, 224), (288, 224), (320, 224),
                (320, 192), (352, 192), (352, 160), (384, 160),
                (416, 160), (416, 128), (448, 128), (480, 128), (512, 128)]

# Same idea at the ~512x512 pixel budget (27 buckets).
RES_LIST_512 = [(256, 832), (256, 896), (256, 960), (256, 1024),
                (320, 704), (320, 768), (320, 832), (320, 896),
                (384, 576), (384, 640), (384, 704), (448, 512),
                (448, 576), (512, 448), (512, 512), (576, 384),
                (576, 448), (640, 384), (704, 320), (704, 384),
                (768, 320), (832, 256), (832, 320), (896, 256),
                (896, 320), (960, 256), (1024, 256)]
|
| 43 |
+
|
| 44 |
+
class ConditionalResize(transforms.Resize):
    """Resize that only fires when the input is smaller than the target.

    Inputs already at least as large as the target in both dimensions are
    returned untouched; smaller inputs fall through to the regular
    ``transforms.Resize`` behaviour.
    """

    @property
    def resize_height(self):
        # ``size`` may be a bare int (square target) or an (h, w) pair.
        if isinstance(self.size, int):
            return self.size
        return self.size[0]

    @property
    def resize_width(self):
        if isinstance(self.size, int):
            return self.size
        return self.size[1]

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be scaled.

        Returns:
            PIL Image or Tensor: Rescaled image (or the input unchanged).
        """
        target_h, target_w = self.resize_height, self.resize_width
        if isinstance(img, torch.Tensor):
            # Tensor layout is (C, H, W).
            cur_h, cur_w = img.shape[1:]
        else:
            # PIL reports (width, height).
            cur_w, cur_h = img.size
        undersized = cur_w < target_w or cur_h < target_h
        return super().forward(img) if undersized else img
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def center_crop_arr(pil_image, image_size):
    """Center-crop *pil_image* to a square of side *image_size* pixels.

    Implementation follows ADM (guided-diffusion): repeatedly halve with BOX
    resampling while the short side is at least twice the target, do one final
    BICUBIC resize so the short side equals *image_size*, then cut out the
    centre square.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    """
    # Coarse repeated 2x downscaling keeps the final BICUBIC step cheap and
    # reduces aliasing compared with a single large resize.
    while min(*pil_image.size) >= 2 * image_size:
        half = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(half, resample=Image.BOX)

    ratio = image_size / min(*pil_image.size)
    final_size = tuple(round(side * ratio) for side in pil_image.size)
    pil_image = pil_image.resize(final_size, resample=Image.BICUBIC)

    arr = np.array(pil_image)
    top = (arr.shape[0] - image_size) // 2
    left = (arr.shape[1] - image_size) // 2
    return Image.fromarray(arr[top: top + image_size, left: left + image_size])
|
| 95 |
+
|
| 96 |
+
def worker_init_reset_seed(worker_id):
    # DataLoader ``worker_init_fn``: derive a distinct RNG seed per worker
    # from the process seed so workers do not emit identical random streams.
    # The modulo keeps the value in the 32-bit range typical seeders accept.
    #
    # NOTE(review): ``seed_all_rng`` is not defined or imported anywhere in
    # this module's visible scope -- confirm it is provided elsewhere,
    # otherwise this raises NameError the first time a worker starts.
    initial_seed = torch.initial_seed() % 2**31
    seed_all_rng(initial_seed + worker_id)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def cal_resize_crop(img_h, img_w, resos_list):
    """Pick the aspect-ratio bucket closest to an image and derive its resize plan.

    Args:
        img_h: source image height in pixels.
        img_w: source image width in pixels.
        resos_list: candidate (height, width) buckets.

    Returns:
        ``(resized_size, crop_size, target_ratio, bucket_index)`` where
        ``resized_size`` scales the image so the bucket crop fits inside it,
        ``crop_size`` is the chosen bucket's (h, w), ``target_ratio`` is that
        bucket's h/w ratio, and ``bucket_index`` is its position in
        *resos_list*.
    """
    bucket_ratios = np.array([h / w for h, w in resos_list])
    source_ratio = img_h / img_w

    # Nearest bucket by absolute aspect-ratio difference.
    bucket_idx = np.abs(source_ratio - bucket_ratios).argmin()
    bucket_h, bucket_w = resos_list[bucket_idx]
    crop_size = (bucket_h, bucket_w)
    target_ratio = bucket_h / bucket_w

    if source_ratio > target_ratio:
        # Image is relatively taller than the bucket: match widths and let
        # the height overflow (it is removed by the centre crop).
        scale = bucket_w / img_w
        resized_size = (int(img_h * scale + 0.5), bucket_w)
    else:
        # Image is relatively wider (or equal): match heights instead.
        scale = bucket_h / img_h
        resized_size = (bucket_h, int(img_w * scale + 0.5))

    return resized_size, crop_size, target_ratio, bucket_idx
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
class CustomLatentDataset(Dataset):
    """Serves pre-extracted latent features stored one numpy file per sample.

    Args:
        features_dir: directory whose files are each ``np.load``-able arrays.
        preload_data: when True, read every file into memory at construction
            time and serve from RAM; otherwise load lazily per item.
    """

    def __init__(self, features_dir, preload_data=False):
        self.features_dir = features_dir
        self.preload_data = preload_data
        # Sorted listing keeps indices stable across runs and ranks.
        self.features_files = sorted(os.listdir(features_dir))

        if preload_data:
            self.features = []
            total = len(self.features_files)
            for pos, fname in enumerate(self.features_files):
                # Coarse progress report while preloading large sets.
                if pos % 50000 == 0:
                    print(f"Loaded {float(pos) / total * 100.0:.1f}% data...")
                self.features.append(np.load(os.path.join(self.features_dir, fname)))

    def __len__(self):
        return len(self.features_files)

    def __getitem__(self, idx):
        if self.preload_data:
            return torch.from_numpy(self.features[idx])
        fname = self.features_files[idx]
        return torch.from_numpy(np.load(os.path.join(self.features_dir, fname)))
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
#################################################################
|
| 153 |
+
############### Multi-res Multi-ratio Dataset ##################
|
| 154 |
+
#################################################################
|
| 155 |
+
class BucketManger:
    """Scans an image directory and assigns every image to an aspect-ratio bucket.

    Walks ``image_path`` recursively, opens each file as an RGB image and
    matches it to the nearest bucket in ``res_list`` via
    :func:`cal_resize_crop`.  Builds a shuffled ``image_list`` of per-image
    crop/resize plans and prints aspect-error and bucket-occupancy stats.

    Args:
        image_path: root directory to walk for image files.
        res_list: candidate (height, width) buckets.  Defaults to
            ``RES_LIST_256``.  (Bug fix: the previous implementation
            referenced a global ``RES_LIST`` that is never defined in this
            module, so construction raised NameError.)
    """

    def __init__(self, image_path, res_list=None):
        # Backward-compatible fix for the undefined ``RES_LIST`` global.
        self.res_list = RES_LIST_256 if res_list is None else res_list
        self.image_path = image_path
        self.buckets = {}
        self.image_list = []
        self.aspect_errors = []
        for root, _dirs, files in os.walk(image_path):
            for file in files:
                image_file = os.path.join(root, file)
                with open(image_file, 'rb') as f:
                    # ``convert`` forces the decode while the file is open.
                    sample = Image.open(f).convert("RGB")
                ratio = sample.height / sample.width
                resized_size, crop_size, res_ratio, select_bucket_id = cal_resize_crop(
                    sample.height, sample.width, self.res_list)
                # How far the image's true ratio is from its bucket's ratio.
                error = abs(ratio - res_ratio)
                if select_bucket_id not in self.buckets:
                    self.buckets[select_bucket_id] = []
                self.aspect_errors.append(error)
                info = {'img_path': image_file,
                        'bucket_id': select_bucket_id,
                        'res_ratio': res_ratio,
                        'resized_size': resized_size,
                        'crop_size': crop_size}
                self.buckets[select_bucket_id].append(image_file)
                self.image_list.append(info)
        random.shuffle(self.image_list)
        print(f"aspect error: mean {np.array(self.aspect_errors).mean()}, median {np.median(np.array(self.aspect_errors))}, max {np.array(self.aspect_errors).max()}")
        # Report buckets from most to least populated.
        for bucket_id in reversed(sorted(self.buckets.keys(), key=lambda b: len(self.buckets[b]))):
            print(f"bucket {bucket_id}: {self.res_list[bucket_id]}, entries {len(self.buckets[bucket_id])}")
|
| 184 |
+
|
| 185 |
+
class MimoSimpleImageDataset(DatasetBase):
    """Fixed-resolution dataset: resize, centre-crop and normalise each image.

    Every sample is tagged with the single bucket id ``bucket_id_shift``,
    i.e. this dataset contributes exactly one aspect-ratio bucket.
    """

    def __init__(self, bucket_id_shift=0, data_res='256-fix', ResizeType=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bucket_id_shift = bucket_id_shift
        # Only the '256-fix' preset maps to 256px; everything else means 512px.
        self.data_size = 256 if data_res == '256-fix' else 512
        self.preprocess = transforms.Compose(
            [
                ResizeType,
                transforms.CenterCrop(self.data_size),
                NormalizeToTensor(),
            ]
        )

    def __getitem__(self, index):
        desc = self.get_img_desc(index)
        img = self.preprocess(desc.get("image"))
        return {'img': img, 'bucket_id': self.bucket_id_shift}
|
| 210 |
+
|
| 211 |
+
class MimoLowtoHighImageDataset(DatasetBase):
    """Dataset returning a random crop plus copies of it at lower resolutions.

    ``__getitem__`` yields the full-size crop under ``'img'`` and a dict of
    progressively lower-resolution, normalised copies under ``'all_res_img'``
    (keyed by the resolution as a string).  All samples share the single
    bucket id ``bucket_id_shift``.
    """

    def __init__(self, bucket_id_shift=0, data_res='256-fix', ResizeType=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bucket_id_shift = bucket_id_shift

        # Resolution ladder: power-of-two steps from 256 up to the target.
        if data_res == '1024-fix':
            self.data_size = 1024
            self.lth = [256, 512, 1024]
        elif data_res == '512-fix':
            self.data_size = 512
            self.lth = [256, 512]
        else:
            self.data_size = 256
            self.lth = [256]

        # Normalisation is deliberately deferred to per-resolution pipelines.
        self.preprocess = transforms.Compose(
            [
                ResizeType,
                transforms.RandomCrop(self.data_size),
            ]
        )

    def __getitem__(self, index):
        desc = self.get_img_desc(index)
        crop = self.preprocess(desc.get("image"))

        pyramid = {}
        for res in self.lth:
            downscale = transforms.Compose([transforms.Resize(res), NormalizeToTensor()])
            pyramid[str(res)] = downscale(crop)
        full = NormalizeToTensor()(crop)

        return {'img': full, 'all_res_img': pyramid, 'bucket_id': self.bucket_id_shift}
|
| 245 |
+
|
| 246 |
+
class MimoMultiImageDataset(DatasetBase):
    """Multi-aspect-ratio dataset.

    Each image is resized and centre-cropped to the nearest bucket in the
    256 or 512 resolution list and returned together with that bucket's id
    (offset by ``bucket_id_shift``).
    """

    def __init__(self, bucket_id_shift=0, data_res='256', ResizeType=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bucket_id_shift = bucket_id_shift
        self.data_res = data_res
        self.res_list = RES_LIST_256 if data_res == '256' else RES_LIST_512

    def __getitem__(self, index):
        desc = self.get_img_desc(index)
        image = desc.get("image")

        resized_size, crop_size, _ratio, bucket_idx = cal_resize_crop(
            image.height, image.width, self.res_list)

        # The resize target depends on the individual image, so the pipeline
        # is rebuilt per sample.
        pipeline = transforms.Compose([
            transforms.Resize(resized_size),
            transforms.CenterCrop(crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
        return {'img': pipeline(image), 'bucket_id': bucket_idx + self.bucket_id_shift}
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class CustomBucketImageDataset(Dataset):
    """Folder-walking image dataset with per-image aspect-ratio bucketing.

    Recursively collects every file under ``image_path``; each item is
    resized and centre-cropped to its nearest bucket and returned with the
    (shifted) bucket id.
    """

    def __init__(self, image_path, bucket_id_shift, data_res):
        self.image_path = image_path
        self.imglist = []
        for root, _dirs, files in os.walk(image_path):
            self.imglist.extend(os.path.join(root, name) for name in files)
        self.bucket_id_shift = bucket_id_shift
        self.res_list = RES_LIST_256 if data_res == '256' else RES_LIST_512

    def __len__(self):
        return len(self.imglist)

    def __getitem__(self, idx):
        path = self.imglist[idx]
        with open(path, 'rb') as f:
            img = Image.open(f).convert("RGB")

        resized_size, crop_size, _ratio, bucket_idx = cal_resize_crop(
            img.height, img.width, self.res_list)

        # Pipeline depends on the individual image's chosen bucket.
        pipeline = transforms.Compose([
            transforms.Resize(resized_size),
            transforms.CenterCrop(crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
        return {'img': pipeline(img), 'bucket_id': bucket_idx + self.bucket_id_shift}
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
class CustomBucketLatentDataset(Dataset):
    """Serves pre-computed latents saved as npz files carrying a bucket id.

    Each file must contain ``arr_0`` (the latent array) and ``arr_1`` (the
    bucket index, offset by ``bucket_id_shift`` on the way out).  The file
    order is shuffled once at construction time.
    """

    def __init__(self, features_dir, bucket_id_shift, **kwargs):
        self.features_dir = features_dir
        self.imglist = sorted(os.listdir(features_dir))
        # One-off shuffle; per-epoch ordering is handled by the sampler.
        random.shuffle(self.imglist)
        self.bucket_id_shift = bucket_id_shift

    def __len__(self):
        return len(self.imglist)

    def __getitem__(self, idx):
        payload = np.load(os.path.join(self.features_dir, self.imglist[idx]))
        latent = torch.from_numpy(payload['arr_0'])
        bucket = torch.from_numpy(payload['arr_1']) + self.bucket_id_shift
        return {'img': latent, 'bucket_id': bucket}
|
| 341 |
+
|
| 342 |
+
class AspectRatioGroupedDataset(data.IterableDataset):
    """Groups an upstream sample stream into same-bucket batches.

    Keeps one cache per aspect-ratio bucket; whenever a cache reaches
    ``batch_size`` it is flushed as a stacked tensor -- or, for
    multi-resolution samples carrying ``'all_res_img'``, as a dict of stacked
    tensors keyed by ``'img'`` plus each resolution.  Trailing partial
    batches are never yielded.
    """

    def __init__(self, dataset, batch_size, bucket_len, *args, **kwargs):
        self.dataset = dataset
        self.batch_size = batch_size
        self.bucket_len = bucket_len
        self._caches = [[] for _ in range(bucket_len)]

    def __iter__(self):
        for sample in self.dataset:
            cache = self._caches[sample['bucket_id']]
            multi_res = 'all_res_img' in sample.keys()

            if multi_res:
                cache.append([sample['img'], sample['all_res_img']])
            else:
                cache.append(sample['img'])

            if len(cache) < self.batch_size:
                continue

            # Flush: copy the batch out and empty the cache in place.
            batch = cache[:]
            del cache[:]
            if multi_res:
                out = {'img': torch.stack([entry[0] for entry in batch], dim=0)}
                for res in sample['all_res_img'].keys():
                    out[res] = torch.stack([entry[1][res] for entry in batch], dim=0)
                yield out
            else:
                yield torch.stack(batch, dim=0)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def get_resize_transform(resize_type, res):
    """Build the resize transform named by *resize_type* for target size *res*.

    ``'RandomResize-<ratio>'`` -> project ``RandomResize`` with that jitter
    ratio; ``'ConditionalResize'`` -> resize only when the input is smaller;
    anything else -> plain bicubic ``transforms.Resize``.
    """
    if 'RandomResize' in resize_type:
        # The jitter ratio is encoded after the last dash, e.g. 'RandomResize-0.8'.
        rand_ratio = float(resize_type.split('-')[-1])
        hf_logger.info(f"Using RandomResize ratio {rand_ratio}...")
        return RandomResize(res, ratio=rand_ratio, interpolation=transforms.InterpolationMode.BICUBIC)
    if resize_type == 'ConditionalResize':
        hf_logger.info(f"Using ConditionalResize...")
        return ConditionalResize(res, interpolation=transforms.InterpolationMode.BICUBIC)
    hf_logger.info(f"Using Resize...")
    return transforms.Resize(res, interpolation=transforms.InterpolationMode.BICUBIC)
|
| 386 |
+
|
| 387 |
+
def build_bucketloader(cfg, image_path_list, batch_size):
    """Build an aspect-ratio-bucketed training dataloader from config entries.

    Each entry of ``image_path_list`` describes one sub-dataset.  Mimo*
    entries carry ``[type, zip_root, pkl_root, data_list, columns,
    pkl_format, split_range, data_res, resize_type, ..., ratio]``; non-Mimo
    entries carry ``[type, path, data_res, ..., ratio]``.  Sub-datasets are
    concatenated, optionally re-balanced with a weighted sampler, wrapped in
    a per-sample DataLoader and finally grouped into same-bucket batches.

    Returns:
        ``(grouped_loader, batches_per_epoch)``.
    """
    #train_bm = BucketManger(image_path)
    #imglist = train_bm.image_list
    # Number of aspect-ratio buckets contributed by each resolution preset.
    bucket_len_dict = {'256':len(RES_LIST_256),'512':len(RES_LIST_512), '256-fix':1,'512-fix':1}
    bucket_len = 0
    datasets = []
    res_dict = []
    train_balance_ratios=[]
    train_balance_number=[]
    bucket_id_shift = {}

    # type, zip_root, pkl_root, data_list,columns, pkl_format, split_range, data_res, ratio = info[:8]

    # First pass: give each distinct resolution preset a contiguous range of
    # bucket ids (recorded in ``bucket_id_shift``) and count total buckets.
    for image_path_info in image_path_list:
        data_res = image_path_info[-3]
        train_balance_ratios.append(image_path_info[-1])
        if data_res not in res_dict:
            bucket_id_shift[data_res] = sum([bucket_len_dict[x] for x in res_dict])
            res_dict.append(data_res)
            bucket_len = bucket_len + bucket_len_dict[data_res]

    # Second pass: instantiate each sub-dataset.
    for image_path_info in image_path_list:
        type = image_path_info[0]  # NOTE: shadows the builtin ``type`` locally
        if type.startswith('Mimo'):
            zip_root, pkl_root, data_list, columns, pkl_format, split_range, data_res, resize_type = image_path_info[1:9]
            if type == 'MimoMultiImageDataset':
                dataset_type = MimoMultiImageDataset
                vision_transform = None
                # Bug fix: ResizeType was left unassigned on this branch, so
                # the dataset constructor call below raised NameError (or
                # silently reused a stale value from a previous iteration).
                # MimoMultiImageDataset ignores its ResizeType argument.
                ResizeType = None
            elif type == 'MimoLowtoHighImageDataset':
                dataset_type = MimoLowtoHighImageDataset
                input_res = cfg.dataloader.train.get("input_res", 256)
                resize_res = cfg.dataloader.train.get("resize_res", 256)
                vision_transform = set_visual_transforms(mode="train", resize_res=resize_res, input_res=input_res, augment_choice="selftok")
                ResizeType = get_resize_transform(resize_type, resize_res)
            else:
                dataset_type = MimoSimpleImageDataset
                input_res = cfg.dataloader.train.get("input_res", 256)
                resize_res = cfg.dataloader.train.get("resize_res", 256)
                vision_transform = set_visual_transforms(mode="train", resize_res=resize_res, input_res=input_res, augment_choice="selftok")
                ResizeType = get_resize_transform(resize_type, resize_res)

            dataset = dataset_type(
                zip_root=zip_root,
                pkl_root=pkl_root,
                data_list=data_list,
                split_range=split_range,
                columns=columns,
                local_shuffle_type=cfg.dataloader.local_shuffle_type,
                is_text_tokenized=False,
                transform=vision_transform,
                pkl_format=pkl_format,
                bucket_id_shift=bucket_id_shift[data_res],
                data_res=data_res,
                ResizeType = ResizeType,
            )
            # Ranks may see different shard sizes; truncate everyone to the
            # smallest so the distributed epoch stays in lockstep.
            tensor = torch.zeros(cfg.world_size).cuda()
            tensor[cfg.rank] = len(dataset)
            dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
            # min_dataset_length = int(tensor.min().item())
            min_dataset_length = int(tensor.min())
            print("rank{}, set dataset size from {} to {}".format(cfg.rank, len(dataset), min_dataset_length))
            dataset.set_max_size(min_dataset_length)
            dataset_len = min_dataset_length
        else:
            if type == 'CustomBucketImageDataset':
                dataset_type = CustomBucketImageDataset
            elif type == 'CustomBucketLatentDataset':
                dataset_type = CustomBucketLatentDataset
            else:
                # Unknown types fall back to the latent dataset.
                dataset_type = CustomBucketLatentDataset

            zip_root = image_path_info[1]
            data_res = image_path_info[2]
            dataset = dataset_type(
                zip_root,
                bucket_id_shift[data_res],
                data_res=data_res,
            )
            dataset_len = len(dataset)
        datasets.append(dataset)
        train_balance_number.append(dataset_len)

    train_dataset = ConcatDataset(datasets)
    if len(res_dict) > 1 and any(item > 0 for item in train_balance_ratios):
        # Re-balance sub-datasets: each dataset's total probability mass is
        # its configured ratio, spread uniformly over its samples.
        weights = []
        for i in range(len(train_balance_ratios)):
            count = train_balance_number[i]
            ratio = train_balance_ratios[i]
            weights += np.full(count, ratio / count).tolist()
        sampler = torch.utils.data.WeightedRandomSampler(weights=weights, num_samples=len(weights))
    else:
        # Single-replica sampler: sharding was already handled per-dataset.
        sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=1, rank=0)

    # Per-sample loader (default batch_size=1); itemgetter(0) unwraps the
    # singleton list so AspectRatioGroupedDataset sees raw sample dicts and
    # does the actual same-bucket batching.
    dataloader = DataLoader(
        train_dataset,
        sampler=sampler,
        batch_sampler=None,
        num_workers=cfg.dataloader.num_workers,
        pin_memory=True,
        prefetch_factor = 16, # mfu1
        collate_fn=operator.itemgetter(0),
        worker_init_fn=None,
    )
    dataloader = AspectRatioGroupedDataset(dataloader, batch_size, bucket_len)

    return dataloader, sum(train_balance_number) // batch_size
|
| 493 |
+
|
| 494 |
+
def build_simpleimageloader(cfg, val_image_path, batch_size):
    """Build a plain ImageFolder validation loader with ADM centre-cropping.

    ``val_image_path`` uses the same list-of-info-lists layout as the train
    config; only the path element (``[0][1]``) is consumed here.  Returns the
    loader and the number of batches per epoch.
    """
    side = cfg.dataloader.train.get("input_res", 256)
    eval_transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, side)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
    dataset = ImageFolder(val_image_path[0][1], transform=eval_transform)
    hf_logger.info(f"val set length: {len(dataset)}...")
    sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=cfg.dataloader.num_workers,
        sampler=sampler,
        prefetch_factor=16,  # mfu1
        pin_memory=True,
        drop_last=True
    )
    return loader, len(dataset) // batch_size
|
| 516 |
+
|
| 517 |
+
@DATALOADER_REGISTRY.register
def CustomBucketDataloader(cfg):
    """Registry entry point: build bucketed train/val dataloaders from cfg.

    Returns a dict with the train loader, an optional val loader, and their
    per-epoch batch counts (val entries are None when ``setval`` is off).
    """
    train_image_path = cfg.dataloader.train_img_path
    val_image_path = cfg.dataloader.val_img_path
    train_dataloader, train_loader_len = build_bucketloader(cfg, train_image_path, cfg.dataloader.train.batch_size)
    if cfg.dataloader.setval:
        # Bug fix: ``val_image_path`` is a list of info-lists whose FIRST
        # element is the dataset type name (see ``type = image_path_info[0]``
        # in build_bucketloader), so the check must inspect
        # ``val_image_path[0][0]``.  The old code compared the whole info
        # list against type-name strings, which was always False and made the
        # bucketed-val branch unreachable.
        if val_image_path[0][0] in ['CustomBucketLatentDataset', 'CustomBucketImageDataset']:
            val_dataloader, val_loader_len = build_bucketloader(cfg, val_image_path, cfg.dataloader.train.batch_size)
        else:
            val_dataloader, val_loader_len = build_simpleimageloader(cfg, val_image_path, cfg.dataloader.val.batch_size)
    else:
        val_dataloader, val_loader_len = None, None

    return {'train':train_dataloader, 'val':val_dataloader, 'train_loader_len':train_loader_len, 'val_loader_len':val_loader_len }
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
class MultiResImageFolder(ImageFolder):
    """ImageFolder that also returns each image at extra lower resolutions.

    ``__getitem__`` yields ``(base_sample, extras)`` where ``extras`` holds
    the class target plus one ``'res_<r>'`` entry per requested resolution,
    each resized/centre-cropped and passed through ``extra_transform``.
    """

    def __init__(self, root, extra_transform, base_res, low_res_list):
        super().__init__(root)
        self.extra_transform = extra_transform

        def _pipeline(side):
            # Resize -> square centre crop -> caller-supplied tensor transform.
            return transforms.Compose([
                transforms.Resize(side),
                transforms.CenterCrop(side),
                extra_transform
            ])

        self.base_t = _pipeline(base_res)
        # Accept either a single resolution or a list of them.
        self.low_res_list = low_res_list if type(low_res_list) is list else [low_res_list]
        self.low_res_t_dict = {}
        self.low_res_t_dict['res'] = {}  # kept for parity; never read here
        for side in self.low_res_list:
            self.low_res_t_dict[side] = _pipeline(side)
        self.root = root

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (sample, target) where target is class_index of the target class.
        """
        path, target = self.samples[index]
        raw = self.loader(path)
        base_sample = self.base_t(raw)
        if self.target_transform is not None:
            target = self.target_transform(target)
        extras = {'target': target}
        for side in self.low_res_list:
            extras[f'res_{side}'] = self.low_res_t_dict[side](raw)
        return base_sample, extras
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
@DATALOADER_REGISTRY.register
def CustomImgDataloader(cfg):
    # Registry entry point: build train/val ImageFolder dataloaders from cfg.
    # When cfg.dataloader.low_res_list is set, the train set additionally
    # yields low-resolution copies of each image (MultiResImageFolder).
    if hasattr(cfg.dataloader, 'low_res_list') and cfg.dataloader.low_res_list is not None:
        extra_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
        train_dataset = MultiResImageFolder(cfg.dataloader.train_img_path, extra_transform, cfg.tokenizer.params.image_size, cfg.dataloader.low_res_list)
    else:
        train_transform = transforms.Compose([
            transforms.Resize(cfg.tokenizer.params.image_size),
            transforms.CenterCrop(cfg.tokenizer.params.image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
        ])
        train_dataset = ImageFolder(cfg.dataloader.train_img_path, transform=train_transform)

    # Validation uses ADM-style centre cropping rather than Resize+CenterCrop.
    val_transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, cfg.tokenizer.params.image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
    # hf_logger.info(f"Current train set length: {len(train_dataset)}...")
    if cfg.dataloader.trainval:
        # Fold the validation images into the training set.
        # NOTE(review): ``train_transform`` is only bound on the
        # non-multi-res branch above; enabling low_res_list together with
        # trainval would raise NameError here -- confirm these options are
        # never combined.
        val_for_train = ImageFolder(cfg.dataloader.val_img_path, transform=train_transform)
        train_dataset = ConcatDataset([train_dataset, val_for_train])
        # hf_logger.info(f"New train set length: {len(train_dataset)}...")

    # DistributedSampler shards (and shuffles) across ranks per epoch.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=cfg.dataloader.batch_size,
        #shuffle=True,
        sampler = train_sampler,
        num_workers=cfg.dataloader.num_workers,
        prefetch_factor = 16, # mfu1
        pin_memory=True,
        drop_last=True
    )
    hf_logger.info(f'Train Dataset contains {len(train_dataset):,}')
    if cfg.dataloader.setval:
        val_dataset = ImageFolder(cfg.dataloader.val_img_path, transform=val_transform)
        hf_logger.info(f"val set length: {len(val_dataset)}...")
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
        val_dataloader = DataLoader(
            val_dataset,
            batch_size=16,  # fixed eval batch size, independent of train cfg
            shuffle=False,
            num_workers=cfg.dataloader.num_workers,
            sampler = val_sampler,
            prefetch_factor = 16, # mfu1
            pin_memory=True,
            drop_last=True
        )
        hf_logger.info(f'Val Dataset contains {len(val_dataset):,}')
    else:
        val_dataset, val_dataloader = None, None

    return {'train':train_dataloader, 'val':val_dataloader}
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
@DATALOADER_REGISTRY.register
def CustomLatentDataloader(cfg):
    # Registry entry point: build train/val loaders over pre-extracted latent
    # features stored as per-sample numpy files (see CustomLatentDataset).
    # Feature directories are derived from the config naming convention:
    # <feature_path>/<dataset_name><image_size>_features
    features_dir = f"{cfg.dataloader.feature_path}/{cfg.dataloader.dataset_name}{cfg.tokenizer.params.image_size}_features"
    val_features_dir = f"{cfg.dataloader.val_feature_path}/{cfg.dataloader.dataset_name}{cfg.tokenizer.params.image_size}_features"
    # labels_dir = f"{cfg.dataloader.feature_path}/{cfg.dataloader.dataset_name}{cfg.tokenizer.params.image_size}_labels"
    # val_labels_dir = f"{cfg.dataloader.val_feature_path}/{cfg.dataloader.dataset_name}{cfg.tokenizer.params.image_size}_labels"
    if cfg.tokenizer.params.model == 'MMDiT_XL':
        # sd3 latents live in a dedicated sub-folder.
        features_dir += "/sd3-features-256"
        val_features_dir += "/sd3-features-256"
    train_dataset = CustomLatentDataset(features_dir, cfg.dataloader.preload_data)
    if cfg.dataloader.get("open_image", False):
        # Optionally mix in OpenImages latents alongside the main set.
        openimg_dataset = CustomLatentDataset(cfg.dataloader.openimg_features_dir, cfg.dataloader.preload_data)
        train_dataset = ConcatDataset([train_dataset, openimg_dataset])
        hf_logger.info(f"New train set length: {len(train_dataset)}...")

    # if cfg.dataloader.trainval:
    #     val_for_train = CustomLatentDataset(val_features_dir, val_labels_dir, cfg.dataloader.preload_data)
    #     train_dataset = ConcatDataset([train_dataset, val_for_train])
    #     hf_logger.info(f"New train set length: {len(train_dataset)}...")

    # DistributedSampler handles per-rank sharding and per-epoch shuffling.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=cfg.dataloader.batch_size,
        shuffle=False,
        sampler = train_sampler,
        num_workers=cfg.dataloader.num_workers,
        prefetch_factor = 16, # mfu1
        pin_memory=True,
        drop_last=True
    )
    if cfg.dataloader.setval:
        val_dataset = CustomLatentDataset(val_features_dir, cfg.dataloader.preload_data)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
        val_dataloader = DataLoader(
            val_dataset,
            batch_size=8,  # fixed eval batch size, independent of train cfg
            shuffle=False,
            num_workers=cfg.dataloader.num_workers,
            sampler = val_sampler,
            prefetch_factor = 16, # mfu1
            pin_memory=True,
            drop_last=True
        )
    else:
        val_dataset, val_dataloader = None, None

    return {'train':train_dataloader, 'val':val_dataloader}
|
| 685 |
+
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/slim_zipfile.py
ADDED
|
@@ -0,0 +1,812 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Read and write ZIP files.
|
| 3 |
+
XXX references to utf-8 need further investigation.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import binascii
|
| 7 |
+
import importlib.util
|
| 8 |
+
import io
|
| 9 |
+
import itertools
|
| 10 |
+
import os
|
| 11 |
+
import posixpath
|
| 12 |
+
import shutil
|
| 13 |
+
import stat
|
| 14 |
+
import struct
|
| 15 |
+
import sys
|
| 16 |
+
import threading
|
| 17 |
+
import time
|
| 18 |
+
import contextlib
|
| 19 |
+
import pathlib
|
| 20 |
+
|
| 21 |
+
try:
|
| 22 |
+
import zlib # We may need its compression method
|
| 23 |
+
|
| 24 |
+
crc32 = zlib.crc32
|
| 25 |
+
except ImportError:
|
| 26 |
+
zlib = None
|
| 27 |
+
crc32 = binascii.crc32
|
| 28 |
+
|
| 29 |
+
try:
|
| 30 |
+
import moxing as mox
|
| 31 |
+
except ImportError:
|
| 32 |
+
mox = None
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class BadZipFile(Exception):
    """Raised when a file is not a valid ZIP archive or its structure is corrupt."""
    pass
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Hard limits above which the ZIP64 extensions must be used.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported

DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63

# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# General purpose bit flags
# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes)
_MASK_ENCRYPTED = 1 << 0
# Bits 1 and 2 have different meanings depending on the compression used.
_MASK_COMPRESS_OPTION_1 = 1 << 1
# _MASK_COMPRESS_OPTION_2 = 1 << 2
# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed
# size are zero in the local header and the real values are written in the data
# descriptor immediately following the compressed data.
_MASK_USE_DATA_DESCRIPTOR = 1 << 3
# Bit 4: Reserved for use with compression method 8, for enhanced deflating.
# _MASK_RESERVED_BIT_4 = 1 << 4
_MASK_COMPRESSED_PATCH = 1 << 5
_MASK_STRONG_ENCRYPTION = 1 << 6
# _MASK_UNUSED_BIT_7 = 1 << 7
# _MASK_UNUSED_BIT_8 = 1 << 8
# _MASK_UNUSED_BIT_9 = 1 << 9
# _MASK_UNUSED_BIT_10 = 1 << 10
_MASK_UTF_FILENAME = 1 << 11
# Bit 12: Reserved by PKWARE for enhanced compression.
# _MASK_RESERVED_BIT_12 = 1 << 12
# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13
# Bit 14, 15: Reserved by PKWARE
# _MASK_RESERVED_BIT_14 = 1 << 14
# _MASK_RESERVED_BIT_15 = 1 << 15

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9

# Magic number of the optional "data descriptor" record (Appnote 4.3.9).
_DD_SIGNATURE = 0x08074B50
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def _EndRecData64(fpin, offset, endrec):
    """Read the ZIP64 end-of-archive records and use that to update endrec.

    fpin is the open archive file; offset is a negative offset (relative to
    end-of-file) of the classic end-of-central-directory record that was
    already found; endrec is the mutable list produced by _EndRecData.
    Returns endrec, updated in place with the 64-bit counts/offsets when a
    valid ZIP64 locator + record pair precedes the classic record, otherwise
    unchanged.  Raises BadZipFile for multi-disk archives.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except OSError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec

    data = fpin.read(sizeEndCentDir64Locator)
    if len(data) != sizeEndCentDir64Locator:
        return endrec
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No ZIP64 locator here: plain (non-ZIP64) archive.
        return endrec

    if diskno != 0 or disks > 1:
        raise BadZipFile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    if len(data) != sizeEndCentDir64:
        return endrec
    sig, sz, _, read_version, disk_num, disk_dir, dircount, dircount2, dirsize, diroffset = struct.unpack(
        structEndArchive64, data
    )
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record.
    ZIP64 values, when present, are merged in via _EndRecData64.
    """

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except OSError:
        # File shorter than the fixed EOCD record: cannot be a ZIP file.
        return None
    data = fpin.read()
    if len(data) == sizeEndCentDir and data[0:4] == stringEndArchive and data[-2:] == b"\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec = list(endrec)

        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment. Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long. It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start : start + sizeEndCentDir]
        if len(recData) != sizeEndCentDir:
            # Zip file is corrupted.
            return None
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE]  # as claimed by the zip file
        comment = data[start + sizeEndCentDir : start + sizeEndCentDir + commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize, endrec)

    # Unable to find a valid end of central directory structure
    return None
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class ZipInfo(object):
    """Slim per-member metadata record.

    Only the fields this reader needs are kept (see __slots__).  ``CRC`` and
    ``header_offset`` are not set here; the archive reader assigns them while
    parsing the central directory, before _decodeExtra is called.
    """

    __slots__ = (
        "filename",
        "header_offset",
        "CRC",
        "compress_size",
        "file_size",
    )

    def __init__(self, filename="NoName"):
        self.filename = filename
        # Both sizes start at zero and are overwritten from the central
        # directory entry by the caller.
        self.compress_size = 0  # Size of the compressed file
        self.file_size = 0  # Size of the uncompressed file

    def _decodeExtra(self, extra):
        """Scan the extra field's (tag, length, payload) chain for ZIP64 data."""
        while len(extra) >= 4:
            tp, ln = struct.unpack("<HH", extra[:4])
            if ln + 4 > len(extra):
                raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
            if tp == 0x0001:
                # ZIP64 extension (large files and/or large archives).  Each
                # sentinel-valued 32-bit field is replaced, in order, by the
                # next 64-bit value from the payload.
                data = extra[4 : ln + 4]
                try:
                    if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
                        field = "File size"
                        (self.file_size,) = struct.unpack("<Q", data[:8])
                        data = data[8:]
                    if self.compress_size == 0xFFFF_FFFF:
                        field = "Compress size"
                        (self.compress_size,) = struct.unpack("<Q", data[:8])
                        data = data[8:]
                    if self.header_offset == 0xFFFF_FFFF:
                        field = "Header offset"
                        (self.header_offset,) = struct.unpack("<Q", data[:8])
                except struct.error:
                    raise BadZipFile(f"Corrupt zip64 extra field. " f"{field} not found.") from None

            extra = extra[ln + 4 :]
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
class _SharedFile:
    """A positional view over one underlying file shared by several readers.

    Each view keeps its own cursor (_pos) and re-seeks the shared file under
    the common lock before every read, so multiple views can interleave safely.
    """

    def __init__(self, file, pos, close, lock, writing):
        self._file = file          # the shared underlying file object
        self._pos = pos            # this view's private cursor
        self._close = close        # callback invoked with the file on close()
        self._lock = lock          # serialises the shared seek+read sequence
        self._writing = writing    # callable: is a writer handle currently open?
        self.seekable = file.seekable

    def tell(self):
        """Return this view's cursor (not the shared file's position)."""
        return self._pos

    def seek(self, offset, whence=0):
        """Move this view's cursor; refused while a writer handle is open."""
        with self._lock:
            if self._writing():
                raise ValueError(
                    "Can't reposition in the ZIP file while "
                    "there is an open writing handle on it. "
                    "Close the writing handle before trying to read."
                )
            self._file.seek(offset, whence)
            self._pos = self._file.tell()
            return self._pos

    def read(self, n=-1):
        """Read up to n bytes from this view's cursor as one atomic step."""
        with self._lock:
            if self._writing():
                raise ValueError(
                    "Can't read from the ZIP file while there "
                    "is an open writing handle on it. "
                    "Close the writing handle before trying to read."
                )
            self._file.seek(self._pos)
            chunk = self._file.read(n)
            self._pos = self._file.tell()
            return chunk

    def close(self):
        """Detach from the shared file and run the close callback exactly once."""
        shared, self._file = self._file, None
        if shared is not None:
            self._close(shared)
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
    Is returned by ZipFile.open().
    """

    # Max size supported by decompressor.
    # NOTE(review): ``1 << 31 - 1`` parses as ``1 << 30`` (``-`` binds tighter
    # than ``<<``); this matches CPython's zipfile, so it is kept verbatim.
    MAX_N = 1 << 31 - 1

    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096

    # Chunk size to read during seek
    MAX_SEEK_READ = 1 << 24

    def __init__(self, fileobj, mode, zipinfo, close_fileobj=False):
        # fileobj is expected to be positioned at the first byte of the
        # member's stored data.
        self._fileobj = fileobj
        self._close_fileobj = close_fileobj
        self._compress_left = zipinfo.compress_size  # stored bytes still unread
        self._left = zipinfo.file_size  # uncompressed bytes still to deliver
        self._eof = False
        self._readbuffer = b""
        self._offset = 0  # read cursor inside _readbuffer

        self.newlines = None

        self.mode = mode
        self.name = zipinfo.filename

        if hasattr(zipinfo, "CRC"):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b"")
        else:
            # Without a reference CRC no integrity check is performed.
            self._expected_crc = None

        self._seekable = False
        try:
            if fileobj.seekable():
                self._orig_compress_start = fileobj.tell()
                self._orig_compress_size = zipinfo.compress_size
                self._orig_file_size = zipinfo.file_size
                # If zipinfo had no CRC, _running_crc is unset and this line
                # raises AttributeError, which the handler below swallows —
                # the member then simply stays non-seekable.
                self._orig_start_crc = self._running_crc
                self._seekable = True
        except AttributeError:
            pass

        self._decrypter = None

    def _init_decrypter(self):
        # Encrypted members are not supported by this slim reader.
        raise NotImplementedError

    def readline(self, limit=-1):
        """Read and return a line from the stream.
        If limit is specified, at most limit bytes will be read.
        """

        if limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find(b"\n", self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset : i]
                self._offset = i
                return line

        return io.BufferedIOBase.readline(self, limit)

    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            if len(chunk) > self._offset:
                self._readbuffer = chunk + self._readbuffer[self._offset :]
                self._offset = 0
            else:
                self._offset -= len(chunk)

        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset : self._offset + 512]

    def readable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return True

    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
        """
        if self.closed:
            raise ValueError("read from closed file.")
        if n is None or n < 0:
            # Drain the buffer, then pull everything remaining from the file.
            buf = self._readbuffer[self._offset :]
            self._readbuffer = b""
            self._offset = 0
            while not self._eof:
                buf += self._read1(self.MAX_N)
            return buf

        end = n + self._offset
        if end < len(self._readbuffer):
            # Fully satisfiable from the internal buffer.
            buf = self._readbuffer[self._offset : end]
            self._offset = end
            return buf

        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset :]
        self._readbuffer = b""
        self._offset = 0
        while n > 0 and not self._eof:
            data = self._read1(n)
            if n < len(data):
                # Keep the surplus for the next call.
                self._readbuffer = data
                self._offset = n
                buf += data[:n]
                break
            buf += data
            n -= len(data)
        return buf

    def _update_crc(self, newdata):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc)
        # Check the CRC if we're at the end of the file
        if self._eof and self._running_crc != self._expected_crc:
            raise BadZipFile("Bad CRC-32 for file %r" % self.name)

    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""

        if n is None or n < 0:
            buf = self._readbuffer[self._offset :]
            self._readbuffer = b""
            self._offset = 0
            while not self._eof:
                data = self._read1(self.MAX_N)
                if data:
                    buf += data
                    break
            return buf

        end = n + self._offset
        if end < len(self._readbuffer):
            buf = self._readbuffer[self._offset : end]
            self._offset = end
            return buf

        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset :]
        self._readbuffer = b""
        self._offset = 0
        if n > 0:
            while not self._eof:
                data = self._read1(n)
                if n < len(data):
                    self._readbuffer = data
                    self._offset = n
                    buf += data[:n]
                    break
                if data:
                    buf += data
                    break
        return buf

    def _read1(self, n):
        # Read up to n compressed bytes with at most one read() system call,
        # decrypt and decompress them.
        if self._eof or n <= 0:
            return b""

        # Read from file.
        data = self._read2(n)
        self._eof = self._compress_left <= 0
        # Never deliver more than the member's declared uncompressed size.
        data = data[: self._left]
        self._left -= len(data)
        if self._left <= 0:
            self._eof = True
        self._update_crc(data)
        return data

    def _read2(self, n):
        # Raw read of up to n stored bytes; raises EOFError when the
        # underlying file ends before the declared compressed size.
        if self._compress_left <= 0:
            return b""

        n = max(n, self.MIN_READ_SIZE)
        n = min(n, self._compress_left)

        data = self._fileobj.read(n)
        self._compress_left -= len(data)
        if not data:
            raise EOFError

        if self._decrypter is not None:
            data = self._decrypter(data)
        return data

    def close(self):
        try:
            if self._close_fileobj:
                self._fileobj.close()
        finally:
            super().close()

    def seekable(self):
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        return self._seekable

    def seek(self, offset, whence=0):
        """Seek within the uncompressed member; clamps to [0, file_size]."""
        if self.closed:
            raise ValueError("seek on closed file.")
        if not self._seekable:
            raise io.UnsupportedOperation("underlying stream is not seekable")
        curr_pos = self.tell()
        if whence == 0:  # Seek from start of file
            new_pos = offset
        elif whence == 1:  # Seek from current position
            new_pos = curr_pos + offset
        elif whence == 2:  # Seek from EOF
            new_pos = self._orig_file_size + offset
        else:
            raise ValueError("whence must be os.SEEK_SET (0), " "os.SEEK_CUR (1), or os.SEEK_END (2)")

        if new_pos > self._orig_file_size:
            new_pos = self._orig_file_size

        if new_pos < 0:
            new_pos = 0

        read_offset = new_pos - curr_pos
        buff_offset = read_offset + self._offset

        if buff_offset >= 0 and buff_offset < len(self._readbuffer):
            # Just move the _offset index if the new position is in the _readbuffer
            self._offset = buff_offset
            read_offset = 0
        elif read_offset < 0:
            # Position is before the current position. Reset the ZipExtFile
            # to the start of the member and fall through to reading forward.
            self._fileobj.seek(self._orig_compress_start)
            self._running_crc = self._orig_start_crc
            self._compress_left = self._orig_compress_size
            self._left = self._orig_file_size
            self._readbuffer = b""
            self._offset = 0
            self._eof = False
            read_offset = new_pos
            if self._decrypter is not None:
                self._init_decrypter()

        # Backward seeks are implemented as reset + forward read.
        while read_offset > 0:
            read_len = min(self.MAX_SEEK_READ, read_offset)
            self.read(read_len)
            read_offset -= read_len

        return self.tell()

    def tell(self):
        """Return the current position within the uncompressed member."""
        if self.closed:
            raise ValueError("tell on closed file.")
        if not self._seekable:
            raise io.UnsupportedOperation("underlying stream is not seekable")
        filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
        return filepos
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
class SlimZipFile:
|
| 632 |
+
fp = None # Set here since __del__ checks it
|
| 633 |
+
|
| 634 |
+
def __init__(self, file: str, mode="r", allowZip64=True, compresslevel=None):
    """Open a ZIP archive for read-only access.

    Args:
        file: Path of the archive.  Paths starting with ``s3://`` are opened
            through ``moxing`` (ModelArts object storage); anything else via
            ``io.open`` in binary mode.
        mode: Only ``"r"`` is accepted; anything else raises ``ValueError``.
        allowZip64: Stored for ``zipfile.ZipFile`` API parity; its effect (if
            any) is outside this method.
        compresslevel: Unused; accepted only for signature compatibility.

    Raises:
        ValueError: If ``mode`` is not ``"r"``.
        BadZipFile: If the file's central directory cannot be parsed
            (raised by ``_RealGetContents``).
    """
    if mode not in ("r",):
        raise ValueError("ZipFile requires mode 'r'")

    self._allowZip64 = allowZip64
    self._didModify = False
    self.debug = 0  # Level of printing: 0 through 3
    self.NameToInfo = {}  # Find file info given name
    self.mode = mode
    self.pwd = None
    self._comment = b""

    self._filePassed = 0
    self.filename = file
    if file.startswith("s3://"):
        # NOTE(review): assert statements are stripped under ``python -O``;
        # an explicit raise would be more robust here.
        assert mox is not None, f"File path starts with s3:// {file}, you need to run on Modelarts!!"
        self.fp = mox.file.File(file, "rb")
    else:
        self.fp = io.open(file, "rb")
    self._fileRefCnt = 1
    self._lock = threading.RLock()
    self._seekable = True
    self._writing = False
    # Parse the central directory immediately so member lookups work.
    self._RealGetContents()
|
| 658 |
+
|
| 659 |
+
def __enter__(self):
    """Enter a ``with`` block; the archive object itself is the target."""
    return self
|
| 661 |
+
|
| 662 |
+
def __exit__(self, type, value, traceback):
    """Close the archive when the ``with`` block ends; exceptions propagate."""
    self.close()
|
| 664 |
+
|
| 665 |
+
def _RealGetContents(self):
|
| 666 |
+
"""Read in the table of contents for the ZIP file."""
|
| 667 |
+
fp = self.fp
|
| 668 |
+
try:
|
| 669 |
+
endrec = _EndRecData(fp)
|
| 670 |
+
except OSError:
|
| 671 |
+
raise BadZipFile("File is not a zip file")
|
| 672 |
+
if not endrec:
|
| 673 |
+
raise BadZipFile("File is not a zip file")
|
| 674 |
+
|
| 675 |
+
size_cd = endrec[_ECD_SIZE] # bytes in central directory
|
| 676 |
+
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
|
| 677 |
+
self._comment = endrec[_ECD_COMMENT] # archive comment
|
| 678 |
+
|
| 679 |
+
# "concat" is zero, unless zip was concatenated to another file
|
| 680 |
+
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
|
| 681 |
+
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
|
| 682 |
+
# If Zip64 extension structures are present, account for them
|
| 683 |
+
concat -= sizeEndCentDir64 + sizeEndCentDir64Locator
|
| 684 |
+
|
| 685 |
+
# self.start_dir: Position of start of central directory
|
| 686 |
+
self.start_dir = offset_cd + concat
|
| 687 |
+
fp.seek(self.start_dir, 0)
|
| 688 |
+
data = fp.read(size_cd)
|
| 689 |
+
fp = io.BytesIO(data)
|
| 690 |
+
total = 0
|
| 691 |
+
while total < size_cd:
|
| 692 |
+
centdir = fp.read(sizeCentralDir)
|
| 693 |
+
if len(centdir) != sizeCentralDir:
|
| 694 |
+
raise BadZipFile("Truncated central directory")
|
| 695 |
+
centdir = struct.unpack(structCentralDir, centdir)
|
| 696 |
+
if centdir[_CD_SIGNATURE] != stringCentralDir:
|
| 697 |
+
raise BadZipFile("Bad magic number for central directory")
|
| 698 |
+
|
| 699 |
+
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
|
| 700 |
+
flags = centdir[_CD_FLAG_BITS]
|
| 701 |
+
if flags & _MASK_UTF_FILENAME:
|
| 702 |
+
# UTF-8 file names extension
|
| 703 |
+
filename = filename.decode("utf-8")
|
| 704 |
+
else:
|
| 705 |
+
# Historical ZIP filename encoding
|
| 706 |
+
filename = filename.decode("cp437")
|
| 707 |
+
# Create ZipInfo instance to store file information
|
| 708 |
+
x = ZipInfo(filename)
|
| 709 |
+
extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
|
| 710 |
+
_ = fp.read(centdir[_CD_COMMENT_LENGTH]) # x.comment
|
| 711 |
+
|
| 712 |
+
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
|
| 713 |
+
(_, _, extract_version, _, _, _, _, _, x.CRC, x.compress_size, x.file_size) = centdir[1:12]
|
| 714 |
+
if extract_version > MAX_EXTRACT_VERSION:
|
| 715 |
+
raise NotImplementedError("zip file version %.1f" % (extract_version / 10))
|
| 716 |
+
_, _, _ = centdir[15:18]
|
| 717 |
+
|
| 718 |
+
x._decodeExtra(extra)
|
| 719 |
+
x.header_offset = x.header_offset + concat
|
| 720 |
+
self.NameToInfo[x.filename] = x
|
| 721 |
+
|
| 722 |
+
# update total bytes read from central directory
|
| 723 |
+
total = (
|
| 724 |
+
total
|
| 725 |
+
+ sizeCentralDir
|
| 726 |
+
+ centdir[_CD_FILENAME_LENGTH]
|
| 727 |
+
+ centdir[_CD_EXTRA_FIELD_LENGTH]
|
| 728 |
+
+ centdir[_CD_COMMENT_LENGTH]
|
| 729 |
+
)
|
| 730 |
+
|
| 731 |
+
# from pympler import asizeof
|
| 732 |
+
# print("size of x = {:.6f}GB".format(asizeof.asizeof(self.NameToInfo) / 1024. / 1024. / 1024.))
|
| 733 |
+
|
| 734 |
+
def namelist(self):
|
| 735 |
+
return list(self.NameToInfo.keys())
|
| 736 |
+
|
| 737 |
+
def getinfo(self, name):
|
| 738 |
+
info = self.NameToInfo.get(name)
|
| 739 |
+
if info is None:
|
| 740 |
+
raise KeyError("There is no item named %r in the archive" % name)
|
| 741 |
+
|
| 742 |
+
return info
|
| 743 |
+
|
| 744 |
+
def read(self, name, pwd=None):
|
| 745 |
+
"""Return file bytes for name."""
|
| 746 |
+
with self.open(name, "r", pwd) as fp:
|
| 747 |
+
return fp.read()
|
| 748 |
+
|
| 749 |
+
def open(self, name, mode="r", *, force_zip64=False):
|
| 750 |
+
if mode not in {
|
| 751 |
+
"r",
|
| 752 |
+
}:
|
| 753 |
+
raise ValueError('open() requires mode "r"')
|
| 754 |
+
|
| 755 |
+
if not self.fp:
|
| 756 |
+
raise ValueError("Attempt to use ZIP archive that was already closed")
|
| 757 |
+
|
| 758 |
+
# Make sure we have an info object
|
| 759 |
+
zinfo = self.getinfo(name)
|
| 760 |
+
|
| 761 |
+
# Open for reading:
|
| 762 |
+
self._fileRefCnt += 1
|
| 763 |
+
zef_file = _SharedFile(self.fp, zinfo.header_offset, self._fpclose, self._lock, lambda: self._writing)
|
| 764 |
+
try:
|
| 765 |
+
# Skip the file header:
|
| 766 |
+
fheader = zef_file.read(sizeFileHeader)
|
| 767 |
+
if len(fheader) != sizeFileHeader:
|
| 768 |
+
raise BadZipFile("Truncated file header")
|
| 769 |
+
fheader = struct.unpack(structFileHeader, fheader)
|
| 770 |
+
if fheader[_FH_SIGNATURE] != stringFileHeader:
|
| 771 |
+
raise BadZipFile("Bad magic number for file header")
|
| 772 |
+
|
| 773 |
+
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
|
| 774 |
+
if fheader[_FH_EXTRA_FIELD_LENGTH]:
|
| 775 |
+
zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1)
|
| 776 |
+
|
| 777 |
+
if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME:
|
| 778 |
+
# UTF-8 filename
|
| 779 |
+
fname_str = fname.decode("utf-8")
|
| 780 |
+
else:
|
| 781 |
+
fname_str = fname.decode("cp437")
|
| 782 |
+
|
| 783 |
+
if fname_str != zinfo.filename:
|
| 784 |
+
raise BadZipFile("File name in directory %r and header %r differ." % (zinfo.filename, fname))
|
| 785 |
+
|
| 786 |
+
return ZipExtFile(zef_file, mode, zinfo, True)
|
| 787 |
+
except:
|
| 788 |
+
zef_file.close()
|
| 789 |
+
raise
|
| 790 |
+
|
| 791 |
+
def __del__(self):
|
| 792 |
+
"""Call the "close()" method in case the user forgot."""
|
| 793 |
+
self.close()
|
| 794 |
+
|
| 795 |
+
def close(self):
|
| 796 |
+
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
|
| 797 |
+
records."""
|
| 798 |
+
if self.fp is None:
|
| 799 |
+
return
|
| 800 |
+
|
| 801 |
+
fp = self.fp
|
| 802 |
+
self.fp = None
|
| 803 |
+
self._fpclose(fp)
|
| 804 |
+
|
| 805 |
+
def _write_end_record(self):
|
| 806 |
+
raise NotImplementedError
|
| 807 |
+
|
| 808 |
+
def _fpclose(self, fp):
|
| 809 |
+
assert self._fileRefCnt > 0
|
| 810 |
+
self._fileRefCnt -= 1
|
| 811 |
+
if not self._fileRefCnt and not self._filePassed:
|
| 812 |
+
fp.close()
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/slim_zipfile2.py
ADDED
|
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Read and write ZIP files.
|
| 3 |
+
XXX references to utf-8 need further investigation.
|
| 4 |
+
"""
|
| 5 |
+
import binascii
|
| 6 |
+
import importlib.util
|
| 7 |
+
import io
|
| 8 |
+
import itertools
|
| 9 |
+
import os
|
| 10 |
+
import posixpath
|
| 11 |
+
import shutil
|
| 12 |
+
import stat
|
| 13 |
+
import struct
|
| 14 |
+
import sys
|
| 15 |
+
import threading
|
| 16 |
+
import time
|
| 17 |
+
import contextlib
|
| 18 |
+
import pathlib
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
import zlib # We may need its compression method
|
| 22 |
+
crc32 = zlib.crc32
|
| 23 |
+
except ImportError:
|
| 24 |
+
zlib = None
|
| 25 |
+
crc32 = binascii.crc32
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import moxing as mox
|
| 29 |
+
except ImportError:
|
| 30 |
+
mox = None
|
| 31 |
+
|
| 32 |
+
class BadZipFile(Exception):
    """Raised when a file is not a ZIP archive or its ZIP structures are corrupt."""
    pass
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
ZIP64_LIMIT = (1 << 31) - 1
|
| 37 |
+
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
|
| 38 |
+
ZIP_MAX_COMMENT = (1 << 16) - 1
|
| 39 |
+
|
| 40 |
+
# constants for Zip file compression methods
|
| 41 |
+
ZIP_STORED = 0
|
| 42 |
+
ZIP_DEFLATED = 8
|
| 43 |
+
ZIP_BZIP2 = 12
|
| 44 |
+
ZIP_LZMA = 14
|
| 45 |
+
# Other ZIP compression methods not supported
|
| 46 |
+
|
| 47 |
+
DEFAULT_VERSION = 20
|
| 48 |
+
ZIP64_VERSION = 45
|
| 49 |
+
BZIP2_VERSION = 46
|
| 50 |
+
LZMA_VERSION = 63
|
| 51 |
+
# we recognize (but not necessarily support) all features up to that version
|
| 52 |
+
MAX_EXTRACT_VERSION = 63
|
| 53 |
+
|
| 54 |
+
# Below are some formats and associated data for reading/writing headers using
|
| 55 |
+
# the struct module. The names and structures of headers/records are those used
|
| 56 |
+
# in the PKWARE description of the ZIP file format:
|
| 57 |
+
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
|
| 58 |
+
# (URL valid as of January 2008)
|
| 59 |
+
|
| 60 |
+
# The "end of central directory" structure, magic number, size, and indices
|
| 61 |
+
# (section V.I in the format document)
|
| 62 |
+
structEndArchive = b"<4s4H2LH"
|
| 63 |
+
stringEndArchive = b"PK\005\006"
|
| 64 |
+
sizeEndCentDir = struct.calcsize(structEndArchive)
|
| 65 |
+
|
| 66 |
+
_ECD_SIGNATURE = 0
|
| 67 |
+
_ECD_DISK_NUMBER = 1
|
| 68 |
+
_ECD_DISK_START = 2
|
| 69 |
+
_ECD_ENTRIES_THIS_DISK = 3
|
| 70 |
+
_ECD_ENTRIES_TOTAL = 4
|
| 71 |
+
_ECD_SIZE = 5
|
| 72 |
+
_ECD_OFFSET = 6
|
| 73 |
+
_ECD_COMMENT_SIZE = 7
|
| 74 |
+
# These last two indices are not part of the structure as defined in the
|
| 75 |
+
# spec, but they are used internally by this module as a convenience
|
| 76 |
+
_ECD_COMMENT = 8
|
| 77 |
+
_ECD_LOCATION = 9
|
| 78 |
+
|
| 79 |
+
# The "central directory" structure, magic number, size, and indices
|
| 80 |
+
# of entries in the structure (section V.F in the format document)
|
| 81 |
+
structCentralDir = "<4s4B4HL2L5H2L"
|
| 82 |
+
stringCentralDir = b"PK\001\002"
|
| 83 |
+
sizeCentralDir = struct.calcsize(structCentralDir)
|
| 84 |
+
|
| 85 |
+
# indexes of entries in the central directory structure
|
| 86 |
+
_CD_SIGNATURE = 0
|
| 87 |
+
_CD_CREATE_VERSION = 1
|
| 88 |
+
_CD_CREATE_SYSTEM = 2
|
| 89 |
+
_CD_EXTRACT_VERSION = 3
|
| 90 |
+
_CD_EXTRACT_SYSTEM = 4
|
| 91 |
+
_CD_FLAG_BITS = 5
|
| 92 |
+
_CD_COMPRESS_TYPE = 6
|
| 93 |
+
_CD_TIME = 7
|
| 94 |
+
_CD_DATE = 8
|
| 95 |
+
_CD_CRC = 9
|
| 96 |
+
_CD_COMPRESSED_SIZE = 10
|
| 97 |
+
_CD_UNCOMPRESSED_SIZE = 11
|
| 98 |
+
_CD_FILENAME_LENGTH = 12
|
| 99 |
+
_CD_EXTRA_FIELD_LENGTH = 13
|
| 100 |
+
_CD_COMMENT_LENGTH = 14
|
| 101 |
+
_CD_DISK_NUMBER_START = 15
|
| 102 |
+
_CD_INTERNAL_FILE_ATTRIBUTES = 16
|
| 103 |
+
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
|
| 104 |
+
_CD_LOCAL_HEADER_OFFSET = 18
|
| 105 |
+
|
| 106 |
+
# General purpose bit flags
|
| 107 |
+
# Zip Appnote: 4.4.4 general purpose bit flag: (2 bytes)
|
| 108 |
+
_MASK_ENCRYPTED = 1 << 0
|
| 109 |
+
# Bits 1 and 2 have different meanings depending on the compression used.
|
| 110 |
+
_MASK_COMPRESS_OPTION_1 = 1 << 1
|
| 111 |
+
# _MASK_COMPRESS_OPTION_2 = 1 << 2
|
| 112 |
+
# _MASK_USE_DATA_DESCRIPTOR: If set, crc-32, compressed size and uncompressed
|
| 113 |
+
# size are zero in the local header and the real values are written in the data
|
| 114 |
+
# descriptor immediately following the compressed data.
|
| 115 |
+
_MASK_USE_DATA_DESCRIPTOR = 1 << 3
|
| 116 |
+
# Bit 4: Reserved for use with compression method 8, for enhanced deflating.
|
| 117 |
+
# _MASK_RESERVED_BIT_4 = 1 << 4
|
| 118 |
+
_MASK_COMPRESSED_PATCH = 1 << 5
|
| 119 |
+
_MASK_STRONG_ENCRYPTION = 1 << 6
|
| 120 |
+
# _MASK_UNUSED_BIT_7 = 1 << 7
|
| 121 |
+
# _MASK_UNUSED_BIT_8 = 1 << 8
|
| 122 |
+
# _MASK_UNUSED_BIT_9 = 1 << 9
|
| 123 |
+
# _MASK_UNUSED_BIT_10 = 1 << 10
|
| 124 |
+
_MASK_UTF_FILENAME = 1 << 11
|
| 125 |
+
# Bit 12: Reserved by PKWARE for enhanced compression.
|
| 126 |
+
# _MASK_RESERVED_BIT_12 = 1 << 12
|
| 127 |
+
# _MASK_ENCRYPTED_CENTRAL_DIR = 1 << 13
|
| 128 |
+
# Bit 14, 15: Reserved by PKWARE
|
| 129 |
+
# _MASK_RESERVED_BIT_14 = 1 << 14
|
| 130 |
+
# _MASK_RESERVED_BIT_15 = 1 << 15
|
| 131 |
+
|
| 132 |
+
# The "local file header" structure, magic number, size, and indices
|
| 133 |
+
# (section V.A in the format document)
|
| 134 |
+
structFileHeader = "<4s2B4HL2L2H"
|
| 135 |
+
stringFileHeader = b"PK\003\004"
|
| 136 |
+
sizeFileHeader = struct.calcsize(structFileHeader)
|
| 137 |
+
|
| 138 |
+
_FH_SIGNATURE = 0
|
| 139 |
+
_FH_EXTRACT_VERSION = 1
|
| 140 |
+
_FH_EXTRACT_SYSTEM = 2
|
| 141 |
+
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
|
| 142 |
+
_FH_COMPRESSION_METHOD = 4
|
| 143 |
+
_FH_LAST_MOD_TIME = 5
|
| 144 |
+
_FH_LAST_MOD_DATE = 6
|
| 145 |
+
_FH_CRC = 7
|
| 146 |
+
_FH_COMPRESSED_SIZE = 8
|
| 147 |
+
_FH_UNCOMPRESSED_SIZE = 9
|
| 148 |
+
_FH_FILENAME_LENGTH = 10
|
| 149 |
+
_FH_EXTRA_FIELD_LENGTH = 11
|
| 150 |
+
|
| 151 |
+
# The "Zip64 end of central directory locator" structure, magic number, and size
|
| 152 |
+
structEndArchive64Locator = "<4sLQL"
|
| 153 |
+
stringEndArchive64Locator = b"PK\x06\x07"
|
| 154 |
+
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
|
| 155 |
+
|
| 156 |
+
# The "Zip64 end of central directory" record, magic number, size, and indices
|
| 157 |
+
# (section V.G in the format document)
|
| 158 |
+
structEndArchive64 = "<4sQ2H2L4Q"
|
| 159 |
+
stringEndArchive64 = b"PK\x06\x06"
|
| 160 |
+
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
|
| 161 |
+
|
| 162 |
+
_CD64_SIGNATURE = 0
|
| 163 |
+
_CD64_DIRECTORY_RECSIZE = 1
|
| 164 |
+
_CD64_CREATE_VERSION = 2
|
| 165 |
+
_CD64_EXTRACT_VERSION = 3
|
| 166 |
+
_CD64_DISK_NUMBER = 4
|
| 167 |
+
_CD64_DISK_NUMBER_START = 5
|
| 168 |
+
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
|
| 169 |
+
_CD64_NUMBER_ENTRIES_TOTAL = 7
|
| 170 |
+
_CD64_DIRECTORY_SIZE = 8
|
| 171 |
+
_CD64_OFFSET_START_CENTDIR = 9
|
| 172 |
+
|
| 173 |
+
_DD_SIGNATURE = 0x08074b50
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec.

    *offset* is a negative offset from the end of the file pointing at the
    classic end-of-central-directory record; the ZIP64 locator, if present,
    sits immediately before it. Returns *endrec* (mutated in place with the
    64-bit values) or the unmodified *endrec* when no ZIP64 record exists.
    """
    try:
        # whence=2: seek relative to end-of-file.
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except OSError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec

    data = fpin.read(sizeEndCentDir64Locator)
    if len(data) != sizeEndCentDir64Locator:
        return endrec
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No ZIP64 locator: plain (non-ZIP64) archive.
        return endrec

    if diskno != 0 or disks > 1:
        raise BadZipFile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    if len(data) != sizeEndCentDir64:
        return endrec
    sig, sz, _, read_version, disk_num, disk_dir, \
        dircount, dircount2, dirsize, diroffset = \
        struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record.
    Returns None when no valid end-of-central-directory record is found.
    """

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except OSError:
        # File shorter than the fixed-size record: cannot be a ZIP file.
        return None
    data = fpin.read()
    if (len(data) == sizeEndCentDir and
        data[0:4] == stringEndArchive and
        data[-2:] == b"\000\000"):
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        # (offset is negative: relative to end-of-file).
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        if len(recData) != sizeEndCentDir:
            # Zip file is corrupted.
            return None
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)

    # Unable to find a valid end of central directory structure
    return None
|
| 277 |
+
|
| 278 |
+
|
| 279 |
+
class ZipInfo(object):
    """Lightweight per-member metadata record.

    A slimmed-down stand-in for ``zipfile.ZipInfo`` that keeps only the
    fields needed to locate and read a member: its name, the offset of
    its local header, its CRC, and its compressed/uncompressed sizes.
    ``__slots__`` keeps per-instance memory low for huge archives.
    """

    __slots__ = (
        'filename',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
    )

    def __init__(self, filename="NoName"):
        """Create a record for *filename*; both sizes start at zero.

        ``header_offset`` and ``CRC`` are assigned later by the reader.
        """
        self.filename = filename
        self.compress_size = 0  # size of the compressed data
        self.file_size = 0  # size of the uncompressed data

    def _decodeExtra(self, extra):
        """Scan the extra field and apply any ZIP64 (tag 0x0001) values.

        Walks the (tag, length, payload) sub-records of *extra*; for the
        ZIP64 record, 64-bit file size / compressed size / header offset
        replace the corresponding 0xFFFFFFFF sentinel values already set
        on this instance. Raises BadZipFile if a sub-record overruns the
        field or the ZIP64 payload is shorter than required.
        """
        unpack = struct.unpack
        remaining = extra
        while len(remaining) >= 4:
            tag, size = unpack('<HH', remaining[:4])
            if size + 4 > len(remaining):
                raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tag, size))
            if tag == 0x0001:
                payload = remaining[4:size + 4]
                # ZIP64 extension (large files and/or large archives)
                try:
                    if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF):
                        field = "File size"
                        self.file_size, = unpack('<Q', payload[:8])
                        payload = payload[8:]
                    if self.compress_size == 0xFFFF_FFFF:
                        field = "Compress size"
                        self.compress_size, = unpack('<Q', payload[:8])
                        payload = payload[8:]
                    if self.header_offset == 0xFFFF_FFFF:
                        field = "Header offset"
                        self.header_offset, = unpack('<Q', payload[:8])
                except struct.error:
                    raise BadZipFile(f"Corrupt zip64 extra field. "
                                     f"{field} not found.") from None

            remaining = remaining[size + 4:]
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
class _SharedFile:
|
| 324 |
+
def __init__(self, file, pos, close, lock, writing):
|
| 325 |
+
self._file = file
|
| 326 |
+
self._pos = pos
|
| 327 |
+
self._close = close
|
| 328 |
+
self._lock = lock
|
| 329 |
+
self._writing = writing
|
| 330 |
+
self.seekable = file.seekable
|
| 331 |
+
|
| 332 |
+
def tell(self):
|
| 333 |
+
return self._pos
|
| 334 |
+
|
| 335 |
+
def seek(self, offset, whence=0):
|
| 336 |
+
with self._lock:
|
| 337 |
+
if self._writing():
|
| 338 |
+
raise ValueError("Can't reposition in the ZIP file while "
|
| 339 |
+
"there is an open writing handle on it. "
|
| 340 |
+
"Close the writing handle before trying to read.")
|
| 341 |
+
self._file.seek(offset, whence)
|
| 342 |
+
self._pos = self._file.tell()
|
| 343 |
+
return self._pos
|
| 344 |
+
|
| 345 |
+
def read(self, n=-1):
|
| 346 |
+
with self._lock:
|
| 347 |
+
if self._writing():
|
| 348 |
+
raise ValueError("Can't read from the ZIP file while there "
|
| 349 |
+
"is an open writing handle on it. "
|
| 350 |
+
"Close the writing handle before trying to read.")
|
| 351 |
+
self._file.seek(self._pos)
|
| 352 |
+
data = self._file.read(n)
|
| 353 |
+
self._pos = self._file.tell()
|
| 354 |
+
return data
|
| 355 |
+
|
| 356 |
+
def close(self):
|
| 357 |
+
if self._file is not None:
|
| 358 |
+
fileobj = self._file
|
| 359 |
+
self._file = None
|
| 360 |
+
self._close(fileobj)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
class ZipExtFile(io.BufferedIOBase):
|
| 364 |
+
"""File-like object for reading an archive member.
|
| 365 |
+
Is returned by ZipFile.open().
|
| 366 |
+
"""
|
| 367 |
+
|
| 368 |
+
# Max size supported by decompressor.
|
| 369 |
+
MAX_N = 1 << 31 - 1
|
| 370 |
+
|
| 371 |
+
# Read from compressed files in 4k blocks.
|
| 372 |
+
MIN_READ_SIZE = 4096
|
| 373 |
+
|
| 374 |
+
# Chunk size to read during seek
|
| 375 |
+
MAX_SEEK_READ = 1 << 24
|
| 376 |
+
|
| 377 |
+
def __init__(self, fileobj, mode, zipinfo, close_fileobj=False):
    """Wrap *fileobj*, positioned at the member's data, as a reader.

    fileobj: shared file object (a _SharedFile) positioned at the start
        of the member's raw data.
    mode: open mode string.
    zipinfo: ZipInfo carrying the member's sizes, name and optional CRC.
    close_fileobj: if True, closing this reader also closes fileobj.
    """
    self._fileobj = fileobj
    self._close_fileobj = close_fileobj
    # Raw bytes left to pull from the archive vs. member bytes still owed
    # to the caller. NOTE(review): no decompressor is set up anywhere in
    # this class, so members appear to be read as stored data — confirm.
    self._compress_left = zipinfo.compress_size
    self._left = zipinfo.file_size
    self._eof = False
    self._readbuffer = b''  # read-ahead buffer; _offset indexes into it
    self._offset = 0
    self.newlines = None
    self.mode = mode
    self.name = zipinfo.filename
    if hasattr(zipinfo, 'CRC'):
        # ZipInfo uses __slots__, so CRC exists only once the directory
        # parser assigned it; without it, integrity checking is skipped.
        self._expected_crc = zipinfo.CRC
        self._running_crc = crc32(b'')
    else:
        self._expected_crc = None
    self._seekable = False
    try:
        if fileobj.seekable():
            # Remember where the member's data starts so seek() can rewind.
            self._orig_compress_start = fileobj.tell()
            self._orig_compress_size = zipinfo.compress_size
            self._orig_file_size = zipinfo.file_size
            self._orig_start_crc = self._running_crc
            self._seekable = True
    except AttributeError:
        pass
    # Decryption is not implemented; _init_decrypter raises if needed.
    self._decrypter = None
|
| 409 |
+
|
| 410 |
+
def _init_decrypter(self):
    # Encrypted ZIP members are not supported by this slim reader.
    raise NotImplementedError
|
| 412 |
+
|
| 413 |
+
def readline(self, limit=-1):
    """Read and return a line from the stream.

    If limit is specified, at most limit bytes will be read.
    """
    if limit < 0:
        # Fast path: a complete line is already sitting in the buffer.
        nl = self._readbuffer.find(b'\n', self._offset) + 1
        if nl > 0:
            line = self._readbuffer[self._offset:nl]
            self._offset = nl
            return line

    # Slow path / limited reads: defer to the buffered base class, which
    # assembles the line through read1().
    return io.BufferedIOBase.readline(self, limit)
|
| 427 |
+
|
| 428 |
+
def peek(self, n=1):
    """Returns buffered bytes without advancing the position."""
    buffered = len(self._readbuffer) - self._offset
    if n > buffered:
        # Pull more data, then push it back onto the front of the buffer
        # so the stream position is unchanged.
        chunk = self.read(n)
        if len(chunk) > self._offset:
            self._readbuffer = chunk + self._readbuffer[self._offset:]
            self._offset = 0
        else:
            self._offset -= len(chunk)

    # Return up to 512 bytes to reduce allocation overhead for tight loops.
    return self._readbuffer[self._offset:self._offset + 512]
|
| 440 |
+
|
| 441 |
+
def readable(self):
    """Always True while open; raises ValueError after close()."""
    if self.closed:
        raise ValueError("I/O operation on closed file.")
    return True
|
| 445 |
+
|
| 446 |
+
def read(self, n=-1):
    """Read and return up to n bytes of member data.

    If the argument is omitted, None, or negative, data is read and
    returned until EOF is reached. Raises ValueError if closed.
    """
    if self.closed:
        raise ValueError("read from closed file.")

    if n is None or n < 0:
        # Drain the read-ahead buffer, then pull until EOF.
        out = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        while not self._eof:
            out += self._read1(self.MAX_N)
        return out

    end = n + self._offset
    if end < len(self._readbuffer):
        # Entirely satisfiable from the buffer.
        out = self._readbuffer[self._offset:end]
        self._offset = end
        return out

    # Take what the buffer holds, then fetch the remainder.
    remaining = end - len(self._readbuffer)
    out = self._readbuffer[self._offset:]
    self._readbuffer = b''
    self._offset = 0
    while remaining > 0 and not self._eof:
        chunk = self._read1(remaining)
        if remaining < len(chunk):
            # Overshoot: keep the surplus buffered for the next call.
            self._readbuffer = chunk
            self._offset = remaining
            out += chunk[:remaining]
            break
        out += chunk
        remaining -= len(chunk)
    return out
|
| 480 |
+
|
| 481 |
+
def _update_crc(self, newdata):
    """Fold *newdata* into the running CRC; verify against the expected CRC at EOF."""
    # Update the CRC using the given data.
    if self._expected_crc is None:
        # No need to compute the CRC if we don't have a reference value
        return
    self._running_crc = crc32(newdata, self._running_crc)
    # Check the CRC if we're at the end of the file
    if self._eof and self._running_crc != self._expected_crc:
        raise BadZipFile("Bad CRC-32 for file %r" % self.name)
|
| 490 |
+
|
| 491 |
+
def read1(self, n):
    """Read up to n bytes with at most one read() system call."""
    if n is None or n < 0:
        out = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        # Keep trying until a non-empty chunk arrives or EOF is hit;
        # stop after the first data-bearing fetch.
        while not self._eof:
            chunk = self._read1(self.MAX_N)
            if chunk:
                out += chunk
                break
        return out

    end = n + self._offset
    if end < len(self._readbuffer):
        # Entirely satisfiable from the buffer.
        out = self._readbuffer[self._offset:end]
        self._offset = end
        return out

    remaining = end - len(self._readbuffer)
    out = self._readbuffer[self._offset:]
    self._readbuffer = b''
    self._offset = 0
    if remaining > 0:
        while not self._eof:
            chunk = self._read1(remaining)
            if remaining < len(chunk):
                # Overshoot: buffer the surplus for the next call.
                self._readbuffer = chunk
                self._offset = remaining
                out += chunk[:remaining]
                break
            if chunk:
                out += chunk
                break
    return out
|
| 527 |
+
|
| 528 |
+
def _read1(self, n):
    """Return up to n bytes of member data from one underlying fetch."""
    if self._eof or n <= 0:
        return b''

    # Pull raw bytes from the archive, then clamp to what the member
    # still owes the caller.
    raw = self._read2(n)
    self._eof = self._compress_left <= 0
    raw = raw[:self._left]
    self._left -= len(raw)
    if self._left <= 0:
        self._eof = True
    self._update_crc(raw)
    return raw
|
| 543 |
+
|
| 544 |
+
def _read2(self, n):
    """Read up to n raw bytes from the underlying file, decrypting if needed."""
    if self._compress_left <= 0:
        return b''

    # Clamp the request: at least MIN_READ_SIZE (avoid tiny reads) but never
    # past the end of this entry's compressed data.
    n = max(n, self.MIN_READ_SIZE)
    n = min(n, self._compress_left)

    data = self._fileobj.read(n)
    self._compress_left -= len(data)
    if not data:
        # Compressed bytes remain but the stream is exhausted: truncated file.
        raise EOFError

    if self._decrypter is not None:
        data = self._decrypter(data)
    return data
|
| 559 |
+
|
| 560 |
+
def close(self):
    """Close this entry stream, releasing the shared file object if owned."""
    try:
        fileobj = self._fileobj if self._close_fileobj else None
        if fileobj is not None:
            fileobj.close()
    finally:
        # Always mark the io object itself closed, even if the underlying
        # close raised.
        super().close()
|
| 566 |
+
|
| 567 |
+
def seekable(self):
    """Return whether the underlying stream supports seeking.

    Raises ValueError once the file has been closed.
    """
    if not self.closed:
        return self._seekable
    raise ValueError("I/O operation on closed file.")
|
| 571 |
+
|
| 572 |
+
def seek(self, offset, whence=0):
    """Seek to ``offset`` relative to ``whence`` within the uncompressed stream.

    Forward motion is performed by reading and discarding; backward motion
    rewinds to the start of the entry's compressed data and re-reads forward.
    Returns the new absolute position, clamped to [0, entry size].
    """
    if self.closed:
        raise ValueError("seek on closed file.")
    if not self._seekable:
        raise io.UnsupportedOperation("underlying stream is not seekable")
    curr_pos = self.tell()
    if whence == 0:  # Seek from start of file
        new_pos = offset
    elif whence == 1:  # Seek from current position
        new_pos = curr_pos + offset
    elif whence == 2:  # Seek from EOF
        new_pos = self._orig_file_size + offset
    else:
        raise ValueError("whence must be os.SEEK_SET (0), "
                         "os.SEEK_CUR (1), or os.SEEK_END (2)")

    # Clamp the target into the valid range of the entry.
    if new_pos > self._orig_file_size:
        new_pos = self._orig_file_size

    if new_pos < 0:
        new_pos = 0

    # How far we must move, and where that lands inside the read buffer.
    read_offset = new_pos - curr_pos
    buff_offset = read_offset + self._offset

    if buff_offset >= 0 and buff_offset < len(self._readbuffer):
        # Just move the _offset index if the new position is in the _readbuffer
        self._offset = buff_offset
        read_offset = 0
    elif read_offset < 0:
        # Position is before the current position. Reset the ZipExtFile
        # decoder state back to the start of the entry; the loop below then
        # reads forward from position 0 to new_pos.
        self._fileobj.seek(self._orig_compress_start)
        self._running_crc = self._orig_start_crc
        self._compress_left = self._orig_compress_size
        self._left = self._orig_file_size
        self._readbuffer = b''
        self._offset = 0
        self._eof = False
        read_offset = new_pos
        if self._decrypter is not None:
            self._init_decrypter()

    # Consume bytes in bounded chunks until the target position is reached.
    while read_offset > 0:
        read_len = min(self.MAX_SEEK_READ, read_offset)
        self.read(read_len)
        read_offset -= read_len

    return self.tell()
|
| 620 |
+
|
| 621 |
+
def tell(self):
    """Return the current position within the uncompressed entry data."""
    if self.closed:
        raise ValueError("tell on closed file.")
    if not self._seekable:
        raise io.UnsupportedOperation("underlying stream is not seekable")
    # Bytes pulled from the entry so far, minus what is still sitting
    # unconsumed in the read buffer.
    consumed = self._orig_file_size - self._left
    return consumed - len(self._readbuffer) + self._offset
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
class SlimZipFile:
    """Read-only, memory-slim ZIP archive reader.

    Unlike the stdlib ``zipfile.ZipFile`` this keeps only a minimal
    name -> ``ZipInfo`` mapping (``NameToInfo``), dropping per-entry comments
    and the ``filelist`` duplicate, which matters for archives with millions
    of members. Supports local paths and (via MoXing) ``s3://`` paths.
    Only mode 'r' is implemented; write paths raise.
    """

    fp = None  # Set here since __del__ checks it

    def __init__(self, file: str, mode="r", allowZip64=True, compresslevel=None):
        if mode not in ('r',):
            raise ValueError("ZipFile requires mode 'r'")

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}  # Find file info given name
        self.mode = mode
        self.pwd = None
        self._comment = b''

        self._filePassed = 0
        self.filename = file
        if file.startswith("s3://"):
            # MoXing is only available on ModelArts; fail loudly otherwise.
            assert mox is not None, f"File path starts with s3:// {file}, you need to run on Modelarts!!"
            self.fp = mox.file.File(file, "rb")
        else:
            self.fp = io.open(file, "rb")
        self._fileRefCnt = 1
        self._lock = threading.RLock()
        self._seekable = True
        self._writing = False
        self._RealGetContents()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except OSError:
            raise BadZipFile("File is not a zip file")
        if not endrec:
            raise BadZipFile("File is not a zip file")

        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self._comment = endrec[_ECD_COMMENT]    # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Slurp the whole central directory into memory and parse from there
        # (one big read instead of many small ones).
        data = fp.read(size_cd)
        fp = io.BytesIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if len(centdir) != sizeCentralDir:
                raise BadZipFile("Truncated central directory")
            centdir = struct.unpack(structCentralDir, centdir)
            if centdir[_CD_SIGNATURE] != stringCentralDir:
                raise BadZipFile("Bad magic number for central directory")

            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            flags = centdir[_CD_FLAG_BITS]
            if flags & _MASK_UTF_FILENAME:
                # UTF-8 file names extension
                filename = filename.decode('utf-8')
            else:
                # Historical ZIP filename encoding
                filename = filename.decode('cp437')
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            # Entry comment is read (to advance the cursor) but discarded to
            # keep per-entry memory low.
            _ = fp.read(centdir[_CD_COMMENT_LENGTH])  # x.comment

            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (_, _, extract_version, _, _, _, _, _, x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            if extract_version > MAX_EXTRACT_VERSION:
                raise NotImplementedError("zip file version %.1f" % (extract_version / 10))
            _, _, _ = centdir[15:18]

            x._decodeExtra(extra)
            x.header_offset = x.header_offset + concat
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

    def namelist(self):
        """Return a list of archive member names."""
        return list(self.NameToInfo.keys())

    def getinfo(self, name):
        """Return the ZipInfo for ``name``; raise KeyError if absent."""
        info = self.NameToInfo.get(name)
        if info is None:
            raise KeyError(
                'There is no item named %r in the archive' % name)

        return info

    def read(self, name, pwd=None):
        """Return file bytes for name.

        ``pwd`` is kept for stdlib signature compatibility, but encrypted
        entries are not supported here. Bug fix: the previous code forwarded
        ``pwd`` positionally into ``open()``, whose third positional slot does
        not exist (``force_zip64`` is keyword-only), so every call raised
        TypeError.
        """
        if pwd is not None:
            raise NotImplementedError("password-protected entries are not supported")
        with self.open(name, "r") as fp:
            return fp.read()

    def open(self, name, mode="r", *, force_zip64=False):
        """Open archive member ``name`` and return a file-like ZipExtFile."""
        if mode not in {"r", }:
            raise ValueError('open() requires mode "r"')

        if not self.fp:
            raise ValueError(
                "Attempt to use ZIP archive that was already closed")

        # Make sure we have an info object
        zinfo = self.getinfo(name)

        # Open for reading:
        self._fileRefCnt += 1
        zef_file = _SharedFile(self.fp, zinfo.header_offset,
                               self._fpclose, self._lock, lambda: self._writing)
        try:
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipFile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipFile("Bad magic number for file header")

            fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.seek(fheader[_FH_EXTRA_FIELD_LENGTH], whence=1)

            if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & _MASK_UTF_FILENAME:
                # UTF-8 filename
                fname_str = fname.decode("utf-8")
            else:
                fname_str = fname.decode("cp437")

            # Local header and central directory must agree on the name.
            if fname_str != zinfo.filename:
                raise BadZipFile(
                    'File name in directory %r and header %r differ.'
                    % (zinfo.filename, fname))

            return ZipExtFile(zef_file, mode, zinfo, True)
        except:
            zef_file.close()
            raise

    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        self.close()

    def close(self):
        """Close the file, and for mode 'w', 'x' and 'a' write the ending
        records."""
        if self.fp is None:
            return

        fp = self.fp
        self.fp = None
        self._fpclose(fp)

    def _write_end_record(self):
        # Read-only reader: writing is intentionally unsupported.
        raise NotImplementedError

    def _fpclose(self, fp):
        # Reference-counted close: the underlying file is shared with any
        # still-open ZipExtFile readers.
        assert self._fileRefCnt > 0
        self._fileRefCnt -= 1
        if not self._fileRefCnt and not self._filePassed:
            fp.close()
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/t2i_dataset.py
ADDED
|
@@ -0,0 +1,464 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import math
|
| 3 |
+
import re
|
| 4 |
+
import random
|
| 5 |
+
from copy import deepcopy
|
| 6 |
+
import time
|
| 7 |
+
import pickle
|
| 8 |
+
import yaml
|
| 9 |
+
import cv2
|
| 10 |
+
from PIL import Image, PngImagePlugin, ImageFile
|
| 11 |
+
import PIL
|
| 12 |
+
import zipfile
|
| 13 |
+
import tarfile
|
| 14 |
+
|
| 15 |
+
Image.MAX_IMAGE_PIXELS = 933120000
|
| 16 |
+
PngImagePlugin.MAX_TEXT_CHUNK = 1024 * 2**20
|
| 17 |
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
| 18 |
+
|
| 19 |
+
import pandas
|
| 20 |
+
import numpy as np
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
from torch.utils import data
|
| 24 |
+
|
| 25 |
+
# temp
|
| 26 |
+
import os, sys
|
| 27 |
+
sys.path.append(os.getcwd())
|
| 28 |
+
|
| 29 |
+
from mimogpt.datasets.utils.ordered_yaml import ordered_yaml
|
| 30 |
+
from mimogpt.datasets.slim_zipfile2 import SlimZipFile
|
| 31 |
+
# from zipfile import ZipFile
|
| 32 |
+
# from zipfile2 import ZipFile
|
| 33 |
+
from mimogpt.datasets.utils.checker import DataChecker
|
| 34 |
+
|
| 35 |
+
def pick_available_prompt(item, keys):
    """Return ``{key: prompt}`` for the first usable prompt among ``keys``.

    A prompt is usable when it is not the '#' placeholder and contains only
    ASCII characters. Returns an empty dict when none qualifies.

    Fix: the original returned ``[]`` on failure but a dict on success;
    callers only test emptiness (``len(...) == 0``), so returning ``{}``
    is behavior-compatible and type-consistent.
    """
    non_ascii = re.compile(r'[^\u0000-\u007F]')
    for key in keys:
        value = item[key]
        # Skip placeholders and any prompt containing non-ASCII characters.
        if value != '#' and not non_ascii.search(value):
            return {key: value}
    return {}
|
| 43 |
+
|
| 44 |
+
def random_pick_available_prompt(item, keys):
    """Collect every usable prompt from ``item``.

    A prompt is usable when it is neither '#' nor empty and contains only
    ASCII characters. Despite the name, this function is deterministic;
    the random choice among the returned prompts happens at the caller.
    """
    non_ascii = re.compile(r'[^\u0000-\u007F]')
    return {
        key: item[key]
        for key in keys
        if item[key] not in ['#', ''] and not non_ascii.search(item[key])
    }
|
| 54 |
+
|
| 55 |
+
def random_pick_imagenet_ablation(item, key):
    """Split a comma-separated ImageNet synset caption into ``{index: term}``.

    Raises NotImplementedError for placeholder ('#'), empty, or non-ASCII
    captions — ImageNet ablation rows are expected to always carry a valid
    ASCII caption.
    """
    caption = item[key]
    if caption in ['#', '']:
        raise NotImplementedError
    if re.search(r'[^\u0000-\u007F]', caption):
        raise NotImplementedError

    terms = (part.strip(' ') for part in caption.split(','))
    return {str(idx): term for idx, term in enumerate(terms)}
|
| 69 |
+
|
| 70 |
+
def mox_copy(src, dst, unzip=False, delete_zip=False, threads=1, is_processing=False):
    """Copy ``src`` to ``dst`` via MoXing, optionally unzipping the result.

    Args:
        src: source path (typically ``s3://...``).
        dst: local destination path.
        unzip: if True, ``dst`` must end with '.zip' and is extracted next to
            itself (extraction is skipped when the target dir already exists).
        delete_zip: if True, remove the zip after extraction.
        threads / is_processing: forwarded to ``mox.file.copy_parallel``.

    Requires the MoXing runtime (ModelArts); imported lazily so the module
    can be loaded off-cluster.
    """
    import moxing as mox
    mox.file.copy_parallel(src, dst, threads=threads, is_processing=is_processing)
    if unzip:
        assert dst.endswith('.zip')
        unzip_dst = os.path.abspath(dst.replace('.zip', ''))
        if not os.path.exists(unzip_dst):
            # Fix: use a context manager so the archive handle is released
            # even if extraction raises (original leaked it on error).
            with zipfile.ZipFile(dst, 'r') as zip_file:
                for f in zip_file.namelist():
                    zip_file.extract(f, unzip_dst)
        if delete_zip:
            os.remove(dst)
|
| 83 |
+
|
| 84 |
+
def identity(a):
    """Pass-through preprocessing hook: return the input unchanged."""
    return a
|
| 86 |
+
|
| 87 |
+
def identity2(a, b):
    """Two-argument pass-through: return the first argument, ignore the second."""
    return a
|
| 89 |
+
|
| 90 |
+
class Base(data.Dataset):
    """Text-to-image training dataset backed by parquet metadata + zip/tar image packs.

    Each "pack" couples a parquet file (captions/IDs) with an image archive.
    Samples are sharded across ranks at init time; each worker keeps only its
    own slice in ``self.paths``.
    """

    def __init__(self, opt):
        # opt is the full config dict; opt['dataset'] holds dataset options.
        super(Base, self).__init__()
        self.checker = DataChecker(opt)

        self.opt = opt['dataset']
        self.rank = opt['rank']
        self.replicas = opt['world_size']
        self.batch_size = opt['batch_size']
        # NOTE(review): attribute access on opt here (vs opt['dataset'] above)
        # implies opt is an attr-style mapping (e.g. EasyDict) — confirm.
        self.enlarge_ratio = opt.dataset['enlarge_ratio']
        assert self.rank < self.replicas

        self.load_t5_cache = self.opt['load_t5_cache']
        # With augmentation: gather all usable prompts; otherwise: first usable one.
        self.prompt_fn = random_pick_available_prompt if self.opt['prompt_augmentation'] else pick_available_prompt
        self.prompts_randomless = self.opt.get('prompts_randomless', False)

        # Staggered sleep parameters so ranks don't hammer storage simultaneously.
        self.sync_wait_time = self.opt.get('sync_wait_time', 1)
        self.sync_wait_count = self.rank
        self.sync_wait_ratio = self.opt.get('sync_wait_ratio', 1)
        self._load_meta_info()

        self.low_res_list = [] if 'low_res_list' not in self.opt else self.opt['low_res_list']
        self.max_size = self.opt['max_size']
        self.resize_f = self.opt['resize_f']
        if self.opt['preprocess_func'] == 'identity':
            # NOTE(review): identity returns just the image, while the other
            # two return (is_pass, img) consumed by _process — confirm the
            # 'identity' path is ever used.
            self.preprocess_img_func = identity
        elif self.opt['preprocess_func'] == 'resize_max':
            self.preprocess_img_func = self.resize_max
        elif self.opt['preprocess_func'] == 'center_crop':
            self.preprocess_img_func = self.center_crop
        else:
            raise NotImplementedError

        self.enable_data_warmup = self.opt.get('enable_data_warmup', False)

    def _sync_data(self, pack):
        """Download (and unzip) a pack's archive from S3 if not present locally."""
        if pack['sync_data'] and not os.path.exists(pack['zip']):
            s3_path = os.path.join(pack['s3_zip_root'], os.path.basename(pack['zip']))
            os.makedirs(os.path.dirname(pack['zip']), exist_ok=True)
            print(f"Mox copy {s3_path} to {pack['zip']}")
            # Stagger concurrent downloads across ranks.
            remain = self.sync_wait_count % self.sync_wait_ratio
            time.sleep(self.sync_wait_time * remain)
            # mox_copy(s3_path, pack['zip'])
            mox_copy(s3_path, pack['zip'], True)
            self.sync_wait_count += 1

    def get_balance_num(self, x, target, a=1, b=5):
        """Duplication factor for class balancing: x/target clamped to [a, b]."""
        ratio = x / target
        return int(min(max(a, ratio), b))

    def _load_meta_info(self):
        """Parse the meta-info YAML, build packs, and shard samples to this rank."""
        self.packs = []
        self._len = 0
        with open(self.opt['meta_info'], mode='r') as f:
            Loader, _ = ordered_yaml()
            data_opt = yaml.load(f, Loader=Loader)

        for info in data_opt['info']:
            # Two meta-info schemas: the 11-field one adds 'balanced'.
            if len(info) == 10:
                parquet_root, zip_root, t5_root, file_list, sync_data, \
                s3_zip_root, is_cfg, duplicate, prompt_keys, dataset_name = info
                balanced = 0
            elif len(info) == 11:
                parquet_root, zip_root, t5_root, file_list, sync_data, \
                s3_zip_root, is_cfg, duplicate, prompt_keys, balanced, dataset_name = info
            else:
                raise NotImplementedError

            duplicate = int(duplicate)
            with open(file_list, 'r') as f:
                parquet_list = f.readlines()
            for parquet_name in parquet_list:

                # Each line: "<parquet_name> <row_count>"
                parquet_name, parquet_len = parquet_name.strip('\n').split(' ')
                parquet_path = os.path.join(parquet_root, parquet_name)

                # Map parquet name to its image archive (dataset-specific naming).
                if dataset_name in ['Imagenet-1K-ablation']:
                    zip_path = os.path.join(zip_root, parquet_name.split('-')[0] + '.tar')
                elif dataset_name in ['imagenet_untar']:
                    parquet2zip_name_tmp = parquet_name[:2]+'_'+parquet_name[2:]
                    zip_path = os.path.join(zip_root, parquet2zip_name_tmp.split('-')[0] + '.zip')
                else:
                    zip_path = os.path.join(zip_root, parquet_name.replace('.parquet', '.zip'))

                t5_path = os.path.join(t5_root, parquet_name.replace('.parquet', '')) if t5_root is not None else None

                if balanced > 0:
                    # Class balancing: duplicate row i get_balance_num(...) times.
                    pack_num = 0
                    balanced_list = []
                    parquet = pandas.read_parquet(parquet_path)
                    for i in range(parquet.shape[0]):
                        cur_num = self.get_balance_num(int(parquet.iloc[i]['tag_count']), balanced)
                        pack_num += cur_num
                        balanced_list.extend([i] * cur_num)
                    assert len(balanced_list) == pack_num
                    pack_dic = {
                        'parquet': parquet_path,
                        'zip': zip_path,
                        't5': t5_path,
                        'cfg': is_cfg,
                        'num': pack_num * duplicate,
                        'sync_data': sync_data,
                        's3_zip_root': s3_zip_root,
                        'duplicate': duplicate,
                        'prompt_keys': prompt_keys,
                        'dataset_name': dataset_name,
                        'balanced': balanced,
                        'balanced_list': balanced_list
                    }
                else:
                    pack_dic = {
                        'parquet': parquet_path,
                        'zip': zip_path,
                        't5': t5_path,
                        'cfg': is_cfg,
                        'num': int(parquet_len) * duplicate,
                        'sync_data': sync_data,
                        's3_zip_root': s3_zip_root,
                        'duplicate': duplicate,
                        'prompt_keys': prompt_keys,
                        'dataset_name': dataset_name,
                        'balanced': balanced,
                    }

                self.packs.append(pack_dic)
                self._len += pack_dic['num']


        self.paths = []
        self.zip_fn = {}   # lazy cache: zip/tar path -> open archive handle
        self.t5_fn = {}

        # self.num_sample_per_rank = math.ceil(self._len / self.replicas)
        # assert self.num_sample_per_rank > 0
        # start_idx = self.num_sample_per_rank * self.rank
        # end_idx = start_idx + self.num_sample_per_rank

        # Shard the global sample range: the first (len % replicas) ranks get
        # one extra sample each.
        self.num_sample_per_rank = self._len // self.replicas
        num_redundant_rank = self._len % self.replicas
        assert self.num_sample_per_rank > 0
        if num_redundant_rank > self.rank:
            start_idx = (self.num_sample_per_rank + 1) * self.rank
            end_idx = start_idx + self.num_sample_per_rank + 1
        else:
            start_idx = num_redundant_rank + self.num_sample_per_rank * self.rank
            end_idx = start_idx + self.num_sample_per_rank
        # __len__ reports the ceil so all ranks agree on epoch length.
        self.num_sample_per_rank = math.ceil(self._len / self.replicas)

        # Shuffle pack order deterministically (same permutation on every rank).
        pack_g = torch.Generator()
        pack_g.manual_seed(3407)
        shuffle_pack_index = torch.randperm(len(self.packs), generator=pack_g).tolist()
        # print(shuffle_pack_index)
        self.packs = [self.packs[i] for i in shuffle_pack_index]

        for i, pack in enumerate(self.packs):
            if end_idx <= 0:
                break
            if start_idx < pack['num']:
                # Some datasets store images with an explicit extension.
                if pack['dataset_name'] in ['7M_human_open', '7M_human_vertical']:
                    suffix = '.jpg'
                else:
                    suffix = ''
                self._sync_data(pack)
                try:
                    parquet = pandas.read_parquet(pack['parquet'])
                except Exception as e:
                    # NOTE(review): on failure 'parquet' is undefined below and
                    # the loop raises NameError — confirm intended behavior.
                    print(f'Error: {e}, parquet_path: {pack["parquet"]}')

                for ii in range(max(0, start_idx), min(pack['num'], end_idx)):
                    id_key = 'ID'
                    # cheat
                    # Fold the duplicated index range back onto real parquet rows.
                    ii = ii // pack['duplicate']
                    if pack['balanced'] > 0:
                        # cheat of cheat
                        ii = pack['balanced_list'][ii]

                    # if pack['dataset_name'] in ['Imagenet-1K-ablation']:
                    if pack['dataset_name'] in ['Imagenet-1K-ablation'] or pack['dataset_name'] in ['imagenet_untar']:
                        prompt = random_pick_imagenet_ablation(parquet.iloc[ii], "origin_caption")
                        id_key = 'origin_ID1'
                    else:
                        prompt = self.prompt_fn(parquet.iloc[ii], pack["prompt_keys"])
                    if len(prompt) == 0:
                        continue
                    single_dict = {
                        'img_path': str(parquet.iloc[ii][id_key] + suffix),
                        'prompt': prompt,
                        'cfg': pack['cfg'],
                        't5': pack['t5'],
                        'zip': pack['zip'],
                        'dataset_name': pack['dataset_name'],
                        'parquet': pack["parquet"].split("/")[-1].replace('.parquet', ''),
                    }
                    if self.prompts_randomless:
                        # Keep a pristine copy; _process pops keys from 'prompt'.
                        single_dict['full_prompt'] = deepcopy(single_dict['prompt'])

                    self.paths.append(single_dict)

            # Shift the shard window into the next pack's local index space.
            start_idx -= pack['num']
            end_idx -= pack['num']

        del self.packs
        random.shuffle(self.paths)
        print(f'Base dataloader [rank{self.rank}], length: {len(self.paths)}, Total length: {len(self)}')

    def _read_img(self, dic):
        """Open the sample's archive (cached per path) and load its image.

        Returns whatever ``preprocess_img_func`` returns — for resize_max /
        center_crop that is an ``(is_pass, img)`` tuple.
        """
        if dic['zip'] not in self.zip_fn:
            if dic['zip'].endswith('.tar'):
                self.zip_fn[dic['zip']] = tarfile.open(dic['zip'], 'r')
            else:
                # self.zip_fn[dic['zip']] = SlimZipFile(dic['zip'])
                self.zip_fn[dic['zip']] = zipfile.ZipFile(dic['zip'])

        if dic['zip'].endswith('.tar'):
            f = self.zip_fn[dic['zip']].extractfile(self.zip_fn[dic['zip']].getmember(dic['img_path']))
        else:
            f = self.zip_fn[dic['zip']].open(dic['img_path'], 'r')

        img = np.ascontiguousarray(Image.open(f).convert('RGB')).astype(np.float32)
        return self.preprocess_img_func(img)

    def resize_max(self, img):
        """Downscale so area <= max_size^2, snapping dims to multiples of resize_f.

        Returns (is_pass, img); is_pass is False for extreme aspect ratios.
        """
        h, w = img.shape[:2]
        reduce_factor = max(1, (h * w / self.max_size ** 2) ** 0.5)
        w = ((w // reduce_factor) // self.resize_f) * self.resize_f
        h = ((h // reduce_factor) // self.resize_f) * self.resize_f
        ratio = h / w
        img = cv2.resize(img, (int(w), int(h)), interpolation=cv2.INTER_AREA)

        if ratio > 2 or ratio < 0.5:
            return False, img
        else:
            return True, img

    def center_crop(self, img):
        """Square center-crop then resize to (max_size, max_size).

        Returns (is_pass, img); rejects extreme aspect ratios, and rejects
        upscaled images when max_size <= 1024.
        """
        h, w = img.shape[0:2]
        size = min(h, w)
        ratio = h / w

        h = (h - size) // 2
        w = (w - size) // 2
        img = img[h:h + size, w:w + size, ...]
        img = cv2.resize(img, (self.max_size, self.max_size), interpolation=cv2.INTER_AREA)

        # if (size < self.max_size) or (ratio > 2) or (ratio < 0.5): # will be a bug if use small size images to train, will filter them out
        if (ratio > 2) or (ratio < 0.5):
            return False, img
        elif (size < self.max_size) and (self.max_size<=1024):
            return False, img
        else:
            return True, img

    def _process(self, dic):
        """Load/validate one sample, resampling a random substitute on failure.

        Retries up to 3 times, each time swapping in a random other sample.
        Returns the sample dict with 'image', 'prompt', optional low-res
        variants, and 'debug_info'.
        """
        retry_image = 0
        prompt_key: str = random.choice(list(dic['prompt'].keys()))
        while retry_image <= 3:
            if dic['t5'] is not None and self.load_t5_cache:
                # if dic['t5'] not in self.t5_fn:
                #     self.t5_fn[dic['t5']] = SlimZipFile(dic['t5'])
                # with self.t5_fn[dic['t5']].open(dic['img_path'] + '.pkl', 'r') as f:
                #     t5_tensor = pickle.load(f)['T5_feature']
                retry = 0
                t5_tensor = None
                while retry <= 3:
                    t5_path = os.path.join(dic['t5'], prompt_key + ".zip", dic['img_path'] + '.pkl')
                    try:
                        t5_tensor = torch.load(t5_path, map_location='cpu')
                        filter_max_token_len = self.opt['filter_max_token_len']
                        if t5_tensor.shape[1] <= filter_max_token_len:
                            break
                    except Exception as e:
                        print(f'Error: {e}, t5_path: {t5_path}, retry time {retry}')
                        pass
                    # Failed or too long: resample a different item and retry.
                    index = random.randint(0, len(self.paths) - 1)
                    dic = self.paths[index]
                    retry += 1
            else:
                t5_tensor = None

            try:
                is_pass, img = self._read_img(dic)
            except Exception as e:
                print(f'Error: {e}, img_path: {dic["img_path"]}, zip_path: {dic["zip"]}')
                is_pass = False

            if is_pass:
                break
            else:
                # Image unusable: swap in a random other sample and retry.
                index = random.randint(0, len(self.paths) - 1)
                dic = self.paths[index]
                prompt_key = random.choice(list(dic['prompt'].keys()))
                retry_image += 1

        # generate original and low resolution
        ret = {}
        original_img = img
        for res in self.low_res_list:
            low_img = cv2.resize(original_img, (res, res), interpolation=cv2.INTER_AREA)
            # Normalize to [-1, 1] and convert HWC -> CHW.
            low_img = (torch.from_numpy(low_img.transpose(2, 0, 1)).float() / 127.5) - 1
            low_img = low_img.contiguous()
            ret[f'image_{res}'] = low_img
        img = (torch.from_numpy(img.transpose(2, 0, 1)).float() / 127.5) - 1
        img = img.contiguous()

        if self.prompts_randomless:
            # Consume each prompt key once per cycle; refill from full_prompt
            # when exhausted.
            prompt = dic["prompt"].pop(prompt_key)
            if len(dic["prompt"].keys()) == 0:
                dic["prompt"] = deepcopy(dic['full_prompt'])
        else:
            prompt = dic["prompt"][prompt_key]
        ret = ret | {
            'image': img,
            'prompt': prompt,
            #'cfg': dic['cfg'],
            #'t5': t5_tensor,
            #'t5_path': None if dic['t5'] is None else os.path.join(dic['t5'], prompt_key + ".zip", dic['img_path'] + '.pkl'),
            'debug_info': [dic['dataset_name'], dic["parquet"], dic["img_path"], prompt_key]
        }

        self.checker.save(ret, dic)
        return ret

    def get_index_from_accelerate(self, index):
        """Undo accelerate's rank-interleaved index layout (currently unused)."""
        index -= self.batch_size * self.rank
        r = index % (self.replicas * self.batch_size)
        d = index // (self.replicas * self.batch_size)
        index = d * self.batch_size + r
        return index

    def get_warmup_index_from_accelerate(self, index):
        """Warmup variant of the accelerate index mapping (currently unused)."""
        index = index // self.replicas
        return index

    def __getitem__(self, ori_index):
        # if self.enable_data_warmup:
        #     index = self.get_warmup_index_from_accelerate(ori_index)
        # else:
        #     index = self.get_index_from_accelerate(ori_index)
        index = ori_index
        # Wrap around: __len__ (enlarged/ceil'd) may exceed len(self.paths).
        index = index % len(self.paths)
        dic = self.paths[index]
        data_dic = self._process(dic)
        # print(f'Base dataloader [rank{self.rank}], ori_index: {ori_index}, index: {index}, prompt: {data_dic["prompt"]}')
        return data_dic

    def __len__(self):
        # Per-rank length, optionally enlarged so epochs last longer.
        return self.num_sample_per_rank * self.enlarge_ratio
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def build_t2i_trainloader(data_config, rank, world_size):
    """Build the distributed text-to-image training DataLoader.

    Injects rank/world_size into ``data_config`` (mutated in place), builds a
    ``Base`` dataset sharded to this rank, and wraps it in a DataLoader.
    Shuffling is disabled here because ``Base`` shuffles its own sample list.
    """
    data_config['rank'] = rank
    data_config['world_size'] = world_size
    dataset = Base(data_config)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=data_config['batch_size'],
        shuffle=False,
        num_workers=data_config['num_workers'],
        pin_memory=True,
        drop_last=True,
        prefetch_factor=16,
        # collate_fn=collate_list,
        persistent_workers=True,
    )
|
| 456 |
+
|
| 457 |
+
|
| 458 |
+
if __name__ == "__main__":
    # Smoke test: build the dataset as a single rank using a debug config.
    with open('./configs/mimo/selftok/sd3_v1227_test/512-debug-newdata.yml', 'r') as file:
        config = yaml.safe_load(file)
    config = config['data']
    config['rank'] = 0
    config['world_size'] = 1
    train_dataset = Base(config)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/transforms.py
ADDED
|
@@ -0,0 +1,604 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import cv2
|
| 3 |
+
import os
|
| 4 |
+
import re
|
| 5 |
+
import torch
|
| 6 |
+
import pickle
|
| 7 |
+
import random
|
| 8 |
+
import numpy as np
|
| 9 |
+
import pandas as pd
|
| 10 |
+
import torch.distributed as dist
|
| 11 |
+
from PIL import Image
|
| 12 |
+
from torchvision import transforms
|
| 13 |
+
from torchvision.transforms import functional as F
|
| 14 |
+
from mimogpt.engine.utils import mox_copy
|
| 15 |
+
|
| 16 |
+
class RandomResize(transforms.Resize):
    """Resize with a randomized target size.

    With probability ``1 - ratio`` behaves exactly like ``transforms.Resize``
    (fixed ``size``); otherwise the target is interpolated between ``size``
    and the image's shorter side (clamped so it never drops below ``size``),
    exposing training to a range of scales.
    """
    def __init__(self, size, interpolation=F.InterpolationMode.BILINEAR, max_size=None, antialias="warn", ratio=0.5):
        # ratio: probability of drawing a randomized resize target instead
        # of the fixed one (see forward()).
        super().__init__(size, interpolation, max_size, antialias)
        self.ratio = ratio

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be scaled.

        Returns:
            PIL Image or Tensor: Rescaled image.
        """
        if isinstance(img, torch.Tensor):
            h, w = img.shape[1:]
        else:  # PIL Image
            w, h = img.size
        r = torch.rand(1)
        if r > self.ratio:
            # Fixed-size branch: plain transforms.Resize behaviour.
            return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
        else:
            # Rescale r into [0, 1), then blend between self.size and the
            # shorter image side; clamp so the target never goes below
            # self.size.
            r = (r*1./self.ratio)[0].item()
            res = r * self.size + (1-r) * min(w,h)
            res = int(max(res, self.size))
            return F.resize(img, res, self.interpolation, self.max_size, self.antialias)
|
| 46 |
+
|
| 47 |
+
def set_visual_transforms(
    mode,
    input_res=224,
    resize_res=256,
    norm_mean=(0.48145466, 0.4578275, 0.40821073),
    norm_std=(0.26862954, 0.26130258, 0.27577711),
    augment_choice="vlip",
):
    """Build the torchvision preprocessing pipeline for one augmentation recipe.

    Args:
        mode: "train" or one of "eval"/"val"/"test".
        input_res: final (cropped) square resolution fed to the model.
        resize_res: intermediate resize target before cropping.
        norm_mean, norm_std: channel statistics; used only by the "vlip"
            recipe -- every other recipe maps pixels to [-1, 1] via
            NormalizeToTensor instead.
        augment_choice: recipe name (see the branches below).

    Returns:
        transforms.Compose: the assembled pipeline.

    Raises:
        NotImplementedError: unknown augment_choice, or a mode the chosen
        recipe does not support.
    """
    if augment_choice == "vlip":
        # Mean/std normalisation (CLIP-style statistics); random crop when
        # training, center crop for evaluation.
        normalize = transforms.Normalize(mean=norm_mean, std=norm_std)
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res),
                    transforms.RandomCrop(input_res),
                    transforms.ToTensor(),
                    normalize,
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(input_res),
                    transforms.CenterCrop(input_res),
                    transforms.ToTensor(),
                    normalize,
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "clip":
        # Square resize (aspect ratio NOT preserved), [-1, 1] range;
        # train and eval pipelines are identical.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize((resize_res, resize_res), interpolation=transforms.InterpolationMode.BICUBIC),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize((resize_res, resize_res), interpolation=transforms.InterpolationMode.BICUBIC),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "mimo":
        # Aspect-preserving resize + center crop in both modes; training
        # resizes to resize_res first, eval resizes straight to input_res.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(input_res),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError
    elif augment_choice == "selftok":
        # Like "mimo" but with a random crop during training.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    transforms.RandomCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(input_res),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "prior":
        # NOTE(review): train and eval branches are currently identical.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "round":
        # NOTE(review): this recipe ignores `mode` entirely -- the same
        # random-resized-crop pipeline is returned for train and eval.
        preprocess = transforms.Compose(
            [
                transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                transforms.RandomResizedCrop(
                    size=input_res,
                    scale=((input_res / resize_res) ** 2, 0.618),
                    interpolation=transforms.InterpolationMode.BICUBIC,
                ),
                NormalizeToTensor(),
            ]
        )

    elif augment_choice == "noCrop":
        # Resize only, no cropping; train and eval identical.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "simple":
        # No geometric transform at all: only [-1, 1] normalisation.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "V06":
        # Same behaviour as "simple" (kept as a separate name for configs).
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "sr":
        # Super-resolution pipeline: training only.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res, interpolation=transforms.InterpolationMode.BICUBIC),
                    transforms.RandomCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    elif augment_choice == "mimo_interleaved":
        # Like "mimo" but the training resize uses the default (bilinear)
        # interpolation.
        if mode == "train":
            preprocess = transforms.Compose(
                [
                    transforms.Resize(resize_res),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        elif mode in ["eval", "val", "test"]:
            preprocess = transforms.Compose(
                [
                    transforms.Resize(input_res),
                    transforms.CenterCrop(input_res),
                    NormalizeToTensor(),
                ]
            )
        else:
            raise NotImplementedError

    else:
        raise NotImplementedError

    return preprocess
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def text_transform(caption):
    """Collapse every run of '#' characters in *caption* into one space."""
    return re.sub(r"[#]+", " ", caption)
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
class CaptionDropout(object):
    """Randomly replace a caption with the empty string.

    Used for caption dropout during training (classifier-free-guidance
    style); ``rate`` is the probability of dropping the caption.
    """

    def __init__(self, rate):
        assert isinstance(rate, float)
        self.rate = rate

    def __call__(self, caption):
        dropped = random.random() < self.rate
        return "" if dropped else caption
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class NormalizeToTensor(object):
    """Map a PIL image / ndarray with values in [0, 255] to a float32
    tensor in [-1, 1].

    With ``reshape=True`` (default) the output is channel-first (CHW); a
    grayscale input gains a singleton channel axis.
    """

    def __init__(self, reshape=True):
        self.reshape = reshape

    def __call__(self, image):
        arr = np.array(image).astype(np.float32)
        arr = (arr / 127.5 - 1.0).astype(np.float32)
        if self.reshape:
            # HW[C] -> HWC (singleton channel if needed) -> CHW.
            arr = arr.reshape(arr.shape[0], arr.shape[1], -1)
            arr = arr.transpose((2, 0, 1))
        return torch.from_numpy(arr)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def get_zip_idx(split_range, train_task="mimo"):
    """Return the list of zip shard indices assigned to the current rank.

    Args:
        split_range: (min_idx, max_idx) half-open range of shard indices.
        train_task: task name; "pangu" keeps the whole node's shard list on
            every card instead of splitting it across the node's 8 cards.

    Returns:
        list[int]: shard indices this rank should read.
    """
    zip_min_split, zip_max_split = split_range
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    try:
        # Importing moxing succeeds only when training in the cloud; the
        # import is used purely as an environment probe.
        import moxing as mox  # noqa: F401

        # This rule must stay in sync with
        # multi_modality/common/cloud_copy.py#L91: shards are first striped
        # across nodes (8 ranks per node)...
        this_node_all_idx = list(range(zip_min_split, zip_max_split))[(rank // 8) :: max(world_size // 8, 1)]
        if train_task == "pangu":
            this_card_all_idx = this_node_all_idx
        else:
            if world_size > zip_max_split - zip_min_split:
                # Fewer shards than ranks: each card reads the node's full
                # shard list.
                this_card_all_idx = this_node_all_idx
            else:
                # ...then striped across the 8 cards of the node.
                this_card_all_idx = this_node_all_idx[(rank % 8) :: 8]
    # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and masked real errors in the cloud branch; only a failed moxing
    # import should select the local code path.
    except ImportError:
        # Local training: contiguous, even split of shards across ranks.
        zip_per_rank = (zip_max_split - zip_min_split) // world_size
        this_card_all_idx = list(range(rank * zip_per_rank + zip_min_split, (rank + 1) * zip_per_rank + zip_min_split))

    return this_card_all_idx
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def make_zip_dataset(data_list, this_card_all_idx, col_used, index_format="pkl"):
    """Load and concatenate the index files of this rank's shards.

    Args:
        data_list: path template with a ``{}`` placeholder for the shard
            index; s3 paths are first copied to /cache/.
        this_card_all_idx: shard indices assigned to this rank.
        col_used: columns to keep; rows with an empty value in any of these
            columns are dropped.
        index_format: "pkl"/"pickle" (legacy list-of-rows pickle),
            "pandas" (pickled DataFrame) or "parquet".

    Returns:
        pd.DataFrame with ``col_used`` plus a ``zip_idx`` column giving the
        position of the source shard within this rank's shard list.
    """
    def preprocess(pkl_path):
        if pkl_path.startswith("s3://"):
            # We prevent using memarts here in main process, it will cause
            # segment fault in dataloader process, so here we copy pkl
            # files into /cache/.
            data_item_local = pkl_path.replace("s3://", "/cache/")
            if not os.path.exists(data_item_local):
                print(f"mox_copy({pkl_path}, {data_item_local})")
                mox_copy(pkl_path, data_item_local)
        else:
            data_item_local = pkl_path
        return data_item_local

    # hard code to load old pkl: legacy index pickles are plain lists whose
    # column layout is inferred from the row width.
    def _load_pickle(data_item):
        list_ = pd.read_pickle(data_item)
        columns = len(list_[0])
        if columns == 2:
            df = pd.DataFrame(list_, columns=["ID", "caption_en"])
        elif columns == 3:  # MTI_ori_split_en_zh_pkl
            df = pd.DataFrame(list_, columns=["ID", "caption_en", "_"])
        elif columns == 4:  # MJ
            df = pd.DataFrame(list_, columns=["ID", "caption_zh", "_", "caption_en"])
        elif columns >= 5:  # 0527_super
            col = ["_"] * columns
            col[0], col[1] = "ID", "caption_en"
            df = pd.DataFrame(list_, columns=col)
        else:
            raise NotImplementedError

        df = df.loc[:, col_used]
        return df

    def _load_pandas(data_item):
        # Pickled DataFrame format.
        df = pd.read_pickle(data_item)
        df = df.loc[:, col_used]
        return df

    def _load_parquet(data_item):
        df = pd.read_parquet(data_item)
        df = df.loc[:, col_used]
        return df

    def isvalid(pd_val):
        # A cell is valid when it is a non-empty container, or a scalar
        # that is neither "#" nor "".
        if isinstance(pd_val, (dict, list, tuple)):
            if len(pd_val) > 0:
                return True
            else:
                return False
        else:
            if pd_val not in ("#", ""):
                return True
            else:
                return False

    if index_format in ["pkl", "pickle"]:
        load_func = _load_pickle
    elif index_format == "pandas":
        load_func = _load_pandas
    elif index_format == "parquet":
        load_func = _load_parquet
    else:
        raise NotImplementedError

    dataframes = []
    data_list = [preprocess(data_list.format(zip_idx)) for zip_idx in this_card_all_idx]
    print(f"Reading {data_list}...")

    for zip_idx, sub_data_list in enumerate(data_list):
        try:
            sub_df = load_func(sub_data_list)
        except Exception as e:
            print(f"ERROR opening pkl:{sub_data_list} !!!!")
            raise e
        for col in col_used:  # filter rows with empty value in col_used
            sub_df = sub_df[sub_df[col].map(isvalid)]
        # zip_idx here is the position within this rank's shard list, not
        # the global shard number.
        sub_df.insert(sub_df.shape[1], "zip_idx", zip_idx, allow_duplicates=False)
        dataframes.append(sub_df)

    print(f"Read {data_list} Finished!")
    dataframe = pd.concat(dataframes, ignore_index=True)
    print("total data num before filtering: {}, {}, {}".format(len(dataframe), data_list, col_used))
    return dataframe
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def make_zip_fns(data_list, this_card_all_idx):
    """Derive the zip-archive path for every shard index.

    *data_list* is a format template ending in .pkl/.parquet/.zip; the
    extension is normalised to .zip and each shard index substituted.
    """
    zip_files = []
    for shard_idx in this_card_all_idx:
        if data_list.endswith("pkl"):
            template = data_list.replace(".pkl", ".zip")
        elif data_list.endswith("parquet"):
            template = data_list.replace(".parquet", ".zip")
        elif data_list.endswith("zip"):
            template = data_list
        else:
            raise NotImplementedError
        zip_files.append(template.format(shard_idx))
    return zip_files
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
def canny(original_img):
    """Random-threshold Canny edge map, binarised and stacked to 3 channels."""
    blurred = cv2.GaussianBlur(original_img, (9, 9), 0)
    low = random.randint(50, 125)
    high = low + random.randint(50, 100)
    edges = cv2.Canny(blurred, low, high)
    # Binarise to {0, 255}.
    edges[edges > 127] = 255
    edges[edges <= 127] = 0
    return np.stack((edges,) * 3, axis=-1)
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def shapen_USM(I):
    """Unsharp-mask sharpening: O = I + a * (I - GaussianBlur(I)), clipped
    to [0, 255] and returned as uint8."""
    sigma = 3
    amount = 1.5
    blurred = cv2.GaussianBlur(I, (0, 0), sigma).astype(np.int32)
    high_freq = I.astype(np.int32) - blurred
    sharpened = I.astype(np.int32) + amount * high_freq
    sharpened[sharpened > 255] = 255
    sharpened[sharpened < 0] = 0
    return sharpened.astype(np.uint8)
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def xDoG(original_img, kernel_size=0, sigma=1.4, k_sigma=1.6, epsilon=0, phi=10, gamma=0.98, aug=True):
    """eXtended Difference-of-Gaussians sketch filter.

    Produces a binarised (0/255) 3-channel edge image from a grayscale
    input via DoG thresholding, denoising and unsharp-mask sharpening.

    Args:
        original_img: grayscale image (ndarray).
        kernel_size, sigma, k_sigma: Gaussian blur parameters; the second
            blur uses sigma * k_sigma.
        epsilon, phi, gamma: XDoG soft-threshold parameters.
        aug: kept for interface compatibility (see note below).
    """
    # XDOG filter
    epsilon /= 255
    g1 = cv2.GaussianBlur(original_img, (kernel_size, kernel_size), sigma)
    g2 = cv2.GaussianBlur(original_img, (kernel_size, kernel_size), sigma * k_sigma)
    dog = g1 - gamma * g2
    dog /= dog.max()
    xdog = 1 + np.tanh(phi * (dog - epsilon))
    xdog[xdog >= 1] = 1
    xdog = 255 - xdog.astype("uint8") * 255
    xdog = cv2.fastNlMeansDenoising(xdog, None, 80, 15, 21)
    # BUG FIX: both branches of the old `if aug and random.uniform(...)`
    # applied the same sharpening, so the conditional was dead code;
    # sharpen unconditionally. (The aug path no longer consumes a random
    # draw; `aug` is kept in the signature for callers.)
    xdog = shapen_USM(xdog)
    # Binarise to {0, 255}.
    xdog[xdog > 127] = 255
    xdog[xdog <= 127] = 0
    stacked_img = np.stack((xdog,) * 3, axis=-1)
    return stacked_img
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def generate_sketch(pil_image, aug=True):
    """Convert a PIL image into a 3-channel sketch (Canny or xDoG edges).

    With aug=True the edge detector is picked at random; otherwise a
    deterministic (non-augmented) xDoG is used.
    """
    gray = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2GRAY)
    if not aug:
        edge = xDoG(gray, aug=False)
    elif random.uniform(0, 1) > 0.5:
        edge = canny(gray)
    else:
        edge = xDoG(gray)
    return Image.fromarray(cv2.cvtColor(edge, cv2.COLOR_BGR2RGB))
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def choice_box(image, grit):
    """Pick one random grounding element from *grit*; return the image
    together with that element's box and English caption."""
    picked = random.choice(grit)
    return image, picked.get("box"), picked.get("caption_en")
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
def get_multi_res_info_(w, h, size=512):
    """Snap an aspect ratio onto the nearest supported resolution bucket.

    general ratio | aspect ratio | vq token numbers
    1 : 2         |   16 : 32    |   512
    9 : 16        |   18 : 32    |   576
    3 : 4         |   28 : 20    |   560
    1 : 1         |   24 : 24    |   576
    4 : 3         |   20 : 28    |   560
    16 : 9        |   32 : 18    |   576
    2 : 1         |   32 : 16    |   512

    Args:
        w, h: original image width / height.
        size: base resolution family; only 512 and 736 are supported.

    Returns:
        (ratio, bucket_w, bucket_h) of the chosen bucket.

    Raises:
        ValueError: if *size* is unsupported (previously this crashed with
        a confusing UnboundLocalError).
    """
    if size == 512:
        buckets = [
            (16.0 / 32, 256, 512),  # vq tokens = 512
            (18.0 / 32, 288, 512),  # vq tokens = 576
            (20.0 / 28, 320, 448),  # vq tokens = 560
            (24.0 / 24, 384, 384),  # vq tokens = 576
            (28.0 / 20, 448, 320),  # vq tokens = 560
            (32.0 / 18, 512, 288),  # vq tokens = 576
            (32.0 / 16, 512, 256),  # vq tokens = 512
        ]
    elif size == 736:
        buckets = [
            (22.0 / 46, 352, 736),  # vq tokens = 1012
            (24.0 / 42, 384, 672),  # vq tokens = 1008
            (28.0 / 36, 448, 576),  # vq tokens = 1008
            (36.0 / 36, 512, 512),  # vq tokens = 1024
            # NOTE(review): 28/20 looks inconsistent with the 576x448
            # bucket (36/28); value preserved from the original code.
            (28.0 / 20, 576, 448),  # vq tokens = 1008
            (42.0 / 24, 672, 384),  # vq tokens = 1008
            (46.0 / 22, 736, 352),  # vq tokens = 1012
        ]
    else:
        raise ValueError(f"Unsupported size {size}; expected 512 or 736")

    aspect_ratio = w * 1.0 / h
    # Pick the bucket whose ratio interval (bounded by the midpoints of
    # neighbouring bucket ratios) contains the aspect ratio.
    for i, (ratio, bucket_w, bucket_h) in enumerate(buckets[:-1]):
        if aspect_ratio < (ratio + buckets[i + 1][0]) / 2:
            return ratio, bucket_w, bucket_h
    return buckets[-1]
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
def get_multi_res_info(w, h, size=384):
    """Snap an aspect ratio onto the nearest 384-family resolution bucket.

    general ratio | aspect ratio | vq token numbers
    1 : 2         |   11 : 22    |   242
    9 : 16        |   12 : 21    |   252
                  |   13 : 19    |   247
    3 : 4         |   14 : 18    |   252
                  |   15 : 17    |   255
    1 : 1         |   16 : 16    |   256
                  |   17 : 15    |   255
    4 : 3         |   18 : 14    |   252
                  |   19 : 13    |   247
    16 : 9        |   21 : 12    |   252
    2 : 1         |   22 : 11    |   242

    Sizes other than 384 are delegated to get_multi_res_info_.
    Returns (ratio, bucket_w, bucket_h).
    """
    if size != 384:
        return get_multi_res_info_(w, h, size)

    buckets = [
        (11.0 / 22, 176, 352),  # vq tokens = 242
        (12.0 / 21, 192, 336),  # vq tokens = 252
        (13.0 / 19, 208, 304),  # vq tokens = 247
        (14.0 / 18, 224, 288),  # vq tokens = 252
        (15.0 / 17, 240, 272),  # vq tokens = 255
        (16.0 / 16, 256, 256),  # vq tokens = 256
        (17.0 / 15, 272, 240),  # vq tokens = 255
        (18.0 / 14, 288, 224),  # vq tokens = 252
        (19.0 / 13, 304, 208),  # vq tokens = 247
        (21.0 / 12, 336, 192),  # vq tokens = 252
        (22.0 / 11, 352, 176),  # vq tokens = 242
    ]
    aspect_ratio = w * 1.0 / h
    # Pick the bucket whose ratio interval (bounded by midpoints of
    # neighbouring bucket ratios) contains the aspect ratio.
    for i in range(len(buckets) - 1):
        if aspect_ratio < (buckets[i][0] + buckets[i + 1][0]) / 2:
            return buckets[i]
    return buckets[-1]
|
| 584 |
+
|
| 585 |
+
|
| 586 |
+
def get_multi_res_infer(w, h):
    """Return the (w_tokens, h_tokens) bucket whose aspect ratio is closest
    to w / h.

    general ratio | aspect ratio | vq token numbers
    1 : 2         |   11 : 22    |   242
    9 : 16        |   12 : 21    |   252
    3 : 4         |   14 : 18    |   252
    1 : 1         |   16 : 16    |   256
    4 : 3         |   18 : 14    |   252
    16 : 9        |   21 : 12    |   252
    2 : 1         |   22 : 11    |   242
    """
    buckets = [(11, 22), (12, 21), (14, 18), (16, 16), (18, 14), (21, 12), (22, 11)]
    target = w / h
    # First bucket with the minimal absolute ratio difference (ties resolve
    # to the earlier bucket, matching np.argmin semantics).
    best = min(range(len(buckets)), key=lambda i: abs(buckets[i][0] / buckets[i][1] - target))
    return buckets[best]
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/__init__.py
ADDED
|
File without changes
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/__version__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
__version__ = '2.0'
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/checker.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pickle
|
| 3 |
+
import random
|
| 4 |
+
|
| 5 |
+
import cv2
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
|
| 11 |
+
from mimogpt.datasets.utils.log import mimo_logger
|
| 12 |
+
|
| 13 |
+
class Checker:
    """Numerical-divergence debugging helper.

    In 'record' mode it saves values to disk as per-(key, iteration, rank)
    anchors; in 'check' mode it compares the current run's values against
    those anchors and prints the difference; 'load_check' additionally
    substitutes the anchor value for keys listed in ``load_keys``, forcing
    this run onto the recorded data.

    Config comes from ``opt['checker']``: status ('disable' | 'record' |
    'check' | 'load_check'), anchor_path, check_iter (only iterations below
    this are processed), save_check, master_only; plus ``opt['rank']``.
    """

    def __init__(self, opt):
        self.status = opt['checker']['status']
        self.anchor_path = opt['checker']['anchor_path']
        self.check_iter = opt['checker']['check_iter']
        self.save_check = opt['checker']['save_check']
        self.rank = opt['rank']
        self.master_only = opt['checker']['master_only']

        if (self.rank != 0 and self.master_only) or self.status == 'disable':
            # No-op: pass the value straight through.
            self._func = lambda value, key, cur_iter: value
        elif self.status == 'record':
            self._func = self._record
            os.makedirs(self.anchor_path, exist_ok=True)
            print(f'[Checker] [rank{self.rank}] init record')
        elif self.status == 'check':
            self._func = self._check
            print(f'[Checker] [rank{self.rank}] init check')
        elif self.status == 'load_check':
            self._func = self._load_check
            self.load_keys = opt['checker']['load_keys']
            print(f'[Checker] [rank{self.rank}] init load_check')
        else:
            # BUG FIX: was a bare `raise` with no active exception, which
            # itself raises a confusing "No active exception" RuntimeError.
            raise NotImplementedError(f'Not implemented status: {self.status}')

    def _anchor_file(self, key, cur_iter):
        """Path of the anchor file for (key, iteration) on this rank."""
        return os.path.join(self.anchor_path, key) + f'_{cur_iter}_{self.rank}.pth'

    def _report_diff(self, anchor_value, value, key, anchor_file):
        """Print the difference between an anchor and the current value
        (mean abs diff for tensors / tensor lists, equality otherwise)."""
        if isinstance(anchor_value, torch.Tensor):
            print(f'[Checker] [rank{self.rank}] anchor: {key}, anchor_file: {anchor_file}, diff: {(anchor_value - value).abs().float().mean()}')
        elif isinstance(anchor_value, list) and isinstance(anchor_value[0], torch.Tensor):
            diff = 0
            for i in range(len(anchor_value)):
                diff += (anchor_value[i] - value[i]).abs().float().mean()
            print(f'[Checker] [rank{self.rank}] anchor: {key}, anchor_file: {anchor_file}, diff: {diff / len(anchor_value)}')
        else:
            print(f'[Checker] [rank{self.rank}] anchor: {key}, anchor_file: {anchor_file}, same: {anchor_value == value}')

    def _record(self, value, key, cur_iter):
        """Save *value* as the anchor for (key, cur_iter); return it unchanged."""
        anchor_file = self._anchor_file(key, cur_iter)
        torch.save(value, anchor_file)
        print(f'[Checker] [rank{self.rank}] save anchor: {key}, anchor_file: {anchor_file}')
        return value

    def _check(self, value, key, cur_iter):
        """Compare *value* against its recorded anchor; return *value*."""
        anchor_file = self._anchor_file(key, cur_iter)
        if self.save_check:
            check_file = os.path.join(self.anchor_path, key) + f'_{cur_iter}_{self.rank}_check.pth'
            torch.save(value, check_file)
        anchor_value = torch.load(anchor_file)
        if isinstance(anchor_value, torch.Tensor):
            # Move the anchor onto the live value's device before diffing.
            anchor_value = anchor_value.to(value.device)
        self._report_diff(anchor_value, value, key, anchor_file)
        return value

    def _load_check(self, value, key, cur_iter):
        """Like _check, but for keys listed in self.load_keys return the
        anchor value itself instead of the live value."""
        anchor_file = self._anchor_file(key, cur_iter)
        if self.save_check:
            check_file = os.path.join(self.anchor_path, key) + f'_{cur_iter}_{self.rank}_check.pth'
            torch.save(value, check_file)
        anchor_value = torch.load(anchor_file)
        if isinstance(anchor_value, torch.Tensor):
            anchor_value = anchor_value.to(value.device)
        elif isinstance(anchor_value, list) and isinstance(anchor_value[0], torch.Tensor):
            for i in range(len(anchor_value)):
                anchor_value[i] = anchor_value[i].to(value[0].device)
        if key in self.load_keys:
            return anchor_value
        self._report_diff(anchor_value, value, key, anchor_file)
        return value

    def check_point(self, value, key, cur_iter):
        """Entry point: apply the configured action while cur_iter is below
        check_iter, otherwise pass the value through untouched."""
        if cur_iter < self.check_iter:
            return self._func(value, key, cur_iter)
        else:
            return value
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class DataChecker:
    """Randomly dumps a bounded number of (image, prompt) training samples to
    disk so a human can spot-check the data pipeline.

    Behavior is selected at construction time via ``opt['data_checker']['status']``:
    'disable' (or non-master rank with master_only) → no-op; 'save' → sample
    with probability ``save_prob`` until ``save_amount`` dumps have been written.
    """

    def __init__(self, opt):
        # self.logger = mimo_logger('skip')

        self.status = opt['data_checker']['status']

        self.save_prob = opt['data_checker']['save_prob']      # per-sample dump probability
        self.save_amount = opt['data_checker']['save_amount']  # max number of samples to dump
        self.save_path = opt['data_checker']['save_path']
        self.rank = opt['rank']
        # NOTE(review): reads opt['checker'], not opt['data_checker'] — looks like
        # a setting shared with the Checker class, but verify against the config schema.
        self.master_only = opt['checker']['master_only']

        if (self.rank != 0 and self.master_only) or self.status == 'disable':
            self._func = self._pass
        elif self.status == 'save':
            self._func = self._save
            self.count = 0
            os.makedirs(self.save_path, exist_ok=True)
            print(f'[DataChecker] [rank{self.rank}] init record')
        else:
            # Was a bare `raise` (which surfaces as "RuntimeError: No active
            # exception to re-raise"); raise an explicit error naming the bad
            # status instead, matching the commented-out logger message.
            raise NotImplementedError(f'Not implemented status: {self.status}')

    def _pass(self, ret, dic):
        """No-op variant used when checking is disabled or on non-master ranks."""
        pass

    def _save(self, ret, dic):
        """Dump ret['image'] / ret['prompt'] with probability `save_prob`.

        Stops permanently once `save_amount` samples have been written by
        swapping itself out for a no-op.
        """
        if self.count >= self.save_amount:
            # Budget exhausted: later calls cost nothing.
            self._func = lambda x, y: x
            return
        if random.random() < self.save_prob:
            # [-1, 1] float CHW tensor -> [0, 255] HWC, RGB->BGR for cv2.
            image = (ret['image'] + 1) * 127.5
            image = image.permute(1, 2, 0).numpy()[:, :, ::-1]
            cv2.imwrite(os.path.join(self.save_path, os.path.basename(dic['zip']) + '_' + dic['img_path'] + '.png'), image)
            with open(os.path.join(self.save_path, os.path.basename(dic['zip']) + '_' + dic['img_path'] + '.txt'), 'w') as f:
                f.write(ret['prompt'])
            self.count += 1

    def save(self, ret, dic):
        """Public entry point; dispatches to the status-selected implementation."""
        self._func(ret, dic)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/dist.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datetime
|
| 2 |
+
import functools
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
import torch.distributed as dist
|
| 6 |
+
import torch.multiprocessing as mp
|
| 7 |
+
|
| 8 |
+
def master_only(func):
    """Decorator: execute `func` only on the rank-0 process.

    On every other rank the wrapped call is a no-op returning None.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        _, rank, _ = get_dist_info()
        if rank == 0:
            return func(*args, **kwargs)
    return wrapper


def get_dist_info():
    """Return (local_rank, rank, world_size).

    Falls back to (0, 0, 1) when torch.distributed is unavailable or the
    process group has not been initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0, 0, 1
    # NOTE: dist.local_rank is stashed on the module by _init_dist_pytorch;
    # it is not a stock torch.distributed attribute.
    return dist.local_rank, dist.get_rank(), dist.get_world_size()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _init_dist_pytorch(backend, **dist_params):
    """Initialize torch.distributed for the 'pytorch' launcher.

    Expects dist_params to carry 'local_rank', 'rank', 'world_size' and
    'init_method'. Side effects: pins the current CUDA device, initializes
    the process group (30-minute timeout), and stores local_rank on the
    torch.distributed module so get_dist_info() can report it later.
    """
    # NCCL is not fork-safe: force the 'spawn' start method if none is set yet.
    if backend == "nccl" and mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    torch.cuda.set_device(dist_params["local_rank"])
    dist.init_process_group(
        backend=backend,
        init_method=dist_params["init_method"],
        timeout=datetime.timedelta(hours=0.5),
        world_size=dist_params["world_size"],
        rank=dist_params["rank"],
    )
    # Stash local_rank on the module; get_dist_info() reads it back.
    dist.local_rank = dist_params["local_rank"]
    print(
        "init dist: init_method=%s, rank=%s, local_rank=%s, world_size=%s"
        % (dist_params['init_method'], dist_params['rank'], dist_params["local_rank"], dist_params['world_size'])
    )
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def init_dist(launcher, **dist_params):
    """Entry point for distributed initialization.

    Only the 'pytorch' launcher is supported; anything else raises ValueError.
    """
    if launcher != "pytorch":
        raise ValueError(f'init_dist({launcher}, dist_params={dist_params})')
    _init_dist_pytorch(**dist_params)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/log.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import logging
|
| 3 |
+
|
| 4 |
+
from .dist import get_dist_info
|
| 5 |
+
from .__version__ import __version__
|
| 6 |
+
|
| 7 |
+
initialized_logger = {}
|
| 8 |
+
|
| 9 |
+
def get_root_logger(logger_name, log_level=logging.INFO, log_file=None):
    """Get the root logger.

    The logger will be initialized if it has not been initialized. By default a
    StreamHandler will be added. If `log_file` is specified, a FileHandler will
    also be added (rank 0 only).

    Args:
        logger_name (str): root logger name
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.
        log_level (int): The root logger level. Note that only the process of
            rank 0 is affected, while other processes will set the level to
            "Error" and be silent most of the time.

    Returns:
        logging.Logger: The root logger.
    """
    logger = logging.getLogger(logger_name)
    # If the logger has been initialized, just return it.
    if logger_name in initialized_logger:
        return logger
    format_str = '%(asctime)s - %(name)s [%(levelname)s]: %(message)s'
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format_str))
    logger.addHandler(stream_handler)
    # Don't double-log through ancestor loggers.
    logger.propagate = False
    _, rank, _ = get_dist_info()
    # Fix: the original called logger.setLevel(log_level) unconditionally here
    # and then immediately overwrote it inside the branch; the redundant first
    # call is removed.
    if rank != 0:
        # Non-master ranks stay quiet except for errors.
        logger.setLevel('ERROR')
    else:
        logger.setLevel(log_level)
        if log_file is not None:
            # add file handler
            file_handler = logging.FileHandler(log_file, 'w')
            file_handler.setFormatter(logging.Formatter(format_str))
            file_handler.setLevel(log_level)
            logger.addHandler(file_handler)
    initialized_logger[logger_name] = True
    return logger
|
| 50 |
+
|
| 51 |
+
# Map config-level strings to stdlib logging levels; 'skip' means "emit nothing".
loglevel_map = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warn': logging.WARN,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
    'skip': None,
}

def mimo_logger(log_level, log_file=None):
    """Return the project-wide logger configured at the named level.

    Args:
        log_level (str): one of loglevel_map's keys ('debug', ..., 'skip').
        log_file (str | None): optional file to also log to (rank 0 only).

    Returns:
        logging.Logger
    """
    log_level = loglevel_map[log_level]
    if log_level is None:
        # Fix: 'skip' used to pass None through to Logger.setLevel(), which
        # raises TypeError. Use a level above CRITICAL so nothing is emitted.
        log_level = logging.CRITICAL + 1
    logger = get_root_logger("Flame2_" + __version__, log_level=log_level, log_file=log_file)
    return logger
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/datasets/utils/ordered_yaml.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
from collections import OrderedDict
|
| 3 |
+
|
| 4 |
+
def ordered_yaml():
    """Support OrderedDict for yaml.

    Returns:
        yaml Loader and Dumper.
    """
    # Prefer the C-accelerated classes when libyaml is available.
    try:
        from yaml import CDumper as Dumper
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Dumper, Loader

    mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG

    def represent_ordered_dict(dumper, data):
        # Dump an OrderedDict as a plain mapping, preserving key order.
        return dumper.represent_dict(data.items())

    def construct_ordered_dict(loader, node):
        # Load every mapping node into an OrderedDict.
        return OrderedDict(loader.construct_pairs(node))

    Dumper.add_representer(OrderedDict, represent_ordered_dict)
    Loader.add_constructor(mapping_tag, construct_ordered_dict)
    return Loader, Dumper
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from mimogpt.engine.utils import *
|
| 5 |
+
from .trainer_selftok_enc import TrainerSelftokEnc
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
DEVICE_TYPE = os.environ.get("DEVICE_TYPE", "gpu")
|
| 9 |
+
|
| 10 |
+
def setup_task(cfg, is_root):
    """Build the trainer and its hook list for the configured task.

    Only the 'selftokenc' task is supported. Profiling hooks are prepended
    when cfg.common.use_profile is set, picked per DEVICE_TYPE.
    """
    if cfg.common.task != 'selftokenc':
        raise NotImplementedError
    trainer = TrainerSelftokEnc(cfg)
    hook_list = [
        SelfTokHook(cfg),
        SelfTokSaveHook(cfg, is_root=is_root),
    ]
    if cfg.common.val_interval > 0:
        hook_list.append(EvalSelftokHook(cfg))

    cfg.profile = cfg.common.use_profile if hasattr(cfg.common, "use_profile") else 0
    # Profiler hook must run first, hence insert at the front.
    if cfg.profile and DEVICE_TYPE == "gpu":
        hook_list.insert(0, TorchProfileHook(cfg))
    if cfg.profile and DEVICE_TYPE == "ascend":
        hook_list.insert(0, NPUTorchProfileHook(cfg))

    return trainer, hook_list
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (927 Bytes). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/trainer_selftok_enc.py
ADDED
|
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import torch
|
| 5 |
+
import deepspeed
|
| 6 |
+
import functools
|
| 7 |
+
import numpy as np
|
| 8 |
+
from time import time as ttime
|
| 9 |
+
import time
|
| 10 |
+
from itertools import repeat
|
| 11 |
+
import torch.distributed as dist
|
| 12 |
+
from torch.cuda.amp import autocast
|
| 13 |
+
from easydict import EasyDict
|
| 14 |
+
from collections import OrderedDict
|
| 15 |
+
from torch.cuda.amp import GradScaler
|
| 16 |
+
from copy import deepcopy
|
| 17 |
+
from diffusers.models import AutoencoderKL
|
| 18 |
+
import moxing as mox
|
| 19 |
+
sys.path.append(".")
|
| 20 |
+
from mimogpt.utils import hf_logger, AverageMeter
|
| 21 |
+
from mimogpt.models import build_backbone
|
| 22 |
+
from mimogpt.datasets import build_dataloader
|
| 23 |
+
from mimogpt.engine.utils import TrainerBase, print_model_param_num, print_model_params
|
| 24 |
+
from mimogpt.engine.utils import clip_gradient, build_optimizer, setup_deepspeed, build_selftok_optimizer
|
| 25 |
+
from mimogpt.models.selftok.multires_image_tokenizer import MultiImageTokenizer ### mark
|
| 26 |
+
from mimogpt.models.selftok.image_tokenizer import ImageTokenizer
|
| 27 |
+
from mimogpt.models.selftok.sd3.sd3_impls import SDVAE, CFGDenoiser, SD3LatentFormat
|
| 28 |
+
from mimogpt.datasets.t2i_dataset import build_t2i_trainloader
|
| 29 |
+
import re
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
DEVICE_TYPE = os.environ.get("DEVICE_TYPE", "gpu")
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Metric names tracked by the trainer's AverageMeters; values come from the
# log_dict returned by the tokenizer's forward pass (plus 'enc_sum_grad',
# which the trainer computes itself from encoder gradients).
log_dict_keys = [
    "loss", "dm_mse", "L2_loss", "commit_loss", "loss_small", "loss_mid", "loss_large", "loss_uncon",
    "diversity_entropy", "perplexity", "deterministic_entropy", "enc_sum_grad", "delta_embed",
    "n_active", "n_reactive", "perplexity_list", "deter_list", "cosine_sim",
]
|
| 41 |
+
|
| 42 |
+
def requires_grad(model, flag=True):
    """Enable or disable gradient tracking for every parameter of `model`."""
    for parameter in model.parameters():
        parameter.requires_grad = flag
|
| 48 |
+
|
| 49 |
+
@torch.no_grad()
def update_ema(ema_model, model, decay=0.9999):
    """Step the EMA model towards the current model.

    Each EMA parameter becomes decay * ema + (1 - decay) * model. Parameter
    names are matched after stripping any DDP "module." prefix. Buffers are
    not updated.
    """
    ema_params = dict(ema_model.named_parameters())
    for name, param in model.named_parameters():
        key = name.replace("module.", "")
        ema_params[key].mul_(decay).add_(param.data, alpha=1 - decay)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def load_state(model, state_dict, prefix='',init_method = None):
    """Partially load `state_dict` into `model`, stripping `prefix` from keys.

    Keys are filtered depending on `prefix`:
      - 'model.diffusion_model.': drops the context embedder and any
        'context_block' weights; init_method == 1 additionally drops the final
        layer, init_method == 2 additionally drops x_block attention — so
        those modules keep their fresh initialization.
      - 'encoder.': drops query/quantizer/adaLN-style heads that were added on
        top of the pretrained backbone.
      - otherwise: keeps every key that exists in the target model.
    Shape-mismatched tensors are dropped as well; loading is non-strict and
    missing/unexpected keys are logged.

    Args:
        model: target nn.Module.
        state_dict: source checkpoint mapping (possibly prefixed).
        prefix (str): key prefix to strip before matching.
        init_method (int | None): selects the diffusion-model exclusion
            variant as described above.
    """
    model_dict = model.state_dict()  # current network structure
    if prefix == 'model.diffusion_model.':
        excluded_keys = ['context_embedder.bias', 'context_embedder.weight']
        if init_method == 1:
            excluded_keys = ['context_embedder.bias', 'context_embedder.weight', 'final_layer.adaLN_modulation.1.bias', 'final_layer.adaLN_modulation.1.weight', 'final_layer.linear.bias', 'final_layer.linear.weight']
            pretrained_dict = {k.replace(prefix,''): v for k, v in state_dict.items() if k.replace(prefix,'') in model_dict and k.replace(prefix,'') not in excluded_keys and 'context_block' not in k}
        elif init_method == 2:
            pretrained_dict = {k.replace(prefix,''): v for k, v in state_dict.items() if k.replace(prefix,'') in model_dict and k.replace(prefix,'') not in excluded_keys and 'context_block' not in k and 'x_block.attn' not in k}
        else:
            pretrained_dict = {k.replace(prefix,''): v for k, v in state_dict.items() if k.replace(prefix,'') in model_dict and k.replace(prefix,'') not in excluded_keys and 'context_block' not in k}
    elif prefix == 'encoder.':
        excluded_keys = ['query', 'q_norm1', 'q_norm2', 'post_norm', 'q_mlp', 'adaLN', 't_emb', 'final', 'quantizer']
        pretrained_dict = {k.replace(prefix,''): v for k, v in state_dict.items() if k.replace(prefix,'') in model_dict and all(ek not in k for ek in excluded_keys)}
    else:
        pretrained_dict = {k.replace(prefix,''): v for k, v in state_dict.items() if k.replace(prefix,'') in model_dict}

    # Drop any surviving key whose tensor shape no longer matches the target;
    # iterate over a copy because pretrained_dict is mutated in the loop.
    dict_t = deepcopy(pretrained_dict)
    for key, weight in dict_t.items():
        if key in model_dict and model_dict[key].shape != dict_t[key].shape:
            pretrained_dict.pop(key)

    m, u = model.load_state_dict(pretrained_dict, strict=False)
    if len(m) > 0:
        hf_logger.info(f"model missing keys:{m}")
    if len(u) > 0:
        hf_logger.info(f"mode unexpected keys:{u}")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class TrainerSelftokEnc(TrainerBase):
    """Trainer for the Selftok encoder/decoder (ImageTokenizer).

    Builds the dataloader, VAE, tokenizer model, optimizer and grad scaler,
    wrapping the model in FSDP or DDP depending on cfg.common.use_fsdp, and
    implements one optimization step in run_step().
    """

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        self.set_attribute()
        # dataloaders
        rank = dist.get_rank() #0 # machine index
        world_size = dist.get_world_size() #1 # num nodes
        print(f"Rank {rank} with world size {world_size} initiating dataset...")
        self.data_loader = build_t2i_trainloader(cfg.data, rank, world_size)
        self._data_loader_iter = iter(self.data_loader)
        # NOTE(review): two positional args — assumes hf_logger.info is
        # print-style, not stdlib logging (which would raise). Confirm.
        hf_logger.info("dataloader length:", len(self.data_loader))
        self.cfg.dataloader_len = len(self.data_loader)

        # models
        self.set_vae() # load vae and set to eval
        self.low_res_list = None
        self.decoder_use_low_res_rec = cfg.tokenizer.params.decoder_config.get('low_res', False)
        if hasattr(cfg.model, 'fix_decoder'):
            cfg.tokenizer.params.train_encoder_only = cfg.model.fix_decoder
        self.model = ImageTokenizer(**cfg.tokenizer.params)
        self.model.set_train() # set encoder and decoder to train

        state_dict = None
        #self.ema = self.set_ema_model(self.model.model) # create ema model, disable grad
        self.ema = None
        pretrain_model_path = self.get_pretrain_model_path()
        if pretrain_model_path is not None:
            state_dict = torch.load(pretrain_model_path, map_location="cpu")
            try:
                hf_logger.info(f"Loading all...")
                #self.ema.load_state_dict(state_dict['ema_state_dict'], strict=True)
                self.model.load_state_dict(state_dict['state_dict'], strict=True)
            except:
                # Strict load failed (e.g. architecture drift): stagger ranks to
                # avoid hammering storage, then fall back to a partial load.
                import time
                time.sleep(10 * (dist.get_rank() % 8))
                hf_logger.info(f"Loading partial state dict for rank: {dist.get_rank()}...")
                load_state(self.model, state_dict['state_dict'])
        else:
            # No resume checkpoint: optionally warm-start the decoder (MMDiT)
            # from a pretrained diffusion model.
            if hasattr(self.cfg.tokenizer, "pretrained_dit_path") and self.cfg.tokenizer.pretrained_dit_path:
                teacher_path = self.cfg.tokenizer.pretrained_dit_path
                hf_logger.info(f'mmdit init_from {teacher_path}...')
                state_dict = torch.load(
                    teacher_path,
                    map_location='cpu'
                )
                load_state(self.model.model, state_dict, 'model.diffusion_model.', init_method = cfg.tokenizer.params.decoder_config.init_method)


        print_model_param_num(cfg.model, self.model)
        print_model_param_num('encoder', self.model.encoder)
        print_model_param_num('decoder', self.model.model)
        self.model.cuda()
        if hasattr(self.cfg.common, "use_fsdp") and self.cfg.common.use_fsdp:
            print("USING FSDP from Pytorch...")
            from torch.distributed.fsdp import (
                FullyShardedDataParallel as FSDP,
                MixedPrecision,
                BackwardPrefetch,
                ShardingStrategy,
                FullStateDictConfig,
                StateDictType,
            )
            from torch.distributed.fsdp.fully_sharded_data_parallel import (
                CPUOffload,
                BackwardPrefetch,
            )
            from torch.distributed.fsdp.wrap import (
                size_based_auto_wrap_policy,
                transformer_auto_wrap_policy,
                enable_wrap,
                wrap,
            )

            from mimogpt.models.selftok.sd3.mmdit import JointBlock
            print("Using size based policy")
            # Wrap every transformer JointBlock as its own FSDP unit.
            my_auto_wrap_policy = functools.partial(
                transformer_auto_wrap_policy, transformer_layer_cls={JointBlock}, )

            bf16 = MixedPrecision(
                ## param precision
                param_dtype=torch.float32,
                # Gradient communication precision.
                reduce_dtype=torch.bfloat16,
                # Buffer precision.
                buffer_dtype=torch.bfloat16,
            )

            fp32 = MixedPrecision(
                ## param precision
                param_dtype=torch.float32,
                # Gradient communication precision.
                reduce_dtype=torch.float32,
                # Buffer precision.
                buffer_dtype=torch.float32,
            )

            # NOTE: full fp32 mixed-precision config is used; the bf16 config
            # above is currently unused.
            self.model = FSDP(self.model,
                auto_wrap_policy=my_auto_wrap_policy,
                mixed_precision=fp32,
                device_id=torch.cuda.current_device(),
                # sharding_strategy=ShardingStrategy.FULL_SHARD,
                sharding_strategy=ShardingStrategy._HYBRID_SHARD_ZERO2,
                forward_prefetch=True,
                #sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
                backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
                use_orig_params=True,
                limit_all_gathers=True)


            self.optimizer = build_selftok_optimizer(self.model, self.cfg)
            if state_dict is not None and (not self.resume_exclude_opt) and 'opt' in state_dict:
                if hasattr(self.cfg.common, "use_fsdp") and self.cfg.common.use_fsdp:
                    from torch.distributed.fsdp import FullStateDictConfig, FullOptimStateDictConfig, StateDictType
                    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

                    # Convert the full (unsharded) optimizer checkpoint into
                    # this rank's sharded layout before loading.
                    save_policy = FullStateDictConfig(rank0_only=False)
                    save_opt_policy = FullOptimStateDictConfig(rank0_only=False)
                    FSDP.set_state_dict_type(self.model, StateDictType.FULL_STATE_DICT, save_policy, save_opt_policy)
                    opt_state = FSDP.optim_state_dict_to_load(
                        self.model, self.optimizer,state_dict['opt']
                    )
                    self.optimizer.load_state_dict(opt_state)
                else:
                    self.optimizer.load_state_dict(state_dict['opt'])
            from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
            self._scaler = ShardedGradScaler(enabled=self.use_fp16)
            dist.barrier()
        else:
            # setup DDP
            self.model.cuda()
            self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[cfg.rank % 8], find_unused_parameters=True)
            dist.barrier()
            # Debug aid: one rank per node prints each parameter's trainability.
            if dist.get_rank() % 8==1:
                for n,p in self.model.module.named_parameters():
                    print(n, p.requires_grad)

            # create optimizer and scaler
            self.optimizer = build_selftok_optimizer(self.model, self.cfg)
            if state_dict is not None and (not self.resume_exclude_opt) and 'opt' in state_dict:
                self.optimizer.load_state_dict(state_dict['opt'])
            if self.use_zero:
                from fairscale.optim.grad_scaler import ShardedGradScaler
                from fairscale.nn.data_parallel import ShardedDataParallel
                self.model = ShardedDataParallel(
                    self.model.module,
                    self.optimizer,
                    reduce_buffer_size=2000000,
                    reduce_fp16=self.use_fp16,
                )
                self._scaler = ShardedGradScaler(enabled=self.use_fp16)
            else:
                if DEVICE_TYPE == "ascend":
                    dynamic = cfg.common.use_dynamic if hasattr(cfg.common, "use_dynamic") else True
                    self._scaler = GradScaler(enabled=self.use_fp16, dynamic=dynamic)
                else:
                    self._scaler = GradScaler(enabled=self.use_fp16)

        # logging
        self.meters = EasyDict()
        for key in log_dict_keys:
            self.meters[key] = AverageMeter(self.cfg.common.log_interval, fstr="%.5f")
        self.meters["lpips"] = AverageMeter(1, fstr="%.5f") # only log the most recent lpips
        self.meters["psnr"] = AverageMeter(1, fstr="%.5f") # only log the most recent psnr
        self.flops1 = None
        self.flops2 = None

    def get_pretrain_model_path(self):
        """Resolve which checkpoint to start from.

        Prefers the newest iter_*.pth under cfg.model.recovery_path (copied to
        local cache via moxing); otherwise falls back to cfg.model.pretrain_model
        (s3:// paths are downloaded first). Returns None when neither is set.
        """
        # NOTE(review): the bare except swallows *all* failures here (including
        # a missing recovery_path attribute) and silently falls through to the
        # pretrain_model branch — confirm this is intentional.
        try:
            ckpts = mox.file.list_directory(self.cfg.model.recovery_path, recursive=False)
            max_iter = -1
            for ckpt in ckpts:
                matched = re.search(r"iter_(\d+).pth", ckpt)
                if matched:
                    current_iter = int(matched.group(1))
                    if current_iter > max_iter:
                        max_iter = current_iter
                        max_iter_ckpt = ckpt
            hf_logger.info(f"Downloading resume model from {self.cfg.model.recovery_path}, {max_iter_ckpt}...")
            mox.file.copy(os.path.join(self.cfg.model.recovery_path, max_iter_ckpt), dst_url='/cache/model/pretrained.pth')
            pretrain_model_path = '/cache/model/pretrained.pth'
        except:
            if hasattr(self.cfg.model, "pretrain_model") and self.cfg.model.pretrain_model:
                if 's3://' in self.cfg.model.pretrain_model:
                    hf_logger.info(f"Downloading pretrained model from {self.cfg.model.pretrain_model}...")
                    mox.file.copy_parallel(self.cfg.model.pretrain_model, '/cache/model/pretrained.pth')
                    pretrain_model_path = '/cache/model/pretrained.pth'
                else:
                    pretrain_model_path = self.cfg.model.pretrain_model
            else:
                # no pretrain model provided
                pretrain_model_path = None
        return pretrain_model_path

    def set_attribute(self):
        """Cache frequently used cfg values and dist info as attributes."""
        self.start_time = ttime()
        self.vae_path = self.cfg.common.vae_path
        self.resume_exclude_opt = self.cfg.common.resume_exclude_opt
        self.pre_encode = self.cfg.common.pre_encode
        self.log_every = self.cfg.common.log_interval
        self.full_tokens = self.cfg.model.full_tokens \
            if hasattr(self.cfg.model, "full_tokens") else False
        if not hasattr(self.cfg.model, "fix_encoder"):
            self.cfg.model.fix_encoder = False
        if not hasattr(self.cfg.model, "fix_decoder"):
            self.cfg.model.fix_decoder = False
        self.dist = EasyDict()
        self.dist.rank = dist.get_rank()
        self.dist.world_size = dist.get_world_size()
        self.dist_rank = dist.get_rank()
        self.dist_world_size = dist.get_world_size()
        self.use_deepspeed = self.cfg.common.use_deepspeed \
            if hasattr(self.cfg.common, "use_deepspeed") else False
        self.use_fp16 = self.cfg.common.use_fp16
        self.use_bf16 = self.cfg.common.use_bf16
        self.use_zero = self.cfg.common.use_zero

    def set_vae(self):
        """Load the frozen VAE used to produce latents (SD3's SDVAE for
        MMDiT_XL, otherwise a diffusers AutoencoderKL) and put it in eval mode."""
        if self.cfg.tokenizer.params.model == 'MMDiT_XL':
            self.vae = SDVAE(device="cpu", dtype=torch.bfloat16)
            state_dict = torch.load(self.vae_path, map_location='cpu')
            load_state(self.vae, state_dict, 'first_stage_model.')
        else:
            self.vae = AutoencoderKL.from_pretrained(self.vae_path)
        self.vae.cuda()
        self.vae.eval()

    def set_ema_model(self, model):
        """Create a gradient-free EMA copy of `model`, initialized to its
        current weights (decay=0)."""
        ema = deepcopy(model).to(torch.float32) # Create an EMA of the model for use after training
        requires_grad(ema, False)
        update_ema(ema, model, decay=0)
        if not self.cfg.optimize.ema_in_cpu:
            ema = ema.cuda()
        ema.eval()
        return ema

    def get_model(self,):
        """Return the underlying model, unwrapping DDP/FSDP if present."""
        model = self.model.module if hasattr(self.model, "module") else self.model
        return model


    def run_step(self):
        """One training iteration: encode batch to VAE latents, forward the
        tokenizer under bf16 autocast, backward, step, and update meters."""
        torch.cuda.synchronize()
        t0 = time.perf_counter()
        batch = next(self._data_loader_iter) # b n c h w
        x = batch['image'].cuda()
        if self.low_res_list is not None:
            x_low = batch[f'image_{self.low_res_list[0]}'].cuda()
        if not self.use_deepspeed:
            self.optimizer.zero_grad()
        # get vae latent
        with torch.no_grad():
            latent = self.vae.encode(x)
            latent = SD3LatentFormat().process_in(latent)
            if self.low_res_list is not None:
                latent_low = self.vae.encode(x_low)
                latent_low = SD3LatentFormat().process_in(latent_low)
        full_tokens = self.full_tokens

        with autocast(dtype=torch.bfloat16, cache_enabled=False):
            if hasattr(self, 'renderer'):
                # Optional low-res renderer provides conditioning token
                # embeddings (and optionally its reconstruction latents).
                with torch.no_grad():
                    ids, ids_emb, render_latents = self.renderer(
                        x=latent_low,
                        recon=self.decoder_use_low_res_rec,
                    )
                token_embeds = ids_emb
                low_res_latent = render_latents
            else:
                token_embeds = None
                low_res_latent = None

            self._loss, log_dict = self.model(
                x=latent, full_tokens=full_tokens, step=self.iter,
                token_embeds=token_embeds, low_res_latent=low_res_latent
            )

        self._loss.backward()
        self.optimizer.step()

        # prepare logs
        grads = [param.grad.abs().sum().item()
                 if param.grad is not None else 0.0
                 for param in self.get_model().encoder.parameters()
                 ]
        sum_grads = sum(grads)
        log_dict['enc_sum_grad'] = sum_grads
        for k, v in log_dict.items():
            # Pre-divide by world size so the all-reduce inside reduce_update
            # yields the mean across ranks.
            reduced_metric = torch.from_numpy(np.array(v)).float().cuda() / self.dist.world_size
            if isinstance(v,list):
                self.meters[k].reduce_update_list(reduced_metric)
            else:
                self.meters[k].reduce_update(reduced_metric)
        self.batch_mse = log_dict['dm_mse']
        self.n_active = log_dict['n_active']

        torch.cuda.synchronize()
        t1 = time.perf_counter()
        if dist.get_rank() == 0:
            # NOTE(review): node count, total iterations, learning rate and
            # throughput below are hard-coded for one specific cluster setup;
            # the printed values are informational only.
            import datetime
            current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            number_nodes = 40
            number_npus_per_node = 8
            iteration = self.iter
            total_iterations = 120000
            consumed_images = self.iter * x.shape[0] * number_npus_per_node * number_nodes
            elapsed_time_per_iteration = (t1-t0)*1000 # milliseconds
            throughput_per_gpu = 89.8 # TFLOP/s/GPU (computing without renderer)
            learning_rate = 1E-04
            global_batch_size = x.shape[0] * number_npus_per_node * number_nodes
            lm_loss = self.batch_mse
            loss_scale = 1.0
            grad_norm = sum_grads
            skipped_iterations = 0
            nan_iterations = 0
            baseline_time = 1300 # milliseconds

            print(f"[{current_time}] iteration: {iteration}/{total_iterations} | "
                  f"consumed images: {consumed_images} | "
                  f"elapsed time per iteration (ms): {elapsed_time_per_iteration:.1f} | "
                  f"throughput per GPU (TFLOP/s/GPU): {throughput_per_gpu:.1f} | "
                  f"learning rate: {learning_rate:.7E} | "
                  f"global batch size: {global_batch_size} | "
                  f"lm loss: {lm_loss:.6E} | "
                  f"loss scale: {loss_scale:.1f} | "
                  f"grad norm: {grad_norm:.3f} | "
                  f"number of skipped iterations: {skipped_iterations} | "
                  f"number of nan iterations: {nan_iterations} | "
                  f"baseline(ms): {baseline_time:.1f} |")
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__init__.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
"""Public API of ``mimogpt.engine.utils``.

Re-exports the training hooks, checkpoint helpers, optimizer builders,
loggers, schedulers, profilers and cloud-copy utilities so callers can
import everything from the package root.
"""

from .context_utils import MemartsCopyContext
from .checkpoint import (
    set_seed,
    get_state_dict,
    get_state_dict_resume,
    print_model_params,
    print_model_param_num,
    SaveModel,
    LoadStateDict,
    LoadStateDict_resume,
    SavePipeline,
    SaveModelandDisc,
)
from .selftok_hook import build_selftok_optimizer, SelfTokHook, SelfTokSaveHook
from .selftok_validation import EvalSelftokHook
from .optimizer import clip_gradient, build_optimizer, setup_deepspeed
from .parameter import parse_args, parse_args_from_yaml
from .cloud_copy import mox_copy, vid_dataset_copy, img_dataset_copy, common_cloud_copy, universal_cloud_copy
from .train_loop import HookBase, TrainerBase
from .record import (
    TimerAndLogger,
    SimpleLogger,
    UniversalMeterLogger,
    TensorboardLogger,
    TensorboardPriorLogger,
    VideoMeterLogger,
)
from .scheduler import LRScheduler
from .profile import TorchProfileHook
from .profile_npu import NPUTorchProfileHook
from .ema import EMAhook

__all__ = [
    "EvalSelftokHook",
    "build_selftok_optimizer",
    "SelfTokHook",
    "SelfTokSaveHook",
    "clip_gradient",
    "build_optimizer",
    "setup_deepspeed",
    "set_seed",
    "get_state_dict",
    "get_state_dict_resume",
    "print_model_params",
    "print_model_param_num",
    "parse_args",
    "parse_args_from_yaml",
    "mox_copy",
    "vid_dataset_copy",
    "img_dataset_copy",
    "common_cloud_copy",
    "universal_cloud_copy",
    "HookBase",
    "TrainerBase",
    "SimpleLogger",
    "TimerAndLogger",
    "UniversalMeterLogger",
    "VideoMeterLogger",
    "TensorboardLogger",
    "TensorboardPriorLogger",
    "LRScheduler",
    "SaveModel",
    "SavePipeline",
    "LoadStateDict",
    "LoadStateDict_resume",
    "TorchProfileHook",
    "EMAhook",
    "NPUTorchProfileHook",
    "SaveModelandDisc",
    # MemartsCopyContext was imported but previously missing from __all__;
    # added so `from mimogpt.engine.utils import *` exposes the full API.
    "MemartsCopyContext",
]
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/__init__.cpython-39.pyc
ADDED
|
Binary file (1.68 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/checkpoint.cpython-39.pyc
ADDED
|
Binary file (21 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/cloud_copy.cpython-39.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/context_utils.cpython-39.pyc
ADDED
|
Binary file (1.86 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/selftok_hook.cpython-39.pyc
ADDED
|
Binary file (9.35 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/__pycache__/train_loop.cpython-39.pyc
ADDED
|
Binary file (4.23 kB). View file
|
|
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/checkpoint.py
ADDED
|
@@ -0,0 +1,836 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
import copy
|
| 6 |
+
import yaml
|
| 7 |
+
import json
|
| 8 |
+
import shutil
|
| 9 |
+
import torch
|
| 10 |
+
import random
|
| 11 |
+
import numpy as np
|
| 12 |
+
from fairscale.optim import OSS
|
| 13 |
+
|
| 14 |
+
from .cloud_copy import mox_copy
|
| 15 |
+
from .train_loop import HookBase
|
| 16 |
+
from mimogpt.utils import hf_logger
|
| 17 |
+
|
| 18 |
+
import torch.distributed as dist
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def set_seed(seed):
    """Seed every RNG used in training (python, numpy, torch CPU and CUDA)
    with the same value so runs are reproducible."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def get_state_dict(pretrained_path):
    """Load a pretrained checkpoint and return the bare parameter state dict.

    Two formats are supported:
    * OpenAI official CLIP weights ("ViT-B-32.pt" / "RN50.pt" in the path):
      a TorchScript archive whose non-parameter metadata keys are stripped.
    * Everything else: a regular torch checkpoint, unwrapped from optional
      "state_dict" and "module" nesting levels.
    """
    is_openai_official = any(tag in pretrained_path for tag in ("ViT-B-32.pt", "RN50.pt"))
    if is_openai_official:
        # TorchScript archive from the official CLIP release.
        jit_model = torch.jit.load(pretrained_path, map_location="cpu")
        state_dict = jit_model.state_dict()
        # Drop metadata entries that are not model parameters.
        for meta_key in ("input_resolution", "context_length", "vocab_size"):
            del state_dict[meta_key]
        return state_dict

    # HW pretrained model: plain checkpoint, possibly wrapped.
    state_dict = torch.load(pretrained_path, map_location=torch.device("cpu"))
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    if "module" in state_dict:
        state_dict = state_dict["module"]
    return state_dict
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def get_state_dict_resume(pretrained_path):
    """Load a resume checkpoint onto CPU and return it unchanged.

    When the checkpoint has fewer than 10 top-level entries it is probably a
    wrapped container rather than a raw state dict, so the keys are printed
    to help diagnose what was actually saved.
    """
    checkpoint = torch.load(pretrained_path, map_location=torch.device("cpu"))
    looks_wrapped = len(checkpoint) < 10
    if looks_wrapped:
        print(checkpoint.keys())
    return checkpoint
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def print_model_params(model):
    """Print the name of every parameter that requires gradients,
    framed by header/footer separator lines."""
    print("\n--------------- trainable params ---------------")
    trainable_names = (name for name, param in model.named_parameters() if param.requires_grad)
    for name in trainable_names:
        print(name)
    print("--------------- trainable params ---------------\n")
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def print_model_param_num(model_info, model):
    """Report total and trainable parameter counts for *model*.

    Under an initialized torch.distributed run only rank 0 prints, and the
    counts are scaled by 1/1024/1024 (i.e. shown in "M" units); without
    distributed the raw element counts are printed.
    """
    params_total = 0
    params_trainable = 0
    for p in model.parameters():
        n = p.numel()
        params_total += n
        if p.requires_grad:
            params_trainable += n

    template = "\nmodel_info:\n{}\ntotal_params: {}\ntrainable_params: {}\n"
    if torch.distributed.is_initialized():
        # Avoid one line per rank: only rank 0 reports.
        if torch.distributed.get_rank() == 0:
            print(template.format(model_info, params_total / 1024 / 1024, params_trainable / 1024 / 1024))
    else:
        print(template.format(model_info, params_total, params_trainable))
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def calc_total_flops(model_info, model):
    """Profile *model* with thop on fixed dummy inputs and print its
    FLOPs (Gflops) and parameter count (M).

    Uses a video-shaped visual input [1, 8, 3, 224, 224] and a text input
    of 80 token ids; both are moved to the current CUDA device.
    """
    from thop import profile

    # img: [1, 1, 3, 224, 224] / video: [1, 8, 3, 224, 224]
    dummy_visual = torch.randn(1, 8, 3, 224, 224).cuda()
    dummy_text = torch.ones(1, 80).long().cuda()  # [1, 80]
    flops, params = profile(model, inputs=(dummy_visual, dummy_text))
    print("model: {}\nflops: {:.2f} Gflops\ntotal_params: {:.2f} M".format(model_info, flops / 1.0e9, params / 1.0e6))
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
class SaveModel(HookBase):
    """Training hook that periodically saves checkpoints and uploads them.

    Supports three save paths: plain DDP (state_dict of ``model.module``),
    FSDP (full state dict gathered to rank 0), and DeepSpeed (sharded save,
    optionally merged into one .pth on a shared NAS). Checkpoints are
    uploaded to remote storage via moxing when available, and the local
    copy is optionally deleted afterwards.
    """

    def __init__(self, cfg, is_root):
        self.output_path = cfg.common.output_path
        self.is_root = is_root
        # Save every `save_per_epochs` epochs, expressed in iterations;
        # falls back to 2000 iterations/epoch when dataloader_len is absent.
        self.save_interval = int(cfg.common.save_per_epochs * getattr(cfg, 'dataloader_len', 2000))

        # only for save config before train
        self._cfg = cfg
        self.delete_after_upload = cfg.common.get("delete_after_upload", False)
        self.only_save_lora = cfg.common.get("only_save_lora", False)

    @property
    def save_path(self):
        """Lazily create and cache ``<output_path>/ckpt``."""
        if self.__dict__.get("_output_path", None) is None:
            output_path = os.path.join(self.output_path, "ckpt")
            os.makedirs(output_path, exist_ok=True)
            self.__dict__["_output_path"] = output_path
        return self.__dict__["_output_path"]

    def before_train(self):
        # Persist the run configuration and a code backup once, up front.
        self.save_config()
        self.save_code()

    def after_step(self):
        """Save a checkpoint on the last iteration of each save interval."""
        if self.trainer.iter % self.save_interval == (self.save_interval - 1):
            if self.only_save_lora:
                save_name = "lora_iter_%d.pth" % (self.trainer.iter)
            else:
                save_name = "iter_%d.pth" % (self.trainer.iter)
            if hasattr(self._cfg.common, "use_fsdp") and self._cfg.common.use_fsdp:
                from torch.distributed.fsdp import FullStateDictConfig, StateDictType
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

                # Gather the full (unsharded) state dict on rank 0, offloaded to CPU.
                save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)

                with FSDP.state_dict_type(self.trainer.model, StateDictType.FULL_STATE_DICT, save_policy):
                    cpu_state = self.trainer.model.state_dict()
                    self.save_model(cpu_state, save_name)
            elif hasattr(self._cfg.common, "use_deepspeed") and self._cfg.common.use_deepspeed:
                self.save_model_deepspeed(save_name)
            else:
                if hasattr(self.trainer.model, "module"):
                    self.save_model(self.trainer.model.module.state_dict(), save_name)
                else:
                    self.save_model(self.trainer.model.state_dict(), save_name)
            torch.cuda.empty_cache()

    def save_config(self):
        """Dump the run configuration to ``<output_path>/run.yml`` (root only)."""
        if self.is_root:
            hf_logger.save_args(self._cfg)
            run_save_path = os.path.join(self.output_path, "run.yml")
            if not os.path.isfile(run_save_path):
                try:
                    with open(run_save_path, "w") as args_fh:
                        yaml.dump(self._cfg.__dict__, args_fh, sort_keys=False)
                    hf_logger.info("Run configs dump to %s" % run_save_path)
                except Exception:
                    # Best-effort: a non-serializable cfg must not abort training.
                    hf_logger.info("fail to dump run config!!")

    def save_code(self):
        """Best-effort backup of the source tree to remote storage (root only).

        Silently skipped when moxing is unavailable (e.g. local runs).
        """
        if self.is_root:
            try:
                import moxing as mox

                local_code_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../../.."))
                roma_code_path = os.path.join(self._cfg.train_url, os.path.split(local_code_path)[-1])
                mox_copy(local_code_path, roma_code_path, parallel=True)
                hf_logger.info("backup code success, roma_code_path:{}".format(roma_code_path))
            except Exception:
                pass

    def save_model(self, checkpoint, save_name):
        """Move all tensors in *checkpoint* to CPU, save locally, then upload."""
        if self.is_root:
            local_weights = os.path.join(self.save_path, save_name)
            for k, v in checkpoint.items():
                checkpoint[k] = v.cpu()
            torch.save(checkpoint, local_weights)
            self.upload_and_delete_local_model(local_weights)

    def save_model_deepspeed(self, save_name, ema_state=None):
        """Save a DeepSpeed sharded checkpoint; optionally merge to one .pth.

        When ``deepspeed_merge_save`` is set, all ranks copy their shards to a
        shared NAS mount and rank 0 merges them with zero_to_fp32.py before
        uploading; otherwise each node uploads its shard directory as-is.
        """
        # deepspeed save pths
        self.trainer.model.cpu().save_checkpoint(self.save_path, f"deepspeed_save_dir_{save_name[:-4]}")
        self.trainer.model.cuda()
        pth_src_path = os.path.join(self.save_path, f"deepspeed_save_dir_{save_name[:-4]}")
        if ema_state is not None:
            if self.is_root:
                local_weights = os.path.join(pth_src_path, "ema.pt")
                torch.save(ema_state, local_weights)
        if hasattr(self._cfg.common, "deepspeed_merge_save") and self._cfg.common.deepspeed_merge_save:
            # create nas path to merge deepspeed pths
            MA_NFS_MOUNT_VOLUMES = json.loads(os.getenv("MA_NFS_MOUNT_VOLUMES"))
            nas_dst_path = MA_NFS_MOUNT_VOLUMES[0]["local_path"]
            nas_dst_path = os.path.join(nas_dst_path, "ckpt_tmp_{}".format(os.getenv("MASTER_ADDR")))
            if dist.get_rank() == 0 and not os.path.exists(nas_dst_path):
                os.mkdir(nas_dst_path)
                os.mkdir(os.path.join(nas_dst_path, f"deepspeed_save_dir_{save_name[:-4]}"))
            dist.barrier()
            # copy local pths to nas (one rank per node)
            if dist.get_rank() % 8 == 0:
                os.system("cp {}/* {}/deepspeed_save_dir_{}".format(pth_src_path, nas_dst_path, save_name[:-4]))
            dist.barrier()
            # merge pths to one on nas
            if dist.get_rank() == 0:
                os.system("cp {} {}".format(os.path.join(self.save_path, "latest"), nas_dst_path))
                merged_pth_path = os.path.join(self.save_path, save_name)
                try:
                    os.system("python {}/zero_to_fp32.py {} {}".format(self.save_path, nas_dst_path, merged_pth_path))
                    self.upload_and_delete_local_model(merged_pth_path)
                except Exception as e:
                    # BUGFIX: the original format string had a single placeholder
                    # for two arguments, silently dropping the exception text.
                    print("save and upload deepspeed checkpoint {} failed, error: {}".format(merged_pth_path, e))
                shutil.rmtree(nas_dst_path)
            dist.barrier()
            # delete local tmp pths
            if dist.get_rank() % 8 == 0:
                shutil.rmtree(pth_src_path)
        else:
            self.upload_and_delete_local_model(pth_src_path, rank0_only=False, parallel=True)
        dist.barrier()

    def upload_and_delete_local_model(self, local_weights, rank0_only=True, parallel=False):
        """Upload *local_weights* via moxing, then optionally delete the local copy.

        With ``rank0_only`` only global rank 0 uploads; otherwise one rank per
        8-device node does. Falls back to keeping the local file when moxing
        is unavailable.
        """
        device_rank = dist.get_rank()
        allow_current_device_operate_files = (device_rank % 8 == 0 and not rank0_only) or (
            device_rank == 0 and rank0_only
        )
        try:
            import moxing as mox

            roma_weights_fp = os.path.join(self._cfg.train_url, local_weights)
            roma_weights_dirname = os.path.dirname(roma_weights_fp)
            if allow_current_device_operate_files:
                if not mox.file.exists(roma_weights_dirname):
                    mox.file.make_dirs(roma_weights_dirname)
                mox_copy(local_weights, roma_weights_fp, parallel)
                hf_logger.info("save weight success, roma_weights_fp:{}".format(roma_weights_fp))
        except Exception:
            # No moxing / upload failed: weights remain on local disk only.
            hf_logger.info("save weight success, local_weights_fp:{}".format(local_weights))

        if self.delete_after_upload and allow_current_device_operate_files:
            if os.path.isdir(local_weights):
                shutil.rmtree(local_weights)
            else:
                os.remove(local_weights)
            hf_logger.info(f"{local_weights} removed")
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
class SavePipeline(SaveModel):
    """SaveModel variant that persists a diffusers TextToVideoSDPipeline.

    First save builds and uploads the full pipeline; subsequent saves only
    persist the UNet (plus optimizer/iteration state) as a variant.
    """

    def save_model(self, checkpoint, save_name):
        # NOTE: the `checkpoint` argument is ignored here; the pipeline is
        # assembled directly from the trainer's components instead.
        # if hasattr(self.trainer.optimizer, "consolidate_state_dict"):
        #     self.trainer.optimizer.consolidate_state_dict(recipient_rank=0)
        if dist.get_rank() == 0:
            if not os.path.exists("/cache/pipe"):
                # First save: export the whole pipeline once.
                vae = self.trainer.first_stage_model
                tokenizer = self.trainer.tokenizer
                text_encoder = self.trainer.cond_stage_model
                unet = self.trainer.model.module if hasattr(self.trainer.model, "module") else self.trainer.model
                scheduler = self.trainer.noise_scheduler
                from diffusers import TextToVideoSDPipeline

                t2v_pipe = TextToVideoSDPipeline(
                    vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler
                )
                t2v_pipe.save_pretrained("/cache/pipe/")
                import moxing as mox

                roma_weights_dirname = os.path.join(self._cfg.train_url)
                if not mox.file.exists(roma_weights_dirname):
                    mox.file.make_dirs(roma_weights_dirname)
                mox_copy("/cache/pipe/", roma_weights_dirname, parallel=True)
                hf_logger.info("save pipe success, roma_weights_fp:{}".format(roma_weights_dirname))
            else:
                # Later saves: only the UNet changes, saved as an iter-tagged variant.
                unet = self.trainer.model.module if hasattr(self.trainer.model, "module") else self.trainer.model
                unet.save_pretrained("/cache/pipe/unet", variant="iter" + str(self.trainer.iter))
                # unet.save_pretrained('/cache/pipe/unet')
                # ckpt_name = f'diffusion_pytorch_model.iter{self.trainer.iter}.bin'
                # roma_weights_fp = os.path.join(self._cfg.train_url, 'unet', ckpt_name)
                # mox_copy(f'/cache/pipe/unet/{ckpt_name}', roma_weights_fp)
                # hf_logger.info("save pipe success, roma_weights_fp:{}".format(roma_weights_fp))
                # OSS (sharded) optimizer state cannot be saved from one rank; skip it.
                optimizer = None if isinstance(self.trainer.optimizer, OSS) else self.trainer.optimizer.state_dict()
                state = {
                    "iter": self.trainer.iter,
                    "opt": optimizer,
                    "cfg": copy.deepcopy(self._cfg.__dict__),
                }
                torch.save(state, "/cache/pipe/unet/state.pth")
                roma_weights_dirname = os.path.join(self._cfg.train_url, "unet")
                mox_copy("/cache/pipe/unet", roma_weights_dirname, parallel=True)

        # All ranks wait until rank 0 finishes writing/uploading.
        dist.barrier()
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class LoadStateDict(HookBase):
|
| 288 |
+
    def __init__(self, pretrained_path, task, cfg):
        """Hook that loads pretrained weights before training.

        Args:
            pretrained_path: checkpoint path; falsy means no loading.
            task: selects the task-specific loading routine (see before_train).
            cfg: run config; cfg.common.use_deepspeed disables this hook
                (DeepSpeed restores weights through its own engine).
        """
        self.task = task
        self.pretrained_path = pretrained_path
        self.use_deepspeed = cfg.common.use_deepspeed
        self.cfg = cfg
|
| 293 |
+
|
| 294 |
+
def before_train(self):
|
| 295 |
+
if self.use_deepspeed:
|
| 296 |
+
return
|
| 297 |
+
if self.task == "vlip":
|
| 298 |
+
self.load_vlip()
|
| 299 |
+
elif self.task == "mimo":
|
| 300 |
+
self.load_mimo()
|
| 301 |
+
elif self.task == "mimo_gpt":
|
| 302 |
+
self.load_mimo_gpt()
|
| 303 |
+
elif self.task in ("mimo_interleaved", "mimo_interleaved_enc"):
|
| 304 |
+
self.load_mimo_interleaved()
|
| 305 |
+
elif self.task == "video_ldm":
|
| 306 |
+
self.load_video_ldm()
|
| 307 |
+
elif self.task == "mmdit":
|
| 308 |
+
self.load_mmdit()
|
| 309 |
+
else:
|
| 310 |
+
self.load_mimo()
|
| 311 |
+
|
| 312 |
+
    def load_mimo_interleaved(self):
        """Load pretrained weights for the interleaved mimo task.

        Tries a strict load first; on any failure, falls back to a partial
        load that adapts vocabulary-dependent tensors (text embeddings,
        logit head, positional embeddings) to the current model's shapes and
        loads everything else that matches by name and shape.
        """
        print("I AM IN load_mimo_interleaved!!!!")
        if self.pretrained_path:
            state_dict = get_state_dict(self.pretrained_path)
            try:
                hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
                self.trainer.model.module.load_state_dict(state_dict)
                hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
            except:
                hf_logger.info("\nload pretrained_path: strict=False, remove visual branches......\n")

                model_dict = self.trainer.model.module.state_dict()  # current network structure
                pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}  # usable pretrained weights

                # load V2
                # Current text vocabulary size; 30522 below is presumably the
                # base BERT vocab kept from the pretrained model — TODO confirm.
                cur_vocab = model_dict["text_embeddings.weight"].shape[0]
                use_df = self.cfg.tokenizer.get("use_dfvqgan", False)
                if use_df:
                    img_vocab = 8192
                else:
                    img_vocab = 16384
                for key, weight in state_dict.items():
                    if key in model_dict and model_dict[key].shape != state_dict[key].shape:
                        if "text_embeddings" in key:
                            # Keep pretrained base-vocab rows, take the new rows
                            # (added tokens) from the freshly initialized model.
                            pretrained_dict[key] = torch.cat(
                                (
                                    state_dict[key][:30522],
                                    model_dict[key][30522:cur_vocab, :].to(state_dict[key].device),
                                ),
                                dim=0,
                            )
                            hf_logger.info(
                                "[{}] shape change: pretrained model {} --> new model {}".format(
                                    key, state_dict[key].shape, model_dict[key].shape
                                )
                            )
                        elif "to_logits" in key:
                            # Head rows: pretrained text vocab + new tokens from the
                            # current model + pretrained image-token rows at the end.
                            pretrained_dict[key] = torch.cat(
                                (
                                    state_dict[key][:30522],
                                    model_dict[key][30522:cur_vocab].to(state_dict[key].device),
                                    state_dict[key][-img_vocab:],
                                ),
                                dim=0,
                            )
                            hf_logger.info(
                                "[{}] shape change: pretrained model {} --> new model {}".format(
                                    key, state_dict[key].shape, model_dict[key].shape
                                )
                            )
                        elif "text_pos_embeddings" in key:
                            pre_len = state_dict[key].shape[0]
                            cur_len = model_dict[key].shape[0]
                            if pre_len < cur_len:
                                # Extend with the current model's extra positions.
                                pretrained_dict[key] = torch.cat(
                                    (
                                        state_dict[key][:pre_len],
                                        model_dict[key][pre_len:cur_len].to(state_dict[key].device),
                                    ),
                                    dim=0,
                                )
                            else:
                                # NOTE(review): truncates to pre_len, not cur_len —
                                # when pre_len > cur_len the shape still mismatches;
                                # verify the intended slice is [:cur_len].
                                pretrained_dict[key] = state_dict[key][:pre_len]
                        else:
                            # Any other shape mismatch: drop the pretrained tensor.
                            pretrained_dict.pop(key)
                            hf_logger.info(
                                "[{}] popped: pretrained model {} --> new model {}".format(
                                    key, state_dict[key].shape, model_dict[key].shape
                                )
                            )

                # Report parameters that were not loaded from the checkpoint.
                for k in model_dict:
                    if k not in pretrained_dict:
                        hf_logger.info("{} not loaded".format(k))

                model_dict.update(pretrained_dict)
                self.trainer.model.module.load_state_dict(model_dict, strict=False)
|
| 390 |
+
|
| 391 |
+
    def load_mimo_gpt(self):
        """Load pretrained GPT-style weights for the mimo_gpt task.

        Tries a strict load first; on failure, remaps key names (bare
        ``decoder`` -> ``decoder.transformer``), transposes Conv1D-style
        ``.c_*`` weights, pads shape-mismatched tensors with the current
        model's values where possible, and finally loads strictly.
        """
        if self.pretrained_path:
            state_dict = get_state_dict(self.pretrained_path)
            try:
                hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
                self.trainer.model.module.load_state_dict(state_dict)
                hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
            except:
                hf_logger.info("\nload pretrained_path: strict=False, remove visual branches......\n")

                model_dict = self.trainer.model.module.state_dict()  # current network structure
                pretrained_dict = dict()
                for k, v in state_dict.items():
                    # Old checkpoints name the GPT body "decoder"; current code
                    # nests it as "decoder.transformer".
                    if "decoder" in k and "transformer" not in k:
                        k = k.replace("decoder", "decoder.transformer")
                    # GPT-2 Conv1D weights are stored transposed vs nn.Linear.
                    if ".c_" in k:
                        v = v.t()
                    if k in model_dict:
                        pretrained_dict[k] = v
                pretrained_keys = list(pretrained_dict.keys())

                for key in pretrained_keys:
                    if model_dict[key].shape != pretrained_dict[key].shape:
                        print(
                            "[{}] shape change: pretrained model {} --> new model {}".format(
                                key, pretrained_dict[key].shape, model_dict[key].shape
                            )
                        )
                        # pretrained_dict.pop(key)
                        if len(model_dict[key].shape) == 2:
                            cur_shape0, cur_shape1 = model_dict[key].shape
                            pre_shape0, pre_shape1 = pretrained_dict[key].shape
                            if cur_shape0 >= pre_shape0 and cur_shape1 >= pre_shape1:
                                # NOTE(review): pads from state_dict[key], not
                                # pretrained_dict[key] — for renamed or transposed
                                # (".c_") keys this reads the unmapped tensor and
                                # may KeyError or use the untransposed weight;
                                # verify against an actual checkpoint.
                                tmp = torch.cat(
                                    (
                                        state_dict[key],
                                        model_dict[key][pre_shape0:, :pre_shape1].to(state_dict[key].device),
                                    ),
                                    dim=0,
                                )
                                tmp = torch.cat(
                                    (tmp, model_dict[key][:, pre_shape1:].to(state_dict[key].device)), dim=1
                                )
                                pretrained_dict[key] = tmp
                            else:
                                pretrained_dict.pop(key)
                        elif len(model_dict[key].shape) == 1:
                            cur_shape0 = model_dict[key].shape[0]
                            pre_shape0 = pretrained_dict[key].shape[0]
                            if cur_shape0 >= pre_shape0:
                                tmp = torch.cat(
                                    (state_dict[key], model_dict[key][pre_shape0:].to(state_dict[key].device)), dim=0
                                )
                                pretrained_dict[key] = tmp
                            else:
                                pretrained_dict.pop(key)

                # Report parameters that were not loaded from the checkpoint.
                for k in model_dict:
                    if k not in pretrained_dict:
                        hf_logger.info("{} not loaded".format(k))

                model_dict.update(pretrained_dict)
                self.trainer.model.module.load_state_dict(model_dict, strict=True)
|
| 455 |
+
|
| 456 |
+
def load_mimo(self):
    """Load a pretrained MIMO checkpoint into ``self.trainer.model.module``.

    First attempts a strict ``load_state_dict``.  If that raises (shape or
    key mismatch), falls back to a partial load that resizes the
    vocabulary-dependent tensors (text embeddings, logit head, text
    positional embeddings) to the current model's shapes, keeping the
    pretrained rows and filling the remainder from the freshly initialised
    model, and drops any other mismatched weights.
    """
    if self.pretrained_path:
        state_dict = get_state_dict(self.pretrained_path)
        try:
            hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
            self.trainer.model.module.load_state_dict(state_dict)
            hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
        except:
            hf_logger.info("\nload pretrained_path: strict=False, remove visual branches......\n")

            model_dict = self.trainer.model.module.state_dict()  # current model structure
            pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}  # weights usable from the pretrained model

            # # TODO: hard code, load 3B model weights
            # for key, _ in model_dict.items():
            #     if "transformer.layers." in key:
            #         cur_layer_id = key.split('layers')[-1].split('.')[1]  # '41'
            #         # ref_layer_id = int(cur_layer_id) // 2
            #         if int(cur_layer_id) > 23:
            #             ref_layer_id = int(cur_layer_id) - 24
            #             k_ref = key.replace('transformer.layers.{}.'.format(cur_layer_id),
            #                                 'transformer.layers.{}.'.format(ref_layer_id))
            #             if k_ref in state_dict and model_dict[key].shape == state_dict[k_ref].shape:
            #                 pretrained_dict[key] = state_dict[k_ref]
            #                 hf_logger.info('{} weight is load from {}'.format(key, k_ref))

            # NOTE(review): 30522 below looks like the BERT vocab size and 16384
            # the image-token vocab size — confirm against the tokenizer config.
            cur_vocab = model_dict["text_embeddings.weight"].shape[0]
            img_vocab = 16384
            for key, weight in state_dict.items():
                if key in model_dict and model_dict[key].shape != state_dict[key].shape:
                    if "text_embeddings" in key:
                        # Keep the first 30522 pretrained rows; pad the rest
                        # with the current model's freshly initialised rows.
                        pretrained_dict[key] = torch.cat(
                            (
                                state_dict[key][:30522],
                                model_dict[key][30522:cur_vocab, :].to(state_dict[key].device),
                            ),
                            dim=0,
                        )
                        hf_logger.info(
                            "[{}] shape change: pretrained model {} --> new model {}".format(
                                key, state_dict[key].shape, model_dict[key].shape
                            )
                        )
                    elif "to_logits" in key:
                        # Text rows first, new rows in the middle, and the
                        # pretrained image-vocab rows kept at the end.
                        pretrained_dict[key] = torch.cat(
                            (
                                state_dict[key][:30522],
                                model_dict[key][30522:cur_vocab].to(state_dict[key].device),
                                state_dict[key][-img_vocab:],
                            ),
                            dim=0,
                        )
                        hf_logger.info(
                            "[{}] shape change: pretrained model {} --> new model {}".format(
                                key, state_dict[key].shape, model_dict[key].shape
                            )
                        )
                    elif "text_pos_embeddings" in key:
                        # Grow (pad with current-model rows) or shrink (truncate)
                        # the positional table to the current sequence length.
                        pre_len = state_dict[key].shape[0]
                        cur_len = model_dict[key].shape[0]
                        if pre_len < cur_len:
                            pretrained_dict[key] = torch.cat(
                                (
                                    state_dict[key][:pre_len],
                                    model_dict[key][pre_len:cur_len].to(state_dict[key].device),
                                ),
                                dim=0,
                            )
                        else:
                            pretrained_dict[key] = state_dict[key][:pre_len]
                    else:
                        # Any other mismatched tensor cannot be adapted — drop it.
                        pretrained_dict.pop(key)
                        hf_logger.info(
                            "[{}] popped: pretrained model {} --> new model {}".format(
                                key, state_dict[key].shape, model_dict[key].shape
                            )
                        )

            # if "condition_to_decoder" in key:
            #     pretrained_dict[key] = state_dict[key][:, :512]
            #     hf_logger.info("[{}] shape change: pretrained model {} --> new model {}"
            #                    .format(key, state_dict[key].shape, model_dict[key].shape))
            # elif 'image_row_embeddings' in key or 'image_col_embeddings' in key:
            #     pretrained_dict[key] = torch.cat(
            #         (state_dict[key][:16, :],
            #          model_dict[key][16:32, :].to(state_dict[key].device)),
            #         dim=0)
            #     hf_logger.info("[{}] shape change: pretrained model {} --> new model {}"
            #                    .format(key, state_dict[key].shape, model_dict[key].shape))

            # # load dalle weights
            # for key, weight in state_dict.items():
            #     # text_embeddings: 16384 + 128 -> 21128 + 128
            #     if key == 'text_embeddings.weight':
            #         if model_dict[key].shape != state_dict[key].shape:
            #             pretrained_dict.pop(key)
            #             print("[{}] shape change: pretrained model {} --> new model {}"
            #                   .format(key, state_dict[key].shape, model_dict[key].shape))
            #
            #     if key in ['to_logits.1.weight', 'to_logits.1.bias']:
            #         if model_dict[key].shape != state_dict[key].shape:
            #             total_vocab_size = model_dict[key].shape[0]
            #             pretrained_dict[key] = torch.cat(
            #                 (model_dict[key][:total_vocab_size - 8192].to(state_dict[key].device),
            #                  state_dict[key][-8192:]), dim=0)
            #
            # # load GPT2 weights
            # for key, _ in model_dict.items():
            #     if "decoder." in key:
            #         k_ref = key.replace("decoder.", "")
            #         if k_ref in state_dict and model_dict[key].shape == state_dict[k_ref].shape:
            #             pretrained_dict[key] = state_dict[k_ref]
            #
            # for key, weight in state_dict.items():
            #     if key in model_dict and model_dict[key].shape != state_dict[key].shape:
            #         pretrained_dict.pop(key)
            #         hf_logger.info("[{}] shape change: pretrained model {} --> new model {}"
            #                        .format(key, state_dict[key].shape, model_dict[key].shape))

            # report parameters that were not loaded
            for k in model_dict:
                if k not in pretrained_dict:
                    hf_logger.info("{} not loaded".format(k))

            model_dict.update(pretrained_dict)
            self.trainer.model.module.load_state_dict(model_dict, strict=False)
|
| 582 |
+
|
| 583 |
+
def load_vlip(self):
    """Load a VLIP / CLIP-VIP checkpoint into ``self.trainer.model.module``.

    Tries a strict load first.  On failure, remaps checkpoint keys onto the
    current architecture: ``frame_fusion`` layers are copied from the
    ``transformer`` branch, ``textual.*`` keys from their un-prefixed
    counterparts, and CLIP-style fused ``in_proj`` attention weights are
    split into separate q/k/v projections.
    """
    if self.pretrained_path:
        state_dict = get_state_dict(self.pretrained_path)
        try:
            hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
            self.trainer.model.module.load_state_dict(state_dict)
            hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
        except:
            hf_logger.info("\nload pretrained_path: strict=False, remove visual branches......\n")
            # Self-referencing alias so the flat checkpoint can be read through
            # the same ["state_dict"] indirection as a wrapped checkpoint.
            state_dict["state_dict"] = state_dict

            model_dict = self.trainer.model.module.state_dict()  # current model structure
            pretrained_dict = {
                k: v for k, v in state_dict["state_dict"].items() if k in model_dict
            }  # weights usable from the pretrained model

            # frame_fusion layers are initialised from the transformer branch
            for k, _ in model_dict.items():
                if "frame_fusion" in k:
                    k_ref = k.replace("frame_fusion", "transformer")
                    pretrained_dict[k] = state_dict["state_dict"][k_ref]

            # textual.* keys map to the un-prefixed checkpoint keys
            for k, _ in model_dict.items():
                if "textual." in k:
                    k_ref = k.replace("textual.", "")
                    pretrained_dict[k] = state_dict["state_dict"][k_ref]

            # for CLIP_VIP
            for key, weight in state_dict["state_dict"].items():
                if "in_proj_weight" in key:
                    # split the fused qkv projection into three tensors
                    q_proj, k_proj, v_proj = weight.chunk(3, dim=0)
                    key_q = key.replace("in_proj_weight", "q_proj.weight")
                    key_k = key.replace("in_proj_weight", "k_proj.weight")
                    key_v = key.replace("in_proj_weight", "v_proj.weight")
                    pretrained_dict[key_q] = q_proj
                    pretrained_dict[key_k] = k_proj
                    pretrained_dict[key_v] = v_proj
                elif "in_proj_bias" in key:
                    q_proj, k_proj, v_proj = weight.chunk(3, dim=0)
                    key_q = key.replace("in_proj_bias", "q_proj.bias")
                    key_k = key.replace("in_proj_bias", "k_proj.bias")
                    key_v = key.replace("in_proj_bias", "v_proj.bias")
                    pretrained_dict[key_q] = q_proj
                    pretrained_dict[key_k] = k_proj
                    pretrained_dict[key_v] = v_proj
                elif key == "visual.positional_embedding":
                    pretrained_dict["visual.positional_embedding.weight"] = weight
                elif key == "visual.class_embedding":
                    # replicate the single class token into the added cls slots
                    pretrained_dict["visual.added_cls"] = weight.expand(3, -1)

            # report parameters that were not loaded
            for k in model_dict:
                if k not in pretrained_dict:
                    hf_logger.info("{} not loaded".format(k))

            model_dict.update(pretrained_dict)
            self.trainer.model.module.load_state_dict(model_dict, strict=False)
|
| 639 |
+
|
| 640 |
+
def load_video_ldm(self):
    """Non-strictly load a video-LDM checkpoint and log the key diff.

    Loads onto CPU, unwraps an optional DDP ``.module`` wrapper, and logs
    the missing/unexpected keys reported by ``load_state_dict``.
    """
    if not self.pretrained_path:
        return
    state = torch.load(self.pretrained_path, "cpu")
    # unwrap DistributedDataParallel if present
    target = self.trainer.model.module if hasattr(self.trainer.model, "module") else self.trainer.model
    missing_keys, unexpected_keys = target.load_state_dict(state, strict=False)
    hf_logger.info(f"missing_keys: {missing_keys}")
    hf_logger.info(f"unexpected_keys: {unexpected_keys}")
|
| 649 |
+
|
| 650 |
+
def load_mmdit(self):
    """Load an MMDiT checkpoint into ``self.trainer.model.module``.

    Tries a strict load first.  On failure, the patch-embedding projection
    is widened along its input-channel dimension (pretrained channels kept,
    new channels taken from the fresh model) and every other mismatched
    tensor is dropped before a non-strict load.
    """
    if self.pretrained_path:
        state_dict = get_state_dict(self.pretrained_path)
        try:
            hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
            self.trainer.model.module.load_state_dict(state_dict)
            hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
        except:
            hf_logger.info("\nload pretrained_path: strict=False, remove invalid parameter......\n")

            model_dict = self.trainer.model.module.state_dict()  # current model structure
            pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}  # weights usable from the pretrained model

            for key, weight in state_dict.items():
                if key in model_dict and model_dict[key].shape != state_dict[key].shape:
                    if "x_embedder.proj.weight" in key:  # [1280, 4, 2, 2] -> [1280, 16, 2, 2]
                        # keep the pretrained 4 input channels, take the extra
                        # channels from the freshly initialised model
                        pretrained_dict[key] = torch.cat(
                            (state_dict[key], model_dict[key][:, 4:].to(state_dict[key].device)), dim=1
                        )
                        hf_logger.info(
                            "[{}] shape change: pretrained model {} --> new model {}".format(
                                key, state_dict[key].shape, model_dict[key].shape
                            )
                        )
                    else:
                        # any other mismatched tensor cannot be adapted — drop it
                        pretrained_dict.pop(key)
                        hf_logger.info(
                            "[{}] popped: pretrained model {} --> new model {}".format(
                                key, state_dict[key].shape, model_dict[key].shape
                            )
                        )

            # report parameters that were not loaded
            for k in model_dict:
                if k not in pretrained_dict:
                    hf_logger.info("{} not loaded".format(k))

            model_dict.update(pretrained_dict)
            self.trainer.model.module.load_state_dict(model_dict, strict=False)
|
| 689 |
+
|
| 690 |
+
def load_selftok(self):
    """Load a Selftok checkpoint, resizing the image-token vocabulary.

    Tries a strict load first.  On failure, falls back to a partial load
    where tensors indexed by the image vocabulary ("image_embeddings" and
    "head" weights) keep their first ``img_vocab`` pretrained rows and take
    any extra rows from the freshly initialised model; everything else is
    loaded as-is when the key matches.
    """
    if not self.pretrained_path:
        return
    state_dict = get_state_dict(self.pretrained_path)
    try:
        hf_logger.info("pretrained_path is {}".format(self.pretrained_path))
        self.trainer.model.module.load_state_dict(state_dict)
        hf_logger.info("successful loaded state dict from {}".format(self.pretrained_path))
    except Exception:  # narrow from bare except: don't swallow KeyboardInterrupt
        hf_logger.info("\nload pretrained_path: strict=False, remove invalid parameter......\n")

        model_dict = self.trainer.model.module.state_dict()  # current model structure
        pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}  # usable pretrained weights

        img_vocab = 16384  # pretrained image-token vocabulary size
        hf_logger.info("cur_vocab {}".format(model_dict["image_embeddings.weight"].shape))
        for key, weight in state_dict.items():
            # Fix vs. previous revision: only resize keys that exist in the
            # current model AND whose shapes actually differ (consistent with
            # the sibling loaders). Previously unknown keys raised KeyError
            # and matching-shape tensors were partially clobbered with the
            # fresh model's random rows.
            if key not in model_dict or model_dict[key].shape == state_dict[key].shape:
                continue
            if "image_embeddings" in key or "head" in key:
                # keep the pretrained image-vocab rows, pad the rest with the
                # current model's freshly initialised rows
                pretrained_dict[key] = torch.cat(
                    (
                        state_dict[key][:img_vocab],
                        model_dict[key][img_vocab:].to(state_dict[key].device),
                    ),
                    dim=0,
                )
                hf_logger.info(
                    "[{}] shape change: pretrained model {} --> new model {}".format(
                        key, state_dict[key].shape, model_dict[key].shape
                    )
                )

        # report parameters that were not loaded
        for k in model_dict:
            if k not in pretrained_dict:
                hf_logger.info("{} not loaded".format(k))

        model_dict.update(pretrained_dict)
        self.trainer.model.module.load_state_dict(model_dict, strict=False)
|
| 743 |
+
|
| 744 |
+
|
| 745 |
+
class LoadStateDict_resume(HookBase):
    """Training hook that restores a full training session before training.

    Restores, in order: the optimizer state, the iteration counter, and the
    model weights from the checkpoint at ``pretrained_path``.
    """

    def __init__(self, pretrained_path):
        self.pretrained_path = pretrained_path

    def before_train(self):
        """Resume optimizer, iteration and model state from the checkpoint."""
        if not self.pretrained_path:
            return
        state_dict = get_state_dict_resume(self.pretrained_path)

        self.trainer.optimizer.load_state_dict(state_dict["opt"])
        hf_logger.info("successfully resume optimizer state dict from {}".format(self.pretrained_path))

        self.trainer.iter = state_dict["iter"]
        hf_logger.info("successfully resume from iter: {}".format(self.trainer.iter))

        self.trainer.model.module.load_state_dict(state_dict["state_dict"])
        hf_logger.info("successfully resume model state dict from {}".format(self.pretrained_path))
|
| 761 |
+
|
| 762 |
+
|
| 763 |
+
class SaveModelandDisc(HookBase):
    """Training hook that periodically saves the model and its discriminator.

    Checkpoints are written locally under ``<output_path>/ckpt`` and, when
    moxing is available, mirrored to the cloud at ``cfg.train_url``.  The
    run config and a copy of the source tree are backed up before training.
    """

    def __init__(self, cfg, is_root):
        self.output_path = cfg.common.output_path
        # only the root rank performs any saving/uploading
        self.is_root = is_root
        # save interval in iterations, derived from epochs x dataloader length
        # (dataloader_len defaults to 2000 when the config does not provide it)
        self.save_interval = int(cfg.common.save_per_epochs * getattr(cfg, 'dataloader_len', 2000))

        # only for save config before train
        self._cfg = cfg
        # optionally remove local checkpoints once uploaded to the cloud
        self.delete_after_upload = cfg.common.get("delete_after_upload", False)

    @property
    def save_path(self):
        """Lazily create and cache the local checkpoint directory."""
        # cached in __dict__ so the directory is only created once
        if self.__dict__.get("_output_path", None) is None:
            output_path = os.path.join(self.output_path, "ckpt")
            os.makedirs(output_path, exist_ok=True)
            self.__dict__["_output_path"] = output_path
        return self.__dict__["_output_path"]

    def before_train(self):
        """Back up the run configuration and source code before training."""
        self.save_config()
        self.save_code()

    def after_step(self):
        """Save model + discriminator weights every ``save_interval`` iterations."""
        if self.trainer.iter % self.save_interval == (self.save_interval - 1):
            save_name = "iter_%d.pth" % (self.trainer.iter)
            save_disc_name = "iter_%d_disc.pth" % (self.trainer.iter)

            self.save_model(self.trainer.model.module.state_dict(), save_name)
            self.save_model(self.trainer.loss.module.discriminator.state_dict(), save_disc_name)
            torch.cuda.empty_cache()

    def save_config(self):
        """Dump the run configuration to ``run.yml`` (root rank only, once)."""
        if self.is_root:
            hf_logger.save_args(self._cfg)
            run_save_path = os.path.join(self.output_path, "run.yml")
            if not os.path.isfile(run_save_path):
                try:
                    with open(run_save_path, "w") as args_fh:
                        yaml.dump(self._cfg.__dict__, args_fh, sort_keys=False)
                    hf_logger.info("Run configs dump to %s" % run_save_path)
                except:
                    # best-effort: an unserialisable config must not stop training
                    hf_logger.info("fail to dump run config!!")

    def save_code(self):
        """Best-effort backup of the source tree to the cloud (root rank only)."""
        if self.is_root:
            try:
                import moxing as mox

                # three levels up from this file == project source root
                local_code_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../../.."))
                roma_code_path = os.path.join(self._cfg.train_url, os.path.split(local_code_path)[-1])
                mox_copy(local_code_path, roma_code_path, parallel=True)
                hf_logger.info("backup code success, roma_code_path:{}".format(roma_code_path))
            except:
                # moxing unavailable (local run) — silently skip the backup
                pass

    def save_model(self, checkpoint, save_name):
        """Save ``checkpoint`` locally and, when possible, upload it to the cloud.

        Falls back to keeping only the local copy when moxing is unavailable.
        """
        if self.is_root:
            local_weights = os.path.join(self.save_path, save_name)
            torch.save(checkpoint, local_weights)
            try:
                import moxing as mox

                roma_weights_fp = os.path.join(self._cfg.train_url, local_weights)
                roma_weights_dirname = os.path.dirname(roma_weights_fp)
                if not mox.file.exists(roma_weights_dirname):
                    mox.file.make_dirs(roma_weights_dirname)
                mox_copy(local_weights, roma_weights_fp)
                hf_logger.info("save weight success, roma_weights_fp:{}".format(roma_weights_fp))
            except:
                # no cloud access — the local copy is the final artifact
                hf_logger.info("save weight success, local_weights_fp:{}".format(local_weights))

            if self.delete_after_upload:
                os.remove(local_weights)
                hf_logger.info(f"{local_weights} removed")
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/cloud_copy.py
ADDED
|
@@ -0,0 +1,450 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import subprocess
|
| 6 |
+
from typing import Any
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from contextlib import ContextDecorator
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
os.environ["MOX_SILENT_MODE"] = "1"
|
| 12 |
+
os.environ["MOX_FILE_LARGE_FILE_METHOD"] = "1" # for moxing download acceleration
|
| 13 |
+
import moxing as mox
|
| 14 |
+
|
| 15 |
+
mox.file.set_auth(is_secure=False)
|
| 16 |
+
|
| 17 |
+
except:
|
| 18 |
+
mox = None
|
| 19 |
+
from mimogpt.utils import read_from_yaml
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class MemartsCopyContext(ContextDecorator):
    def __init__(self):
        """
        This context manager is only in use when memarts is enabled, if memarts is not enabled it does nothing.
        It basically set _USE_MEMARTS to False when enter this context, then set _USE_MEMARTS back to True when exit
        Because normal mox copy in main process will cause error in dataloader when memarts is enabled,
        so we need to use this context manager to wrap any mox copy call to avoid error.

        To use this context manager:
        with MemartsCopyContext():
            mox.file.copy(xxx, xx)

        or

        @MemartsCopyContext()
        def mox_copy(src, dst):
            mox.file.copy_parallel(src, dst)
        """
        # Only active when the env flag is set AND moxing imported successfully;
        # the short-circuit keeps `mox` unevaluated when the flag is off.
        memarts_enabled = os.environ.get("USE_MEMARTS") == "1"
        self.use_memarts = memarts_enabled and (mox is not None)

    def __enter__(self):
        if not self.use_memarts:
            return
        mox.file.file_io._USE_MEMARTS = False

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
        if not self.use_memarts:
            return
        mox.file.file_io._USE_MEMARTS = True
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _check_dir(dist_dir):
|
| 52 |
+
copy_flag = True
|
| 53 |
+
if os.path.exists(dist_dir):
|
| 54 |
+
copy_flag = False
|
| 55 |
+
if not os.path.exists(os.path.dirname(dist_dir)):
|
| 56 |
+
os.makedirs(os.path.dirname(dist_dir))
|
| 57 |
+
return copy_flag
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def cmd_exec(cmd, just_print=False):
    """Print *cmd* with a timestamp and, unless *just_print*, run it in a shell."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("\n{}:INFO:{}".format(stamp, cmd))
    if just_print:
        return
    os.system(cmd)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@MemartsCopyContext()
def mox_copy(src, dst, parallel=False):
    """Copy between local disk and S3 via moxing, retrying forever on failure.

    No-ops (with a log line) when ``src == dst`` or when neither side is an
    ``s3://`` path.  On failure it sleeps 60s and retries indefinitely,
    emitting an escalated warning every 10 consecutive failures.
    """
    if src == dst:
        cmd_exec("mox_copy, src=dst={}, return".format(src), just_print=True)
        return
    if not (src.startswith("s3://") or dst.startswith("s3://")):
        cmd_exec(
            "mox_copy, at least one of src and dst need startswith s3://, src={}, dst={}, return".format(src, dst),
            just_print=True,
        )
        return
    # BUG FIX: the counter must persist across retries; it was previously
    # reset to 0 inside the `while` loop, so `failed % 10 == 0` could never
    # fire and the escalated warning was unreachable.
    failed = 0
    while True:
        try:
            cmd_exec(f"mox copy: {src} {dst}", just_print=True)
            if parallel:
                mox.file.copy_parallel(src, dst)
            else:
                mox.file.copy(src, dst)
            break
        except Exception as e:
            failed += 1
            time.sleep(60)
            if failed % 10 == 0:
                cmd_exec(
                    "error, maybe need check. copy failed {} times from {} to {}".format(failed, src, dst),
                    just_print=True,
                )
            # log the underlying error on every failure for diagnosability
            cmd_exec("error message: {}".format(e), just_print=True)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def uncompress(tar_file):
    """Extract *tar_file* in its own directory in the background, then delete it.

    If other `tar -xf` extractions are already running they are listed for
    visibility, but this one is started regardless (the trailing `&`
    backgrounds it).  Callers must later wait for the tar processes to
    finish before using the extracted data.
    """
    # count currently running `tar xf` processes (grep chain excludes itself)
    ret = subprocess.check_output("ps -ef | grep tar | grep xf | grep -v grep | grep -v 'sh -c' | wc -l", shell=True)
    ret = int(ret.decode("utf-8"))
    if ret > 0:
        cmd_exec("find uncompress running process:", just_print=True)
        os.system("ps -ef | grep tar | grep xf | grep -v grep | grep -v 'sh -c'")
    tar_name = os.path.split(tar_file)[-1]
    tar_dir = os.path.dirname(tar_file)
    # extract in the archive's directory, remove the archive, all backgrounded
    cmd_exec("cd {} && tar -xf {} && rm -rf {} &".format(tar_dir, tar_name, tar_name))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def copy_data_to_cache(src_dir="", dist_dir="", rank=0, world_size=1, args=None):
    """Copy a dataset from cloud storage (*src_dir*) into a local cache (*dist_dir*).

    Directory sources are copied entry by entry, overlapping each copy with
    the background extraction of the previously copied ``.tar``.  With
    ``args.local_shuffle_type == 4`` only the ``split_part`` shards assigned
    to this *rank* (``part_idx % world_size == rank`` and
    ``part_idx < args.zip_max_split``) are fetched.  The function blocks
    until every background ``tar`` extraction has finished.  If *dist_dir*
    already exists, nothing is copied.
    """
    start_t = time.time()
    # also creates the parent directory of dist_dir as a side effect
    copy_flag = _check_dir(dist_dir)

    if args is not None and args.local_shuffle_type == 4:
        local_shuffle = args.local_shuffle_type
        zip_max_split = args.zip_max_split
        print(
            "training in cloud, using local_shuffle_type={}, zip_max_split={}".format(
                args.local_shuffle_type, zip_max_split
            )
        )
    else:
        # shard filtering disabled — copy everything
        local_shuffle = 0
        zip_max_split = -1

    if copy_flag:
        print("copy from {} to {}".format(src_dir, dist_dir))
        tar_files = []
        t0 = time.time()
        # mindrecord paths point at a file inside the dataset dir — copy the dir
        if ".mindrecord" in src_dir:
            src_dir = os.path.split(src_dir)[0]
            dist_dir = os.path.split(dist_dir)[0]

        last_file = None
        allready_uncompress = []
        copy_dir = []

        if mox.file.is_directory(src_dir):  # no new tar in tar !!!
            subfiles = [subfile for subfile in mox.file.list_directory(src_dir, recursive=False)]
            subfiles.sort()
            for subfile in subfiles:
                sub_src_dir = os.path.join(src_dir, subfile)
                sub_dist_dir = os.path.join(dist_dir, subfile)

                if local_shuffle and "split_part" in sub_src_dir:
                    if sub_src_dir.endswith("_map.pkl"):
                        continue
                    part_idx = int(
                        os.path.split(sub_src_dir)[-1][-8:-4]
                    )  # "AAAA_split_partBBBB.pkl" or "AAAA_split_partBBBB.zip"
                    # keep only the shards assigned to this rank
                    if part_idx % world_size != rank or part_idx >= zip_max_split:
                        continue

                # uncompress last file
                # (overlaps extraction with the next copy)
                if last_file is not None and last_file.endswith(".tar"):
                    uncompress(last_file)
                    allready_uncompress.append(last_file)

                # copy new file
                cmd_exec("copy from {} to {}".format(sub_src_dir, sub_dist_dir), just_print=True)
                if mox.file.is_directory(sub_src_dir):
                    mox_copy(sub_src_dir, sub_dist_dir, parallel=True)
                    copy_dir.append(sub_dist_dir)
                    last_file = None
                else:
                    mox_copy(sub_src_dir, sub_dist_dir)
                    last_file = sub_dist_dir

            # extract the final copied tar, if any
            if last_file is not None and last_file.endswith(".tar"):
                uncompress(last_file)
                allready_uncompress.append(last_file)

        else:
            # single-file source
            mox_copy(src_dir, dist_dir)
            if dist_dir.endswith("tar") or dist_dir.endswith("tar.gz"):
                tar_files.append(dist_dir)

        t1 = time.time()
        cmd_exec("copy datasets, time used={:.2f}s".format(t1 - t0), just_print=True)

        # final check, no tar forget
        # (tars that arrived inside copied directories were never extracted)
        for _dir in copy_dir:
            tar_list = list(Path(_dir).glob("**/*.tar"))
            tar_files.extend(tar_list)
            tar_list = list(Path(_dir).glob("**/*.tar.gz"))
            tar_files.extend(tar_list)

        tar_files = [x for x in tar_files if str(x) not in allready_uncompress]

        cmd_exec("tar_files:{}".format(tar_files), just_print=True)
        for tar_file in tar_files:
            tar_dir = os.path.dirname(tar_file)
            cmd_exec("cd {} && tar -xf {} && rm -rf {} &".format(tar_dir, tar_file, tar_file))

        # final check, no tar process
        # (block until every backgrounded extraction has finished)
        while True:
            ret = subprocess.check_output(
                "ps -ef | grep tar | grep xf | grep -v grep | grep -v 'sh -c' | wc -l", shell=True
            )
            ret = int(ret.decode("utf-8"))
            if ret == 0:
                cmd_exec("not find tar process, break", just_print=True)
                break
            else:
                cmd_exec("find {} tar process, sleep 10s".format(ret), just_print=True)
                os.system("ps -ef | grep tar | grep xf | grep -v grep | grep -v 'sh -c'")
                time.sleep(10)

        cmd_exec("copy data completed!", just_print=True)

    else:
        cmd_exec(
            "since data already exists, copying is not required, src={}, dst={}".format(src_dir, dist_dir),
            just_print=True,
        )

    end_t = time.time()
    cmd_exec("copy cost total time {:.2f} sec".format(end_t - start_t), just_print=True)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
def vid_dataset_copy(fp, args=None):
    """Copy and extract the (hard-coded) video-dataset tars described by yaml *fp*.

    Reads ``data_path.cloud_copy.src_dir``/``dst_dir`` from the yaml, copies
    each archive sequentially, extracts it and removes the archive.  Skips
    entirely (returning None) when the destination already exists; otherwise
    returns the parsed yaml dict.
    """
    import moxing as mox
    from pathlib import Path

    y = read_from_yaml(fp)
    src = y["data_path"]["cloud_copy"]["src_dir"]
    dst = y["data_path"]["cloud_copy"]["dst_dir"]

    # NOTE: returns None here (not the yaml) — callers must handle that
    if os.path.exists(os.path.dirname(dst)):
        print("Train dataset exist, skip dataset copy.")
        return

    # print("Copying data from {} to {}".format(src, dst))
    # tar_files = []
    # mox.file.copy_parallel(src, dst)
    # tar_files.extend(list(Path(dst).glob('**/*.tar')))

    # fixed archive list for this dataset layout
    subfiles = [
        "CC3M.tar",
        "dev_test.tar",
        "vid_test.tar",
        "vid_9_open_1fps.tar",
        "models.tar",
        "json.tar",
    ]  # , 'open_pretrain_0328.tar', 'open_pretrain_0331.tar']
    for subfile in subfiles:
        s3_tar_file = os.path.join(src, subfile)
        tar_file = os.path.join(dst, subfile)
        print("Copying data from {} to {}".format(s3_tar_file, tar_file))
        mox.file.copy(s3_tar_file, tar_file)

        # extract in place (synchronously) and delete the archive
        tar_dir = os.path.dirname(tar_file)
        print("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(tar_dir, tar_file, tar_file))
        os.system("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(tar_dir, tar_file, tar_file))
    return y
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def img_dataset_copy(fp, rank=0, world_size=1, args=None):
    """Copy the image-dataset shards described by yaml *fp* into the local cache.

    With local shuffling enabled, only the ``split_part`` shards assigned to
    this *rank* (``part_idx % world_size == rank`` within
    ``[zip_min_split, zip_max_split)``) are copied; copied zips are unzipped
    in the background when ``do_unzip`` is set.  Auxiliary tars listed under
    ``cloud_copy.aux_tars`` are copied and extracted synchronously.  Skips
    entirely (returning None) when the destination already exists; otherwise
    returns the parsed yaml dict.  Deprecated — see ``common_cloud_copy``.
    """
    import moxing as mox
    from pathlib import Path

    y = read_from_yaml(fp)
    src = y["data_path"]["cloud_copy"]["src_dir"]
    dst = y["data_path"]["cloud_copy"]["dst_dir"]

    # NOTE: returns None here (not the yaml) — callers must handle that
    if os.path.exists(os.path.dirname(dst)):
        print("Train dataset exist, skip dataset copy.")
        return

    local_shuffle = int(y["dataloader"]["local_shuffle_type"])
    zip_max_split = int(y["data_path"]["zip_max_split"])
    zip_min_split = int(y["data_path"]["zip_min_split"]) if "zip_min_split" in y["data_path"] else 0
    do_unzip = bool(y["data_path"]["do_unzip"])

    subfiles = [subfile for subfile in mox.file.list_directory(src, recursive=False)]
    subfiles.sort()
    for subfile in subfiles:
        sub_src_file = os.path.join(src, subfile)
        sub_dst_file = os.path.join(dst, subfile)

        if local_shuffle and "split_part" in sub_src_file:
            if sub_src_file.endswith("_map.pkl") or sub_src_file.endswith("_otn.pkl"):
                continue
            part_idx = int(
                os.path.split(sub_src_file)[-1][-8:-4]
            )  # "AAAA_split_partBBBB.pkl" or "AAAA_split_partBBBB.zip"
            # keep only the shards assigned to this rank, within the split range
            if part_idx % world_size != rank or part_idx >= zip_max_split or part_idx < zip_min_split:
                continue

        # TODO hard code for MTI_ori_split
        if "MTI_ori_split" in sub_src_file and sub_src_file.endswith(".pkl"):
            sub_src_file = sub_src_file.replace("MTI_ori_split", "MTI_ori_split_en_zh_pkl")

        mox.file.copy(sub_src_file, sub_dst_file)

        if sub_dst_file.endswith(".zip") and do_unzip:
            # unzip into a directory named after the shard index, in the background
            zip_dir, zip_name = os.path.split(sub_dst_file)
            zip_idx = int(zip_name[-8:-4])  # AAAA_split_partBBBB.zip"
            cmd_unzip = "(cd {}; mkdir {}; unzip -qq {} -d {}/ > /dev/null 2>&1; rm -rf {})&".format(
                zip_dir, zip_idx, zip_name, zip_idx, zip_name
            )
            print(cmd_unzip)
            os.system(cmd_unzip)

    # copy aux tar files
    s3_tar_files = y["data_path"]["cloud_copy"]["aux_tars"]
    for s3_tar_file in s3_tar_files:
        tar_name = os.path.basename(s3_tar_file)
        dst_tar_file = os.path.join(dst, tar_name)
        mox.file.copy(s3_tar_file, dst_tar_file)

        if dst_tar_file.endswith(".tar"):
            # extract synchronously and delete the archive
            tar_dir = os.path.dirname(dst_tar_file)
            print("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(tar_dir, tar_name, tar_name))
            os.system("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(tar_dir, tar_name, tar_name))
    return y
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
# below are new api, img_dataset_copy is deprecated.
|
| 319 |
+
def mox_copy_with_check(cloud_file, local_file, parallel=False):
    """Copy ``cloud_file`` to ``local_file`` unless a local copy already exists.

    The extension-stripped path (``local_file[:-4]``) is also checked, so an
    archive that was already copied and extracted (then deleted) is not
    fetched again — assumes a 4-character suffix like ``.zip``/``.pkl``
    (TODO confirm).
    """
    already_present = os.path.exists(local_file) or os.path.exists(local_file[:-4])
    if already_present:
        print(f"mox_copy, dst={local_file} already exists!, skip copy")
        return
    mox_copy(cloud_file, local_file, parallel)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def common_cloud_copy(cfg, rank=0, world_size=1):
    """Stage training data from the cloud (s3) onto the local node.

    Step 1 copies the auxiliary files listed in ``cfg.cloud_copy.src_files``
    into ``cfg.cloud_copy.dst_dir`` and post-processes them by extension
    (``.tar`` -> untar, ``.zip`` -> unzip, ``.whl`` -> pip install).
    Step 2 returns early when MemArts is enabled (data is then read remotely).
    Step 3 copies the per-split pkl/zip shards of every dataset listed under
    ``cfg.data_path.train``, sharding the work across nodes by
    ``idx % world_size == rank``.

    :param cfg: config object; expected layout follows the attribute accesses
        below (assumed EasyDict-like -- TODO confirm).
    :param rank: node index of this job.
    :param world_size: total number of nodes sharing the copy work.
    """
    # step1: copy aux file
    src_files = cfg.cloud_copy.src_files
    dst_dir = cfg.cloud_copy.dst_dir
    for cloud_file in src_files:
        if os.path.splitext(cloud_file)[-1]:  # has an extension -> single file
            file_name = os.path.basename(cloud_file)
            local_file = os.path.join(dst_dir, file_name)
        else:  # directory (possibly given with a trailing slash)
            cloud_file = os.path.dirname(cloud_file)
            file_name = cloud_file.split("/")[-1]
            local_file = os.path.join(dst_dir, file_name)
        mox_copy_with_check(cloud_file, local_file, mox.file.is_directory(cloud_file))

        if file_name.endswith(".tar"):
            print("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(dst_dir, file_name, file_name))
            os.system("cd {}; tar -xvf {} > /dev/null 2>&1; rm -rf {}".format(dst_dir, file_name, file_name))
        if file_name.endswith(".zip"):
            print("cd {}; unzip {} > /dev/null 2>&1; rm -rf {}".format(dst_dir, file_name, file_name))
            os.system("cd {}; unzip {} > /dev/null 2>&1; rm -rf {}".format(dst_dir, file_name, file_name))
        if file_name.endswith(".whl"):
            print("cd {}; pip install {}".format(dst_dir, file_name))
            os.system("cd {}; pip install {}".format(dst_dir, file_name))

    # step2, check memarts: with MemArts enabled shards are read remotely, not copied.
    if os.environ.get("USE_MEMARTS") == "1":
        return

    # step3, copy train zip and pkl
    for _, info in cfg.data_path.train.__dict__.items():
        # NOTE: renamed the unpacked ``type`` field to ``data_type`` so the
        # builtin is not shadowed; the remaining fields are kept for position.
        zip_root, pkl_root, data_list, data_type, columns, pkl_format, split_range, ratio = info[:8]

        for idx in range(split_range[0], split_range[1]):
            # split entire list by nodes (here world size is the total nodes of one job, rank is node number)
            if idx % world_size != rank:
                continue

            # copy pkl
            if pkl_root:
                pkl_name = data_list.format(idx)
                cloud_file = os.path.join(pkl_root, pkl_name)
                mox_copy_with_check(
                    cloud_file, cloud_file.replace("s3://", "/cache/"), mox.file.is_directory(cloud_file)
                )

            # copy zip
            if zip_root:
                if data_list.endswith("pkl"):
                    zip_name = data_list.replace(".pkl", ".zip").format(idx)
                elif data_list.endswith("parquet"):
                    zip_name = data_list.replace(".parquet", ".zip").format(idx)
                else:
                    raise NotImplementedError
                cloud_file = os.path.join(zip_root, zip_name)
                mox_copy_with_check(
                    cloud_file, cloud_file.replace("s3://", "/cache/"), mox.file.is_directory(cloud_file)
                )

            # TODO: hard code for multi-zip data copy
            if len(info) >= 9 and isinstance(info[8], (list, tuple)):
                file_root, file_name = info[8]
                if isinstance(file_root, str) and file_root.startswith("s3://"):
                    file_name = file_name.format(idx)
                    cloud_file = os.path.join(file_root, file_name)
                    mox_copy_with_check(
                        cloud_file, cloud_file.replace("s3://", "/cache/"), mox.file.is_directory(cloud_file)
                    )
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def apply_filename_adapter(type, file_pattern, idx):
    """Render ``file_pattern`` for shard ``idx`` under adapter ``type``.

    ``type is None`` formats the pattern with the shard index directly;
    ``"webvid_raw"`` patterns take an inclusive, 1-based range of 50 ids.
    Any other adapter name raises ``NotImplementedError``.
    """
    if type is None:
        return file_pattern.format(idx)
    if type == "webvid_raw":
        start, stop = idx * 50 + 1, (idx + 1) * 50
        return file_pattern.format(start, stop)
    raise NotImplementedError
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def universal_cloud_copy(
    url=None,
    dst=None,
    local_shuffle=False,
    file_pattern=None,
    adapter_type=None,
    split_range=None,
    cmd=None,
    rank=0,
    world_size=1,
):
    """Copy ``url`` -> ``dst`` from the cloud, optionally shard-by-shard.

    With ``local_shuffle`` the files named by ``file_pattern`` over
    ``split_range`` are distributed across nodes (``idx % world_size == rank``);
    otherwise ``url`` is copied wholesale.  ``cmd``, when given, is executed
    afterwards.  No-op when ``url`` or ``dst`` is missing.
    """
    if url is None or dst is None:
        return

    if local_shuffle:
        assert (
            file_pattern is not None and split_range is not None
        ), f"Set file pattern and split range in yaml for local shuffle"
        for shard_idx in range(split_range[0], split_range[1]):
            # split entire list by nodes (here world size is the total nodes of one job, rank is node number)
            if shard_idx % world_size != rank:
                continue
            cur_file = apply_filename_adapter(adapter_type, file_pattern, shard_idx)
            cloud_file = os.path.join(url, cur_file)
            local_file = os.path.join(dst, cur_file)
            if os.path.exists(local_file):
                # only mimo needs this because of debug mode 1, skip already copied files
                print(f"mox_copy, dst={local_file} already exists!, skip copy")
                continue
            if not mox.file.exists(cloud_file):
                print(f"mox_copy, src={cloud_file} not exists, skip copy")
                continue
            mox_copy(cloud_file, local_file, mox.file.is_directory(cloud_file))
    else:
        if os.path.exists(dst):
            # only mimo needs this because of debug mode 1, skip already copied files
            # NOTE: this early return also skips ``cmd``, matching the original flow.
            print(f"mox_copy, dst={dst} already exists!, skip copy")
            return
        mox_copy(url, dst, parallel=mox.file.is_directory(url))

    if cmd is not None:
        print(f"Begin to run: {cmd}")
        os.system(cmd)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/context_utils.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from contextlib import ContextDecorator
from typing import Any

try:
    import moxing as mox

    mox.file.set_auth(is_secure=False)
except ImportError:
    mox = None


class MemartsCopyContext(ContextDecorator):
    """Temporarily disable MemArts-backed mox I/O around a plain copy.

    Active only when MemArts is enabled (``USE_MEMARTS=1`` and moxing is
    importable); otherwise enter/exit are no-ops.  On entry it flips
    ``mox.file.file_io._USE_MEMARTS`` off, restoring it on exit, because a
    normal mox copy in the main process would otherwise break the dataloader
    while MemArts is enabled.

    Usage::

        with MemartsCopyContext():
            mox.file.copy(src, dst)

    or as a decorator::

        @MemartsCopyContext()
        def mox_copy(src, dst):
            mox.file.copy_parallel(src, dst)
    """

    def __init__(self):
        self.use_memarts = (os.environ.get("USE_MEMARTS") == "1") and (mox is not None)

    def __enter__(self):
        if self.use_memarts:
            mox.file.file_io._USE_MEMARTS = False

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
        if self.use_memarts:
            mox.file.file_io._USE_MEMARTS = True
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/ema.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
from .train_loop import HookBase
|
| 5 |
+
import torch
|
| 6 |
+
import copy
|
| 7 |
+
import os
|
| 8 |
+
from .cloud_copy import mox_copy
|
| 9 |
+
from mimogpt.utils import hf_logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def devices():
    """Return the preferred torch device: CUDA when available, else CPU."""
    backend = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.device(backend)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class EMAhook(HookBase):
    """Trainer hook that maintains and periodically saves an EMA copy of the model.

    After each real optimizer step (i.e. once per ``gradient_accumulation_steps``
    micro-steps) the EMA weights are updated as
    ``ema = f * ema + (1 - f) * param``.  Every ``save_interval`` iterations the
    EMA state dict is written locally and, when moxing is available, uploaded
    to ``cfg.train_url``.
    """

    def __init__(self, cfg, is_root):
        self.cfg = cfg
        self.use_ema = self.cfg.optimize.use_ema
        self.ema_test_interval = self.cfg.optimize.ema_test_interval
        self.ema_factor = self.cfg.optimize.ema_factor
        self.ema_in_cpu = self.cfg.optimize.ema_in_cpu
        self.gradient_accumulation_steps = self.cfg.optimize.get("gradient_accumulation_steps", 1)

        self.is_root = is_root
        self.save_interval = int(self.cfg.common.save_per_epochs * self.cfg.dataloader_len)
        self.delete_after_upload = cfg.common.get("delete_after_upload", False)
        self.output_path = self.cfg.common.output_path

    def after_step(self):
        # Only update on real optimizer steps, honouring gradient accumulation.
        is_update_step = (self.trainer.iter + 1) % self.gradient_accumulation_steps == 0
        if self.use_ema and is_update_step:
            with torch.no_grad():
                live_params = self.trainer.model.parameters()
                ema_params = self.trainer.ema_model.parameters()
                for src, tgt in zip(live_params, ema_params):
                    snapshot = src.data.detach().clone()
                    if self.ema_in_cpu:
                        snapshot = snapshot.cpu()
                    tgt.data.mul_(self.ema_factor).add_((1 - self.ema_factor) * snapshot)

        if self.trainer.iter % self.save_interval == (self.save_interval - 1):
            if self.cfg.optimize.use_ema:
                save_name = "ema_iter_%d.pth" % (self.trainer.iter)
                self.save_model(self.trainer.ema_model.state_dict(), save_name)
                torch.cuda.empty_cache()

    def before_train(self):
        if self.use_ema:
            print("Build EMA Model...")
            ema_model = copy.deepcopy(self.trainer.model_before_ddp)
            if not self.ema_in_cpu:
                ema_model = ema_model.to(devices())
            self.trainer.ema_model = ema_model
            self.trainer.ema_model.requires_grad_(False)
            self.trainer.ema_model.eval()
            print("Done...")

    @property
    def save_path(self):
        # Lazily create ``<output_path>/ckpt`` once and cache the path on the instance.
        if self.__dict__.get("_output_path", None) is None:
            ckpt_dir = os.path.join(self.output_path, "ckpt")
            os.makedirs(ckpt_dir, exist_ok=True)
            self.__dict__["_output_path"] = ckpt_dir
        return self.__dict__["_output_path"]

    def save_model(self, checkpoint, save_name):
        if self.use_ema:
            if self.is_root:
                local_weights = os.path.join(self.save_path, save_name)
                torch.save(checkpoint, local_weights)
                try:
                    import moxing as mox

                    roma_weights_fp = os.path.join(self.cfg.train_url, local_weights)
                    roma_weights_dirname = os.path.dirname(roma_weights_fp)
                    if not mox.file.exists(roma_weights_dirname):
                        mox.file.make_dirs(roma_weights_dirname)
                    mox_copy(local_weights, roma_weights_fp)
                    hf_logger.info("save weight success, roma_weights_fp:{}".format(roma_weights_fp))
                    if self.delete_after_upload:
                        os.remove(local_weights)
                        hf_logger.info(f"{local_weights} removed")
                except:  # noqa: E722 -- best-effort upload; fall back to the local copy
                    hf_logger.info("save weight success, local_weights_fp:{}".format(local_weights))
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/lion.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple, Optional, Callable
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
from torch.optim.optimizer import Optimizer
|
| 5 |
+
|
| 6 |
+
# functions
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def exists(val):
    """Return True iff ``val`` is not None."""
    return not (val is None)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# update functions
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
    """Apply one in-place Lion update to parameter ``p``.

    Performs decoupled weight decay, moves ``p`` against the sign of the
    beta1-interpolation between the momentum buffer and the gradient, then
    refreshes the momentum buffer ``exp_avg`` with beta2.
    """
    # stepweight decay
    p.data.mul_(1 - lr * wd)

    # weight update: sign(beta1 * exp_avg + (1 - beta1) * grad)
    direction = exp_avg.clone().mul_(beta1).add(grad, alpha=1 - beta1).sign_()
    p.add_(direction, alpha=-lr)

    # decay the momentum running average coefficient
    exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# class
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Lion(Optimizer):
    """Lion optimizer (sign-momentum update rule).

    ``eps`` and ``use_triton`` are accepted for signature compatibility but
    unused by this pure-PyTorch implementation.
    """

    def __init__(
        self,
        params,
        lr: float = 1e-4,
        betas: Tuple[float, float] = (0.9, 0.99),
        weight_decay: float = 0.0,
        eps: float = 0.0,
        use_triton: bool = False,
    ):
        assert lr > 0.0
        assert all([0.0 <= beta <= 1.0 for beta in betas])
        super().__init__(params, dict(lr=lr, betas=betas, weight_decay=weight_decay))
        # kept as an attribute so an alternative (e.g. triton) kernel can be swapped in
        self.update_fn = update_fn

    @torch.no_grad()
    def step(self, closure: Optional[Callable] = None):
        """Perform one optimization step; returns ``closure()``'s loss if given."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            lr = group["lr"]
            wd = group["weight_decay"]
            beta1, beta2 = group["betas"]
            for p in group["params"]:
                if p.grad is None:
                    continue
                state = self.state[p]
                # init state - exponential moving average of gradient values
                if len(state) == 0:
                    state["exp_avg"] = torch.zeros_like(p)
                self.update_fn(p, p.grad, state["exp_avg"], lr, wd, beta1, beta2)

        return loss
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/optimizer.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import torch
|
| 5 |
+
import inspect
|
| 6 |
+
import deepspeed
|
| 7 |
+
|
| 8 |
+
DEVICE_TYPE = os.environ.get("DEVICE_TYPE", "gpu")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def get_optimizer(cfg, parameters):
    """Instantiate the optimizer named by ``cfg.optimize.optimizer``.

    Supported names: ``adam``, ``adamw``, ``sgd``, ``adamw1`` (Lion).
    Depending on ``cfg.common`` the raw optimizer is returned (deepspeed
    manages it itself), wrapped in fairscale's ``OSS`` (ZeRO), or returned
    directly.

    Fix: the hyper-parameter kwargs are now filtered against the target
    optimizer's signature, so ``sgd`` (which accepts no ``betas``/``eps``)
    no longer raises ``TypeError``.

    :param cfg: config with ``optimize`` (optimizer, lr, ...) and ``common``
        (use_deepspeed / use_zero / use_fp16) sections.
    :param parameters: parameter iterable or param-group list.
    :raises NotImplementedError: for an unknown optimizer name.
    """
    if cfg.optimize.optimizer == "adam":
        optimize_cls = torch.optim.Adam
    elif cfg.optimize.optimizer == "adamw":
        optimize_cls = torch.optim.AdamW
    elif cfg.optimize.optimizer == "sgd":
        optimize_cls = torch.optim.SGD
    elif cfg.optimize.optimizer == "adamw1":
        from .lion import Lion

        optimize_cls = Lion
    else:
        raise NotImplementedError("Not Implement {} optimizer !!!".format(cfg.optimize.optimizer))

    # Shared hyper-parameters, filtered to those the optimizer actually accepts.
    opt_kwargs = {
        "lr": cfg.optimize.lr,
        "weight_decay": cfg.optimize.get("weight_decay", 0.01),
        "betas": cfg.optimize.get("betas", (0.9, 0.999)),
        "eps": cfg.optimize.get("eps", 1e-08),
    }
    accepted = inspect.signature(optimize_cls).parameters
    opt_kwargs = {k: v for k, v in opt_kwargs.items() if k in accepted}

    if hasattr(cfg.common, "use_deepspeed") and cfg.common.use_deepspeed:
        # deepspeed wraps the torch optimizer itself
        return optimize_cls(parameters, **opt_kwargs)
    elif cfg.common.use_zero:
        print("USING ZeRO")
        from fairscale import __version__ as __fs_version__
        from fairscale.optim import OSS

        oss_kwargs = {"broadcast_fp16": cfg.common.use_fp16}
        if __fs_version__ >= "0.4.6":
            # newer fairscale needs this for non-tensor state broadcast
            oss_kwargs["force_broadcast_object"] = True
        return OSS(parameters, optimize_cls, **opt_kwargs, **oss_kwargs)
    else:
        return optimize_cls(parameters, **opt_kwargs)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def build_optimizer(cfg, model):
    """Split trainable parameters into decay / no-decay groups and build the optimizer.

    Biases and the affine parameters of normalization layers (including the
    project's ``RMSNorm``/``LayerNorm``) get ``weight_decay=0``; every other
    trainable parameter uses ``cfg.optimize.weight_decay``.  The resulting
    param groups are handed to :func:`get_optimizer`.

    Changes: the no-decay names are kept in a ``set`` (the old list made the
    group split O(params * names)) and the large block of commented-out dead
    code was removed.
    """
    from mimogpt.models.modules.llama.model import RMSNorm
    from mimogpt.models.modules.nanoGPT.model import LayerNorm

    norm_types = (
        torch.nn.BatchNorm2d,
        torch.nn.BatchNorm1d,
        torch.nn.BatchNorm3d,
        torch.nn.SyncBatchNorm,
        torch.nn.GroupNorm,
        torch.nn.InstanceNorm1d,
        torch.nn.InstanceNorm2d,
        torch.nn.InstanceNorm3d,
        torch.nn.LayerNorm,
        torch.nn.LocalResponseNorm,
        RMSNorm,
        LayerNorm,
    )

    # Names of parameters that must not be weight-decayed.
    nodecay_names = set()
    for name, mod in model.named_modules():
        if hasattr(mod, "bias"):
            nodecay_names.add("{}.bias".format(name))
        if isinstance(mod, norm_types):
            if hasattr(mod, "weight"):
                nodecay_names.add("{}.weight".format(name))
            if hasattr(mod, "scale"):
                nodecay_names.add("{}.scale".format(name))

    model_params = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
    decay_params = [p for n, p in model_params if n not in nodecay_names]
    nodecay_params = [p for n, p in model_params if n in nodecay_names]
    grouped_parameters = [
        {
            "params": decay_params,
            "init_lr": cfg.optimize.lr,
            "lr": cfg.optimize.lr,
            "weight_decay": cfg.optimize.weight_decay,
        },
        {
            "params": nodecay_params,
            "init_lr": cfg.optimize.lr,
            "lr": cfg.optimize.lr,
            "weight_decay": 0.0,
        },
    ]
    num_all_params = sum(p.numel() for n, p in model_params)
    num_decay_params = sum(p.numel() for p in decay_params)
    num_nodecay_params = sum(p.numel() for p in nodecay_params)
    print(f"num all trained parameter tensors: {len(model_params)}, with {num_all_params:,} parameters")
    print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
    print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")

    optimizer = get_optimizer(cfg, grouped_parameters)
    return optimizer
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _apply_tf32_flags(args, rank):
    """Set TF32 matmul/cudnn backends according to ``args.tf32``."""
    torch.backends.cuda.matmul.allow_tf32 = args.tf32
    torch.backends.cudnn.allow_tf32 = args.tf32
    if args.tf32:
        print(["rank", rank, "enable tf32 if device support"])


def setup_deepspeed(args, rank):
    """Configure TF32 backends consistently with the deepspeed json config.

    TF32 follows ``args.tf32`` only when mixed precision (fp16 or bf16) is
    absent or explicitly disabled in the config; when mixed precision is
    enabled, TF32 is forced off.  ``fp16`` and ``bf16`` sections are mutually
    exclusive.

    Fixes: the config file handle was previously leaked
    (``json.load(open(...))``); the three duplicated tf32 branches are
    deduplicated into ``_apply_tf32_flags``.
    """
    with open(args.deepspeed_config) as f:
        deepspeed_config = json.load(f)

    if "fp16" in deepspeed_config:
        assert "bf16" not in deepspeed_config
        mixed_enabled = deepspeed_config["fp16"]["enabled"]
    elif "bf16" in deepspeed_config:
        assert "fp16" not in deepspeed_config
        mixed_enabled = deepspeed_config["bf16"]["enabled"]
    else:
        mixed_enabled = False

    if mixed_enabled is False:  # identity check kept from the original logic
        _apply_tf32_flags(args, rank)
    else:
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def clip_gradient(optimizer, grad_clip=5.0, eps=1.0e-15):
    """
    Clips gradients computed during backpropagation to avoid explosion of gradients.

    NaN gradient entries are zeroed, entries with magnitude below ``eps`` are
    zeroed, and the remainder is clamped to ``[-grad_clip, grad_clip]``.

    :param optimizer: optimizer with the gradients to be clipped
    :param grad_clip: clip value
    :param eps: magnitudes below this are treated as zero
    """
    for group in optimizer.param_groups:
        for param in group["params"]:
            if param.grad is None:
                continue
            g = param.grad.data
            g = torch.where(torch.isnan(g), torch.zeros_like(g), g)
            g = torch.where(torch.abs(g) < eps, torch.zeros_like(g), g)
            param.grad.data = g
            param.grad.data.clamp_(-grad_clip, grad_clip)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/parameter.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import re
|
| 5 |
+
import sys
|
| 6 |
+
import yaml
|
| 7 |
+
import argparse
|
| 8 |
+
import deepspeed
|
| 9 |
+
from easydict import EasyDict
|
| 10 |
+
|
| 11 |
+
from mimogpt.utils import read_from_yaml
|
| 12 |
+
from .cloud_copy import copy_data_to_cache
|
| 13 |
+
|
| 14 |
+
__all__ = ["parse_args", "ConfigObject", "parse_args_from_yaml"]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def parse_args():
    """Build the training CLI parser and return the parsed arguments.

    Unknown CLI options are tolerated (``parse_known_args``) so launcher
    frameworks can pass extra flags.  When running on the cloud (the
    ``moxing`` package is importable) the local data roots are replaced by
    the OBS (``s3:``) roots.

    Returns:
        EasyDict: parsed arguments, attribute-accessible.
    """
    parser = argparse.ArgumentParser(description="DeepLearning framework based on PyTorch")

    # ----------------------distributed parameter-----------------------
    parser.add_argument("--backend", type=str, default="nccl", help="use for current backend for distributed")
    parser.add_argument("--init_method", type=str, default="tcp://127.0.0.1:56947", help="init method for distributed")
    parser.add_argument("--rank", type=int, default=0, help="current rank for distributed")
    parser.add_argument("--local_rank", type=int, default=0, help="local rank, range 0-7")
    parser.add_argument("--world_size", type=int, default=1, help="current process number for distributed")

    # -----------------------common parameter-----------------------
    parser.add_argument("--yml_path", type=str, default="", help="cfg name")
    parser.add_argument("--list_data_root", type=str, help="root of dataset-list location, usually a folder")
    parser.add_argument("--img_data_root", type=str, help="root of dataset-img location, usually a folder")
    parser.add_argument("--eval_data_dir", type=str, help="eval dataset location, usually a folder")
    parser.add_argument("--eval_imagenet_dir", type=str, help="eval dataset location, usually a folder")
    parser.add_argument("--pretrained_path", type=str, help="pretrained model path")
    parser.add_argument("--load_optimizer", type=int, default=0, help="0: don't load; 1:load optimizer")
    parser.add_argument(
        "--caption_shuffle_percent",
        type=float,
        default=0,
        help="shuffle the caption with a certain probability, from 0 to 1, don't shuffle if set to 0",
    )
    parser.add_argument("--train_data_index", type=int, default=0, help="train_data_index")
    parser.add_argument(
        "--local_shuffle_type",
        type=int,
        help="0: not use local shuffle "
        "1: use local shuffle by node "
        "2: use local shuffle by card "
        "4: use local shuffle by card in zip format, recommend",
    )
    parser.add_argument("--zip_max_split", type=int, default=1024, help="used when local_shuffle_type=4")
    parser.add_argument(
        "--visual_memory_format", type=str, default="contiguous_format", help="channels_last or " "contiguous_format"
    )
    parser.add_argument("--show_model_arch", type=int, default=0, help="show model arch and params on log")
    parser.add_argument("--output_path", type=str, default="./exp", help="output path for saving log and pth")
    parser.add_argument("--log_interval", type=int, default=100, help="steps to show log info")
    parser.add_argument("--save_per_epochs", type=float, default=0.5, help="epochs to save pth, can be less than 1")
    parser.add_argument("--max_epochs", type=int, default=3, help="training epochs")
    parser.add_argument("--warmup_epochs", type=float, default=0.5, help="warmup epochs, can be less than 1")
    parser.add_argument("--lr_scheduler", type=str, default="cosine", help="lr scheduler")
    parser.add_argument("--DATALOADER", type=str, default="CLIP_zip_dataloader")
    parser.add_argument(
        "--data_list", type=str, help="list name (pattern, code auto change idx to number) to match training data"
    )
    # NOTE(review): default "trian" looks like a typo for "train", but downstream
    # code may compare against the literal value — confirm before changing it.
    parser.add_argument("--mode", type=str, default="trian", help="training sign, do not change ")
    parser.add_argument("--resume", type=int, default=0, help="resume model")
    parser.add_argument("--prefetch_factor", type=int, default=2, help="prefetch in dataloader, for faster training")
    parser.add_argument("--lr", type=float, default=0.0008, help="learning rate")
    # parser.add_argument('--optimizer', type=str, default='fused_adamw', help='optimizer')
    parser.add_argument("--weight_decay", type=float, default=0.0, help="weight decay")
    parser.add_argument("--label_smooth", type=float, default=0.1, help="label smooth")
    parser.add_argument("--image_size", type=int, default=224, help="reshape the size of image")
    parser.add_argument("--num_workers", type=int, default=6, help="process number of data loader")
    parser.add_argument("--tokenizer_type", type=str, default="bert_chinese", help="tokenizer type in data loader")

    # ----------------------debug related----------------------
    parser.add_argument("--fix_inputs", type=int, default=0, help="ignore dataloader when training, for profiling")
    parser.add_argument("--profile", type=int, default=0, help="pytorch profile")
    parser.add_argument("--profile_skip_first", type=int, default=5, help="pytorch profile")
    parser.add_argument("--profile_wait", type=int, default=5, help="pytorch profile")
    parser.add_argument("--profile_warmup", type=int, default=2, help="pytorch profile")
    parser.add_argument("--profile_active", type=int, default=3, help="pytorch profile")
    parser.add_argument("--profile_repeat", type=int, default=5, help="pytorch profile")
    parser.add_argument("--profile_step", type=int, default=150, help="npu profile")
    parser.add_argument("--debug", action="store_true")

    # ----------------------optmize----------------------
    parser.add_argument("--base_lr", type=float, help="base learning rate")
    parser.add_argument("--use_fp16", type=int, help="flag for triggering AMP")
    parser.add_argument("--CRITERION", type=str, default="clip_loss_gather_parallel", help="loss function")
    parser.add_argument("--beta1", type=float, default=0.9, help="adam beta1")
    parser.add_argument("--beta2", type=float, default=0.96, help="adam beta2")

    # ----------------------model----------------------
    parser.add_argument("--embed_dim", type=int, default=512, help="dimension of output")
    parser.add_argument("--BACKBONE", type=str, help="model backbone")
    parser.add_argument("--context_length", type=int, default=80, help="length of token sent to model")

    # ----------------------eval----------------------
    parser.add_argument("--eval_first", type=int, default=0, help="whether eval at 1st step")
    parser.add_argument("--eval_yml_path", type=str, default="", help="cfg name for eval")
    # hwzhquery eval
    parser.add_argument("--do_multilabeling", type=int, default=0, help="validation related @xkx")
    parser.add_argument(
        "--min_recall", type=float, default=0.8, help="hwzhquery eval for thr reliable, eg. recall=80%, test acc"
    )
    # NOTE(review): argparse with ``type=list`` splits a CLI string into single
    # characters (list("ab") -> ['a', 'b']); only the defaults behave sensibly.
    # Kept as-is for backward compatibility — confirm intended usage.
    parser.add_argument("--exclude", type=list, default=[], help="hwzhquery exclude some part when eval")
    parser.add_argument("--test_plan", type=list, default=[], help="settings for hwzhquery")
    parser.add_argument("--eval_hierarchy", type=str, help="path to hwzhquery imgs")
    parser.add_argument("--labels_root", type=str, help="path to hwzhquery labels")
    parser.add_argument("--map_en_zh", type=str, help="pkl of hwzhquery")
    # hwzhquery eval (need to be clarify)
    parser.add_argument("--windows_path", type=str)
    parser.add_argument("--thres", type=int, default=0, help="have correlation to thr reliable")
    # others eval
    parser.add_argument("--eval_coco_dir", type=str, help="path to coco dir")
    parser.add_argument("--eval_coco_cn_dir", type=str, help="path to coco-cn dir")
    parser.add_argument("--eval_muge_dir", type=str, help="path to muge dir")
    parser.add_argument("--eval_coco_en_dir", type=str, help="path to coco-en dir")
    parser.add_argument("--eval_imagenet_en_dir", type=str, help="path to imagenet-en dir")

    # ---------------------ema----------------------------------
    parser.add_argument("--ema", type=int, default=0, help="whether use ema")
    parser.add_argument("--ema_decay", type=float, default=0.999, help="ema_decay")
    parser.add_argument(
        "--ema_multi_tensor_apply_chunk_size", type=int, default=10000, help="ema_multi_tensor_apply_chunk_size"
    )

    # train on cloud
    parser.add_argument("--random_seed", type=int, help="random_seed")
    parser.add_argument("--train_url", type=str, help="train_url")
    parser.add_argument("--cloud_list_data_root", type=str, help="root path to list on OBS, start with s3:")
    parser.add_argument("--cloud_img_data_root", type=str, help="root path to img or zip on OBS, start with s3:")
    parser.add_argument("--user_id", type=str, default="", help="user account")
    parser.add_argument("--tf32", action="store_true")
    parser = deepspeed.add_config_arguments(parser)
    args, unknown = parser.parse_known_args()
    # args = merge_args(args, args.yml_path)

    # Feature-detect the cloud environment: if moxing imports, we are on the
    # cloud and the OBS roots override the local ones.  Narrowed from a bare
    # ``except:`` so that unrelated errors are no longer swallowed.
    try:
        import moxing as mox

        args.list_data_root = args.cloud_list_data_root
        args.img_data_root = args.cloud_img_data_root
    except ImportError:
        pass
    return EasyDict(vars(args))
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class ConfigObject:
    """Attribute-style view over a (possibly nested) configuration dict.

    Nested dicts become nested ``ConfigObject`` instances; dicts found inside
    lists/tuples are wrapped element-wise (the container becomes a list).
    """

    def __init__(self, entries):
        for key, value in entries.items():
            setattr(self, key, self._wrap(value))

    @staticmethod
    def _wrap(value):
        # Recursively convert dicts to ConfigObject; lists/tuples are rebuilt
        # as lists with each element wrapped; everything else passes through.
        if isinstance(value, (list, tuple)):
            return [ConfigObject(item) if isinstance(item, dict) else item for item in value]
        if isinstance(value, dict):
            return ConfigObject(value)
        return value

    def __str__(self):
        return str(self.__dict__)

    def __repr__(self):
        return self.__str__()

    def merge_from_args(self, args):
        """Fill in attributes from ``args`` that this config does not define.

        Keys already present here win (the yml file has a higher priority than
        CLI parameters); ``None`` values in ``args`` are ignored.
        """
        for name, value in args.__dict__.items():
            if name not in self.__dict__ and value is not None:
                self.__dict__[name] = value
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
def parse_args_from_yaml(yml_path):
    """Load a yml config file and return it as an attribute-accessible EasyDict."""
    return EasyDict(read_from_yaml(yml_path))
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def parse_replace_roma(fp, copy_to_cache=False, rank=0, world_size=1, args=None):
    """Load a yml config and rewrite OBS ("s3://") values to local "/cache/" paths.

    When ``copy_to_cache`` is True, each remote file is copied into /cache
    first; ``rank``/``world_size`` refer to the *node* rank and world size of
    the distributed copy.  A ``.rec`` file also gets its sibling ``.idx``
    index copied.

    Returns the (mutated) config dict with local paths substituted.
    """
    y = read_from_yaml(fp)
    for y_key in y.keys():
        y_val = y[y_key]
        # isinstance instead of ``type(...) is str`` (handles str subclasses,
        # and is the idiomatic type check).
        if isinstance(y_val, str) and y_val.startswith("s3://"):
            # copy to /cache and replace to /cache — swap only the prefix, so
            # an "s3://" appearing later in the string is left untouched.
            y_val_cache = "/cache/" + y_val[len("s3://"):]
            if copy_to_cache:
                # rank and world_size means node rank and node world_size
                # ugly for rec with idx
                if y_val_cache.endswith(".rec"):
                    # Swap only the suffix; str.replace(".rec", ".idx") would
                    # also rewrite ".rec" occurring elsewhere in the path.
                    y_val_rec_idx = y_val[: -len(".rec")] + ".idx"
                    y_val_cache_rec_idx = y_val_cache[: -len(".rec")] + ".idx"
                    print("copy {} to {}".format(y_val_rec_idx, y_val_cache_rec_idx))
                    copy_data_to_cache(y_val_rec_idx, y_val_cache_rec_idx, rank=rank, world_size=world_size, args=args)
                copy_data_to_cache(y_val, y_val_cache, rank=rank, world_size=world_size, args=args)
            y[y_key] = y_val_cache
    return y
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def merge_args(args, args_yml_fn):
    """Merge a yml config into ``args``, then apply ``--key=value`` CLI overrides.

    Priority (low to high): argparse defaults < yml values < explicit
    ``--key=value`` tokens found in ``sys.argv``.  Every yml key must already
    exist as a parser argument.  Exits the process if ``args_yml_fn`` is a
    non-empty path that does not exist.

    Returns the merged args (a ``ConfigObject`` when a yml was loaded).
    """
    if os.path.exists(args_yml_fn):
        args_dict = args.__dict__
        args_yml = parse_replace_roma(args_yml_fn, copy_to_cache=False)

        # make sure key in yml is subset of parameter.py
        for key in args_yml:
            assert key in args_dict, "{} in yml not in args_dict, please set into common/parameter.py".format(key)

        args_dict_merge = dict(args_dict, **args_yml)
        args = ConfigObject(args_dict_merge)
    elif len(args_yml_fn) != 0:
        print("yml file {} is not existed".format(args_yml_fn))
        exit(0)

    # Object sentinel instead of the string "key_not_exist": a real attribute
    # value can never collide with it.
    _missing = object()

    sys_args = sys.argv[1:]
    for arg in sys_args:
        if re.match("^--(.*)=(.*)$", arg):
            # Strip only the leading "--"; str.replace("--", "") would also
            # mangle any "--" occurring inside the value.
            arg = arg[2:]
            # Split on the first "=" only, so values containing "=" survive
            # (the old unbounded split raised ValueError on them).
            key, val = arg.split("=", 1)
            default_value = getattr(args, key, _missing)
            if default_value is _missing:
                # TODO, string type
                setattr(args, key, val)
            else:
                # Coerce the CLI string to the existing attribute's type.
                # NOTE(review): bool("False") is True — boolean flags set this
                # way will misbehave (pre-existing behavior, kept).
                new_value = type(default_value)(val)
                if default_value != new_value:
                    print("set {} from {} to {}".format(key, default_value, new_value))
                setattr(args, key, new_value)
        else:
            print("unmatched, arg: {}".format(arg))

    return args
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profile.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pprint
|
| 2 |
+
import os
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
from torch.profiler import profile, schedule, ProfilerActivity, tensorboard_trace_handler
|
| 5 |
+
|
| 6 |
+
from mimogpt.engine.utils.train_loop import HookBase
|
| 7 |
+
from mimogpt.utils import hf_logger
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Nothing(object):
    """No-op stand-in for a torch profiler.

    Accepts any constructor arguments and silently ignores every call, so
    non-profiling ranks can drive the same hook code paths unconditionally.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        """Support ``with`` blocks; yields the stub itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Never suppresses exceptions (implicitly returns None)."""
        pass

    def start(self):
        """No-op counterpart of ``profile.start()``."""
        pass

    def stop(self):
        """No-op counterpart of ``profile.stop()``."""
        pass

    def step(self):
        """No-op counterpart of ``profile.step()``."""
        pass

    def export_chrome_trace(self, path):
        """No-op counterpart of ``profile.export_chrome_trace``."""
        pass
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class TorchProfileHook(HookBase):
    """Training hook wiring the PyTorch profiler into the train loop.

    Only rank 0 runs a real profiler; every other rank receives a ``Nothing``
    stub so the hook call sites stay unconditional.
    """

    def __init__(self, cfg):
        if dist.get_rank() != 0:
            self.profiler = Nothing()
            return

        # Schedule knobs, each overridable through the config.
        sched = schedule(
            wait=cfg.get("profile_wait", 1),
            warmup=cfg.get("profile_warmup", 1),
            active=cfg.get("profile_active", 1),
            repeat=cfg.get("profile_repeat", 0),
            skip_first=cfg.get("profile_skip_first", 0),
        )

        # Traces land under <log_path>/gpu_profile_results for TensorBoard.
        trace_dir = os.path.join(cfg.common.log_path, "gpu_profile_results")
        os.makedirs(trace_dir, exist_ok=True)

        self.profiler = profile(
            schedule=sched,
            activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
            record_shapes=True,
            profile_memory=True,
            with_stack=False,
            with_flops=False,
            with_modules=False,
            on_trace_ready=tensorboard_trace_handler(trace_dir),
        )
        hf_logger.info("Profile enabled in rank 0")

    def before_train(self):
        """Start profiling when training begins."""
        self.profiler.start()
        hf_logger.info(f"Profiller start!!!!!")

    def after_step(self):
        """Tick the profiler schedule once per training step."""
        self.profiler.step()

    def after_train(self):
        """Stop profiling when training ends."""
        self.profiler.stop()
        hf_logger.info(f"Profiller stop!!!!!")
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profile_npu.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch.distributed as dist
|
| 3 |
+
|
| 4 |
+
DEVICE_TYPE = os.environ.get("DEVICE_TYPE", "gpu")
|
| 5 |
+
if DEVICE_TYPE == "ascend":
|
| 6 |
+
import torch_npu
|
| 7 |
+
|
| 8 |
+
from torch.profiler import ProfilerActivity, tensorboard_trace_handler
|
| 9 |
+
from mimogpt.engine.utils.profiler_npu.profile_utils import (
|
| 10 |
+
get_profile_fn,
|
| 11 |
+
trace_handler,
|
| 12 |
+
Nothing,
|
| 13 |
+
)
|
| 14 |
+
from mimogpt.engine.utils.train_loop import HookBase
|
| 15 |
+
from mimogpt.utils import hf_logger
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class NPUTorchProfileHook(HookBase):
    """Training hook that drives the Ascend (NPU) torch profiler.

    A real profiler is only built on rank 0 of an Ascend run with
    ``cfg.profile`` enabled; every other case gets a no-op ``Nothing`` stub.
    """

    def __init__(self, cfg):
        # Guard first: ``torch_npu`` is only imported when DEVICE_TYPE=="ascend"
        # (see the conditional import at module top), so referencing it
        # unconditionally — as the old code did while building
        # ``experimental_config`` — raised NameError on GPU/CPU hosts.
        # This also avoids creating the output directory when profiling is off.
        if not (cfg.profile and DEVICE_TYPE == "ascend" and dist.get_rank() == 0):
            self.profiler = Nothing()
            return

        train_schedule, profile_fn = get_profile_fn(cfg)
        experimental_config = torch_npu.profiler._ExperimentalConfig(
            aic_metrics=torch_npu.profiler.AiCMetrics.PipeUtilization,
            profiler_level=torch_npu.profiler.ProfilerLevel.Level1,  # adjust Level0/Level1/Level2 here to change profiling detail
            l2_cache=False,  # only set this True together with Level2
        )
        profile_tb_logger = os.path.join(cfg.common.log_path, "ascend_profile_results")
        os.makedirs(profile_tb_logger, exist_ok=True)
        tb_logger_trace_handler = tensorboard_trace_handler(profile_tb_logger)

        self.profiler = profile_fn(
            activities=[
                ProfilerActivity.CPU,
                ProfilerActivity.CUDA,
            ],
            schedule=train_schedule,
            record_shapes=True,
            profile_memory=True,
            with_stack=False,
            with_flops=False,
            with_modules=False,
            experimental_config=experimental_config,  # expert options; default level is Level0 — raise as needed
            on_trace_ready=tb_logger_trace_handler,
        )

    def before_train(self):
        """Enter the profiler context when training starts."""
        self.profiler.__enter__()
        hf_logger.info(f"NPU Profiller start!!!!!")

    def after_step(self):
        """Advance the profiler schedule by one training step."""
        self.profiler.step()

    def after_train(self):
        """Close the profiler context when training ends."""
        self.profiler.__exit__(None, None, None)
        hf_logger.info(f"NPU Profiller stop!!!!!")
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profiler_npu/profile_utils.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
from torch.profiler import profile, schedule
|
| 5 |
+
from .timeline_analysis import parse_timeline
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class Nothing(object):
    """Inert profiler substitute used on non-profiling ranks.

    Swallows constructor arguments, works as a context manager, and provides
    every profiler method as a no-op returning None.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        # Context-manager entry: hand back the stub unchanged.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None means exceptions propagate normally.
        pass

    def start(self):
        pass

    def stop(self):
        pass

    def step(self):
        pass

    def export_chrome_trace(self, path):
        pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def trace_handler(prof, train_url, name="train"):
    """``on_trace_ready`` callback: export the chrome trace, analyze it, and
    (when running on the cloud) back the timeline and analysis files up to
    ``train_url``.

    Args:
        prof: the active torch profiler (provides ``step_num`` and
            ``export_chrome_trace``).
        train_url: remote (OBS) destination root, or None to skip backup.
        name: tag embedded in the trace filename.
    """
    if not os.path.exists("pytorch_profile"):
        os.makedirs("pytorch_profile")

    src = os.path.join("pytorch_profile", "timeline_{}_step_{}.json".format(name, prof.step_num))
    prof.export_chrome_trace(src)
    analysis_ret = parse_timeline(src)

    # Feature-detect the cloud: without moxing there is nothing to back up.
    try:
        import moxing as mox
    except ImportError:
        return

    if train_url is None:
        print("skip backup profile timeline since train_url=None")
        return

    try:
        dst = os.path.join(train_url, src)
        dst_dirname = os.path.dirname(dst)
        if not mox.file.exists(dst_dirname):
            mox.file.make_dirs(dst_dirname)
        mox.file.copy(src, dst)
        print("save profile timeline from {} to {}".format(src, dst))

        # The old loop variable shadowed the ``name`` parameter; renamed.
        for out_name in analysis_ret:
            dst = os.path.join(train_url, out_name)
            mox.file.copy(out_name, dst)
            print("save profile files from {} to {}".format(out_name, dst))
    except Exception as exc:
        # Best-effort backup: keep training alive, but surface the failure
        # instead of swallowing it silently as the bare except did.
        print("backup profile files failed: {}".format(exc))
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def get_profile_fn(args):
    """Return ``(schedule, profiler_factory)`` for the current process.

    On rank 0 with ``args.profile`` truthy, the factory is
    ``torch.profiler.profile`` and the schedule is built from the
    ``profile_*`` knobs; otherwise the schedule is None and a no-op stub is
    returned.

    NOTE(review): the disabled branch hands back a ``Nothing`` *instance*,
    which is not callable the way ``profile`` is.  Existing callers only
    invoke the returned value on the enabled path, so the mismatch is latent
    — verify before reusing elsewhere.
    """
    # Keep the evaluation order of the original condition: get_rank() first.
    if dist.get_rank() == 0 and args.profile:
        return (
            schedule(
                skip_first=args.profile_skip_first,
                wait=args.profile_wait,
                warmup=args.profile_warmup,
                active=args.profile_active,
                repeat=args.profile_repeat,
            ),
            profile,
        )
    return None, Nothing()
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def print_memory_status(msg, empty_cache=True, is_print=True):
    """Report current CUDA memory usage in GiB.

    Args:
        msg: prefix for the printed line.
        empty_cache: when True, release cached allocator blocks first.
        is_print: when False, compute silently.

    Returns:
        (reserved_gib, allocated_gib) as floats.
    """
    if empty_cache:
        torch.cuda.empty_cache()

    gib = 1024 ** 3
    reserved = torch.cuda.memory_reserved() / gib
    allocated = torch.cuda.memory_allocated() / gib

    if is_print:
        print("{}, memory_reserved={:.2f}G, memory_allocated={:.2f}G".format(msg, reserved, allocated))

    return reserved, allocated
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/profiler_npu/timeline_analysis.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
import os
|
| 3 |
+
import json
|
| 4 |
+
import pandas as pd
|
| 5 |
+
import yaml
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def parse_yaml(fp):
    """Load a yaml file, tolerating old PyYAML versions.

    PyYAML >= 5.1 wants an explicit ``Loader``; older releases have neither
    the ``Loader`` kwarg nor ``yaml.FullLoader``.  Narrowed from a bare
    ``except`` so genuine parse errors are no longer masked.

    Returns the parsed Python object.
    """
    with open(fp, "r") as fd:
        cont = fd.read()
    try:
        y = yaml.load(cont, Loader=yaml.FullLoader)
    except (TypeError, AttributeError):
        # Old PyYAML: no Loader kwarg (TypeError) / no FullLoader (AttributeError).
        y = yaml.load(cont)
    return y
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def parse_timeline(filepath):
    """Analyze a chrome-trace timeline JSON and dump per-operator statistics.

    For each "Kernel" category found in the trace, events on stream 7
    (presumably the main compute stream — nccl traffic on other streams is
    skipped; TODO confirm) are grouped by raw op name and by mapped category,
    then written out as Excel sheets next to the input file.

    Args:
        filepath: path to a chrome-trace JSON (``traceEvents`` layout).

    Returns:
        list[str]: paths of the .xlsx files written.
    """
    # operator name (regex pattern) -> operator category mapping
    cate2name = parse_yaml(os.path.join(os.path.dirname(__file__), "mapping.yml"))
    name2cate = {}
    for cate_name, op_names in cate2name.items():
        for op_name in op_names:
            name2cate[op_name] = cate_name

    filedir, filename = os.path.split(filepath)
    print("start analysis {}".format(filepath))
    with open(filepath) as fn:
        ops = json.load(fn)

    ops = ops["traceEvents"]
    print("find {} events".format(len(ops)))

    # Distinct event categories; events without a "cat" field map to None.
    cat_ = [x["cat"] if "cat" in x.keys() else None for x in ops]
    cat_key = list(set(cat_))

    out_filenames = []

    for key in cat_key:
        if key is None:
            cat_this = [x for x in ops if "cat" not in x.keys()]
        else:
            cat_this = [x for x in ops if "cat" in x.keys() and x["cat"] == key]

        print("")
        print("find cat={}, cnt={}".format(key, len(cat_this)))

        if key in ["Kernel", "kernel"]:
            # Only stream 7 is analyzed; other streams are assumed to carry
            # collective (nccl) traffic — TODO confirm the stream id.
            stream7 = [x for x in cat_this if x["args"]["stream"] == 7]
            not_stream7 = [x for x in cat_this if x["args"]["stream"] != 7]  # nccl, pass
            tot_time = sum([x["dur"] for x in stream7]) / 1000  # us -> ms
            print("stream7: total_time={}ms".format(tot_time))

            # op name -> list of durations; mapped category -> list of durations
            ops_groupby_op_name = {}
            ops_groupby_cate = {}

            for x in stream7:
                if x["name"] not in ops_groupby_op_name.keys():
                    ops_groupby_op_name[x["name"]] = []  # name, dur#
                ops_groupby_op_name[x["name"]].append(x["dur"])

                # Strip the C++ "void " prefix before pattern matching.
                op_name_this = x["name"][5:] if x["name"].startswith("void ") else x["name"]
                real_cate_name = []
                for name_pattern, cate_name in name2cate.items():
                    ret = re.match(name_pattern, op_name_this)
                    if ret is not None:
                        real_cate_name.append(cate_name)

                # An op matching two patterns means mapping.yml is ambiguous:
                # abort so the mapping gets fixed rather than double-counting.
                if len(real_cate_name) > 1:
                    print("ERROR: op_name: {}, has many pattern: {}".format(x["name"], real_cate_name))
                    exit()
                elif len(real_cate_name) == 1:
                    real_cate_name = real_cate_name[0]
                else:
                    # Unmapped ops fall back to their raw name as the category.
                    real_cate_name = x["name"]

                if real_cate_name not in ops_groupby_cate.keys():
                    ops_groupby_cate[real_cate_name] = []  # name, dur#
                ops_groupby_cate[real_cate_name].append(x["dur"])

            # One statistics sheet per grouping (raw name vs mapped category).
            for save_tag, init_data in [["init", ops_groupby_op_name], ["cate", ops_groupby_cate]]:
                ops_list = []
                for k, v in init_data.items():
                    min_ = min(v)
                    max_ = max(v)
                    tot_ = sum(v)
                    mean_ = tot_ / len(v)
                    cnt_ = len(v)
                    # Share of the total stream-7 time (durations are in us,
                    # tot_time already in ms, hence the /1000).
                    percent_ = tot_ / 1000.0 / tot_time
                    ops_list.append([k, cnt_, tot_ / 1000, mean_ / 1000, min_ / 1000, max_ / 1000, percent_])

                # Most expensive first.
                ops_list.sort(key=lambda x: x[-1], reverse=True)
                df = pd.DataFrame(ops_list, columns=["op_name", "cnt", "sum_t", "avg_t", "min_t", "max_t", "percent"])

                save_name = "op_statistic_" + save_tag + "_" + filename.replace(".json", ".xlsx")
                save_name = os.path.join(filedir, save_name)
                out_filenames.append(save_name)
                df.to_excel(save_name, index=False)

            # Flat per-event dump (name + duration in ms).
            summary = [[x["name"], x["dur"] / 1000.0] for x in stream7]
            df = pd.DataFrame(
                summary,
                columns=[
                    "op_name",
                    "dur",
                ],
            )

            save_name = "op_summary_" + filename.replace(".json", ".xlsx")
            save_name = os.path.join(filedir, save_name)
            out_filenames.append(save_name)
            df.to_excel(save_name, index=False)

    return out_filenames
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
if __name__ == "__main__":
    # Standalone entry point: analyze a single timeline file.
    parser = argparse.ArgumentParser(description="gpu timeline parser")
    parser.add_argument(
        "--filepath",
        type=str,
        default=None,
        help="filepath",
    )
    args, _ = parser.parse_known_args()

    # Guard the default: os.path.exists(None) raises TypeError when
    # --filepath is omitted; treat a missing path as a no-op instead.
    if args.filepath and os.path.exists(args.filepath):
        parse_timeline(args.filepath)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/record.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import torch
|
| 6 |
+
import psutil
|
| 7 |
+
import datetime
|
| 8 |
+
import torch.distributed as dist
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 12 |
+
except:
|
| 13 |
+
print("# Ascend didn't support SummaryWriter, skipped")
|
| 14 |
+
from .train_loop import HookBase
|
| 15 |
+
from mimogpt.utils import hf_logger
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_cpu_mem_status():
    """Return host RAM usage formatted as "[used/total]" in GiB."""
    mem = psutil.virtual_memory()
    gib = 1024 ** 3
    return "[{:.1f}G/{:.1f}G]".format(mem.used / gib, mem.total / gib)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class TimerAndLogger(HookBase):
    """Rank-0 hook that times every training step and periodically logs
    loss / metric / learning rates / throughput ("FPS").
    """

    def __init__(self, cfg, metrics=None):
        # cfg must provide dataloader_len, world_size, common.log_interval and
        # the dataloader.train.* batch settings used below — TODO confirm full
        # schema against the caller.
        # metrics: callable applied to the trainer outputs; also exposes .name.
        self.cfg = cfg
        self.metrics = metrics
        # Only the global rank-0 process accumulates timings and logs.
        self.is_root = dist.get_rank() == 0

    def before_train(self):
        # Three accumulators: whole run, current log interval, current epoch.
        self._train_time = 0
        self._train_time_log_interval = 0
        self._train_time_epoch = 0

    def after_train(self):
        if self.is_root:
            hf_logger.info(
                "Finish training, total time: {}".format(str(datetime.timedelta(seconds=int(self._train_time))))
            )

    def before_step(self):
        # Wall-clock start of the step; monotonic, high-resolution.
        self._start_time = time.perf_counter()

    def after_step(self):
        if self.is_root:
            step_time = time.perf_counter() - self._start_time
            self._train_time += step_time
            self._train_time_log_interval += step_time
            self._train_time_epoch += step_time
            # trainer.iter counts globally; derive epoch + in-epoch position.
            current_epoch = self.trainer.iter // self.cfg.dataloader_len
            current_iter = self.trainer.iter % self.cfg.dataloader_len
            if (current_iter + 1) % self.cfg.common.log_interval == 0:
                # Frames processed during the interval.  The "* 8" presumably
                # means 8 frames per video clip and "// 2" halves the hybrid
                # total — TODO confirm both factors against the dataloader.
                if self.cfg.dataloader.train.hybrid:
                    frame_num = (
                        (self.cfg.dataloader.train.batch_size.image + self.cfg.dataloader.train.batch_size.video * 8)
                        * self.cfg.world_size
                        * self.cfg.common.log_interval
                        // 2
                    )
                else:
                    frame_num = (
                        (self.cfg.dataloader.train.batch_size.video * 8)
                        * self.cfg.world_size
                        * self.cfg.common.log_interval
                    )
                FPS = int(frame_num / self._train_time_log_interval)
                metric_value = self.metrics(self.trainer._outs)
                # NOTE(review): "current_ime" looks like a typo for
                # current_time (local variable only, harmless).
                current_ime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                hf_logger.info(
                    "{} training: [{}][{}/{}] iterations, loss: {:.8f}, {}: {:.8f}, lr: {:.8f}, lr_tune: {:.8f}, speed: {:.3f}s/it, scaler: {}, {} FPS".format(
                        current_ime,
                        current_epoch,
                        current_iter + 1,
                        self.cfg.dataloader_len,
                        self.trainer._loss.item(),
                        self.metrics.name,
                        round(metric_value.item() * 100, 2),
                        # First param group = base lr; last = tuned group's lr.
                        self.trainer.optimizer.param_groups[0]["lr"],
                        self.trainer.optimizer.param_groups[-1]["lr"],
                        self._train_time_log_interval / self.cfg.common.log_interval,
                        self.trainer._scaler.get_scale(),
                        FPS,
                    )
                )
                self._train_time_log_interval = 0
            if (current_iter + 1) == self.cfg.dataloader_len:
                # End-of-epoch summary.
                # NOTE(review): unlike the interval branch above, this always
                # uses the hybrid (image + video) formula regardless of
                # cfg.dataloader.train.hybrid, and frame_num covers only
                # log_interval steps while it is divided by the *epoch* time —
                # the resulting "mean FPS" looks inconsistent; confirm intent.
                frame_num = (
                    (self.cfg.dataloader.train.batch_size.image + self.cfg.dataloader.train.batch_size.video * 8)
                    * self.cfg.world_size
                    * self.cfg.common.log_interval
                    // 2
                )
                FPS = int(frame_num / self._train_time_epoch)
                mem_reserved = "{:.1f}G".format(torch.cuda.memory_reserved() / (1024**3))
                hf_logger.info(
                    "training: epoch: {}, mean speed: {:.3f}s/it, mean FPS: {}, gpu_mem: {}, cpu_mem: {}".format(
                        current_epoch,
                        self._train_time_epoch / self.cfg.dataloader_len,
                        FPS,
                        mem_reserved,
                        get_cpu_mem_status(),
                    )
                )
                self._train_time_epoch = 0
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class UniversalMeterLogger(HookBase):
    """Hook that logs the trainer's running meters plus ETA/FPS every log_interval.

    Unlike TimerAndLogger this logs on every rank (no rank-0 guard).
    """

    def __init__(self, cfg):
        self.cfg = cfg

    def before_train(self):
        self._train_time = 0

    def after_train(self):
        hf_logger.info("Finish training, total time: {}".format(str(datetime.timedelta(seconds=int(self._train_time)))))

    def before_step(self):
        self._start_time = time.perf_counter()

    def after_step(self):
        step_time = time.perf_counter() - self._start_time
        self._train_time += step_time
        self.trainer.meters.batch_time.update(step_time)
        current_iter = self.trainer.iter % self.cfg.dataloader_len
        current_epoch = self.trainer.iter // self.cfg.dataloader_len
        # lr_d / lr_g are only bound for the VQGAN task; they are read below
        # only under the same task guard, so they are never used unbound.
        if self.cfg.common.task == "mimo_vqgan":
            lr_d = self.trainer.optimizer_d.param_groups[0]["lr"]
            lr_g = self.trainer.optimizer_g.param_groups[0]["lr"]
        if (current_iter + 1) % self.cfg.common.log_interval == 0:
            # Samples processed this optimizer step across all ranks.
            data_cnt = (
                self.cfg.dataloader.train.batch_size
                * self.trainer.dist.world_size
                * self.trainer.gradient_accumulation_steps
            )
            # FPS from the current step only (not a running average).
            fps = data_cnt / step_time
            meters = self.trainer.meters
            totals = self.trainer.totals
            total_iters = totals.total_iters
            # ETA from the running-average step time.
            remain_secs = (total_iters - self.trainer.iter) * meters.batch_time.avg
            remain_time = datetime.timedelta(seconds=round(remain_secs))
            finish_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + remain_secs))
            time_str = f"\tRemainingTime {remain_time} ({finish_time})"
            msg = f"Iter: [{current_iter+1}/{totals.iter_per_epoch}]"
            msg += f"\tEpoch: [{current_epoch}/{totals.epochs} ({totals.total_iters})]"
            for k in meters.keys():
                msg += f"\t{k} {meters[k].get_val_str()} ({meters[k].get_avg_str()})"
            if self.cfg.common.task == "mimo_vqgan":
                msg += f"\tlr_d: {lr_d} \tlr_g: {lr_g}"
            msg += time_str
            msg += f"\tFPS {fps: .3f}"
            hf_logger.info(msg)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class VideoMeterLogger(UniversalMeterLogger):
    """UniversalMeterLogger variant whose throughput counts frames, not clips.

    Overrides ``after_step`` only: data_cnt is multiplied by
    ``video_length`` and the VQGAN lr_d/lr_g branch is dropped.
    """

    def after_step(self):
        step_time = time.perf_counter() - self._start_time
        self._train_time += step_time
        self.trainer.meters.batch_time.update(step_time)
        current_iter = self.trainer.iter % self.cfg.dataloader_len
        current_epoch = self.trainer.iter // self.cfg.dataloader_len
        if (current_iter + 1) % self.cfg.common.log_interval == 0:
            # Frames per optimizer step: clips * frames-per-clip * ranks * accumulation.
            data_cnt = (
                self.cfg.dataloader.train.batch_size
                * self.cfg.dataloader.train.video_length
                * self.trainer.dist.world_size
                * self.trainer.gradient_accumulation_steps
            )
            fps = data_cnt / step_time
            meters = self.trainer.meters
            totals = self.trainer.totals
            total_iters = totals.total_iters
            remain_secs = (total_iters - self.trainer.iter) * meters.batch_time.avg
            remain_time = datetime.timedelta(seconds=round(remain_secs))
            finish_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + remain_secs))
            time_str = f"\tRemainingTime {remain_time} ({finish_time})"
            msg = f"Iter: [{current_iter+1}/{totals.iter_per_epoch}]"
            msg += f"\tEpoch: [{current_epoch}/{totals.epochs} ({totals.total_iters})]"
            for k in meters.keys():
                msg += f"\t{k} {meters[k].get_val_str()} ({meters[k].get_avg_str()})"
            msg += time_str
            msg += f"\tFPS {fps: .3f}"
            hf_logger.info(msg)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class SimpleLogger(HookBase):
    """Rank-0 hook that tracks per-key running-average losses and logs them.

    Keeps the averages accumulated since the last log line in ``_avg_loss``
    (key -> [count, sum]) and snapshots them into ``_avg_loss_for_last_metric``
    for the final "[Last metrics]" summary at the end of training.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.last_fps = 0
        self.is_root = dist.get_rank() == 0

    def before_train(self):
        self._train_time = 0
        self._train_time_log_interval = 0
        self._train_time_epoch = 0
        self._avg_loss = {}
        self._avg_loss_for_last_metric = {}

    def after_train(self):
        if self.is_root:
            hf_logger.info(
                "Finish training, total time: {}".format(str(datetime.timedelta(seconds=int(self._train_time))))
            )
            # Summarize the last logged interval's averages (v = [count, sum]).
            CI_Metric = f"[Last metrics]: "
            for k, v in self._avg_loss_for_last_metric.items():
                CI_Metric += f"{k}: {v[1] / v[0]:.3f}, "
            CI_Metric += f"FPS: {self.last_fps:.6f}"
            hf_logger.info(CI_Metric)

    def before_step(self):
        self._start_time = time.perf_counter()

    def after_step(self):
        if self.is_root:
            step_time = time.perf_counter() - self._start_time
            self._train_time += step_time
            self._train_time_log_interval += step_time
            self._train_time_epoch += step_time
            current_epoch = self.trainer.iter // self.cfg.dataloader_len
            current_iter = self.trainer.iter % self.cfg.dataloader_len

            # Accumulate each output's scalar value into [count, sum].
            for k, v in self.trainer._outs.items():
                if isinstance(v, torch.Tensor):
                    v = v.mean().item()
                if k not in self._avg_loss.keys():
                    self._avg_loss[k] = [0, 0.0]
                self._avg_loss[k][0] += 1
                self._avg_loss[k][1] += v

            if (current_iter + 1) % self.cfg.common.log_interval == 0:
                data_cnt = (
                    self.cfg.dataloader.train.batch_size * self.trainer.dist.world_size * self.cfg.common.log_interval
                )
                fps = data_cnt / self._train_time_log_interval
                self.last_fps = fps
                logging_info = "training: [{}][{}/{}] iterations, loss: {:.3f}, ".format(
                    current_epoch,
                    current_iter + 1,
                    self.cfg.dataloader_len,
                    self.trainer._loss.item(),
                )

                # Log "current(average)" per key.
                for k, v in self._avg_loss.items():
                    cur_loss = self.trainer._outs[k]
                    if isinstance(cur_loss, torch.Tensor):
                        cur_loss = cur_loss.mean().item()
                    logging_info += f"{k}: {cur_loss:.3f}({v[1] / v[0]:.3f}), "

                logging_info += "lr: {:.8f}, speed: {:.3f}s/it, FPS: {:.3f}, scaler: {}".format(
                    self.trainer.optimizer.param_groups[0]["lr"],
                    self._train_time_log_interval / self.cfg.common.log_interval,
                    fps,
                    self.trainer._scaler.get_scale(),
                )

                hf_logger.info(logging_info)
                self._train_time_log_interval = 0
                # Keep the just-finished interval's dict for the end-of-run
                # summary, then rebind (not clear) to start a fresh interval.
                self._avg_loss_for_last_metric = self._avg_loss
                self._avg_loss = {}

            if (current_iter + 1) == self.cfg.dataloader_len:
                # End of epoch: report mean speed and memory footprint.
                mem_reserved = "{:.1f}G".format(torch.cuda.memory_reserved() / (1024**3))
                hf_logger.info(
                    "training: epoch: {}, mean speed: {:.3f}s/it, gpu_mem: {}, cpu_mem: {}".format(
                        current_epoch,
                        self._train_time_epoch / self.cfg.dataloader_len,
                        mem_reserved,
                        get_cpu_mem_status(),
                    )
                )
                self._train_time_epoch = 0
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
class TensorboardLogger(HookBase):
    """Hook that mirrors the trainer's running meter averages into TensorBoard.

    Only rank 0 creates a SummaryWriter (under ``<log_path>/events``) and
    writes scalars; other ranks are inert.  Fixes: removed the dead
    ``name_value_dict`` local and flattened the nested guards into early
    returns (behavior unchanged).
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.is_root = dist.get_rank() == 0
        self.log_root_dir = cfg.common.log_path
        if not self.is_root:
            # Non-root ranks never touch self.tb_logger.
            return
        event_path = os.path.join(self.log_root_dir, "events")
        os.makedirs(event_path, exist_ok=True)
        # Small queue / frequent flush so the dashboard stays near-live.
        self.tb_logger = SummaryWriter(event_path, max_queue=1, flush_secs=10)

    def after_step(self):
        if not self.is_root:
            return
        current_iter = self.trainer.iter
        if (current_iter + 1) % self.cfg.common.log_interval != 0:
            return
        meters = self.trainer.meters
        for k in meters.keys():
            self.tb_logger.add_scalar(k, meters[k].avg, current_iter)

    def after_train(self):
        if self.is_root:
            self.tb_logger.close()
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
class TensorboardPriorLogger(TensorboardLogger):
    """TensorboardLogger that additionally dumps eval/test boards at their intervals."""

    def __init__(self, cfg):
        super().__init__(cfg)
        self.eval_interval = cfg.evaluation.validation.eval_interval
        # -1 sentinel: with a negative interval, ``iter % -1`` is always 0
        # while the right-hand side is -2, so the test branch never fires.
        self.test_interval = getattr(cfg.evaluation.test, "test_interval", -1)

    def after_step(self):
        super().after_step()
        if self.is_root:
            current_iter = self.trainer.iter
            # Fire on the last iteration of each eval window.
            if current_iter % self.eval_interval == int(self.eval_interval - 1):
                name_value_dict = {}
                # eval_board values are plain scalars (no .avg), unlike meters.
                meters = self.trainer.eval_board
                for k in meters.keys():
                    self.tb_logger.add_scalar(k, meters[k], current_iter)
            if current_iter % self.test_interval == int(self.test_interval - 1):
                name_value_dict = {}
                meters = self.trainer.test_board
                for k in meters.keys():
                    self.tb_logger.add_scalar(k, meters[k], current_iter)

    def after_train(self):
        if self.is_root:
            self.tb_logger.close()
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/scheduler.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from .train_loop import HookBase
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class LRScheduler(HookBase):
    """Hook that recomputes every param group's LR before each training step.

    Supports a linear warmup from ``minimum_lr`` up to the group's base LR,
    followed by a "linear" decay, a "cosine" decay, or a constant schedule
    (any other ``scheduler`` value).  Each group decays from its own
    ``init_lr`` entry.
    """

    def __init__(self, optimizer, init_lr, warmup_iter=0, scheduler=None):
        self._optimizer = optimizer
        self.init_lr = init_lr
        self.scheduler = scheduler
        self.warmup_iter = warmup_iter

    def before_step(self):
        step = self.trainer.iter
        for group in self._optimizer.param_groups:
            group["lr"] = self.calc_learning_rate(step, group["init_lr"])

    def calc_learning_rate(self, iter, init_lr, minimum_lr=1.0e-7):
        """Return the LR for iteration ``iter`` given the group's base ``init_lr``."""
        if iter < self.warmup_iter:
            # Linear ramp across the warmup window.
            return (iter + 1) / self.warmup_iter * (init_lr - minimum_lr) + minimum_lr
        if self.scheduler == "linear":
            return minimum_lr + (init_lr - minimum_lr) * (
                (self.trainer.max_iter - iter) / (self.trainer.max_iter - self.warmup_iter)
            )
        if self.scheduler == "cosine":
            return minimum_lr + 0.5 * (init_lr - minimum_lr) * (
                1 + math.cos(math.pi * (iter - self.warmup_iter) / (self.trainer.max_iter - self.warmup_iter))
            )
        # Unknown/None scheduler: hold the base LR after warmup.
        return init_lr
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok/lr_scheduler.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
class MyLRScheduler:
    """Three-phase LR schedule applied to the param group named ``'encoder'``.

    Phase 1 (step < init_step1): leave the LR untouched (warm hold).
    Phase 2 (init_step1 <= step < init_step2): linear decay init_lr -> min_lr1
        via :func:`step_lr_schedule`.
    Phase 3 (step >= init_step2): cosine decay min_lr1 -> min_lr2 until
        ``max_step`` via :func:`cosine_lr_schedule`.
    Groups other than the encoder group are never modified.

    Fix: group lookup now uses ``dict.get`` — standard optimizers create
    param groups without a ``'name'`` key, and the original indexing raised
    KeyError for them.  With no ``'encoder'`` group the scheduler is a no-op.
    """

    def __init__(
        self,
        optimizer,
        init_step1=5000,
        init_step2=50000,
        max_step=100000,
        init_lr=1e-3,
        min_lr1=1e-4,
        min_lr2=1e-5,
    ):
        self.optimizer = optimizer
        self.init_step1 = init_step1
        self.init_step2 = init_step2
        self.max_step = max_step
        self.init_lr = init_lr
        self.min_lr1 = min_lr1
        self.min_lr2 = min_lr2
        self.idx = -1  # -1 = no encoder group found -> step() is a no-op
        for idx, param_group in enumerate(optimizer.param_groups):
            if param_group.get('name') == 'encoder':
                self.idx = idx

    def step(self, cur_step):
        """Update the encoder group's LR for iteration ``cur_step``."""
        if cur_step < self.init_step1 or self.idx < 0:
            return
        elif cur_step < self.init_step2:
            # Remaining steps in phase 2 shrink toward 1, so the LR decays linearly.
            step_lr_schedule(self.optimizer, self.init_step2-cur_step, self.init_step2-self.init_step1, self.init_lr, self.min_lr1, self.idx)
        else:
            cosine_lr_schedule(self.optimizer, self.init_step2, cur_step, self.max_step, self.min_lr1, self.min_lr2, self.idx)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def cosine_lr_schedule(optimizer, init_step, cur_step, max_step, init_lr, min_lr, idx):
    """Cosine-anneal ``optimizer.param_groups[idx]["lr"]`` from ``init_lr`` at
    ``cur_step == init_step`` down to ``min_lr`` at ``cur_step == max_step``."""
    elapsed = cur_step - init_step
    window = max_step - init_step
    cosine_term = 1.0 + math.cos(math.pi * elapsed / window)
    new_lr = (init_lr - min_lr) * 0.5 * cosine_term + min_lr
    optimizer.param_groups[idx]["lr"] = new_lr
|
| 43 |
+
|
| 44 |
+
def step_lr_schedule(optimizer, step, max_step, init_lr, min_lr, idx):
    """Linear LR ramp: ``step`` counts the *remaining* steps out of ``max_step``,
    so the LR moves from ``init_lr`` (step == max_step) toward ``min_lr``.
    ``step`` is clamped to at least 1 so the LR never quite reaches ``min_lr``."""
    remaining = max(1, step)
    optimizer.param_groups[idx]["lr"] = min_lr + (init_lr - min_lr) * remaining / max_step
|
| 48 |
+
|
| 49 |
+
def step_lr_schedule2(optimizer, init_step, cur_step, init_lr, min_lr, decay_rate=0.999, idx=0):
    """Exponential LR decay from ``init_lr`` starting at ``init_step``,
    floored at ``min_lr``: lr = max(min_lr, init_lr * decay_rate**elapsed)."""
    elapsed = cur_step - init_step
    decayed = init_lr * (decay_rate ** elapsed)
    optimizer.param_groups[idx]["lr"] = max(min_lr, decayed)
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok/threshold_scheduler.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mimogpt.models.selftok.models_ours import Encoder
|
| 2 |
+
|
| 3 |
+
class ThresholdScheduler:
    """Linearly anneals the quantizer's dead-code EMA threshold.

    The threshold is held at ``init_threshold`` until ``constant_step``, then
    moves linearly toward ``final_threshold``, reaching it at ``end_step`` and
    staying clamped there afterwards.
    """

    def __init__(
        self,
        init_threshold,
        final_threshold,
        constant_step=4000,
        end_step=20000,
    ):
        assert end_step > constant_step
        self.init_threshold = init_threshold
        self.final_threshold = final_threshold
        self.constant_step = constant_step
        self.end_step = end_step
        # Clamp from the correct side: max() when annealing downward,
        # min() when annealing upward.
        self.fn = max if final_threshold <= init_threshold else min

    def step(self, encoder, cur_iter):
        """Write the annealed threshold onto the encoder's codebook."""
        if cur_iter <= self.constant_step:
            return
        slope = (self.init_threshold - self.final_threshold) / (self.end_step - self.constant_step)
        annealed = self.init_threshold - slope * (cur_iter - self.constant_step)
        clamped = self.fn(self.final_threshold, annealed)
        encoder.quantizer._codebook.threshold_ema_dead_code = clamped
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class ObjectiveScheduler:
    """Linearly ramps ``tokenizer.recon_ratio`` from 1 toward 0.

    Only iterations strictly inside ``(start_iter, end_iter)`` modify the
    tokenizer; outside that open interval the attribute is left untouched.
    """

    def __init__(self, start_iter, end_iter):
        self.start = start_iter
        self.end = end_iter

    def step(self, tokenizer, cur_iter):
        if not (self.start < cur_iter < self.end):
            return
        rate = 1. / (self.end - self.start)
        tokenizer.recon_ratio = 1. - rate * (cur_iter - self.start)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class LowResDroprateScheduler:
    """Piecewise-linear schedule for ``mmdit.low_res_drop_rate``.

    Before ``start_iter`` the rate is pinned to 1.0; between ``start_iter``
    and ``end_iter`` it moves linearly from ``start_rate`` to ``end_rate``;
    afterwards it stays at ``end_rate``.  Prints the current value every
    50 iterations for visibility.
    """

    def __init__(self, start_iter=0, end_iter=0, start_rate=0.0, end_rate=0.0):
        self.start_iter = start_iter
        self.end_iter = end_iter
        self.start_rate = start_rate
        self.end_rate = end_rate
        # Slope of the linear segment; a zero-length window degenerates to flat.
        if self.end_iter != self.start_iter:
            self.r = (self.start_rate - self.end_rate) / (self.end_iter - self.start_iter)
        else:
            self.r = 0

    def step(self, mmdit, cur_iter):
        if cur_iter < self.start_iter:
            rate = 1.0
        elif cur_iter < self.end_iter:
            rate = self.start_rate - self.r * (cur_iter - self.start_iter)
        else:
            rate = self.end_rate
        mmdit.low_res_drop_rate = rate
        if cur_iter % 50 == 0:
            print("cur_iter", cur_iter, "mmdit.low_res_drop_rate", mmdit.low_res_drop_rate)
        return
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok_hook.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
# import time
|
| 5 |
+
from time import time
|
| 6 |
+
import torch
|
| 7 |
+
import psutil
|
| 8 |
+
import datetime
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
from diffusers.models import AutoencoderKL
|
| 11 |
+
from collections import OrderedDict
|
| 12 |
+
from .selftok.lr_scheduler import MyLRScheduler
|
| 13 |
+
from .checkpoint import SaveModel
|
| 14 |
+
from .selftok.threshold_scheduler import ThresholdScheduler, ObjectiveScheduler, LowResDroprateScheduler
|
| 15 |
+
try:
|
| 16 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 17 |
+
except:
|
| 18 |
+
print("# Ascend didn't support SummaryWriter, skipped")
|
| 19 |
+
from .train_loop import HookBase
|
| 20 |
+
from mimogpt.utils import hf_logger
|
| 21 |
+
import moxing as mox
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def extract_exp_name(output_path, train_url):
    """Derive an experiment name from a training URL.

    Scans the ``/``-separated segments of ``train_url`` for the first segment
    that contains ``"time"`` and starts with ``"2"`` (a timestamp-like
    directory such as ``2024_time_<suffix>``), then returns
    ``"<previous-segment>_<suffix>"`` where ``suffix`` is the segment past
    position 5 with its ``time_`` prefix stripped.  Returns ``"debug"`` when
    no such segment exists or the URL is malformed.

    ``output_path`` is unused but kept for call-site compatibility.

    Fixes: the original relied on a bare ``except:`` (which also swallows
    SystemExit/KeyboardInterrupt) and reached the "debug" fallback via a
    NameError on unbound locals; the no-match path is now an explicit
    fall-through and only ``Exception`` is caught.
    """
    try:
        segments = train_url.split('/')
        for i, segment in enumerate(segments):
            if 'time' in segment and segment[0] == '2':
                suffix = segment[5:].replace('time_', '')
                prefix = segments[i - 1]
                return f"{prefix}_{suffix}"
    except Exception:
        # Malformed input (e.g. train_url is None) -> fall back to debug mode.
        pass
    return "debug"
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def build_selftok_optimizer(model, cfg):
    """Build the AdamW optimizer for Selftok training.

    Adds an ``'encoder'`` param group (at ``init_lr``) unless the encoder is
    frozen, and decoder group(s) unless the decoder is frozen.  When
    ``token_lr`` is configured, the decoder is split into the image-path
    parameters (``x_block``/``x_embedder``, at ``dit_lr``, named
    ``'decoder'``) and the remaining parameters (at ``token_lr``, named
    ``'token'``); together those two filters cover model.parameters().
    """
    model_base = model.module if hasattr(model, "module") else model
    lr_cfg = cfg.optimize.lr_scheduler
    groups = []

    if not cfg.model.fix_encoder:
        # The encoder may be a dict of per-resolution encoders; train the
        # configured resolution only.
        if isinstance(model_base.encoder, dict):
            encoder = model_base.encoder[cfg.tokenizer.params.train_encoder_res]
        else:
            encoder = model_base.encoder
        groups.append({
            'params': encoder.parameters(),
            'lr': lr_cfg.init_lr,
            'name': 'encoder',
        })

    if not cfg.model.fix_decoder:
        token_lr = lr_cfg.token_lr if hasattr(lr_cfg, "token_lr") else None
        if token_lr is None:
            groups.append({
                'params': model_base.model.parameters(),
                'lr': lr_cfg.dit_lr,
                'name': 'decoder',
            })
        else:
            groups.append({
                'params': model_base.model.get_params_by_filter(select_list=['x_block', 'x_embedder']),
                'lr': lr_cfg.dit_lr,
                'name': 'decoder',
            })
            groups.append({
                'params': model_base.model.get_params_by_filter(remove_list=['x_block', 'x_embedder']),
                'lr': token_lr,
                'name': 'token',
            })

    return torch.optim.AdamW(groups, betas=(0.9, 0.99), weight_decay=0)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class SelfTokHook(HookBase):
    """Main training hook for the Selftok tokenizer: wires up the LR /
    dead-code / objective / low-res-drop-rate schedulers, maintains the EMA
    copy of the decoder, and handles rank-0 logging to TensorBoard (with a
    moxing copy to a remote tb directory when not in debug mode).
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.set_attribute()
        self.is_root = (dist.get_rank() == 0)
        self.log_root_dir = os.path.join(cfg.common.output_path, "tb")
        self.save_interval = self.cfg.common.ckpt_interval

        # "debug" (the extract_exp_name fallback) disables TB logging/copying.
        self.exp_name = extract_exp_name(cfg.common.output_path, cfg.train_url)
        self.debug = (self.exp_name == 'debug')
        self.tb_copy_dir = os.path.join(cfg.common.tb_path, self.exp_name)
        hf_logger.info(f"tb copy dir: {self.tb_copy_dir}")
        hf_logger.info(f"Log root path: {self.log_root_dir}")
        if self.is_root and not self.debug:
            os.makedirs(self.log_root_dir, exist_ok=True)
            self.tb_logger = SummaryWriter(self.log_root_dir, max_queue=1, flush_secs=10)
        else:
            self.tb_logger = None

    @torch.no_grad()
    def update_ema(self, ema_factor=0.9999):
        """EMA-update trainer.ema from the decoder's parameters.

        Under FSDP the full (unsharded) parameters must be summoned first.
        When ema_in_cpu is set, parameter copies are moved to CPU so the EMA
        weights can live in host memory.
        """
        if hasattr(self.cfg.common, "use_fsdp") and self.cfg.common.use_fsdp:
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
            with FSDP.summon_full_params(self.trainer.model):
                with torch.no_grad():
                    if self.trainer.ema is not None:
                        for p, p_ema in zip(self.trainer.model.model.parameters(), self.trainer.ema.parameters()):
                            if self.ema_in_cpu:
                                p1 = p.data.detach().clone().cpu()
                            else:
                                p1 = p.data.detach().clone()
                            # ema = factor * ema + (1 - factor) * param
                            p_ema.data.mul_(ema_factor).add_((1 - ema_factor) * p1)
        else:
            with torch.no_grad():
                if hasattr(self.trainer.model, "module"):
                    model_before_ddp = self.trainer.model.module.model
                else:
                    model_before_ddp = self.trainer.model.model
                for p, p_ema in zip(model_before_ddp.parameters(), self.trainer.ema.parameters()):
                    if self.ema_in_cpu:
                        p1 = p.data.detach().clone().cpu()
                    else:
                        p1 = p.data.detach().clone()
                    p_ema.data.mul_(ema_factor).add_((1 - ema_factor) * p1)

    def set_attribute(self):
        # Cache frequently used config values on the hook itself.
        self.dead_code_threshold = self.cfg.tokenizer.params.quantizer_config.dead_code_threshold
        self.init_step1 = self.cfg.optimize.lr_scheduler.init_step1
        self.init_step2 = self.cfg.optimize.lr_scheduler.init_step2
        self.max_step = self.cfg.optimize.lr_scheduler.max_step
        self.min_lr1 = self.cfg.optimize.lr_scheduler.min_lr1
        self.min_lr2 = self.cfg.optimize.lr_scheduler.min_lr2
        self.ema_in_cpu = self.cfg.optimize.ema_in_cpu
        self.gradient_accumulation_steps = self.cfg.optimize.get("gradient_accumulation_steps", 1)

    def set_scheduler(self):
        """Instantiate the configured schedulers (optional ones only when present in cfg)."""
        if hasattr(self.cfg.optimize, "dead_code_scheduler"):
            self.dead_code_scheduler = ThresholdScheduler(
                init_threshold=self.dead_code_threshold,
                final_threshold=self.cfg.optimize.dead_code_scheduler.final_threshold,
                constant_step=self.cfg.optimize.dead_code_scheduler.constant_step,
                end_step=self.cfg.optimize.dead_code_scheduler.end_step
            )
        self.lr_scheduler = MyLRScheduler(
            self.trainer.optimizer,
            init_step1=self.init_step1,
            init_step2=self.init_step2,
            max_step=self.max_step,
            init_lr=self.cfg.optimize.lr_scheduler.init_lr,
            min_lr1=self.min_lr1,
            min_lr2=self.min_lr2
        )
        if hasattr(self.cfg.optimize, 'objective_scheduler'):
            self.objective_scheduler = ObjectiveScheduler(**self.cfg.optimize.objective_scheduler)
        if hasattr(self.cfg.optimize, 'low_res_drop_rate_scheduler'):
            self.low_res_drop_rate_scheduler = LowResDroprateScheduler(
                **self.cfg.optimize.low_res_drop_rate_scheduler
            )

    def before_step(self):
        current_iter = self.trainer.iter
        if hasattr(self.trainer.model, "module"):
            model_base = self.trainer.model.module
        else:
            model_base = self.trainer.model
        if hasattr(self, 'low_res_drop_rate_scheduler'):
            self.low_res_drop_rate_scheduler.step(model_base.model, current_iter)

    def update_scheduler(self):
        # Advance all configured schedulers for the current iteration.
        current_iter = self.trainer.iter
        self.lr_scheduler.step(current_iter)
        if hasattr(self.trainer.model, "module"):
            model_base = self.trainer.model.module
        else:
            model_base = self.trainer.model
        if hasattr(self, 'objective_scheduler'):
            self.objective_scheduler.step(model_base, current_iter)
        if hasattr(self, "dead_code_scheduler"):
            self.dead_code_scheduler.step(model_base.encoder, current_iter)

    def ema_update(self):
        # Only refresh the EMA on real optimizer steps (after accumulation).
        if (self.trainer.iter + 1) % self.gradient_accumulation_steps == 0:
            self.update_ema()

    def log(self):
        """Rank-0 logging every ``trainer.log_every`` iterations."""
        if self.trainer.iter % self.trainer.log_every == 0:
            if self.is_root:
                torch.cuda.synchronize()
                self.trainer.end_time = time()
                steps_per_sec = self.trainer.log_every / (self.trainer.end_time - self.trainer.start_time)
                lrr = self.trainer.optimizer.param_groups[0]["lr"]
                current_iter = self.trainer.iter
                logging_info = f"(step={current_iter:07d}), lr={lrr:.8f}, "

                meters = self.trainer.meters
                for k, v in meters.items():
                    avg_metric = meters[k].avg
                    if k in ['perplexity_list', 'deter_list']:
                        # Per-level lists: log the values inline, TB gets the mean.
                        metric_str = ",".join([str(int(p)) for p in avg_metric.tolist()])
                        avg_metric = avg_metric.mean()
                        logging_info += f"avg_{k}={metric_str}, "
                    elif k in ['n_active', 'n_reactive']:
                        logging_info += f"avg_{k}={int(avg_metric):04d}, "
                    elif k not in ['loss_small', 'loss_mid', 'loss_large', 'loss_uncon']:
                        logging_info += f"avg_{k}={avg_metric:.4f}, "
                    if not self.debug:
                        self.tb_logger.add_scalar(f"avg_{k}", avg_metric, current_iter)

                ###
                logging_info += f"steps/sec={steps_per_sec:.2f}"
                hf_logger.info(logging_info)

                if not self.debug:
                    # Mirror the local TB event files to the remote tb dir.
                    mox.file.copy_parallel(self.log_root_dir, self.tb_copy_dir)
            # NOTE(review): indentation reconstructed — start_time reset placed
            # at the log-interval level (all ranks); confirm against upstream.
            self.trainer.start_time = time()

    def after_step(self):
        self.update_scheduler()
        self.log()

    def after_train(self):
        if self.is_root and not self.debug:
            self.tb_logger.close()

    def set_train_state(self):
        # Frozen submodules are kept in eval() mode for the whole run.
        fix_encoder, fix_decoder = self.cfg.model.fix_encoder, self.cfg.model.fix_decoder
        if hasattr(self.trainer.model, "module"):
            model_base = self.trainer.model.module
        else:
            model_base = self.trainer.model
        if fix_encoder:
            model_base.encoder.eval()
        if fix_decoder:
            model_base.model.eval()

    def before_train(self):
        self.set_scheduler()
        self.set_train_state()
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class SelfTokSaveHook(SaveModel):
|
| 244 |
+
    def __init__(self, cfg, is_root):
        # cfg: experiment config; is_root: True only on the checkpoint-writing rank.
        super().__init__(cfg, is_root)
        self.cfg = cfg
        # Checkpoint every ckpt_interval iterations (see after_step).
        self.save_interval = self.cfg.common.ckpt_interval
|
| 248 |
+
|
| 249 |
+
    def save_model(self, checkpoint, save_name):
        """Write ``checkpoint`` to ``<save_path>/<save_name>`` on the root rank,
        then hand it to ``upload_and_delete_local_model`` (inherited from
        SaveModel — presumably uploads via moxing and removes the local copy;
        confirm in checkpoint.py).  Non-root ranks do nothing.
        """
        if self.is_root:
            local_weights = os.path.join(self.save_path, save_name)
            # for k, v in checkpoint.items():
            # checkpoint[k] = checkpoint[k].cpu()
            torch.save(checkpoint, local_weights)
            self.upload_and_delete_local_model(local_weights)
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def after_step(self):
|
| 259 |
+
if self.trainer.iter % self.save_interval == (self.save_interval - 1) and self.trainer.iter > 0:
|
| 260 |
+
save_name = "iter_%d.pth" % (self.trainer.iter)
|
| 261 |
+
# ema_state = self.trainer.ema.state_dict()
|
| 262 |
+
# for k, v in ema_state.items():
|
| 263 |
+
# ema_state[k] = ema_state[k].cpu()
|
| 264 |
+
if hasattr(self.cfg.common, "use_fsdp") and self.cfg.common.use_fsdp:
|
| 265 |
+
from torch.distributed.fsdp import FullStateDictConfig, FullOptimStateDictConfig, StateDictType
|
| 266 |
+
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
|
| 267 |
+
|
| 268 |
+
save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
| 269 |
+
save_opt_policy = FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True)
|
| 270 |
+
#with FSDP.state_dict_type(self.trainer.model, StateDictType.FULL_STATE_DICT, save_policy, save_opt_policy):
|
| 271 |
+
FSDP.set_state_dict_type(self.trainer.model, StateDictType.FULL_STATE_DICT, save_policy, save_opt_policy)
|
| 272 |
+
cpu_state = self.trainer.model.state_dict()
|
| 273 |
+
#original_osd = self.trainer.optimizer.state_dict()
|
| 274 |
+
#opt_state = FSDP.optim_state_dict(self.trainer.model, self.trainer.optimizer, optim_state_dict=original_osd)
|
| 275 |
+
save_data = {
|
| 276 |
+
'iter': self.trainer.iter,
|
| 277 |
+
'state_dict': cpu_state,
|
| 278 |
+
#'ema_state_dict': ema_state,
|
| 279 |
+
#'opt': opt_state,
|
| 280 |
+
'cfg': self.cfg
|
| 281 |
+
}
|
| 282 |
+
self.save_model(save_data, save_name)
|
| 283 |
+
hf_logger.info(f"Saved checkpoint to {save_name}")
|
| 284 |
+
torch.cuda.empty_cache()
|
| 285 |
+
else:
|
| 286 |
+
if hasattr(self.trainer.model, "module"):
|
| 287 |
+
state_dict = self.trainer.model.module.state_dict()
|
| 288 |
+
# self.save_model(self.trainer.model.module.state_dict(), save_name)
|
| 289 |
+
else:
|
| 290 |
+
state_dict = self.trainer.model.state_dict()
|
| 291 |
+
for k, v in state_dict.items():
|
| 292 |
+
state_dict[k] = state_dict[k].cpu()
|
| 293 |
+
save_data = {
|
| 294 |
+
'iter': self.trainer.iter,
|
| 295 |
+
'state_dict': state_dict,
|
| 296 |
+
'opt': self.trainer.optimizer.state_dict(),
|
| 297 |
+
'cfg': self.cfg
|
| 298 |
+
}
|
| 299 |
+
self.save_model(save_data, save_name)
|
| 300 |
+
hf_logger.info(f"Saved checkpoint to {save_name}")
|
| 301 |
+
torch.cuda.empty_cache()
|
| 302 |
+
|
llamagen-siglip-sb-block-causal/SelftokPipeline/mimogpt/engine/utils/selftok_validation.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.distributed as dist
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
from torchvision import transforms
|
| 9 |
+
from torchvision.utils import save_image
|
| 10 |
+
from torchvision import models as tv
|
| 11 |
+
|
| 12 |
+
from mimogpt.models.selftok.diffusion import create_diffusion
|
| 13 |
+
from mimogpt.models.selftok.sd3.rectified_flow import RectifiedFlow
|
| 14 |
+
from mimogpt.models.selftok.sd3.sd3_impls import SDVAE, CFGDenoiser, SD3LatentFormat
|
| 15 |
+
|
| 16 |
+
import lpips as lps
|
| 17 |
+
import numpy as np
|
| 18 |
+
from PIL import Image
|
| 19 |
+
from lpips.pretrained_networks import alexnet
|
| 20 |
+
from mimogpt.utils import hf_logger
|
| 21 |
+
from .train_loop import HookBase
|
| 22 |
+
from .selftok_hook import extract_exp_name
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
try:
    # moxing is only available on the Huawei ModelArts/OBS runtime; when it is
    # importable we are on the cluster, where tqdm progress bars are unwanted,
    # so tqdm is replaced with a pass-through.
    import moxing as mox

    def tqdm(_iter):
        """No-op stand-in for tqdm on the cloud runtime (no TTY progress bars)."""
        return _iter

except Exception:
    # Was a bare `except:`; narrowed to Exception so SystemExit and
    # KeyboardInterrupt are no longer swallowed, while still tolerating any
    # failure mode of the moxing import (not just ImportError).
    from tqdm import tqdm

__all__ = ["EvalSelftokHook"]
|
| 35 |
+
|
| 36 |
+
def norm_ip(img, low, high):
    """Normalize *img* in place: clamp to ``[low, high]``, then linearly map
    that range onto ``[0, 1]``.

    The divisor is floored at 1e-5 to avoid division by zero when
    ``high == low``.
    """
    img.clamp_(min=low, max=high)
    span = max(high - low, 1e-5)
    img.sub_(low).div_(span)
|
| 39 |
+
|
| 40 |
+
class local_alexnet(alexnet):
    """LPIPS AlexNet feature extractor whose backbone weights come from a
    local checkpoint at *path* instead of the torchvision download.

    Mirrors ``lpips.pretrained_networks.alexnet``: the torchvision AlexNet
    feature stack is split into the five LPIPS slices, and all parameters are
    frozen.
    """

    def __init__(self, path):
        super().__init__(requires_grad=False, pretrained=False)
        backbone = tv.alexnet(pretrained=False)
        backbone.load_state_dict(torch.load(path))
        features = backbone.features
        # Same layer boundaries LPIPS uses: [0,2), [2,5), [5,8), [8,10), [10,12).
        slices = (self.slice1, self.slice2, self.slice3, self.slice4, self.slice5)
        bounds = (0, 2, 5, 8, 10, 12)
        for target, start, stop in zip(slices, bounds[:-1], bounds[1:]):
            for idx in range(start, stop):
                target.add_module(str(idx), features[idx])
        for param in self.parameters():
            param.requires_grad = False
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
class EvalSelftokHook(HookBase):
    """Periodic reconstruction-quality validation hook for the Selftok pipeline.

    Every ``cfg.common.val_interval`` iterations it reconstructs a few
    validation batches through the (EMA) diffusion model, reports LPIPS and
    PSNR between VAE decodes of the original and reconstructed latents, and
    uploads side-by-side preview grids via moxing.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        eval_interval = cfg.common.val_interval
        self.eval_interval = int(eval_interval)
        self.best_metric = -1
        # LPIPS built with a randomly initialized backbone, then the AlexNet
        # feature net is swapped for one loaded from a local checkpoint
        # (avoids a torchvision weight download on the cluster).
        self.lpips_loss = lps.LPIPS(net='alex', pnet_rand=True)
        self.lpips_loss.net = local_alexnet(cfg.common.alex_path)
        self.lpips_loss = self.lpips_loss.cuda()
        self.is_root = (dist.get_rank() == 0)
        # Timestep-varying conditioning; disabled when the model always sees
        # the full token sequence.
        self.cond_vary = (not cfg.model.full_tokens) \
            if hasattr(cfg.model, "full_tokens") else True
        # Downscale each preview image to 128px for the saved grid.
        self.recon_tfm = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(size=128),
            transforms.ToTensor()
        ])
        self.local_url = '/cache/data/val/'
        # NOTE(review): rank % 8 == 0 presumably means "one process per node"
        # (8 GPUs per node) — confirm against the launch topology.
        if dist.get_rank() % 8 == 0:
            os.makedirs(self.local_url, exist_ok=True)
        self.exp_name = extract_exp_name(cfg.common.output_path, cfg.train_url)
        self.img_url = os.path.join(cfg.common.val_url, self.exp_name)
        if dist.get_rank() % 8 == 0:
            mox.file.make_dirs(self.img_url)
        # SD3 rectified-flow backbone vs. DiT DDPM backbone.
        self.model_type = 'sd3' if self.cfg.tokenizer.params.model == 'MMDiT_XL' else 'dit'

    def after_step(self):
        # Run validation every eval_interval iterations, skipping iteration 0.
        if self.trainer.iter % self.eval_interval == 0 and self.trainer.iter > 0:
            self.validate()

    def train_mode(self, encoder_training, dm_training):
        """Restore the train/eval flags previously captured by ``eval_mode``."""
        if hasattr(self.trainer.model, "module"):
            model = self.trainer.model.module
        else:
            model = self.trainer.model
        if encoder_training:
            model.encoder.train()
        if dm_training:
            model.model.train()

    def eval_mode(self):
        """Switch encoder and diffusion model to eval mode.

        Returns:
            (encoder_training, dm_training): the previous ``.training`` flags,
            to be passed back to ``train_mode`` afterwards.
        """
        if hasattr(self.trainer.model, "module"):
            model = self.trainer.model.module
        else:
            model = self.trainer.model
        encoder_training, dm_training = model.encoder.training, model.model.training
        model.encoder.eval()
        model.model.eval()
        return encoder_training, dm_training

    def compute_psnr(self, recon, ori):
        """PSNR between two image *files* (paths), computed on uint8 pixels
        (hence the 255.0 peak value)."""
        x = torch.from_numpy(np.array(Image.open(ori))).cuda().float()
        y = torch.from_numpy(np.array(Image.open(recon))).cuda().float()
        mse = F.mse_loss(x, y)
        psnr = 20 * torch.log10(torch.Tensor([255.0]).to(x.device)) - 10 * torch.log10(mse)
        return psnr

    def process_input(self, x_0):
        """Turn a raw validation batch into model-space latents.

        Handles (image, label) list batches, optional VAE encoding when the
        dataset is not pre-encoded, and SD3 latent-format scaling.
        """
        if type(x_0) is list:
            x_0 = x_0[0]  # remove class label
        with torch.no_grad():
            x_0 = x_0.cuda()
            if not self.trainer.pre_encode:
                if self.model_type == 'sd3':
                    x_0 = self.trainer.vae.encode(x_0)
                else:
                    # SD-VAE latents use the standard 0.18215 scaling factor.
                    x_0 = self.trainer.vae.encode(x_0).latent_dist.sample().mul_(0.18215)
            else:
                # Pre-encoded latents arrive with an extra singleton dim.
                x_0 = x_0.squeeze(dim=1)
            # NOTE(review): indentation reconstructed — process_in is applied to
            # both freshly encoded and pre-encoded SD3 latents (the symmetric
            # process_out happens in validate before decoding); confirm.
            if self.model_type == 'sd3':
                x_0 = SD3LatentFormat().process_in(x_0)
        return x_0

    def validate(self):
        """Run the full validation pass: reconstruct, score LPIPS/PSNR, save grids."""
        encoder_training, dm_training = self.eval_mode()
        lpips = 0.0
        psnr = 0.0
        tests = 10          # number of validation batches
        total_steps = 50    # sampler steps
        start_steps = 49    # start index for the reverse process
        for t in range(tests):
            x_0 = next(self.trainer.val_data_loader_iter)
            x_0 = self.process_input(x_0)
            noise = torch.randn_like(x_0, device=x_0.device)
            with torch.no_grad():
                if hasattr(self.trainer.model, "module"):
                    model_base = self.trainer.model.module
                else:
                    model_base = self.trainer.model
                # Reconstruction uses the EMA weights as the denoiser but the
                # current (non-EMA) encoder/diti from the live model.
                recon = self.reconstruct_val(
                    total_steps, start_steps, self.trainer.ema, noise, x_0,
                    diti=model_base.diti, encoder=model_base.encoder, ddim=True,
                    cond_vary=self.cond_vary
                )["pred_x_0"]

            # original image, reconstruction
            if self.model_type == 'sd3':
                x_0 = SD3LatentFormat().process_out(x_0)
                recon = SD3LatentFormat().process_out(recon)
                img_0, img_recon = \
                    self.trainer.vae.decode(x_0),\
                    self.trainer.vae.decode(recon)
            else:
                img_0, img_recon = \
                    self.trainer.vae.decode(x_0 / 0.18215).sample,\
                    self.trainer.vae.decode(recon / 0.18215).sample

            # LPIPS expects inputs in [-1, 1].
            lpips_batch = self.lpips_loss(img_recon.clamp(-1,1), img_0.clamp(-1,1))
            lpips += lpips_batch.mean()
            norm_ip(img_recon, -1, 1)
            norm_ip(img_0, -1, 1)
            cur_psnr = 0.0
            currank = dist.get_rank()
            # PSNR is computed per image via a round-trip through PNG files
            # (one scratch file pair per rank).
            for b in range(len(img_0)):
                save_image(img_recon[b], f"/cache/recon_{currank}.png")
                save_image(img_0[b], f"/cache/ori_{currank}.png")
                cur_sub_psnr = self.compute_psnr(f"/cache/recon_{currank}.png", f"/cache/ori_{currank}.png")
                cur_psnr += cur_sub_psnr
            cur_psnr /= len(img_0)
            # print(cur_psnr)
            psnr += cur_psnr.mean()
        # Pre-divide the local mean by world size; reduce_update presumably
        # sums across ranks so the meter holds the global mean — confirm
        # against the meter implementation.
        meters = self.trainer.meters["lpips"]
        mean_lpips = (lpips / float(tests)) / dist.get_world_size()
        meters.reduce_update(mean_lpips)
        if self.is_root:
            hf_logger.info(f"Step {self.trainer.iter}: Val LPIPS on {tests} batches={meters.avg:.4f}.")

        meters = self.trainer.meters["psnr"]
        mean_psnr = (psnr / float(tests)) / dist.get_world_size()
        meters.reduce_update(mean_psnr)
        if self.is_root:
            hf_logger.info(f"Step {self.trainer.iter}: Val PSNR on {tests} batches={meters.avg:.4f}.")

        # Preview grid uses only the LAST batch (img_0/img_recon and t leak
        # out of the loop above).
        images = torch.cat((img_0, img_recon), dim=0)
        images = images.clamp(-1, 1)
        images = (images-images.min()) / (images.max()-images.min())
        images = [self.recon_tfm(img) for img in images]

        # One writer per node saves locally then copies to the remote val dir.
        if dist.get_rank() % torch.cuda.device_count() == 0:
            eval_image_file = f"{self.local_url}/val_{self.trainer.iter:07d}_{t}.png"
            save_image(images, eval_image_file, nrow=len(x_0), normalize=True, value_range=(0, 1))
            mox.file.copy(eval_image_file, f'{self.img_url}/val_{self.trainer.iter:07d}_{t}.png')

        self.train_mode(encoder_training, dm_training)

    def reconstruct_val(self, num_steps, t, model, noise=None, x0=None, y=None, diti=None, encoder=None,
                        cond_vary=False, ddim=False, dit=None):
        """Reconstruct latents *x0* by denoising *noise* with *model*.

        Builds either a RectifiedFlow (SD3) or DDPM (DiT) sampler, encodes x0
        into Selftok conditioning tokens, and runs the reverse process from
        start step ``t``. Returns ``{"x_t": start noise, "pred_x_0": result}``.

        NOTE(review): when ``diti is None``, ``ori_hidden_states`` is never
        assigned yet is passed to ``p_sample_loop`` below — that branch would
        raise NameError. It appears unused in practice (validate always passes
        diti); confirm before relying on the class-conditional path.
        """
        # print(str(num_steps))
        if self.model_type == 'sd3':
            diffusion = RectifiedFlow(num_steps, **self.cfg.tokenizer.params.noise_schedule_config)
        else:
            diffusion = create_diffusion(str(num_steps))
        N = x0.shape[0]
        device = x0.device
        x_t = noise

        with torch.no_grad():
            if diti is None:
                model_kwargs = {'y': y}
            else:
                # Map the sampler step to the trained timestep index, then ask
                # DiTi which token indices condition that timestep.
                if self.model_type == 'sd3':
                    t_mapped = torch.tensor([diffusion.timestep_map[0]]*N, device=device).long()
                else:
                    t_mapped = torch.tensor([diffusion.timestep_map[t]]*N, device=device).long()
                k = diti.to_indices(t_mapped)
                encoder_hidden_states, _, ori_hidden_states, mask, _, _ = encoder(x0, d=k)

                model_kwargs = dict(
                    encoder_hidden_states=encoder_hidden_states,
                    mask=mask
                )
            pred_x_0 = diffusion.p_sample_loop(
                model.forward, x_t.shape, x_t, clip_denoised=False,
                model_kwargs=model_kwargs, progress=False, device=device,
                start_t=t, ddim=ddim,cond_vary=cond_vary, diti=diti, encoder=encoder,
                x_0 = x0, ori_hidden_states=ori_hidden_states, dit=dit
            )
        return {"x_t": x_t, "pred_x_0": pred_x_0}
|