Upload folder using huggingface_hub
Browse files
outputs/2025-04-11/10-15-18/.hydra/config.yaml
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
extras:
  ignore_warnings: false
  enforce_tags: true
  print_config: true
  theme: ansi_light
task_name: pretrain
tags:
- pretrain
- imagenet1k
- partmae_v5_2
- vit_b_16
ckpt_path: null
seed: 12345
trainer:
  accelerator: gpu
  devices: 4
  num_nodes: 1
  precision: bf16-mixed
  strategy: ddp
  max_epochs: 200
  min_epochs: null
  accumulate_grad_batches: 1
  gradient_clip_val: 0.3
paths:
  root_dir: ${oc.env:PROJECT_ROOT}
  log_dir: ${paths.root_dir}/logs/
  output_dir: ${hydra:runtime.output_dir}
  work_dir: ${hydra:runtime.cwd}
run:
  dir: ${paths.log_dir}/${task_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
sweep:
  dir: ${paths.log_dir}/${task_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
  subdir: ${hydra:job.num}
job_logging:
  handlers:
    file:
      filename: ${hydra:runtime.output_dir}/${task_name}.log
data:
  transform:
    _target_: src.data.components.transforms.multi_crop_v3.ParametrizedMultiCropV3
    canonical_size: 512
    canonical_crop_scale:
    - 0.9
    - 1.0
    global_crops_scale:
    - 0.3
    - 1.0
    local_crops_scale:
    - 0.05
    - 0.3
    n_global_crops: 1
    n_local_crops: 5
    distort_color: false
  _target_: src.data.components.image_folder.ImageFolderNoLabels
  root: /scratch-nvme/ml-datasets/imagenet/torchvision_ImageFolder/train
model:
  _target_: src.models.components.partmae_v5_2.PARTMaskedAutoEncoderViT
  img_size: 224
  canonical_img_size: 512
  max_scale_ratio: 6.0
  patch_size: 16
  in_chans: 3
  embed_dim: 768
  depth: 12
  num_heads: 12
  mlp_ratio: 4
  norm_layer:
    _target_: torch.nn.LayerNorm
    eps: 1.0e-06
    _partial_: true
  mask_ratio: 0.75
  pos_mask_ratio: 0.75
  num_views: 6
  decoder_embed_dim: 512
  decoder_depth: 8
  decoder_num_heads: 16
  sampler: stratified_jittered
  criterion: l1
  alpha_t: 0.5
  alpha_ts: 0.8
  alpha_s: 1.0
  permute_segment_embed: true
callbacks:
  checkpoint:
    _target_: src.callbacks.common.checkpoint.ModelCheckpoint
    dirpath: ${paths.output_dir}
    every_n_epochs: 25
    save_last: true
    verbose: true
  metric_logger:
    _target_: src.callbacks.common.metric_logger.MetricLogger
    every_n_steps: 25
  iter_timer:
    _target_: src.callbacks.common.iter_timer.IterTimer
    every_n_steps: 25
logger:
  wandb:
    _target_: wandb.integration.lightning.fabric.WandbLogger
    save_dir: ${paths.output_dir}
    offline: false
    id: null
    anonymous: null
    project: PART-pretrain
    log_model: false
    prefix: ''
    group: imagenet1k/partmae_v5_2/vit_b_16
    tags: []
    job_type: ''
scheduler:
  _target_: timm.scheduler.cosine_lr.CosineLRScheduler
  _partial_: true
  t_initial: 125000
  cycle_mul: 1.0
  lr_min: 1.0e-06
  cycle_decay: 0.1
  warmup_lr_init: 1.0e-05
  warmup_t: 6250
  cycle_limit: 1
  t_in_epochs: false
  noise_range_t: null
  noise_pct: 0.67
  noise_std: 1.0
  noise_seed: 42
optimizer:
  _target_: torch.optim.AdamW
  _partial_: true
  lr: ${eval:${blr} * ${trainer.accumulate_grad_batches} * ${trainer.num_nodes} *
    ${trainer.devices} * ${train_dataloader.batch_size} / 256}
  weight_decay: 0.05
  eps: 1.0e-08
float32_matmul_precision: high
cudnn_benchmark: true
blr: 0.0001
train_dataloader:
  pin_memory: true
  batch_size: 512
  num_workers: 16
  drop_last: true
metric_collection:
  _target_: src.models.components.metrics.partmae_v3.V3Metrics
compile_kwargs:
  fullgraph: true
  mode: default
compile_expr:
  torch._subclasses.fake_tensor.CONSTANT_NUMEL_LIMIT: 100000
  torch._dynamo.config.optimize_ddp: python_reducer
  torch._dynamo.config.compiled_autograd: true
compile: false