{
  "_hub_mixin_config": {
    "val_split": null,
    "image_size": 96,
    "patch_size": 8,
    "in_channels": 3,
    "hidden_size": 384,
    "num_hidden_layers": 12,
    "num_attention_heads": 6,
    "qkv_bias": true,
    "intermediate_size": 1536,
    "dropout_hidden": 0.0,
    "dropout_attention": 0.0,
    "dropout_path": 0.0,
    "dino_out_dim": 16384,
    "dino_use_bn": true,
    "dino_norm_last_layer": true,
    "dino_num_layers": 3,
    "dino_hidden_dim": 2048,
    "dino_bottleneck_dim": 256,
    "dino_base_teacher_temp": 0.04,
    "dino_final_teacher_temp": 0.04,
    "dino_warmup_epochs": 0,
    "num_local_crops": 4,
    "local_crop_size": 48,
    "global_crops_scale": [
      0.7,
      1.0
    ],
    "local_crops_scale": [
      0.3,
      0.7
    ],
    "checkpoint": null,
    "batch_size": 256,
    "num_epochs": 100,
    "learning_rate": 0.0002,
    "optimizer_class": "adamw",
    "base_wd": 0.04,
    "final_wd": 0.4,
    "base_momentum": 0.996,
    "final_momentum": 1.0,
    "lr_scheduler_class": "cosine",
    "warmup_ratio": 0.1,
    "log_interval_steps": 15,
    "save_interval_steps": 315,
    "save_dir": "./saved_models/vit-s8-highOutDim",
    "save_latest": true,
    "save_best": true,
    "loss_metric_for_best_model": "train",
    "use_wandb": true,
    "wandb_entity": "image-ssl",
    "wandb_project": "pretraining",
    "wandb_name": "vit-s8-highOutDim",
    "upload_model_to_hub": true,
    "repo_id": "image-ssl/vit-s8-highOutDim",
    "device": "cuda:0",
    "seed": 42,
    "total_steps": 195300
  },
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14e641e1e600>",
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14e642ade690>",
  "wandb_table": null,
  "optimizer": "AdamW (\nParameter Group 0\n    amsgrad: False\n    betas: (0.9, 0.999)\n    capturable: False\n    decoupled_weight_decay: True\n    differentiable: False\n    eps: 1e-08\n    foreach: None\n    fused: None\n    initial_lr: 0.0002\n    lr: 2.179999999999991e-05\n    maximize: False\n    weight_decay: 0.0400887282083115\n)",
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14e642129e50>",
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14e642129f40>",
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14e642129e80>",
  "optimizer_class": "adamw",
  "lr_scheduler_class": "cosine",
  "student_model": "VisionTransformerWithPretrainingHeads(\n  (encoder): VisionTransformer(\n    (patch_embed): PatchEmbedding(\n      (proj): Conv2d(3, 384, kernel_size=(8, 8), stride=(8, 8))\n    )\n    (pos_drop): Dropout(p=0.0, inplace=False)\n    (blocks): ModuleList(\n      (0-11): 12 x TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): Identity()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=1536, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=1536, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): Identity()\n      )\n    )\n    (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n  )\n  (heads): ModuleDict(\n    (dino): DINOHead(\n      (mlp): Sequential(\n        (0): Linear(in_features=384, out_features=2048, bias=True)\n        (1): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (2): GELU(approximate='none')\n        (3): Linear(in_features=2048, out_features=2048, bias=True)\n        (4): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (5): GELU(approximate='none')\n        (6): Linear(in_features=2048, out_features=256, bias=True)\n      )\n      (last_layer): Linear(in_features=256, out_features=16384, bias=False)\n    )\n  )\n)",
  "teacher_model": "VisionTransformerWithPretrainingHeads(\n  (encoder): VisionTransformer(\n    (patch_embed): PatchEmbedding(\n      (proj): Conv2d(3, 384, kernel_size=(8, 8), stride=(8, 8))\n    )\n    (pos_drop): Dropout(p=0.0, inplace=False)\n    (blocks): ModuleList(\n      (0-11): 12 x TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): Identity()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=1536, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=1536, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): Identity()\n      )\n    )\n    (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n  )\n  (heads): ModuleDict(\n    (dino): DINOHead(\n      (mlp): Sequential(\n        (0): Linear(in_features=384, out_features=2048, bias=True)\n        (1): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (2): GELU(approximate='none')\n        (3): Linear(in_features=2048, out_features=2048, bias=True)\n        (4): BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (5): GELU(approximate='none')\n        (6): Linear(in_features=2048, out_features=256, bias=True)\n      )\n      (last_layer): Linear(in_features=256, out_features=16384, bias=False)\n    )\n  )\n)",
  "learning_rate": 0.0002,
  "_dino_loss": "DINOLoss()"
}
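
Everything above is a single JSON dump of the trainer state. The `_hub_mixin_config` block is the reusable part: it holds the run's constructor arguments (the key name follows huggingface_hub's `PyTorchModelHubMixin` serialization convention), while the remaining top-level entries are stringified `repr()`s of live Python objects and are informational only. A minimal sketch for reading the config and deriving the run's shape, assuming the dump is saved as `config.json`:

```python
import json

# Load the dumped trainer state; the filename is an assumption.
with open("config.json") as f:
    state = json.load(f)

cfg = state["_hub_mixin_config"]

# Quantities derivable directly from the dump:
steps_per_epoch = cfg["total_steps"] // cfg["num_epochs"]     # 195300 // 100 = 1953
samples_per_epoch = steps_per_epoch * cfg["batch_size"]       # 1953 * 256 = 499,968
warmup_steps = int(cfg["warmup_ratio"] * cfg["total_steps"])  # ~19,530 steps

print(steps_per_epoch, samples_per_epoch, warmup_steps)
```

So the schedule implies roughly 500k training samples per epoch at batch size 256.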
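`lr_scheduler_class: "cosine"`, `warmup_ratio: 0.1`, and the `SequentialLR` instance named in the dump point to a linear-warmup-then-cosine schedule stepped per batch. The exact sub-schedulers are not shown in the file, so the composition below is an assumption; only the endpoint values (peak lr 2e-4, 195,300 total steps) come from the dump:

```python
import torch
from torch.optim.lr_scheduler import SequentialLR, LinearLR, CosineAnnealingLR

model = torch.nn.Linear(384, 384)  # stand-in module for illustration
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4, weight_decay=0.04)

total_steps = 195300
warmup_steps = int(0.1 * total_steps)

# Linear warmup chained into cosine decay, the way SequentialLR expects.
warmup = LinearLR(optimizer, start_factor=1e-3, total_iters=warmup_steps)
cosine = CosineAnnealingLR(optimizer, T_max=total_steps - warmup_steps)
scheduler = SequentialLR(optimizer, schedulers=[warmup, cosine],
                         milestones=[warmup_steps])
```

The in-flight `lr: 2.18e-05` in the optimizer repr is about 11% of the 2e-4 peak, consistent with a snapshot taken partway through warmup.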
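`base_wd`/`final_wd` (0.04 to 0.4) and `base_momentum`/`final_momentum` (0.996 to 1.0) match DINO's cosine ramps for weight decay and for the teacher's EMA momentum. The `trainers.schedulers.*` classes named in the dump are project-internal and not shown here, so the sketch below is the standard DINO formulation, not necessarily the repo's exact code:

```python
import math
import torch

def cosine_ramp(base, final, step, total_steps):
    """Cosine interpolation from `base` to `final` over training; the
    shape DINO uses for weight decay (0.04 -> 0.4) and for the teacher
    EMA momentum (0.996 -> 1.0)."""
    progress = step / max(1, total_steps)
    return final + 0.5 * (base - final) * (1 + math.cos(math.pi * progress))

@torch.no_grad()
def ema_update(student, teacher, momentum):
    """Teacher parameters track the student by exponential moving average."""
    for p_s, p_t in zip(student.parameters(), teacher.parameters()):
        p_t.mul_(momentum).add_(p_s, alpha=1.0 - momentum)
```

The in-flight `weight_decay: 0.04009` in the optimizer repr sits just above `base_wd = 0.04`, again consistent with an early-training snapshot.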
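The student and teacher reprs pin down the projection head implied by the config: `dino_num_layers: 3`, `dino_hidden_dim: 2048`, `dino_bottleneck_dim: 256`, `dino_out_dim: 16384`, `dino_use_bn: true`. A sketch rebuilding that printed structure; note that the reference DINO head additionally L2-normalizes the bottleneck output in `forward()` and wraps the output layer in `weight_norm` (with the gain frozen when `norm_last_layer` is true, matching `dino_norm_last_layer: true`), neither of which is visible in a module repr:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DINOHead(nn.Module):
    """Reconstruction of the head printed in the dump:
    384 -> 2048 -> 2048 -> 256 (BatchNorm1d + GELU between Linears),
    then a bias-free 256 -> 16384 prototype layer."""
    def __init__(self, in_dim=384, hidden_dim=2048, bottleneck_dim=256,
                 out_dim=16384, use_bn=True):
        super().__init__()
        def block(i, o):
            layers = [nn.Linear(i, o)]
            if use_bn:
                layers.append(nn.BatchNorm1d(o))
            layers.append(nn.GELU())
            return layers
        self.mlp = nn.Sequential(*block(in_dim, hidden_dim),
                                 *block(hidden_dim, hidden_dim),
                                 nn.Linear(hidden_dim, bottleneck_dim))
        self.last_layer = nn.Linear(bottleneck_dim, out_dim, bias=False)

    def forward(self, x):
        x = self.mlp(x)
        x = F.normalize(x, dim=-1)  # L2-normalize the bottleneck (DINO convention)
        return self.last_layer(x)

head = DINOHead()
print(head(torch.randn(4, 384)).shape)  # torch.Size([4, 16384])
```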