{
  "_hub_mixin_config": {
    "val_split": null,
    "image_size": 96,
    "patch_size": 4,
    "in_channels": 3,
    "hidden_size": 384,
    "num_hidden_layers": 12,
    "num_attention_heads": 3,
    "qkv_bias": true,
    "intermediate_size": 768,
    "dropout_hidden": 0.0,
    "dropout_attention": 0.0,
    "dropout_path": 0.05,
    "dino_out_dim": 4096,
    "dino_use_bn": true,
    "dino_norm_last_layer": true,
    "dino_num_layers": 3,
    "dino_hidden_dim": 1024,
    "dino_bottleneck_dim": 256,
    "dino_base_teacher_temp": 0.04,
    "dino_final_teacher_temp": 0.04,
    "dino_warmup_epochs": 0,
    "num_local_crops": 6,
    "local_crop_size": 48,
    "global_crops_scale": [
      0.4,
      1.0
    ],
    "local_crops_scale": [
      0.05,
      0.4
    ],
    "solarization": 0.2,
    "gaussian": [
      1.0,
      0.5
    ],
    "checkpoint": null,
    "batch_size": 64,
    "num_epochs": 100,
    "learning_rate": 0.000125,
    "optimizer_class": "adamw",
    "base_wd": 0.04,
    "final_wd": 0.4,
    "base_momentum": 0.996,
    "final_momentum": 1.0,
    "lr_scheduler_class": "cosine",
    "warmup_ratio": 0.1,
    "log_interval_steps": 15,
    "save_interval_steps": 315,
    "save_dir": "./saved_modelcls/vit-t4-chatgpt-hyperparams",
    "save_latest": true,
    "save_best": true,
    "loss_metric_for_best_model": "train",
    "use_wandb": true,
    "wandb_entity": "image-ssl",
    "wandb_project": "pretraining",
    "wandb_name": "vit-t4-chatgpt-hyperparams",
    "upload_model_to_hub": true,
    "repo_id": "image-ssl/vit-t4-chatgpt-hyperparams",
    "device": "cuda:0",
    "seed": 42,
    "total_steps": 781200
  },
  "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x14e5868dd3a0>",
  "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x14e57f3612e0>",
  "wandb_table": null,
  "optimizer": "AdamW (\nParameter Group 0\n    amsgrad: False\n    betas: (0.9, 0.999)\n    capturable: False\n    decoupled_weight_decay: True\n    differentiable: False\n    eps: 1e-08\n    foreach: None\n    fused: None\n    initial_lr: 0.000125\n    lr: 3.7449596774193655e-06\n    maximize: False\n    weight_decay: 0.040003606000574454\n)",
  "lr_scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x14e586a1dca0>",
  "wd_scheduler": "<trainers.schedulers.weight_decay.WeightDecayScheduler object at 0x14e57e241820>",
  "momentum_scheduler": "<trainers.schedulers.momentum.MomentumScheduler object at 0x14e57e241850>",
  "optimizer_class": "adamw",
  "lr_scheduler_class": "cosine",
  "student_model": "VisionTransformerWithPretrainingHeads(\n  (encoder): VisionTransformer(\n    (patch_embed): PatchEmbedding(\n      (proj): Conv2d(3, 384, kernel_size=(4, 4), stride=(4, 4))\n    )\n    (pos_drop): Dropout(p=0.0, inplace=False)\n    (blocks): ModuleList(\n      (0): TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): Identity()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=768, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=768, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): Identity()\n      )\n      (1-11): 11 x TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): DropPath()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=768, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=768, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): DropPath()\n      )\n    )\n    (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n  )\n  (heads): ModuleDict(\n    (dino): DINOHead(\n      (mlp): Sequential(\n        (0): Linear(in_features=384, out_features=1024, bias=True)\n        (1): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (2): GELU(approximate='none')\n        (3): Linear(in_features=1024, out_features=1024, bias=True)\n        (4): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (5): GELU(approximate='none')\n        (6): Linear(in_features=1024, out_features=256, bias=True)\n      )\n      (last_layer): Linear(in_features=256, out_features=4096, bias=False)\n    )\n  )\n)",
  "teacher_model": "VisionTransformerWithPretrainingHeads(\n  (encoder): VisionTransformer(\n    (patch_embed): PatchEmbedding(\n      (proj): Conv2d(3, 384, kernel_size=(4, 4), stride=(4, 4))\n    )\n    (pos_drop): Dropout(p=0.0, inplace=False)\n    (blocks): ModuleList(\n      (0): TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): Identity()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=768, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=768, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): Identity()\n      )\n      (1-11): 11 x TransformerBlock(\n        (norm1): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (attn): Attention(\n          (qkv): Linear(in_features=384, out_features=1152, bias=True)\n          (proj): Linear(in_features=384, out_features=384, bias=True)\n          (proj_drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_attn): DropPath()\n        (norm2): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n        (mlp): MLP(\n          (fc1): Linear(in_features=384, out_features=768, bias=True)\n          (act): GELU(approximate='none')\n          (fc2): Linear(in_features=768, out_features=384, bias=True)\n          (drop): Dropout(p=0.0, inplace=False)\n        )\n        (drop_path_mlp): DropPath()\n      )\n    )\n    (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n  )\n  (heads): ModuleDict(\n    (dino): DINOHead(\n      (mlp): Sequential(\n        (0): Linear(in_features=384, out_features=1024, bias=True)\n        (1): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (2): GELU(approximate='none')\n        (3): Linear(in_features=1024, out_features=1024, bias=True)\n        (4): BatchNorm1d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n        (5): GELU(approximate='none')\n        (6): Linear(in_features=1024, out_features=256, bias=True)\n      )\n      (last_layer): Linear(in_features=256, out_features=4096, bias=False)\n    )\n  )\n)",
  "learning_rate": 0.000125,
  "_dino_loss": "DINOLoss()"
}
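
The dump above is plain JSON, so the training hyperparameters can be recovered directly from the "_hub_mixin_config" block. Below is a minimal sketch of reading it back with the Python standard library; the local filename "trainer_state.json" is an assumption for illustration, not something recorded in the dump.

# Sketch only: parse the dumped state and pull the hyperparameters.
import json

with open("trainer_state.json") as f:   # hypothetical local copy of the file shown above
    state = json.load(f)

cfg = state["_hub_mixin_config"]

# A quantity implied by the values above: warmup_ratio * total_steps.
warmup_steps = int(cfg["warmup_ratio"] * cfg["total_steps"])   # 0.1 * 781200 = 78120

print(cfg["repo_id"], cfg["learning_rate"], warmup_steps)

Note that the remaining top-level entries ("hf_api", "wandb_writer", "optimizer", the model strings, and so on) are Python object reprs serialized as strings, so they document the run but cannot be deserialized back into live objects.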