{
  "model_name": "David-decoupled-cantor_scale",
  "run_id": "20251104_154540",
  "timestamp": "2025-11-04T15:57:33.702682",
  "best_val_acc": 78.904,
  "best_epoch": 4,
  "final_train_acc": 86.63156325443911,
  "final_train_loss": 1.4550095543688029,
  "scale_accuracies": {
    "256": 74.616,
    "512": 77.184,
    "768": 77.982,
    "1024": 77.986,
    "2048": 77.906,
    "4096": 77.97
  },
  "architecture": {
    "preset": "clip_vit_b16_cantor_big_window",
    "sharing_mode": "decoupled",
    "fusion_mode": "cantor_scale",
    "scales": [
      256,
      512,
      768,
      1024,
      2048,
      4096
    ],
    "feature_dim": 512,
    "num_classes": 1000,
    "use_belly": true,
    "belly_expand": 2.0
  },
  "training": {
    "dataset": "AbstractPhil/imagenet-clip-features-orderly",
    "model_variant": "clip_vit_b16",
    "num_epochs": 5,
    "batch_size": 512,
    "learning_rate": 0.001,
    "rose_weight": "0.1\u21920.5",
    "cayley_loss": false,
    "optimizer": "AdamW",
    "scheduler": "cosine_restarts"
  },
  "files": {
    "weights_safetensors": "weights/David-decoupled-cantor_scale/20251104_154540/best_model_acc78.90.safetensors",
    "weights_pytorch": "weights/David-decoupled-cantor_scale/20251104_154540/best_model.pth",
    "config": "weights/David-decoupled-cantor_scale/20251104_154540/david_config.json",
    "training_config": "weights/David-decoupled-cantor_scale/20251104_154540/train_config.json",
    "tensorboard": "runs/David-decoupled-cantor_scale/20251104_154540/"
  }
}
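
A minimal sketch of how this metadata might be consumed, assuming it is saved as results.json (hypothetical filename) next to the checkpoints listed under "files"; only the standard json library and the safetensors package are used, and the model class that would consume the loaded state dict is not shown.

import json
from safetensors.torch import load_file  # pip install safetensors

with open("results.json") as f:  # hypothetical path for this metadata file
    meta = json.load(f)

# Summarize the run from the recorded metrics.
print(f"best val acc: {meta['best_val_acc']:.2f}% (epoch {meta['best_epoch']})")
for scale, acc in meta["scale_accuracies"].items():
    print(f"  scale {scale}: {acc:.2f}%")

# Load the best checkpoint referenced by the metadata (a plain tensor dict).
state_dict = load_file(meta["files"]["weights_safetensors"])
print(f"loaded {len(state_dict)} tensors")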