{
  "model_name": "David-decoupled-deep_efficiency",
  "run_id": "20251012_221046",
  "timestamp": "2025-10-12T22:15:33.176762",
  "best_val_acc": 62.524,
  "best_epoch": 0,
  "final_train_acc": 56.42811072509152,
  "final_train_loss": 2.9751985085156605,
  "scale_accuracies": {
    "256": 62.524
  },
  "architecture": {
    "preset": "high_accuracy",
    "sharing_mode": "decoupled",
    "fusion_mode": "deep_efficiency",
    "scales": [
      256,
      512,
      768,
      1024,
      1280
    ],
    "feature_dim": 512,
    "num_classes": 1000,
    "use_belly": true,
    "belly_expand": 2.5
  },
  "training": {
    "dataset": "AbstractPhil/imagenet-clip-features-orderly",
    "model_variant": [
      "clip_vit_b16",
      "clip_vit_laion_b32",
      "clip_vit_b32"
    ],
    "num_epochs": 10,
    "batch_size": 1024,
    "learning_rate": 0.01,
    "rose_weight": "0.2\u21920.8",
    "cayley_loss": false,
    "optimizer": "AdamW",
    "scheduler": "cosine_restarts"
  },
  "files": {
    "weights_safetensors": "weights/David-decoupled-deep_efficiency/20251012_221046/best_model_acc62.52.safetensors",
    "weights_pytorch": "weights/David-decoupled-deep_efficiency/20251012_221046/best_model.pth",
    "config": "weights/David-decoupled-deep_efficiency/20251012_221046/david_config.json",
    "training_config": "weights/David-decoupled-deep_efficiency/20251012_221046/train_config.json",
    "tensorboard": "runs/David-decoupled-deep_efficiency/20251012_221046/"
  }
}