AbstractPhil committed on
Commit
78fa204
·
verified ·
1 Parent(s): af30264

Update metrics - Run 20251104_151233

Browse files
Files changed (1) hide show
  1. best_model.json +24 -30
best_model.json CHANGED
@@ -1,43 +1,37 @@
1
  {
2
- "model_name": "David-decoupled-cantor_scale",
3
- "run_id": "20251104_144102",
4
- "timestamp": "2025-11-04T15:01:55.958968",
5
- "best_val_acc": 85.25,
6
- "best_epoch": 4,
7
- "final_train_acc": 91.28685019205146,
8
- "final_train_loss": 0.8936501936678214,
9
  "scale_accuracies": {
10
- "384": 83.992,
11
- "512": 84.384,
12
- "768": 84.716,
13
- "1024": 84.668,
14
- "1280": 84.458,
15
- "1536": 84.162,
16
- "1792": 84.038,
17
- "2048": 84.234
18
  },
19
  "architecture": {
20
- "preset": "clip_vit_bigg14_cantor_decoupled",
21
  "sharing_mode": "decoupled",
22
- "fusion_mode": "cantor_scale",
23
  "scales": [
24
- 384,
25
  512,
26
  768,
27
  1024,
28
- 1280,
29
- 1536,
30
- 1792,
31
- 2048
32
  ],
33
- "feature_dim": 1280,
34
  "num_classes": 1000,
35
  "use_belly": true,
36
- "belly_expand": 2.0
37
  },
38
  "training": {
39
  "dataset": "AbstractPhil/imagenet-clip-features-orderly",
40
- "model_variant": "clip_vit_laion_bigg14",
41
  "num_epochs": 5,
42
  "batch_size": 512,
43
  "learning_rate": 0.001,
@@ -47,10 +41,10 @@
47
  "scheduler": "cosine_restarts"
48
  },
49
  "files": {
50
- "weights_safetensors": "weights/David-decoupled-cantor_scale/20251104_144102/best_model_acc85.25.safetensors",
51
- "weights_pytorch": "weights/David-decoupled-cantor_scale/20251104_144102/best_model.pth",
52
- "config": "weights/David-decoupled-cantor_scale/20251104_144102/david_config.json",
53
- "training_config": "weights/David-decoupled-cantor_scale/20251104_144102/train_config.json",
54
- "tensorboard": "runs/David-decoupled-cantor_scale/20251104_144102/"
55
  }
56
  }
 
1
  {
2
+ "model_name": "David-decoupled-deep_efficiency",
3
+ "run_id": "20251104_151233",
4
+ "timestamp": "2025-11-04T15:17:06.764735",
5
+ "best_val_acc": 73.576,
6
+ "best_epoch": 0,
7
+ "final_train_acc": 71.94823157324534,
8
+ "final_train_loss": 3.0711968830000433,
9
  "scale_accuracies": {
10
+ "256": 69.484,
11
+ "512": 72.486,
12
+ "768": 73.576,
13
+ "1024": 73.7,
14
+ "1280": 73.708
 
 
 
15
  },
16
  "architecture": {
17
+ "preset": "high_accuracy",
18
  "sharing_mode": "decoupled",
19
+ "fusion_mode": "deep_efficiency",
20
  "scales": [
21
+ 256,
22
  512,
23
  768,
24
  1024,
25
+ 1280
26
  ],
27
+ "feature_dim": 512,
28
  "num_classes": 1000,
29
  "use_belly": true,
30
+ "belly_expand": 2.5
31
  },
32
  "training": {
33
  "dataset": "AbstractPhil/imagenet-clip-features-orderly",
34
+ "model_variant": "clip_vit_laion_b32",
35
  "num_epochs": 5,
36
  "batch_size": 512,
37
  "learning_rate": 0.001,
 
41
  "scheduler": "cosine_restarts"
42
  },
43
  "files": {
44
+ "weights_safetensors": "weights/David-decoupled-deep_efficiency/20251104_151233/best_model_acc73.58.safetensors",
45
+ "weights_pytorch": "weights/David-decoupled-deep_efficiency/20251104_151233/best_model.pth",
46
+ "config": "weights/David-decoupled-deep_efficiency/20251104_151233/david_config.json",
47
+ "training_config": "weights/David-decoupled-deep_efficiency/20251104_151233/train_config.json",
48
+ "tensorboard": "runs/David-decoupled-deep_efficiency/20251104_151233/"
49
  }
50
  }