{
"model_type": "tigas",
"architectures": ["TIGASModel"],
"task_type": "image-classification",
"framework": "pytorch",
"model_config": {
"img_size": 256,
"in_channels": 3,
"feature_dim": 256,
"base_channels": 32,
"num_scales": 4,
"fast_mode": false
},
"training_config": {
"epochs_trained": 3,
"batch_size": 8,
"learning_rate": 0.0001,
"optimizer": "adamw",
"scheduler": "cosine",
"mixed_precision": true,
"warmup_epochs": 5
},
"dataset_info": {
"train_samples": 128776,
"val_samples": 14167,
"test_samples": 14126,
"total_samples": 157069,
"real_ratio": 0.458,
"fake_ratio": 0.542
},
"metrics": {
"best_val_loss": 0.3079,
"best_val_accuracy": 0.6555,
"final_train_loss": 0.3506
},
"input_spec": {
"type": "image",
"channels": 3,
"height": 256,
"width": 256,
"normalization": {
"mean": [0.5, 0.5, 0.5],
"std": [0.5, 0.5, 0.5],
"range": [-1, 1]
}
},
"output_spec": {
"type": "score",
"range": [0, 1],
"interpretation": {
"1.0": "real/natural image",
"0.0": "fake/generated image"
}
},
"checkpoint_info": {
"format": "pytorch",
"keys": [
"model_state_dict",
"optimizer_state_dict",
"scheduler_state_dict",
"scaler_state_dict",
"epoch",
"global_step",
"best_val_loss",
"train_history",
"val_history"
]
},
"version": "0.1.0",
"library_name": "tigas",
"github_repo": "https://github.com/H1merka/TIGAS"
}