{
"model_type": "RetNet",
"task": "text-classification",
"architecture": "ProductionRetNet",
"vocab_size": 50257,
"model_dim": 512,
"num_layers": 6,
"num_heads": 8,
"num_classes": 7,
"max_length": 512,
"labels": [
"EXPLICIT-DISCLAIMER",
"EXPLICIT-OFFENSIVE",
"EXPLICIT-SEXUAL",
"EXPLICIT-VIOLENT",
"NON-EXPLICIT",
"SEXUAL-REFERENCE",
"SUGGESTIVE"
],
"label_to_id": {
"EXPLICIT-DISCLAIMER": 0,
"EXPLICIT-OFFENSIVE": 1,
"EXPLICIT-SEXUAL": 2,
"EXPLICIT-VIOLENT": 3,
"NON-EXPLICIT": 4,
"SEXUAL-REFERENCE": 5,
"SUGGESTIVE": 6
},
"id_to_label": {
"0": "EXPLICIT-DISCLAIMER",
"1": "EXPLICIT-OFFENSIVE",
"2": "EXPLICIT-SEXUAL",
"3": "EXPLICIT-VIOLENT",
"4": "NON-EXPLICIT",
"5": "SEXUAL-REFERENCE",
"6": "SUGGESTIVE"
},
"tokenizer": "gpt2",
"performance": {
"holdout_accuracy": 0.7441,
"holdout_macro_f1": 0.639,
"inference_speed": "1574 paragraphs/sec",
"parameters": 45029943
},
"training": {
"dataset_size": 119023,
"train_samples": 101771,
"val_samples": 11304,
"holdout_samples": 5948,
"epochs": 5,
"training_time_hours": 4.9,
"focal_loss_gamma": 2.0
},
"model_file": "model.safetensors",
"format": "safetensors"
}