{
  "model_type": "bilstm",
  "architecture": "BiLSTM + Self-Attention (Namgung et al. 2021)",
  "vocab_size": 40,
  "embedding_dim": 32,
  "maxlen": 75,
  "bilstm_dim": 128,
  "fc_hidden": 64,
  "dropout": 0.5,
  "num_classes": 2,
  "id2label": {"0": "legit", "1": "dga"},
  "label2id": {"legit": 0, "dga": 1},
  "framework": "pytorch",
  "weights_file": "bilstm_best.pth",
  "train_families": 54,
  "train_rows": 845639
}