{
  "architectures": ["GPTNeoXForCausalLM"],
  "model_type": "gpt-neox",
  "base_model": "gpt-oss:20b",
  "base_model_size": "20B",
  "adapter_type": "lora",
  "lora_config": {
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.1,
    "target_modules": [
      "q_proj",
      "k_proj",
      "v_proj",
      "o_proj",
      "gate_proj",
      "up_proj",
      "down_proj"
    ]
  },
  "mev_classifier": {
    "parameters": 315151,
    "input_dim": 240,
    "hidden_dim": 512,
    "dropout_rate": 0.3,
    "num_labels": 4,
    "label_map": {
      "0": "normal",
      "1": "arbitrage",
      "2": "sandwich",
      "3": "liquidation"
    }
  },
  "training_info": {
    "dataset_size": 700805,
    "validation_accuracy": 0.993,
    "validation_loss": 0.0174,
    "device": "mps",
    "optimizer": "AdamW",
    "learning_rate": 0.001,
    "weight_decay": 0.01
  },
  "task": "mev-detection-generation",
  "capabilities": [
    "arbitrage_detection",
    "sandwich_attack_detection",
    "liquidation_detection",
    "profit_estimation",
    "text_generation",
    "transaction_analysis"
  ],
  "total_model_size": "~13GB",
  "adapter_size": "1.2MB",
  "inference_requirements": {
    "min_ram": "16GB",
    "recommended_ram": "32GB",
    "quantization": "Q4_K_M"
  }
}
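
The mev_classifier block pins down the head's dimensions (240 -> 512 -> 4 labels, dropout 0.3) but not its layer topology. Below is a minimal PyTorch sketch of a compatible head, assuming a plain two-layer MLP with ReLU; the class name MEVClassifier is hypothetical, and this assumed topology's parameter count (125,956) will not match the reported 315,151, since the config does not specify the actual architecture.

# Minimal sketch of a classifier head matching the "mev_classifier"
# dimensions above. The exact topology (layer count, activations) is
# NOT given in the config, so this two-layer MLP is an assumption; its
# parameter count therefore differs from the reported 315,151.
import json

import torch
import torch.nn as nn


class MEVClassifier(nn.Module):  # hypothetical name, not from the config
    def __init__(self, input_dim: int, hidden_dim: int,
                 dropout_rate: float, num_labels: int) -> None:
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),   # 240 -> 512
            nn.ReLU(),
            nn.Dropout(dropout_rate),           # p = 0.3
            nn.Linear(hidden_dim, num_labels),  # 512 -> 4
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)  # raw logits; indices follow label_map


with open("config.json") as f:
    cfg = json.load(f)["mev_classifier"]

model = MEVClassifier(cfg["input_dim"], cfg["hidden_dim"],
                      cfg["dropout_rate"], cfg["num_labels"])

# Map the argmax logit index back to a label name from the config.
logits = model(torch.randn(1, cfg["input_dim"]))
print(cfg["label_map"][str(logits.argmax(dim=-1).item())])

Note that label_map keys are JSON strings ("0" through "3"), so an integer argmax index must be converted with str() before lookup, as done above.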