[
{
"name": "HAPPO",
"type": "On-Policy",
"family": "HA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "Heterogeneous-Agent PPO with sequential policy update"
},
{
"name": "HATRPO",
"type": "On-Policy",
"family": "HA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "Trust region optimization with Krylov subspace"
},
{
"name": "HAA2C",
"type": "On-Policy",
"family": "HA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "Advantage Actor-Critic with heterogeneous agents"
},
{
"name": "MAPPO",
"type": "On-Policy",
"family": "MA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "Multi-Agent PPO with centralized value function"
},
{
"name": "SN-MAPPO",
"type": "On-Policy",
"family": "MA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "MAPPO with stochastic network architecture"
},
{
"name": "DAN-HAPPO",
"type": "On-Policy",
"family": "HA",
"policy": "Stochastic",
"action_space": "Discrete/Continuous",
"key_feature": "Dynamic attention network for agent communication"
},
{
"name": "HADDPG",
"type": "Off-Policy",
"family": "HA",
"policy": "Deterministic",
"action_space": "Continuous",
"key_feature": "Heterogeneous-Agent DDPG with experience replay"
},
{
"name": "HATD3",
"type": "Off-Policy",
"family": "HA",
"policy": "Deterministic",
"action_space": "Continuous",
"key_feature": "Twin delayed DDPG for reduced overestimation"
},
{
"name": "HASAC",
"type": "Off-Policy",
"family": "HA",
"policy": "Stochastic",
"action_space": "Continuous",
"key_feature": "Maximum entropy RL with automatic temperature tuning"
},
{
"name": "MADDPG",
"type": "Off-Policy",
"family": "MA",
"policy": "Deterministic",
"action_space": "Continuous",
"key_feature": "Centralized critic with decentralized actors"
},
{
"name": "MATD3",
"type": "Off-Policy",
"family": "MA",
"policy": "Deterministic",
"action_space": "Continuous",
"key_feature": "Multi-Agent TD3 with clipped double Q-learning"
},
{
"name": "QMIX",
"type": "Value-Based",
"family": "MA",
"policy": "Greedy",
"action_space": "Discrete",
"key_feature": "Monotonic value factorization via mixing network"
},
{
"name": "HAD3QN",
"type": "Value-Based",
"family": "HA",
"policy": "Greedy",
"action_space": "Discrete",
"key_feature": "Dueling Double DQN for heterogeneous agents"
},
{
"name": "SHOM",
"type": "Hybrid",
"family": "MA",
"policy": "Mixed",
"action_space": "Hybrid",
"key_feature": "Shared heterogeneous observation model"
},
{
"name": "2TS-VVC",
"type": "Two-Timescale",
"family": "Special",
"policy": "Mixed",
"action_space": "Hybrid",
"key_feature": "Slow SACD + Fast DDPG for VVC coordination"
}
]