{
"name": "root",
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694972153",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --resume --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694972163"
},
"total": 9.933911819000059,
"count": 1,
"self": 0.6767273009991186,
"children": {
"run_training.setup": {
"total": 0.08190131100127473,
"count": 1,
"self": 0.08190131100127473
},
"TrainerController.start_learning": {
"total": 9.175283206999666,
"count": 1,
"self": 0.005385002003094996,
"children": {
"TrainerController._reset_env": {
"total": 2.3441390540010616,
"count": 1,
"self": 2.3441390540010616
},
"TrainerController.advance": {
"total": 6.818325450996781,
"count": 136,
"self": 0.005066982987045776,
"children": {
"env_step": {
"total": 6.608461055013322,
"count": 136,
"self": 6.256147617012175,
"children": {
"SubprocessEnvManager._take_step": {
"total": 0.349497553999754,
"count": 136,
"self": 0.016071361000285833,
"children": {
"TorchPolicy.evaluate": {
"total": 0.3334261929994682,
"count": 133,
"self": 0.3334261929994682
}
}
},
"workers": {
"total": 0.0028158840013929876,
"count": 135,
"self": 0.0,
"children": {
"worker_root": {
"total": 9.206072279008367,
"count": 135,
"is_parallel": true,
"self": 3.473757925003156,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003669351001008181,
"count": 1,
"is_parallel": true,
"self": 0.0009375949994137045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027317560015944764,
"count": 8,
"is_parallel": true,
"self": 0.0027317560015944764
}
}
},
"UnityEnvironment.step": {
"total": 0.10612607200164348,
"count": 1,
"is_parallel": true,
"self": 0.000676117000693921,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005360030008887406,
"count": 1,
"is_parallel": true,
"self": 0.0005360030008887406
},
"communicator.exchange": {
"total": 0.10161445300036576,
"count": 1,
"is_parallel": true,
"self": 0.10161445300036576
},
"steps_from_proto": {
"total": 0.0032994989996950608,
"count": 1,
"is_parallel": true,
"self": 0.000455426998087205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028440720016078558,
"count": 8,
"is_parallel": true,
"self": 0.0028440720016078558
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5.7323143540052115,
"count": 134,
"is_parallel": true,
"self": 0.09183909499370202,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.05065112101146951,
"count": 134,
"is_parallel": true,
"self": 0.05065112101146951
},
"communicator.exchange": {
"total": 5.301017501995375,
"count": 134,
"is_parallel": true,
"self": 5.301017501995375
},
"steps_from_proto": {
"total": 0.28880663600466505,
"count": 134,
"is_parallel": true,
"self": 0.061104609038011404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.22770202696665365,
"count": 1072,
"is_parallel": true,
"self": 0.22770202696665365
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 0.2047974129964132,
"count": 135,
"self": 0.00481761298578931,
"children": {
"process_trajectory": {
"total": 0.19997980001062388,
"count": 135,
"self": 0.19997980001062388
}
}
}
}
},
"trainer_threads": {
"total": 2.360999133088626e-06,
"count": 1,
"self": 2.360999133088626e-06
},
"TrainerController._save_models": {
"total": 0.007431338999595027,
"count": 1,
"self": 3.6792998798773624e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.007394546000796254,
"count": 1,
"self": 0.007394546000796254
}
}
}
}
}
}
}