{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0007920265197754,
"min": 0.9904081225395203,
"max": 2.846792459487915,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9588.5888671875,
"min": 9588.5888671875,
"max": 28211.767578125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 9992.0,
"max": 199960.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 9992.0,
"max": 199960.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.901126861572266,
"min": 0.518122673034668,
"max": 12.901126861572266,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.7197265625,
"min": 78.75464630126953,
"max": 2608.99169921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 6567.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07139767223926873,
"min": 0.05977864206418304,
"max": 0.07565576074158148,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2855906889570749,
"min": 0.17933592619254912,
"max": 0.36397708985068855,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20328949581758649,
"min": 0.1392637832619834,
"max": 0.28390184138335434,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8131579832703459,
"min": 0.41779134978595023,
"max": 1.4195092069167716,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.214097262000005e-06,
"min": 8.214097262000005e-06,
"max": 0.00029036400321200004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.285638904800002e-05,
"min": 3.285638904800002e-05,
"max": 0.0013858200380599999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10273800000000001,
"min": 0.10273800000000001,
"max": 0.19678800000000005,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41095200000000004,
"min": 0.41095200000000004,
"max": 0.96194,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001466262000000001,
"min": 0.0001466262000000001,
"max": 0.0048397212,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005865048000000004,
"min": 0.0005865048000000004,
"max": 0.023100806,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.931818181818183,
"min": 3.696969696969697,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1097.0,
"min": 122.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.931818181818183,
"min": 3.696969696969697,
"max": 25.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1097.0,
"min": 122.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683798992",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683799437"
},
"total": 445.09787358799997,
"count": 1,
"self": 0.4761364830000048,
"children": {
"run_training.setup": {
"total": 0.03790906699998686,
"count": 1,
"self": 0.03790906699998686
},
"TrainerController.start_learning": {
"total": 444.583828038,
"count": 1,
"self": 0.5291882940000505,
"children": {
"TrainerController._reset_env": {
"total": 3.551607507999961,
"count": 1,
"self": 3.551607507999961
},
"TrainerController.advance": {
"total": 440.2882425910001,
"count": 18020,
"self": 0.25911114100188115,
"children": {
"env_step": {
"total": 440.0291314499982,
"count": 18020,
"self": 320.5083315040056,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.27592303199697,
"count": 18020,
"self": 1.5515728249891936,
"children": {
"TorchPolicy.evaluate": {
"total": 117.72435020700777,
"count": 18020,
"self": 117.72435020700777
}
}
},
"workers": {
"total": 0.24487691399565392,
"count": 18020,
"self": 0.0,
"children": {
"worker_root": {
"total": 442.9634877259964,
"count": 18020,
"is_parallel": true,
"self": 206.48813358698908,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017461039999489003,
"count": 1,
"is_parallel": true,
"self": 0.0005239469995785839,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012221570003703164,
"count": 10,
"is_parallel": true,
"self": 0.0012221570003703164
}
}
},
"UnityEnvironment.step": {
"total": 0.07242847999998503,
"count": 1,
"is_parallel": true,
"self": 0.0005953859999863198,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037285999997038743,
"count": 1,
"is_parallel": true,
"self": 0.00037285999997038743
},
"communicator.exchange": {
"total": 0.06636252500004503,
"count": 1,
"is_parallel": true,
"self": 0.06636252500004503
},
"steps_from_proto": {
"total": 0.005097708999983297,
"count": 1,
"is_parallel": true,
"self": 0.00039590299991232314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004701806000070974,
"count": 10,
"is_parallel": true,
"self": 0.004701806000070974
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 236.47535413900732,
"count": 18019,
"is_parallel": true,
"self": 9.366331168022157,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.024456875985493,
"count": 18019,
"is_parallel": true,
"self": 5.024456875985493
},
"communicator.exchange": {
"total": 191.01347006299795,
"count": 18019,
"is_parallel": true,
"self": 191.01347006299795
},
"steps_from_proto": {
"total": 31.07109603200172,
"count": 18019,
"is_parallel": true,
"self": 5.876847774983162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.194248257018558,
"count": 180190,
"is_parallel": true,
"self": 25.194248257018558
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011714599986589747,
"count": 1,
"self": 0.00011714599986589747,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 436.9830594290147,
"count": 394164,
"is_parallel": true,
"self": 8.971961243031615,
"children": {
"process_trajectory": {
"total": 241.90219536998438,
"count": 394164,
"is_parallel": true,
"self": 240.33145313498449,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5707422349998978,
"count": 4,
"is_parallel": true,
"self": 1.5707422349998978
}
}
},
"_update_policy": {
"total": 186.1089028159987,
"count": 89,
"is_parallel": true,
"self": 69.05325165699219,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.05565115900652,
"count": 4536,
"is_parallel": true,
"self": 117.05565115900652
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21467249900001661,
"count": 1,
"self": 0.00215925700013031,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2125132419998863,
"count": 1,
"self": 0.2125132419998863
}
}
}
}
}
}
}