{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6729865670204163,
"min": 0.6472216248512268,
"max": 2.8557047843933105,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6892.05517578125,
"min": 6250.86669921875,
"max": 29245.2734375,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.448219299316406,
"min": 0.365354984998703,
"max": 13.45572280883789,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2743.436767578125,
"min": 70.87886810302734,
"max": 2743.436767578125,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06619823626950201,
"min": 0.06123076111383424,
"max": 0.07490687091612887,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.33099118134751004,
"min": 0.24492304445533697,
"max": 0.3714489368306329,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19463252974491493,
"min": 0.12061632424250573,
"max": 0.2865791454472963,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9731626487245747,
"min": 0.48246529697002294,
"max": 1.3578365112052244,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.050764903999999e-06,
"min": 7.050764903999999e-06,
"max": 0.00039278400180400006,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5253824519999994e-05,
"min": 3.5253824519999994e-05,
"max": 0.0018979200255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.472727272727273,
"min": 3.7045454545454546,
"max": 26.89090909090909,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1456.0,
"min": 163.0,
"max": 1479.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.472727272727273,
"min": 3.7045454545454546,
"max": 26.89090909090909,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1456.0,
"min": 163.0,
"max": 1479.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705881368",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705882028"
},
"total": 659.8470961680002,
"count": 1,
"self": 0.4244807520001359,
"children": {
"run_training.setup": {
"total": 0.04562145899990355,
"count": 1,
"self": 0.04562145899990355
},
"TrainerController.start_learning": {
"total": 659.3769939570002,
"count": 1,
"self": 0.9044696469670725,
"children": {
"TrainerController._reset_env": {
"total": 3.0482468079999308,
"count": 1,
"self": 3.0482468079999308
},
"TrainerController.advance": {
"total": 655.3296563490333,
"count": 27331,
"self": 0.3900883840476581,
"children": {
"env_step": {
"total": 654.9395679649856,
"count": 27331,
"self": 431.0735551139967,
"children": {
"SubprocessEnvManager._take_step": {
"total": 223.44232366995038,
"count": 27331,
"self": 2.1397067859579693,
"children": {
"TorchPolicy.evaluate": {
"total": 221.3026168839924,
"count": 27331,
"self": 221.3026168839924
}
}
},
"workers": {
"total": 0.42368918103852593,
"count": 27331,
"self": 0.0,
"children": {
"worker_root": {
"total": 657.6289119209796,
"count": 27331,
"is_parallel": true,
"self": 322.03988371798596,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004622899999958463,
"count": 1,
"is_parallel": true,
"self": 0.0033851389998744708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012377610000839923,
"count": 10,
"is_parallel": true,
"self": 0.0012377610000839923
}
}
},
"UnityEnvironment.step": {
"total": 0.03574263800010158,
"count": 1,
"is_parallel": true,
"self": 0.0006144109997876512,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003981170000315615,
"count": 1,
"is_parallel": true,
"self": 0.0003981170000315615
},
"communicator.exchange": {
"total": 0.03277873900015038,
"count": 1,
"is_parallel": true,
"self": 0.03277873900015038
},
"steps_from_proto": {
"total": 0.001951371000131985,
"count": 1,
"is_parallel": true,
"self": 0.00040438599990011426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015469850002318708,
"count": 10,
"is_parallel": true,
"self": 0.0015469850002318708
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 335.58902820299363,
"count": 27330,
"is_parallel": true,
"self": 15.799550830927274,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.884986012036961,
"count": 27330,
"is_parallel": true,
"self": 7.884986012036961
},
"communicator.exchange": {
"total": 262.3759865830059,
"count": 27330,
"is_parallel": true,
"self": 262.3759865830059
},
"steps_from_proto": {
"total": 49.5285047770235,
"count": 27330,
"is_parallel": true,
"self": 9.117938148010353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.41056662901315,
"count": 273300,
"is_parallel": true,
"self": 40.41056662901315
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00043149200018888223,
"count": 1,
"self": 0.00043149200018888223,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 647.2878848579469,
"count": 1002231,
"is_parallel": true,
"self": 20.572105820019715,
"children": {
"process_trajectory": {
"total": 355.4448174389263,
"count": 1002231,
"is_parallel": true,
"self": 354.3901640659267,
"children": {
"RLTrainer._checkpoint": {
"total": 1.05465337299961,
"count": 6,
"is_parallel": true,
"self": 1.05465337299961
}
}
},
"_update_policy": {
"total": 271.2709615990009,
"count": 136,
"is_parallel": true,
"self": 83.58021376900183,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.69074782999905,
"count": 6933,
"is_parallel": true,
"self": 187.69074782999905
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09418966099974568,
"count": 1,
"self": 0.001101792999634199,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09308786800011148,
"count": 1,
"self": 0.09308786800011148
}
}
}
}
}
}
}