{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8565448522567749,
"min": 0.8565448522567749,
"max": 2.851303815841675,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8140.60205078125,
"min": 8140.60205078125,
"max": 29106.109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.166740417480469,
"min": 0.41101160645484924,
"max": 13.166740417480469,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2567.514404296875,
"min": 79.73625183105469,
"max": 2685.171142578125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06581678814583546,
"min": 0.06331634182711508,
"max": 0.0761093535465097,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2632671525833418,
"min": 0.25952589719592795,
"max": 0.38049370377077996,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.193873315903486,
"min": 0.14212290774437364,
"max": 0.27456939680611386,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.775493263613944,
"min": 0.5684916309774946,
"max": 1.3047064212607402,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 3.772727272727273,
"max": 25.945454545454545,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 166.0,
"max": 1427.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 3.772727272727273,
"max": 25.945454545454545,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 166.0,
"max": 1427.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1760369096",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1760369518"
},
"total": 421.55487386799996,
"count": 1,
"self": 0.4312255769999638,
"children": {
"run_training.setup": {
"total": 0.04014523500006817,
"count": 1,
"self": 0.04014523500006817
},
"TrainerController.start_learning": {
"total": 421.0835030559999,
"count": 1,
"self": 0.31619073799106445,
"children": {
"TrainerController._reset_env": {
"total": 3.3740059839999503,
"count": 1,
"self": 3.3740059839999503
},
"TrainerController.advance": {
"total": 417.32051623000916,
"count": 18192,
"self": 0.3486229310137787,
"children": {
"env_step": {
"total": 299.1838517789989,
"count": 18192,
"self": 235.04474604199345,
"children": {
"SubprocessEnvManager._take_step": {
"total": 63.94367601200031,
"count": 18192,
"self": 1.1581637750010714,
"children": {
"TorchPolicy.evaluate": {
"total": 62.78551223699924,
"count": 18192,
"self": 62.78551223699924
}
}
},
"workers": {
"total": 0.19542972500516953,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 419.20670946900077,
"count": 18192,
"is_parallel": true,
"self": 212.17584009200584,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006081977999997434,
"count": 1,
"is_parallel": true,
"self": 0.0036745120000887255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002407465999908709,
"count": 10,
"is_parallel": true,
"self": 0.002407465999908709
}
}
},
"UnityEnvironment.step": {
"total": 0.035097113000006175,
"count": 1,
"is_parallel": true,
"self": 0.0006181310000101803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040138200006367697,
"count": 1,
"is_parallel": true,
"self": 0.00040138200006367697
},
"communicator.exchange": {
"total": 0.03232224800001404,
"count": 1,
"is_parallel": true,
"self": 0.03232224800001404
},
"steps_from_proto": {
"total": 0.0017553519999182754,
"count": 1,
"is_parallel": true,
"self": 0.00035000499997295265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014053469999453228,
"count": 10,
"is_parallel": true,
"self": 0.0014053469999453228
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 207.03086937699493,
"count": 18191,
"is_parallel": true,
"self": 9.860101359999703,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.077139846997966,
"count": 18191,
"is_parallel": true,
"self": 5.077139846997966
},
"communicator.exchange": {
"total": 158.26192416899778,
"count": 18191,
"is_parallel": true,
"self": 158.26192416899778
},
"steps_from_proto": {
"total": 33.83170400099948,
"count": 18191,
"is_parallel": true,
"self": 6.193184137000117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.638519863999363,
"count": 181910,
"is_parallel": true,
"self": 27.638519863999363
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 117.78804151999645,
"count": 18192,
"self": 0.3998353889862756,
"children": {
"process_trajectory": {
"total": 25.81775269800994,
"count": 18192,
"self": 25.331812363010044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48594033499989564,
"count": 4,
"self": 0.48594033499989564
}
}
},
"_update_policy": {
"total": 91.57045343300024,
"count": 90,
"self": 38.44838026499997,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.12207316800027,
"count": 4587,
"self": 53.12207316800027
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.799998679227429e-07,
"count": 1,
"self": 9.799998679227429e-07
},
"TrainerController._save_models": {
"total": 0.07278912399988258,
"count": 1,
"self": 0.0006747959998847364,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07211432799999784,
"count": 1,
"self": 0.07211432799999784
}
}
}
}
}
}
}