{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6359524130821228,
"min": 0.6358556151390076,
"max": 2.876389265060425,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6156.01953125,
"min": 6102.1943359375,
"max": 29457.103515625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.770383834838867,
"min": 0.17997859418392181,
"max": 13.956993103027344,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2822.9287109375,
"min": 34.91584777832031,
"max": 2861.18359375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06527954467900973,
"min": 0.05941065163196803,
"max": 0.07736969259236574,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3263977233950487,
"min": 0.2376426065278721,
"max": 0.38684846296182873,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17763330349150824,
"min": 0.09384790241521984,
"max": 0.2875668560172997,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8881665174575413,
"min": 0.37539160966087937,
"max": 1.4378342800864985,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.8,
"min": 2.8181818181818183,
"max": 27.418181818181818,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1474.0,
"min": 124.0,
"max": 1508.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.8,
"min": 2.8181818181818183,
"max": 27.418181818181818,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1474.0,
"min": 124.0,
"max": 1508.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713463506",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713464705"
},
"total": 1199.4827040960001,
"count": 1,
"self": 0.43701968100026534,
"children": {
"run_training.setup": {
"total": 0.0612470259999327,
"count": 1,
"self": 0.0612470259999327
},
"TrainerController.start_learning": {
"total": 1198.984437389,
"count": 1,
"self": 1.4987640889987688,
"children": {
"TrainerController._reset_env": {
"total": 2.827450395000028,
"count": 1,
"self": 2.827450395000028
},
"TrainerController.advance": {
"total": 1194.5488428930012,
"count": 45476,
"self": 0.7279532800259858,
"children": {
"env_step": {
"total": 1193.8208896129752,
"count": 45476,
"self": 777.4037776889785,
"children": {
"SubprocessEnvManager._take_step": {
"total": 415.64098730199817,
"count": 45476,
"self": 3.928888072988343,
"children": {
"TorchPolicy.evaluate": {
"total": 411.7120992290098,
"count": 45476,
"self": 411.7120992290098
}
}
},
"workers": {
"total": 0.7761246219985196,
"count": 45476,
"self": 0.0,
"children": {
"worker_root": {
"total": 1195.8442483689676,
"count": 45476,
"is_parallel": true,
"self": 589.0925008989893,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006238444000018717,
"count": 1,
"is_parallel": true,
"self": 0.004567462999943928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016709810000747893,
"count": 10,
"is_parallel": true,
"self": 0.0016709810000747893
}
}
},
"UnityEnvironment.step": {
"total": 0.041914448999932574,
"count": 1,
"is_parallel": true,
"self": 0.0007357749999528096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004478470000321977,
"count": 1,
"is_parallel": true,
"self": 0.0004478470000321977
},
"communicator.exchange": {
"total": 0.038497888999927454,
"count": 1,
"is_parallel": true,
"self": 0.038497888999927454
},
"steps_from_proto": {
"total": 0.0022329380000201127,
"count": 1,
"is_parallel": true,
"self": 0.00042085999973551225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018120780002846004,
"count": 10,
"is_parallel": true,
"self": 0.0018120780002846004
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 606.7517474699782,
"count": 45475,
"is_parallel": true,
"self": 27.573571297919216,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.84042958302939,
"count": 45475,
"is_parallel": true,
"self": 14.84042958302939
},
"communicator.exchange": {
"total": 470.8209572760078,
"count": 45475,
"is_parallel": true,
"self": 470.8209572760078
},
"steps_from_proto": {
"total": 93.51678931302183,
"count": 45475,
"is_parallel": true,
"self": 17.50592319003215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.01086612298968,
"count": 454750,
"is_parallel": true,
"self": 76.01086612298968
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.2332999985083006e-05,
"count": 1,
"self": 5.2332999985083006e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1179.0923655130455,
"count": 1785548,
"is_parallel": true,
"self": 39.19354562213857,
"children": {
"process_trajectory": {
"total": 650.9530849969068,
"count": 1785548,
"is_parallel": true,
"self": 648.9978835069069,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9552014899999222,
"count": 10,
"is_parallel": true,
"self": 1.9552014899999222
}
}
},
"_update_policy": {
"total": 488.9457348940002,
"count": 227,
"is_parallel": true,
"self": 143.58477777398537,
"children": {
"TorchPPOOptimizer.update": {
"total": 345.36095712001486,
"count": 11568,
"is_parallel": true,
"self": 345.36095712001486
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10932767899998908,
"count": 1,
"self": 0.001041014999827894,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10828666400016118,
"count": 1,
"self": 0.10828666400016118
}
}
}
}
}
}
}