{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8540120124816895,
"min": 0.8540120124816895,
"max": 2.8858306407928467,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8802.3017578125,
"min": 8562.8828125,
"max": 27583.31640625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199936.0,
"min": 9944.0,
"max": 199936.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199936.0,
"min": 9944.0,
"max": 199936.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.656551361083984,
"min": 0.18059249222278595,
"max": 12.656551361083984,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2581.9365234375,
"min": 11.91910457611084,
"max": 2581.9365234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 2189.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07136784260427723,
"min": 0.060519698533336816,
"max": 0.0776025354086111,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3568392130213861,
"min": 0.0776025354086111,
"max": 0.3615068913398621,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19341688781392338,
"min": 0.1399279706034006,
"max": 0.2803054235729517,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9670844390696169,
"min": 0.1573007811481754,
"max": 1.3856700293573678,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.828097724000001e-06,
"min": 6.828097724000001e-06,
"max": 0.00028732800422399995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.4140488620000006e-05,
"min": 3.4140488620000006e-05,
"max": 0.0013871400376199998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10227599999999999,
"min": 0.10227599999999999,
"max": 0.19577599999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.51138,
"min": 0.19577599999999998,
"max": 0.96238,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00012357240000000002,
"min": 0.00012357240000000002,
"max": 0.0047892224,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006178620000000001,
"min": 0.0006178620000000001,
"max": 0.023122762000000005,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.163636363636364,
"min": 3.8181818181818183,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1384.0,
"min": 42.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.163636363636364,
"min": 3.8181818181818183,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1384.0,
"min": 42.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1768142057",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/ruedi/miniconda3/envs/deep-rl-unit5/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1768142459"
},
"total": 401.4754173329984,
"count": 1,
"self": 0.3207430079983169,
"children": {
"run_training.setup": {
"total": 0.026407552000819123,
"count": 1,
"self": 0.026407552000819123
},
"TrainerController.start_learning": {
"total": 401.12826677299927,
"count": 1,
"self": 0.5006365171302605,
"children": {
"TrainerController._reset_env": {
"total": 2.4367559460006305,
"count": 1,
"self": 2.4367559460006305
},
"TrainerController.advance": {
"total": 398.10597254486856,
"count": 17672,
"self": 0.22331890193163417,
"children": {
"env_step": {
"total": 397.88265364293693,
"count": 17672,
"self": 252.57161869101765,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.09750342501138,
"count": 17672,
"self": 1.2005272492133372,
"children": {
"TorchPolicy.evaluate": {
"total": 143.89697617579805,
"count": 17672,
"self": 143.89697617579805
}
}
},
"workers": {
"total": 0.21353152690790012,
"count": 17672,
"self": 0.0,
"children": {
"worker_root": {
"total": 400.40890377600954,
"count": 17672,
"is_parallel": true,
"self": 213.52285997986473,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028197029987495625,
"count": 1,
"is_parallel": true,
"self": 0.0005475709949678276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002272132003781735,
"count": 10,
"is_parallel": true,
"self": 0.002272132003781735
}
}
},
"UnityEnvironment.step": {
"total": 0.026516744999753428,
"count": 1,
"is_parallel": true,
"self": 0.0009888280019367812,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046113799908198416,
"count": 1,
"is_parallel": true,
"self": 0.00046113799908198416
},
"communicator.exchange": {
"total": 0.023347678999925847,
"count": 1,
"is_parallel": true,
"self": 0.023347678999925847
},
"steps_from_proto": {
"total": 0.0017190999988088151,
"count": 1,
"is_parallel": true,
"self": 0.00037836600131413434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013407339974946808,
"count": 10,
"is_parallel": true,
"self": 0.0013407339974946808
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 186.8860437961448,
"count": 17671,
"is_parallel": true,
"self": 9.02714470023966,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.126338234051218,
"count": 17671,
"is_parallel": true,
"self": 5.126338234051218
},
"communicator.exchange": {
"total": 143.59413328394112,
"count": 17671,
"is_parallel": true,
"self": 143.59413328394112
},
"steps_from_proto": {
"total": 29.138427577912807,
"count": 17671,
"is_parallel": true,
"self": 5.871253660063303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.267173917849505,
"count": 176710,
"is_parallel": true,
"self": 23.267173917849505
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0005407490007200977,
"count": 1,
"self": 0.0005407490007200977,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 394.9872860514297,
"count": 467334,
"is_parallel": true,
"self": 8.543531348177567,
"children": {
"process_trajectory": {
"total": 216.9490221782562,
"count": 467334,
"is_parallel": true,
"self": 216.4556301782577,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4933919999984937,
"count": 4,
"is_parallel": true,
"self": 0.4933919999984937
}
}
},
"_update_policy": {
"total": 169.49473252499592,
"count": 88,
"is_parallel": true,
"self": 35.334572824034694,
"children": {
"TorchPPOOptimizer.update": {
"total": 134.16015970096123,
"count": 4485,
"is_parallel": true,
"self": 134.16015970096123
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08436101599909307,
"count": 1,
"self": 0.000911858000108623,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08344915799898445,
"count": 1,
"self": 0.08344915799898445
}
}
}
}
}
}
}