{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8989839553833008,
"min": 0.8926671147346497,
"max": 2.873791217803955,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8583.4990234375,
"min": 8583.4990234375,
"max": 29462.107421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.0027494430542,
"min": 0.2838077247142792,
"max": 13.0027494430542,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2535.5361328125,
"min": 55.05869674682617,
"max": 2651.9599609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0682236898756659,
"min": 0.06426349910964513,
"max": 0.07485243668905277,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2728947595026636,
"min": 0.2570539964385805,
"max": 0.37426218344526385,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22072793353422016,
"min": 0.1389522687075477,
"max": 0.3001127037785801,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8829117341368806,
"min": 0.5558090748301908,
"max": 1.3958979839203405,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.727272727272727,
"min": 3.8181818181818183,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1132.0,
"min": 168.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.727272727272727,
"min": 3.8181818181818183,
"max": 25.727272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1132.0,
"min": 168.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697478526",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697479015"
},
"total": 488.4904223340001,
"count": 1,
"self": 0.42758824900005266,
"children": {
"run_training.setup": {
"total": 0.04429675200003658,
"count": 1,
"self": 0.04429675200003658
},
"TrainerController.start_learning": {
"total": 488.018537333,
"count": 1,
"self": 0.5805556229914828,
"children": {
"TrainerController._reset_env": {
"total": 7.387602178000009,
"count": 1,
"self": 7.387602178000009
},
"TrainerController.advance": {
"total": 479.9575998230084,
"count": 18200,
"self": 0.28018377900622227,
"children": {
"env_step": {
"total": 479.6774160440022,
"count": 18200,
"self": 328.441204559,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.94366368399875,
"count": 18200,
"self": 1.597794879003061,
"children": {
"TorchPolicy.evaluate": {
"total": 149.3458688049957,
"count": 18200,
"self": 149.3458688049957
}
}
},
"workers": {
"total": 0.2925478010034226,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 486.7001861849945,
"count": 18200,
"is_parallel": true,
"self": 235.96398038899014,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005293655999992097,
"count": 1,
"is_parallel": true,
"self": 0.0037663510000811584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001527304999910939,
"count": 10,
"is_parallel": true,
"self": 0.001527304999910939
}
}
},
"UnityEnvironment.step": {
"total": 0.05249473099996749,
"count": 1,
"is_parallel": true,
"self": 0.0007473569999660867,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032465200001752237,
"count": 1,
"is_parallel": true,
"self": 0.00032465200001752237
},
"communicator.exchange": {
"total": 0.049316583999996055,
"count": 1,
"is_parallel": true,
"self": 0.049316583999996055
},
"steps_from_proto": {
"total": 0.0021061379999878227,
"count": 1,
"is_parallel": true,
"self": 0.0005382699999927354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015678679999950873,
"count": 10,
"is_parallel": true,
"self": 0.0015678679999950873
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 250.73620579600436,
"count": 18199,
"is_parallel": true,
"self": 10.926181783028255,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.760972756990043,
"count": 18199,
"is_parallel": true,
"self": 5.760972756990043
},
"communicator.exchange": {
"total": 197.76475009699084,
"count": 18199,
"is_parallel": true,
"self": 197.76475009699084
},
"steps_from_proto": {
"total": 36.28430115899522,
"count": 18199,
"is_parallel": true,
"self": 6.98530362900641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.29899752998881,
"count": 181990,
"is_parallel": true,
"self": 29.29899752998881
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014949800004160352,
"count": 1,
"self": 0.00014949800004160352,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 475.91363643394726,
"count": 500474,
"is_parallel": true,
"self": 10.927658789978125,
"children": {
"process_trajectory": {
"total": 270.9346155379692,
"count": 500474,
"is_parallel": true,
"self": 270.19589846396906,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7387170740001352,
"count": 4,
"is_parallel": true,
"self": 0.7387170740001352
}
}
},
"_update_policy": {
"total": 194.05136210599994,
"count": 90,
"is_parallel": true,
"self": 62.752149075998034,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.2992130300019,
"count": 4587,
"is_parallel": true,
"self": 131.2992130300019
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09263021100002788,
"count": 1,
"self": 0.0008904759999950329,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09173973500003285,
"count": 1,
"self": 0.09173973500003285
}
}
}
}
}
}
}