{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7305637001991272,
"min": 0.7305637001991272,
"max": 2.8621654510498047,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7505.8115234375,
"min": 7334.4609375,
"max": 29279.953125,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.513615608215332,
"min": 0.33571872115135193,
"max": 13.559456825256348,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2756.777587890625,
"min": 65.12943267822266,
"max": 2766.7548828125,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06914882831065461,
"min": 0.061512426951986866,
"max": 0.07748536229745356,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34574414155327304,
"min": 0.24604970780794747,
"max": 0.3874268114872678,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19988607197415595,
"min": 0.11649945887221096,
"max": 0.2923641713956992,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9994303598707798,
"min": 0.4659978354888438,
"max": 1.260748864681113,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333331e-06,
"min": 5.288098237333331e-06,
"max": 0.00029458800180399996,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6440491186666655e-05,
"min": 2.6440491186666655e-05,
"max": 0.0014234400255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.70909090909091,
"min": 3.0681818181818183,
"max": 26.70909090909091,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1469.0,
"min": 135.0,
"max": 1469.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.70909090909091,
"min": 3.0681818181818183,
"max": 26.70909090909091,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1469.0,
"min": 135.0,
"max": 1469.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684196396",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684197074"
},
"total": 678.1408604330001,
"count": 1,
"self": 0.5489128000000392,
"children": {
"run_training.setup": {
"total": 0.045395863999942776,
"count": 1,
"self": 0.045395863999942776
},
"TrainerController.start_learning": {
"total": 677.5465517690001,
"count": 1,
"self": 0.7910375159800651,
"children": {
"TrainerController._reset_env": {
"total": 3.7693801789999952,
"count": 1,
"self": 3.7693801789999952
},
"TrainerController.advance": {
"total": 672.83785889802,
"count": 27333,
"self": 0.3846337510135527,
"children": {
"env_step": {
"total": 672.4532251470065,
"count": 27333,
"self": 489.2900723660223,
"children": {
"SubprocessEnvManager._take_step": {
"total": 182.79215565696916,
"count": 27333,
"self": 2.5480498499846362,
"children": {
"TorchPolicy.evaluate": {
"total": 180.24410580698452,
"count": 27333,
"self": 180.24410580698452
}
}
},
"workers": {
"total": 0.37099712401504803,
"count": 27333,
"self": 0.0,
"children": {
"worker_root": {
"total": 675.3592491329981,
"count": 27333,
"is_parallel": true,
"self": 323.60249233701245,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005525625000018408,
"count": 1,
"is_parallel": true,
"self": 0.004116432999808239,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014091920002101688,
"count": 10,
"is_parallel": true,
"self": 0.0014091920002101688
}
}
},
"UnityEnvironment.step": {
"total": 0.0445023659999606,
"count": 1,
"is_parallel": true,
"self": 0.0006353700000545359,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003466099999513972,
"count": 1,
"is_parallel": true,
"self": 0.0003466099999513972
},
"communicator.exchange": {
"total": 0.041487619000008635,
"count": 1,
"is_parallel": true,
"self": 0.041487619000008635
},
"steps_from_proto": {
"total": 0.002032766999946034,
"count": 1,
"is_parallel": true,
"self": 0.00039050300006238103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001642263999883653,
"count": 10,
"is_parallel": true,
"self": 0.001642263999883653
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 351.75675679598567,
"count": 27332,
"is_parallel": true,
"self": 14.064434390006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.567250330996671,
"count": 27332,
"is_parallel": true,
"self": 7.567250330996671
},
"communicator.exchange": {
"total": 283.006092146976,
"count": 27332,
"is_parallel": true,
"self": 283.006092146976
},
"steps_from_proto": {
"total": 47.11897992800698,
"count": 27332,
"is_parallel": true,
"self": 8.844523120916051,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.27445680709093,
"count": 273320,
"is_parallel": true,
"self": 38.27445680709093
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00047122599994509073,
"count": 1,
"self": 0.00047122599994509073,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 668.0261540690105,
"count": 606536,
"is_parallel": true,
"self": 13.423965355962878,
"children": {
"process_trajectory": {
"total": 360.96228784204743,
"count": 606536,
"is_parallel": true,
"self": 359.19928177104737,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7630060710000635,
"count": 6,
"is_parallel": true,
"self": 1.7630060710000635
}
}
},
"_update_policy": {
"total": 293.63990087100024,
"count": 136,
"is_parallel": true,
"self": 108.79800959500562,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.8418912759946,
"count": 6933,
"is_parallel": true,
"self": 184.8418912759946
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.147803950000025,
"count": 1,
"self": 0.0011492129999624012,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1466547370000626,
"count": 1,
"self": 0.1466547370000626
}
}
}
}
}
}
}