{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6634334921836853,
"min": 0.641828715801239,
"max": 2.866987705230713,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6838.00927734375,
"min": 6141.22607421875,
"max": 29360.822265625,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.34647274017334,
"min": 0.37393680214881897,
"max": 14.475143432617188,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2941.02685546875,
"min": 72.54373931884766,
"max": 2964.31884765625,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06501371963908348,
"min": 0.05886456950233254,
"max": 0.07852357224328443,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3250685981954174,
"min": 0.23545827800933017,
"max": 0.3886483893056364,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14584077248970667,
"min": 0.12276420525421261,
"max": 0.282452096249543,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7292038624485334,
"min": 0.49105682101685044,
"max": 1.3863459430342797,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.454545454545453,
"min": 3.7045454545454546,
"max": 28.863636363636363,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1565.0,
"min": 163.0,
"max": 1571.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.454545454545453,
"min": 3.7045454545454546,
"max": 28.863636363636363,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1565.0,
"min": 163.0,
"max": 1571.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684699526",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684704414"
},
"total": 4888.010331072,
"count": 1,
"self": 0.42702333599936537,
"children": {
"run_training.setup": {
"total": 0.05120498000007956,
"count": 1,
"self": 0.05120498000007956
},
"TrainerController.start_learning": {
"total": 4887.532102756,
"count": 1,
"self": 6.510532601965679,
"children": {
"TrainerController._reset_env": {
"total": 3.967202446000101,
"count": 1,
"self": 3.967202446000101
},
"TrainerController.advance": {
"total": 4876.909944452035,
"count": 181873,
"self": 3.2874289989777026,
"children": {
"env_step": {
"total": 4873.6225154530575,
"count": 181873,
"self": 3574.061502290067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1296.4293983599473,
"count": 181873,
"self": 19.95705304470539,
"children": {
"TorchPolicy.evaluate": {
"total": 1276.472345315242,
"count": 181873,
"self": 1276.472345315242
}
}
},
"workers": {
"total": 3.131614803043135,
"count": 181873,
"self": 0.0,
"children": {
"worker_root": {
"total": 4870.361253249812,
"count": 181873,
"is_parallel": true,
"self": 2225.784815118709,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00555857200004084,
"count": 1,
"is_parallel": true,
"self": 0.003972413000155939,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015861589998849013,
"count": 10,
"is_parallel": true,
"self": 0.0015861589998849013
}
}
},
"UnityEnvironment.step": {
"total": 0.03554727499999899,
"count": 1,
"is_parallel": true,
"self": 0.0006123790001311136,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003265279999595805,
"count": 1,
"is_parallel": true,
"self": 0.0003265279999595805
},
"communicator.exchange": {
"total": 0.03251848399997925,
"count": 1,
"is_parallel": true,
"self": 0.03251848399997925
},
"steps_from_proto": {
"total": 0.002089883999929043,
"count": 1,
"is_parallel": true,
"self": 0.00037770699975681055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017121770001722325,
"count": 10,
"is_parallel": true,
"self": 0.0017121770001722325
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2644.576438131103,
"count": 181872,
"is_parallel": true,
"self": 105.50157134186975,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 55.83054491708299,
"count": 181872,
"is_parallel": true,
"self": 55.83054491708299
},
"communicator.exchange": {
"total": 2133.3856643481035,
"count": 181872,
"is_parallel": true,
"self": 2133.3856643481035
},
"steps_from_proto": {
"total": 349.8586575240464,
"count": 181872,
"is_parallel": true,
"self": 69.21503273591361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 280.6436247881328,
"count": 1818720,
"is_parallel": true,
"self": 280.6436247881328
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002896469995903317,
"count": 1,
"self": 0.0002896469995903317,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4840.939598168381,
"count": 4675398,
"is_parallel": true,
"self": 102.22214749818068,
"children": {
"process_trajectory": {
"total": 2650.67207227021,
"count": 4675398,
"is_parallel": true,
"self": 2639.8904858092096,
"children": {
"RLTrainer._checkpoint": {
"total": 10.781586461000074,
"count": 40,
"is_parallel": true,
"self": 10.781586461000074
}
}
},
"_update_policy": {
"total": 2088.0453783999906,
"count": 909,
"is_parallel": true,
"self": 774.886163004931,
"children": {
"TorchPPOOptimizer.update": {
"total": 1313.1592153950596,
"count": 46353,
"is_parallel": true,
"self": 1313.1592153950596
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14413360899925465,
"count": 1,
"self": 0.0009275879992856062,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14320602099996904,
"count": 1,
"self": 0.14320602099996904
}
}
}
}
}
}
}