{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5270171761512756,
"min": 0.5270171761512756,
"max": 1.4450328350067139,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15726.193359375,
"min": 15726.193359375,
"max": 43836.515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.13805966079235077,
"min": -0.1779879331588745,
"max": 0.16919615864753723,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 34.65297317504883,
"min": -42.18313980102539,
"max": 41.96064758300781,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.39009371399879456,
"min": 0.00314098852686584,
"max": 0.44155728816986084,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 97.91352081298828,
"min": 0.778965175151825,
"max": 104.64907836914062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07152501562959515,
"min": 0.06573262309464946,
"max": 0.07396274499480499,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0728752344439272,
"min": 0.471465181512156,
"max": 1.0728752344439272,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0392220358272122,
"min": 7.248318703928536e-05,
"max": 0.0392220358272122,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.588330537408183,
"min": 0.0009422814315107098,
"max": 0.588330537408183,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.451337516253331e-06,
"min": 7.451337516253331e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011177006274379996,
"min": 0.00011177006274379996,
"max": 0.0032529842156720002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248374666666667,
"min": 0.10248374666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372562,
"min": 1.3691136000000002,
"max": 2.3843280000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258126292,
"min": 0.000258126292,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00387189438,
"min": 0.00387189438,
"max": 0.1084543672,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011943025514483452,
"min": 0.011943025514483452,
"max": 0.40851807594299316,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17914538085460663,
"min": 0.16891570389270782,
"max": 2.859626531600952,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 721.5555555555555,
"min": 651.8048780487804,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32470.0,
"min": 15984.0,
"max": 32673.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.43375996814833745,
"min": -1.0000000521540642,
"max": 0.7000097161749514,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 19.519198566675186,
"min": -32.000001668930054,
"max": 28.700398363173008,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.43375996814833745,
"min": -1.0000000521540642,
"max": 0.7000097161749514,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 19.519198566675186,
"min": -32.000001668930054,
"max": 28.700398363173008,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08969367239882962,
"min": 0.0855944419301869,
"max": 7.983822038397193,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.036215257947333,
"min": 3.5949665610678494,
"max": 127.74115261435509,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679238438",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679240473"
},
"total": 2035.458046636,
"count": 1,
"self": 0.4747825769998144,
"children": {
"run_training.setup": {
"total": 0.11155841200002214,
"count": 1,
"self": 0.11155841200002214
},
"TrainerController.start_learning": {
"total": 2034.8717056470002,
"count": 1,
"self": 1.2883004660079678,
"children": {
"TrainerController._reset_env": {
"total": 7.373638748000076,
"count": 1,
"self": 7.373638748000076
},
"TrainerController.advance": {
"total": 2026.1182603929924,
"count": 63144,
"self": 1.3729439880901282,
"children": {
"env_step": {
"total": 1413.1011373429446,
"count": 63144,
"self": 1306.301700953062,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.97809919294127,
"count": 63144,
"self": 4.579055563914153,
"children": {
"TorchPolicy.evaluate": {
"total": 101.39904362902712,
"count": 62547,
"self": 101.39904362902712
}
}
},
"workers": {
"total": 0.8213371969413856,
"count": 63144,
"self": 0.0,
"children": {
"worker_root": {
"total": 2030.3679327029192,
"count": 63144,
"is_parallel": true,
"self": 837.1560540369408,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021352670000851504,
"count": 1,
"is_parallel": true,
"self": 0.0008559710006466048,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012792959994385456,
"count": 8,
"is_parallel": true,
"self": 0.0012792959994385456
}
}
},
"UnityEnvironment.step": {
"total": 0.04622301899985359,
"count": 1,
"is_parallel": true,
"self": 0.0003636449998793978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004470769999898039,
"count": 1,
"is_parallel": true,
"self": 0.0004470769999898039
},
"communicator.exchange": {
"total": 0.043950329999915994,
"count": 1,
"is_parallel": true,
"self": 0.043950329999915994
},
"steps_from_proto": {
"total": 0.0014619670000683982,
"count": 1,
"is_parallel": true,
"self": 0.00034252900059073,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011194379994776682,
"count": 8,
"is_parallel": true,
"self": 0.0011194379994776682
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1193.2118786659785,
"count": 63143,
"is_parallel": true,
"self": 30.89016477000837,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.797925846957924,
"count": 63143,
"is_parallel": true,
"self": 22.797925846957924
},
"communicator.exchange": {
"total": 1049.6650756840163,
"count": 63143,
"is_parallel": true,
"self": 1049.6650756840163
},
"steps_from_proto": {
"total": 89.85871236499588,
"count": 63143,
"is_parallel": true,
"self": 19.06462322102857,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.79408914396731,
"count": 505144,
"is_parallel": true,
"self": 70.79408914396731
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 611.6441790619576,
"count": 63144,
"self": 2.37041552600067,
"children": {
"process_trajectory": {
"total": 112.59317905096009,
"count": 63144,
"self": 112.38349474695974,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20968430400034777,
"count": 2,
"self": 0.20968430400034777
}
}
},
"_update_policy": {
"total": 496.68058448499687,
"count": 434,
"self": 315.68082650801944,
"children": {
"TorchPPOOptimizer.update": {
"total": 180.99975797697743,
"count": 22812,
"self": 180.99975797697743
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.309998520417139e-07,
"count": 1,
"self": 8.309998520417139e-07
},
"TrainerController._save_models": {
"total": 0.09150520899993353,
"count": 1,
"self": 0.0014545419999194564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09005066700001407,
"count": 1,
"self": 0.09005066700001407
}
}
}
}
}
}
}