{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47353658080101013,
"min": 0.47353658080101013,
"max": 1.4519610404968262,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14213.673828125,
"min": 14128.1923828125,
"max": 44046.69140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989919.0,
"min": 29897.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989919.0,
"min": 29897.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40372467041015625,
"min": -0.10758207738399506,
"max": 0.4199679493904114,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 108.19821166992188,
"min": -25.92728042602539,
"max": 112.13143920898438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -1.747574806213379,
"min": -1.747574806213379,
"max": 0.4807884693145752,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -468.35003662109375,
"min": -468.35003662109375,
"max": 113.94686889648438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06987175734466494,
"min": 0.06622611479840244,
"max": 0.07398406066797583,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9782046028253092,
"min": 0.5033006010745683,
"max": 1.0681712726974522,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.42207790994938427,
"min": 0.0003429664413930363,
"max": 0.42207790994938427,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 5.90909073929138,
"min": 0.004801530179502508,
"max": 5.90909073929138,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.554483196157139e-06,
"min": 7.554483196157139e-06,
"max": 0.0002952352730168143,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010576276474619994,
"min": 0.00010576276474619994,
"max": 0.003331755689414799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251812857142859,
"min": 0.10251812857142859,
"max": 0.19841175714285714,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352538000000001,
"min": 1.3888823,
"max": 2.5277117000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026156104428571415,
"min": 0.00026156104428571415,
"max": 0.009841334538571429,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036618546199999984,
"min": 0.0036618546199999984,
"max": 0.11106746148,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01011080015450716,
"min": 0.01011080015450716,
"max": 0.42718544602394104,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1415511965751648,
"min": 0.1415511965751648,
"max": 2.99029803276062,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 427.57142857142856,
"min": 427.57142857142856,
"max": 996.3125,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29930.0,
"min": 16696.0,
"max": 33599.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4009599696312631,
"min": -0.9347313002217561,
"max": 1.4009599696312631,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 98.06719787418842,
"min": -29.911401607096195,
"max": 98.84119851142168,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4009599696312631,
"min": -0.9347313002217561,
"max": 1.4009599696312631,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 98.06719787418842,
"min": -29.911401607096195,
"max": 98.84119851142168,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.045105562023153266,
"min": 0.045105562023153266,
"max": 9.413788940100108,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1573893416207284,
"min": 3.1573893416207284,
"max": 160.03441198170185,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683999957",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684002078"
},
"total": 2120.334343196,
"count": 1,
"self": 0.4751523140007521,
"children": {
"run_training.setup": {
"total": 0.037476475999937975,
"count": 1,
"self": 0.037476475999937975
},
"TrainerController.start_learning": {
"total": 2119.8217144059995,
"count": 1,
"self": 1.6343922130122337,
"children": {
"TrainerController._reset_env": {
"total": 3.784178335999968,
"count": 1,
"self": 3.784178335999968
},
"TrainerController.advance": {
"total": 2114.312077743987,
"count": 63492,
"self": 1.5902375030468647,
"children": {
"env_step": {
"total": 1466.6501948949804,
"count": 63492,
"self": 1347.3609882509036,
"children": {
"SubprocessEnvManager._take_step": {
"total": 118.35331251702928,
"count": 63492,
"self": 5.05964539703109,
"children": {
"TorchPolicy.evaluate": {
"total": 113.29366711999819,
"count": 62557,
"self": 113.29366711999819
}
}
},
"workers": {
"total": 0.9358941270475043,
"count": 63492,
"self": 0.0,
"children": {
"worker_root": {
"total": 2114.343356566943,
"count": 63492,
"is_parallel": true,
"self": 886.661819986944,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017414379999536322,
"count": 1,
"is_parallel": true,
"self": 0.0005408349998106132,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001200603000143019,
"count": 8,
"is_parallel": true,
"self": 0.001200603000143019
}
}
},
"UnityEnvironment.step": {
"total": 0.047490417000062735,
"count": 1,
"is_parallel": true,
"self": 0.0005182050008443184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004964109998581989,
"count": 1,
"is_parallel": true,
"self": 0.0004964109998581989
},
"communicator.exchange": {
"total": 0.04442400199968688,
"count": 1,
"is_parallel": true,
"self": 0.04442400199968688
},
"steps_from_proto": {
"total": 0.0020517989996733377,
"count": 1,
"is_parallel": true,
"self": 0.0004039679988636635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016478310008096742,
"count": 8,
"is_parallel": true,
"self": 0.0016478310008096742
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1227.681536579999,
"count": 63491,
"is_parallel": true,
"self": 32.39106075385553,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.084779710036855,
"count": 63491,
"is_parallel": true,
"self": 23.084779710036855
},
"communicator.exchange": {
"total": 1072.492355054047,
"count": 63491,
"is_parallel": true,
"self": 1072.492355054047
},
"steps_from_proto": {
"total": 99.7133410620595,
"count": 63491,
"is_parallel": true,
"self": 20.964373608922415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.74896745313708,
"count": 507928,
"is_parallel": true,
"self": 78.74896745313708
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.07164534596,
"count": 63492,
"self": 2.9929602330116722,
"children": {
"process_trajectory": {
"total": 108.67970381494433,
"count": 63492,
"self": 108.47196045994406,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2077433550002752,
"count": 2,
"self": 0.2077433550002752
}
}
},
"_update_policy": {
"total": 534.398981298004,
"count": 449,
"self": 343.03543414700516,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.36354715099878,
"count": 22791,
"self": 191.36354715099878
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.149998732027598e-07,
"count": 1,
"self": 9.149998732027598e-07
},
"TrainerController._save_models": {
"total": 0.09106519800025126,
"count": 1,
"self": 0.0013718460004383815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08969335199981288,
"count": 1,
"self": 0.08969335199981288
}
}
}
}
}
}
}