{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37851184606552124,
"min": 0.3752221465110779,
"max": 1.5031319856643677,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11379.580078125,
"min": 11124.5859375,
"max": 45599.01171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5378025770187378,
"min": -0.2273787558078766,
"max": 0.5378025770187378,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 147.35791015625,
"min": -53.888763427734375,
"max": 147.35791015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007671942003071308,
"min": -0.04471804201602936,
"max": 0.3118646442890167,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.102112054824829,
"min": -12.163307189941406,
"max": 73.91191864013672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06823058169434912,
"min": 0.06386201131923953,
"max": 0.07402533408533975,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9552281437208877,
"min": 0.4955372136406033,
"max": 1.0507477650392627,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015349060517716376,
"min": 0.0001241916693842741,
"max": 0.01604114818716577,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21488684724802926,
"min": 0.0016144917019955633,
"max": 0.22457607462032078,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.626911743442854e-06,
"min": 7.626911743442854e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010677676440819996,
"min": 0.00010677676440819996,
"max": 0.003375774774741799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254227142857142,
"min": 0.10254227142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355917999999999,
"min": 1.3886848,
"max": 2.4252582,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002639729157142856,
"min": 0.0002639729157142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036956208199999986,
"min": 0.0036956208199999986,
"max": 0.11254329418,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007933268323540688,
"min": 0.007933268323540688,
"max": 0.34015053510665894,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11106576025485992,
"min": 0.11106576025485992,
"max": 2.3810536861419678,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 353.35714285714283,
"min": 350.94117647058823,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29682.0,
"min": 15984.0,
"max": 32339.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5990142612939788,
"min": -1.0000000521540642,
"max": 1.5990142612939788,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.31719794869423,
"min": -31.998801693320274,
"max": 134.31719794869423,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5990142612939788,
"min": -1.0000000521540642,
"max": 1.5990142612939788,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.31719794869423,
"min": -31.998801693320274,
"max": 134.31719794869423,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028750281483336863,
"min": 0.028750281483336863,
"max": 7.313518545590341,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4150236446002964,
"min": 2.4150236446002964,
"max": 117.01629672944546,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689822176",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689824363"
},
"total": 2186.711004625,
"count": 1,
"self": 0.48269131200004267,
"children": {
"run_training.setup": {
"total": 0.0475750670002526,
"count": 1,
"self": 0.0475750670002526
},
"TrainerController.start_learning": {
"total": 2186.1807382459997,
"count": 1,
"self": 1.342350037979486,
"children": {
"TrainerController._reset_env": {
"total": 5.1781724290003694,
"count": 1,
"self": 5.1781724290003694
},
"TrainerController.advance": {
"total": 2179.564462146019,
"count": 63625,
"self": 1.3837121629612739,
"children": {
"env_step": {
"total": 1504.620007562948,
"count": 63625,
"self": 1396.8218963300756,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.00243876199784,
"count": 63625,
"self": 4.71773182105062,
"children": {
"TorchPolicy.evaluate": {
"total": 102.28470694094722,
"count": 62562,
"self": 102.28470694094722
}
}
},
"workers": {
"total": 0.795672470874706,
"count": 63625,
"self": 0.0,
"children": {
"worker_root": {
"total": 2181.3918014019696,
"count": 63625,
"is_parallel": true,
"self": 895.5113775699751,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025771829996301676,
"count": 1,
"is_parallel": true,
"self": 0.0007210119993033004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018561710003268672,
"count": 8,
"is_parallel": true,
"self": 0.0018561710003268672
}
}
},
"UnityEnvironment.step": {
"total": 0.0768943240000226,
"count": 1,
"is_parallel": true,
"self": 0.0007364729999608244,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005928890000177489,
"count": 1,
"is_parallel": true,
"self": 0.0005928890000177489
},
"communicator.exchange": {
"total": 0.07347017800020694,
"count": 1,
"is_parallel": true,
"self": 0.07347017800020694
},
"steps_from_proto": {
"total": 0.002094783999837091,
"count": 1,
"is_parallel": true,
"self": 0.0004342669994912285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016605170003458625,
"count": 8,
"is_parallel": true,
"self": 0.0016605170003458625
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1285.8804238319944,
"count": 63624,
"is_parallel": true,
"self": 34.021857145067315,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.41696516900629,
"count": 63624,
"is_parallel": true,
"self": 23.41696516900629
},
"communicator.exchange": {
"total": 1128.2645958019393,
"count": 63624,
"is_parallel": true,
"self": 1128.2645958019393
},
"steps_from_proto": {
"total": 100.17700571598152,
"count": 63624,
"is_parallel": true,
"self": 19.82227824415895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.35472747182257,
"count": 508992,
"is_parallel": true,
"self": 80.35472747182257
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 673.5607424201098,
"count": 63625,
"self": 2.308553580149237,
"children": {
"process_trajectory": {
"total": 109.8469799499594,
"count": 63625,
"self": 109.64408902696005,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20289092299935874,
"count": 2,
"self": 0.20289092299935874
}
}
},
"_update_policy": {
"total": 561.4052088900012,
"count": 443,
"self": 365.2058633520478,
"children": {
"TorchPPOOptimizer.update": {
"total": 196.19934553795338,
"count": 22827,
"self": 196.19934553795338
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.820006769383326e-07,
"count": 1,
"self": 8.820006769383326e-07
},
"TrainerController._save_models": {
"total": 0.09575275099996361,
"count": 1,
"self": 0.0012842859996453626,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09446846500031825,
"count": 1,
"self": 0.09446846500031825
}
}
}
}
}
}
}