{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4416777491569519,
"min": 0.4416777491569519,
"max": 1.479617714881897,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13335.134765625,
"min": 13335.134765625,
"max": 44885.68359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989960.0,
"min": 29952.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989960.0,
"min": 29952.0,
"max": 989960.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5572459101676941,
"min": -0.11181166023015976,
"max": 0.5572459101676941,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 156.0288543701172,
"min": -27.058422088623047,
"max": 156.0288543701172,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005018020514398813,
"min": -0.010089101269841194,
"max": 0.21104928851127625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.405045747756958,
"min": -2.713968276977539,
"max": 50.65182876586914,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06539313033313697,
"min": 0.06436432344378859,
"max": 0.07502054994482696,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9808969549970546,
"min": 0.47931415063568894,
"max": 1.0580783874651316,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016923402143422413,
"min": 0.0009923664614992305,
"max": 0.016923402143422413,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2538510321513362,
"min": 0.010916031076491535,
"max": 0.2538510321513362,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4951975016333324e-06,
"min": 7.4951975016333324e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011242796252449999,
"min": 0.00011242796252449999,
"max": 0.0032566961144346995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249836666666669,
"min": 0.10249836666666669,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374755000000002,
"min": 1.3886848,
"max": 2.5275387,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002595868299999999,
"min": 0.0002595868299999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038938024499999987,
"min": 0.0038938024499999987,
"max": 0.10857797347,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0073545570485293865,
"min": 0.006939154118299484,
"max": 0.37552642822265625,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11031835526227951,
"min": 0.09714815765619278,
"max": 2.6286849975585938,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 318.3010752688172,
"min": 318.3010752688172,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29602.0,
"min": 15984.0,
"max": 33833.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6171612709920893,
"min": -1.0000000521540642,
"max": 1.6171612709920893,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 150.3959982022643,
"min": -31.999601677060127,
"max": 150.3959982022643,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6171612709920893,
"min": -1.0000000521540642,
"max": 1.6171612709920893,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 150.3959982022643,
"min": -31.999601677060127,
"max": 150.3959982022643,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.024508718781421083,
"min": 0.024508718781421083,
"max": 7.8514409037306905,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.2793108466721606,
"min": 2.108992171521095,
"max": 125.62305445969105,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682443652",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682446337"
},
"total": 2685.616686757,
"count": 1,
"self": 0.43924392299959436,
"children": {
"run_training.setup": {
"total": 0.09988479700041353,
"count": 1,
"self": 0.09988479700041353
},
"TrainerController.start_learning": {
"total": 2685.077558037,
"count": 1,
"self": 1.92493240105523,
"children": {
"TrainerController._reset_env": {
"total": 0.7986447540006338,
"count": 1,
"self": 0.7986447540006338
},
"TrainerController.advance": {
"total": 2682.276490483944,
"count": 63993,
"self": 1.7712543108300451,
"children": {
"env_step": {
"total": 1551.0266571381062,
"count": 63993,
"self": 1420.490764406848,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.29176634126088,
"count": 63993,
"self": 4.558439135093067,
"children": {
"TorchPolicy.evaluate": {
"total": 124.73332720616781,
"count": 62572,
"self": 124.73332720616781
}
}
},
"workers": {
"total": 1.244126389997291,
"count": 63993,
"self": 0.0,
"children": {
"worker_root": {
"total": 2679.9082534698955,
"count": 63993,
"is_parallel": true,
"self": 1386.8890398986587,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003500740000163205,
"count": 1,
"is_parallel": true,
"self": 0.0008780100006333669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002622729999529838,
"count": 8,
"is_parallel": true,
"self": 0.002622729999529838
}
}
},
"UnityEnvironment.step": {
"total": 0.0600206689996412,
"count": 1,
"is_parallel": true,
"self": 0.0006200999987413525,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005974599998808117,
"count": 1,
"is_parallel": true,
"self": 0.0005974599998808117
},
"communicator.exchange": {
"total": 0.056597319000502466,
"count": 1,
"is_parallel": true,
"self": 0.056597319000502466
},
"steps_from_proto": {
"total": 0.002205790000516572,
"count": 1,
"is_parallel": true,
"self": 0.0005607299999610404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016450600005555316,
"count": 8,
"is_parallel": true,
"self": 0.0016450600005555316
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1293.0192135712368,
"count": 63992,
"is_parallel": true,
"self": 33.75553056911576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.99033287214843,
"count": 63992,
"is_parallel": true,
"self": 18.99033287214843
},
"communicator.exchange": {
"total": 1144.8941080941486,
"count": 63992,
"is_parallel": true,
"self": 1144.8941080941486
},
"steps_from_proto": {
"total": 95.37924203582406,
"count": 63992,
"is_parallel": true,
"self": 21.21593925208981,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.16330278373425,
"count": 511936,
"is_parallel": true,
"self": 74.16330278373425
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1129.478579035008,
"count": 63993,
"self": 3.9275680691807793,
"children": {
"process_trajectory": {
"total": 125.2479259508309,
"count": 63993,
"self": 125.02865221383217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21927373699872987,
"count": 2,
"self": 0.21927373699872987
}
}
},
"_update_policy": {
"total": 1000.3030850149962,
"count": 445,
"self": 289.39026563901643,
"children": {
"TorchPPOOptimizer.update": {
"total": 710.9128193759798,
"count": 22815,
"self": 710.9128193759798
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.799997885944322e-07,
"count": 1,
"self": 8.799997885944322e-07
},
"TrainerController._save_models": {
"total": 0.07748951800022041,
"count": 1,
"self": 0.0015975599999364931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07589195800028392,
"count": 1,
"self": 0.07589195800028392
}
}
}
}
}
}
}