{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8152309060096741,
"min": 0.8152309060096741,
"max": 1.437196135520935,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 24417.796875,
"min": 24417.796875,
"max": 43598.78125,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299972.0,
"min": 29952.0,
"max": 299972.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299972.0,
"min": 29952.0,
"max": 299972.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09126388281583786,
"min": -0.1276295930147171,
"max": -0.02452116273343563,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.903331756591797,
"min": -30.631103515625,
"max": -5.909600257873535,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027795342728495598,
"min": 0.027795342728495598,
"max": 0.2621821165084839,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.670882225036621,
"min": 6.670882225036621,
"max": 62.923709869384766,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06778884891422886,
"min": 0.0669969811358289,
"max": 0.07124440591736071,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.949043884799204,
"min": 0.498710841421525,
"max": 0.972430426251901,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0011166394092201866,
"min": 0.0002213806678322293,
"max": 0.006897937184804729,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.015632951729082613,
"min": 0.0024351873461545223,
"max": 0.0482855602936331,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5235594921499998e-05,
"min": 1.5235594921499998e-05,
"max": 0.0002838354339596191,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00021329832890099997,
"min": 0.00021329832890099997,
"max": 0.002246072251309333,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1050785,
"min": 0.1050785,
"max": 0.19461180952380958,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4710990000000002,
"min": 1.362282666666667,
"max": 2.030262666666667,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005173421500000001,
"min": 0.0005173421500000001,
"max": 0.00946171977142857,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.007242790100000001,
"min": 0.007242790100000001,
"max": 0.0748941976,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02649138681590557,
"min": 0.02649138681590557,
"max": 0.4339198172092438,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3708794116973877,
"min": 0.3708794116973877,
"max": 3.0374386310577393,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 968.6060606060606,
"min": 935.15625,
"max": 999.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31964.0,
"min": 15984.0,
"max": 32710.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8432813030667603,
"min": -1.0000000521540642,
"max": -0.575014867440418,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.98500169813633,
"min": -30.999201610684395,
"max": -15.525401420891285,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8432813030667603,
"min": -1.0000000521540642,
"max": -0.575014867440418,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.98500169813633,
"min": -30.999201610684395,
"max": -15.525401420891285,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.2738676644221414,
"min": 0.2738676644221414,
"max": 9.160875851288438,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.763765261508524,
"min": 8.28425006614998,
"max": 146.574013620615,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1774977141",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/envs/mlagagents_env/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1774977748"
},
"total": 606.9129199470003,
"count": 1,
"self": 0.8038385650006603,
"children": {
"run_training.setup": {
"total": 0.026060799999868323,
"count": 1,
"self": 0.026060799999868323
},
"TrainerController.start_learning": {
"total": 606.0830205819998,
"count": 1,
"self": 0.33793641804732033,
"children": {
"TrainerController._reset_env": {
"total": 2.4863899209999545,
"count": 1,
"self": 2.4863899209999545
},
"TrainerController.advance": {
"total": 603.1090291959526,
"count": 18912,
"self": 0.35363654894854335,
"children": {
"env_step": {
"total": 412.9961225230031,
"count": 18912,
"self": 368.33733089498264,
"children": {
"SubprocessEnvManager._take_step": {
"total": 44.44815019101725,
"count": 18912,
"self": 1.29031875804003,
"children": {
"TorchPolicy.evaluate": {
"total": 43.15783143297722,
"count": 18821,
"self": 43.15783143297722
}
}
},
"workers": {
"total": 0.21064143700323257,
"count": 18912,
"self": 0.0,
"children": {
"worker_root": {
"total": 603.6563013390055,
"count": 18912,
"is_parallel": true,
"self": 269.08942739098984,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017151470001408597,
"count": 1,
"is_parallel": true,
"self": 0.0005347710002752137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001180375999865646,
"count": 8,
"is_parallel": true,
"self": 0.001180375999865646
}
}
},
"UnityEnvironment.step": {
"total": 0.04720998700031487,
"count": 1,
"is_parallel": true,
"self": 0.000542467000286706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047370199990837136,
"count": 1,
"is_parallel": true,
"self": 0.00047370199990837136
},
"communicator.exchange": {
"total": 0.04457522599977892,
"count": 1,
"is_parallel": true,
"self": 0.04457522599977892
},
"steps_from_proto": {
"total": 0.0016185920003408683,
"count": 1,
"is_parallel": true,
"self": 0.0003913620007551799,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012272299995856883,
"count": 8,
"is_parallel": true,
"self": 0.0012272299995856883
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 334.56687394801565,
"count": 18911,
"is_parallel": true,
"self": 9.939385102084998,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.759905776940286,
"count": 18911,
"is_parallel": true,
"self": 6.759905776940286
},
"communicator.exchange": {
"total": 286.37858040300216,
"count": 18911,
"is_parallel": true,
"self": 286.37858040300216
},
"steps_from_proto": {
"total": 31.48900266598821,
"count": 18911,
"is_parallel": true,
"self": 6.601600551946831,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.88740211404138,
"count": 151288,
"is_parallel": true,
"self": 24.88740211404138
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 189.75927012400098,
"count": 18912,
"self": 0.5216219530357193,
"children": {
"process_trajectory": {
"total": 34.719873870965785,
"count": 18912,
"self": 34.719873870965785
},
"_update_policy": {
"total": 154.51777429999947,
"count": 116,
"self": 85.48118848700506,
"children": {
"TorchPPOOptimizer.update": {
"total": 69.03658581299442,
"count": 6864,
"self": 69.03658581299442
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0190001376031432e-06,
"count": 1,
"self": 1.0190001376031432e-06
},
"TrainerController._save_models": {
"total": 0.14966402799973366,
"count": 1,
"self": 0.0012621479995686968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14840188000016497,
"count": 1,
"self": 0.14840188000016497
}
}
}
}
}
}
}