{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7622014880180359,
"min": 0.7622014880180359,
"max": 1.4947810173034668,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 22866.044921875,
"min": 22866.044921875,
"max": 45345.67578125,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479958.0,
"min": 29952.0,
"max": 479958.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479958.0,
"min": 29952.0,
"max": 479958.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.11931455135345459,
"min": -0.10810833424329758,
"max": 0.11931455135345459,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 29.470693588256836,
"min": -26.054107666015625,
"max": 29.470693588256836,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02010197564959526,
"min": 0.016924135386943817,
"max": 0.33674222230911255,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.965188026428223,
"min": 4.163337230682373,
"max": 79.80790710449219,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06957831286629475,
"min": 0.06743640038898559,
"max": 0.07298853299369869,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9740963801281265,
"min": 0.49715757342028677,
"max": 1.0224803745091493,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00888321748767201,
"min": 0.00041434131570072444,
"max": 0.00888321748767201,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.12436504482740814,
"min": 0.004143413157007244,
"max": 0.12436504482740814,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.1146407236942858e-05,
"min": 2.1146407236942858e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002960497013172,
"min": 0.0002960497013172,
"max": 0.0028181530606158005,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10704877142857143,
"min": 0.10704877142857143,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4986828,
"min": 1.3773696000000002,
"max": 2.3393842,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007141722657142861,
"min": 0.0007141722657142861,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009998411720000005,
"min": 0.009998411720000005,
"max": 0.09398448157999999,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01726623997092247,
"min": 0.01726623997092247,
"max": 0.4138633608818054,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24172736704349518,
"min": 0.24172736704349518,
"max": 2.897043466567993,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 684.95,
"min": 684.95,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27398.0,
"min": 15984.0,
"max": 32856.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.5148349590599537,
"min": -1.0000000521540642,
"max": 0.5148349590599537,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 20.593398362398148,
"min": -31.998801663517952,
"max": 20.593398362398148,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.5148349590599537,
"min": -1.0000000521540642,
"max": 0.5148349590599537,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 20.593398362398148,
"min": -31.998801663517952,
"max": 20.593398362398148,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12102305980224629,
"min": 0.12102305980224629,
"max": 8.450660973787308,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.840922392089851,
"min": 4.840922392089851,
"max": 135.21057558059692,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679810446",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679811465"
},
"total": 1019.049810386,
"count": 1,
"self": 0.4790944699998363,
"children": {
"run_training.setup": {
"total": 0.11295210300022518,
"count": 1,
"self": 0.11295210300022518
},
"TrainerController.start_learning": {
"total": 1018.4577638129999,
"count": 1,
"self": 0.6879317299494687,
"children": {
"TrainerController._reset_env": {
"total": 6.233225589000085,
"count": 1,
"self": 6.233225589000085
},
"TrainerController.advance": {
"total": 1011.4427589380507,
"count": 31617,
"self": 0.7098029719404622,
"children": {
"env_step": {
"total": 692.4130513320588,
"count": 31617,
"self": 638.2096046250845,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.80304936900984,
"count": 31617,
"self": 2.3255115189690514,
"children": {
"TorchPolicy.evaluate": {
"total": 51.47753785004079,
"count": 31301,
"self": 51.47753785004079
}
}
},
"workers": {
"total": 0.40039733796447763,
"count": 31617,
"self": 0.0,
"children": {
"worker_root": {
"total": 1016.3022656439971,
"count": 31617,
"is_parallel": true,
"self": 433.40431974001876,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020456089996514493,
"count": 1,
"is_parallel": true,
"self": 0.0006285420008680376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014170669987834117,
"count": 8,
"is_parallel": true,
"self": 0.0014170669987834117
}
}
},
"UnityEnvironment.step": {
"total": 0.05060372600019036,
"count": 1,
"is_parallel": true,
"self": 0.000529204000486061,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005144489996382617,
"count": 1,
"is_parallel": true,
"self": 0.0005144489996382617
},
"communicator.exchange": {
"total": 0.048002083000028506,
"count": 1,
"is_parallel": true,
"self": 0.048002083000028506
},
"steps_from_proto": {
"total": 0.0015579900000375346,
"count": 1,
"is_parallel": true,
"self": 0.00035045100003117113,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012075390000063635,
"count": 8,
"is_parallel": true,
"self": 0.0012075390000063635
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 582.8979459039783,
"count": 31616,
"is_parallel": true,
"self": 15.498552362925693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.705513485012489,
"count": 31616,
"is_parallel": true,
"self": 11.705513485012489
},
"communicator.exchange": {
"total": 510.2215349060202,
"count": 31616,
"is_parallel": true,
"self": 510.2215349060202
},
"steps_from_proto": {
"total": 45.47234515001992,
"count": 31616,
"is_parallel": true,
"self": 9.530273194005531,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.94207195601439,
"count": 252928,
"is_parallel": true,
"self": 35.94207195601439
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 318.31990463405145,
"count": 31617,
"self": 1.1105961770404065,
"children": {
"process_trajectory": {
"total": 58.72250292601257,
"count": 31617,
"self": 58.61604151901247,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1064614070000971,
"count": 1,
"self": 0.1064614070000971
}
}
},
"_update_policy": {
"total": 258.4868055309985,
"count": 209,
"self": 163.51999171598482,
"children": {
"TorchPPOOptimizer.update": {
"total": 94.96681381501367,
"count": 11403,
"self": 94.96681381501367
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.440000212634914e-07,
"count": 1,
"self": 9.440000212634914e-07
},
"TrainerController._save_models": {
"total": 0.09384661199965194,
"count": 1,
"self": 0.0014019869995536283,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09244462500009831,
"count": 1,
"self": 0.09244462500009831
}
}
}
}
}
}
}