{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5927454829216003,
"min": 0.5927454829216003,
"max": 1.452760934829712,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17706.4921875,
"min": 17706.4921875,
"max": 44070.95703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989906.0,
"min": 29952.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989906.0,
"min": 29952.0,
"max": 989906.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19176076352596283,
"min": -0.09941884875297546,
"max": 0.21609137952327728,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 48.89899444580078,
"min": -23.959941864013672,
"max": 54.671119689941406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.04191944748163223,
"min": -0.1827625036239624,
"max": 0.3753773868083954,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -10.689458847045898,
"min": -46.238914489746094,
"max": 88.96443939208984,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06878598744710984,
"min": 0.06499108200831175,
"max": 0.07537872847369678,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9630038242595378,
"min": 0.4941736328336624,
"max": 1.0733031960165438,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011718092248881656,
"min": 6.754747757999555e-05,
"max": 0.016625062556465144,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1640532914843432,
"min": 0.0008781172085399422,
"max": 0.24937593834697716,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.453683229757138e-06,
"min": 7.453683229757138e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010435156521659993,
"min": 0.00010435156521659993,
"max": 0.0035073488308837992,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248452857142858,
"min": 0.10248452857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347834000000002,
"min": 1.3886848,
"max": 2.5691162,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002582044042857141,
"min": 0.0002582044042857141,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036148616599999974,
"min": 0.0036148616599999974,
"max": 0.11693470838,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010299699380993843,
"min": 0.009478597901761532,
"max": 0.3961232304573059,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1441957950592041,
"min": 0.1327003687620163,
"max": 2.772862672805786,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 571.1,
"min": 571.1,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28555.0,
"min": 15984.0,
"max": 33541.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8287239743769169,
"min": -1.0000000521540642,
"max": 0.86749410205612,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 41.436198718845844,
"min": -29.662001617252827,
"max": 44.24219920486212,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8287239743769169,
"min": -1.0000000521540642,
"max": 0.86749410205612,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 41.436198718845844,
"min": -29.662001617252827,
"max": 44.24219920486212,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06187239394610515,
"min": 0.06187239394610515,
"max": 7.601030504330993,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0936196973052574,
"min": 2.880719599343138,
"max": 121.61648806929588,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687469652",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687471717"
},
"total": 2065.069347914,
"count": 1,
"self": 0.48867584600020564,
"children": {
"run_training.setup": {
"total": 0.04091304199999968,
"count": 1,
"self": 0.04091304199999968
},
"TrainerController.start_learning": {
"total": 2064.539759026,
"count": 1,
"self": 1.2579615349945925,
"children": {
"TrainerController._reset_env": {
"total": 4.272525770000016,
"count": 1,
"self": 4.272525770000016
},
"TrainerController.advance": {
"total": 2058.915385005005,
"count": 63364,
"self": 1.2925478439665312,
"children": {
"env_step": {
"total": 1422.766875102028,
"count": 63364,
"self": 1316.1694399200198,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.85858768297976,
"count": 63364,
"self": 4.572296290018755,
"children": {
"TorchPolicy.evaluate": {
"total": 101.286291392961,
"count": 62556,
"self": 101.286291392961
}
}
},
"workers": {
"total": 0.7388474990284521,
"count": 63364,
"self": 0.0,
"children": {
"worker_root": {
"total": 2059.876323041987,
"count": 63364,
"is_parallel": true,
"self": 852.1608897919875,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006677615999990394,
"count": 1,
"is_parallel": true,
"self": 0.005307806999951481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001369809000038913,
"count": 8,
"is_parallel": true,
"self": 0.001369809000038913
}
}
},
"UnityEnvironment.step": {
"total": 0.05084357200001932,
"count": 1,
"is_parallel": true,
"self": 0.000631691000023693,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005054569999742853,
"count": 1,
"is_parallel": true,
"self": 0.0005054569999742853
},
"communicator.exchange": {
"total": 0.04784047600003305,
"count": 1,
"is_parallel": true,
"self": 0.04784047600003305
},
"steps_from_proto": {
"total": 0.0018659479999882933,
"count": 1,
"is_parallel": true,
"self": 0.0003489590001208853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001516988999867408,
"count": 8,
"is_parallel": true,
"self": 0.001516988999867408
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1207.7154332499993,
"count": 63363,
"is_parallel": true,
"self": 32.647147088046495,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.71731862400094,
"count": 63363,
"is_parallel": true,
"self": 21.71731862400094
},
"communicator.exchange": {
"total": 1056.2559336779757,
"count": 63363,
"is_parallel": true,
"self": 1056.2559336779757
},
"steps_from_proto": {
"total": 97.09503385997624,
"count": 63363,
"is_parallel": true,
"self": 18.576198878972093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.51883498100415,
"count": 506904,
"is_parallel": true,
"self": 78.51883498100415
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.8559620590106,
"count": 63364,
"self": 2.449839063023319,
"children": {
"process_trajectory": {
"total": 105.17589807798532,
"count": 63364,
"self": 104.91589518098505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2600028970002768,
"count": 2,
"self": 0.2600028970002768
}
}
},
"_update_policy": {
"total": 527.230224918002,
"count": 449,
"self": 338.990401202005,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.23982371599703,
"count": 22833,
"self": 188.23982371599703
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3899998521083035e-06,
"count": 1,
"self": 1.3899998521083035e-06
},
"TrainerController._save_models": {
"total": 0.09388532600041799,
"count": 1,
"self": 0.001457357000617776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09242796899980021,
"count": 1,
"self": 0.09242796899980021
}
}
}
}
}
}
}