{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5698174238204956,
"min": 0.5698174238204956,
"max": 1.4425628185272217,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17149.224609375,
"min": 17149.224609375,
"max": 43761.5859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989930.0,
"min": 29999.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989930.0,
"min": 29999.0,
"max": 989930.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.20035213232040405,
"min": -0.12577135860919952,
"max": 0.25842031836509705,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 50.889442443847656,
"min": -30.310895919799805,
"max": 66.15560150146484,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.056648217141628265,
"min": -0.056648217141628265,
"max": 0.42602410912513733,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -14.388647079467773,
"min": -14.388647079467773,
"max": 101.39373779296875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06754398916936702,
"min": 0.06491427004245155,
"max": 0.07267964011338161,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9456158483711383,
"min": 0.5459778077402202,
"max": 1.0439964044809469,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010748632971669957,
"min": 0.0007395420238763296,
"max": 0.012239639442885251,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1504808616033794,
"min": 0.00827360357536779,
"max": 0.1713549522003935,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.58381890066429e-06,
"min": 7.58381890066429e-06,
"max": 0.00029500575166475,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010617346460930006,
"min": 0.00010617346460930006,
"max": 0.003255975514674899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252790714285716,
"min": 0.10252790714285716,
"max": 0.19833524999999996,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353907000000001,
"min": 1.4353907000000001,
"max": 2.4428687,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002625379235714288,
"min": 0.0002625379235714288,
"max": 0.009833691475,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036755309300000026,
"min": 0.0036755309300000026,
"max": 0.10855397749000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014334074221551418,
"min": 0.01417706348001957,
"max": 0.5550456047058105,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2006770372390747,
"min": 0.19847889244556427,
"max": 4.440364837646484,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 592.7924528301887,
"min": 572.0,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31418.0,
"min": 16158.0,
"max": 34263.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7655093976349201,
"min": -1.0000000521540642,
"max": 1.0866851563806887,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 40.571998074650764,
"min": -32.000001668930054,
"max": 58.68099844455719,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7655093976349201,
"min": -1.0000000521540642,
"max": 1.0866851563806887,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 40.571998074650764,
"min": -32.000001668930054,
"max": 58.68099844455719,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08756237174452937,
"min": 0.08454849987282176,
"max": 11.402140800567235,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.640805702460057,
"min": 4.3793974336003885,
"max": 193.83639360964298,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742468233",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742470331"
},
"total": 2097.316742059,
"count": 1,
"self": 0.4756840129994089,
"children": {
"run_training.setup": {
"total": 0.019198450999965644,
"count": 1,
"self": 0.019198450999965644
},
"TrainerController.start_learning": {
"total": 2096.821859595,
"count": 1,
"self": 1.2852677898504226,
"children": {
"TrainerController._reset_env": {
"total": 2.110498382999822,
"count": 1,
"self": 2.110498382999822
},
"TrainerController.advance": {
"total": 2093.33040271915,
"count": 63329,
"self": 1.35192169218044,
"children": {
"env_step": {
"total": 1428.9610587799461,
"count": 63329,
"self": 1280.7124160308645,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.52215137498683,
"count": 63329,
"self": 4.530815396937442,
"children": {
"TorchPolicy.evaluate": {
"total": 142.9913359780494,
"count": 62570,
"self": 142.9913359780494
}
}
},
"workers": {
"total": 0.7264913740948487,
"count": 63329,
"self": 0.0,
"children": {
"worker_root": {
"total": 2092.4390858220177,
"count": 63329,
"is_parallel": true,
"self": 919.2057557180372,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020065579999481997,
"count": 1,
"is_parallel": true,
"self": 0.0007123699997464428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001294188000201757,
"count": 8,
"is_parallel": true,
"self": 0.001294188000201757
}
}
},
"UnityEnvironment.step": {
"total": 0.053939465999974345,
"count": 1,
"is_parallel": true,
"self": 0.0005187389999719016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004675499999393651,
"count": 1,
"is_parallel": true,
"self": 0.0004675499999393651
},
"communicator.exchange": {
"total": 0.05125669399990329,
"count": 1,
"is_parallel": true,
"self": 0.05125669399990329
},
"steps_from_proto": {
"total": 0.0016964830001597875,
"count": 1,
"is_parallel": true,
"self": 0.0003780819999974483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013184010001623392,
"count": 8,
"is_parallel": true,
"self": 0.0013184010001623392
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.2333301039805,
"count": 63328,
"is_parallel": true,
"self": 31.26716715395878,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.82900604507313,
"count": 63328,
"is_parallel": true,
"self": 22.82900604507313
},
"communicator.exchange": {
"total": 1024.625767373964,
"count": 63328,
"is_parallel": true,
"self": 1024.625767373964
},
"steps_from_proto": {
"total": 94.51138953098462,
"count": 63328,
"is_parallel": true,
"self": 18.93692297001826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.57446656096636,
"count": 506624,
"is_parallel": true,
"self": 75.57446656096636
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 663.017422247023,
"count": 63329,
"self": 2.409980374038696,
"children": {
"process_trajectory": {
"total": 124.5537598599849,
"count": 63329,
"self": 124.34986303598498,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20389682399991216,
"count": 2,
"self": 0.20389682399991216
}
}
},
"_update_policy": {
"total": 536.0536820129994,
"count": 449,
"self": 294.9333767510291,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.12030526197032,
"count": 22755,
"self": 241.12030526197032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.989998943638057e-07,
"count": 1,
"self": 9.989998943638057e-07
},
"TrainerController._save_models": {
"total": 0.09568970400005128,
"count": 1,
"self": 0.0015359399999397283,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09415376400011155,
"count": 1,
"self": 0.09415376400011155
}
}
}
}
}
}
}