{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.738794207572937,
"min": 0.738794207572937,
"max": 1.4520542621612549,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22199.2890625,
"min": 22199.2890625,
"max": 44049.51953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989923.0,
"min": 29913.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989923.0,
"min": 29913.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3829733729362488,
"min": -0.10160267353057861,
"max": 0.39602038264274597,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 102.6368637084961,
"min": -24.486244201660156,
"max": 104.94540405273438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.08976131677627563,
"min": -0.08976131677627563,
"max": 0.5205562710762024,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -24.056032180786133,
"min": -24.056032180786133,
"max": 123.37184143066406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06810544748911161,
"min": 0.06468911192264103,
"max": 0.07184554738201068,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0215817123366742,
"min": 0.6454602630634363,
"max": 1.0776832107301602,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014580314401312718,
"min": 0.0004623772602906033,
"max": 0.014580314401312718,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21870471601969077,
"min": 0.006010904383777843,
"max": 0.21870471601969077,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.533717488793334e-06,
"min": 7.533717488793334e-06,
"max": 0.00029478956840347777,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001130057623319,
"min": 0.0001130057623319,
"max": 0.0036090774969741992,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251120666666667,
"min": 0.10251120666666667,
"max": 0.1982631888888889,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376681,
"min": 1.4784678999999996,
"max": 2.5695717000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000260869546,
"min": 0.000260869546,
"max": 0.00982649257,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00391304319,
"min": 0.00391304319,
"max": 0.12031227742,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007368649821728468,
"min": 0.007368649821728468,
"max": 0.4125029742717743,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11052975058555603,
"min": 0.10342450439929962,
"max": 3.712526798248291,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 460.33846153846156,
"min": 460.33846153846156,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29922.0,
"min": 17607.0,
"max": 33196.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3549722760915757,
"min": -0.9999500522390008,
"max": 1.3931103209997047,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.07319794595242,
"min": -31.998401671648026,
"max": 88.07319794595242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3549722760915757,
"min": -0.9999500522390008,
"max": 1.3931103209997047,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.07319794595242,
"min": -31.998401671648026,
"max": 88.07319794595242,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03510226650754563,
"min": 0.03510226650754563,
"max": 8.346237605644596,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.281647322990466,
"min": 2.281647322990466,
"max": 150.23227690160275,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745927096",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745929168"
},
"total": 2071.555237027,
"count": 1,
"self": 0.810721678000391,
"children": {
"run_training.setup": {
"total": 0.020619382999939262,
"count": 1,
"self": 0.020619382999939262
},
"TrainerController.start_learning": {
"total": 2070.723895966,
"count": 1,
"self": 1.2870804299536758,
"children": {
"TrainerController._reset_env": {
"total": 3.0998348840000745,
"count": 1,
"self": 3.0998348840000745
},
"TrainerController.advance": {
"total": 2066.2112741260466,
"count": 63423,
"self": 1.3811120400041546,
"children": {
"env_step": {
"total": 1399.9945715180309,
"count": 63423,
"self": 1250.3491638200849,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.88243431298247,
"count": 63423,
"self": 4.457244530048683,
"children": {
"TorchPolicy.evaluate": {
"total": 144.42518978293378,
"count": 62551,
"self": 144.42518978293378
}
}
},
"workers": {
"total": 0.7629733849635159,
"count": 63423,
"self": 0.0,
"children": {
"worker_root": {
"total": 2065.7964937230654,
"count": 63423,
"is_parallel": true,
"self": 923.3793409991251,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002423921999934464,
"count": 1,
"is_parallel": true,
"self": 0.0007147649996568362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001709157000277628,
"count": 8,
"is_parallel": true,
"self": 0.001709157000277628
}
}
},
"UnityEnvironment.step": {
"total": 0.05063362700002472,
"count": 1,
"is_parallel": true,
"self": 0.0005287579999730951,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005012480000914366,
"count": 1,
"is_parallel": true,
"self": 0.0005012480000914366
},
"communicator.exchange": {
"total": 0.0480475800000022,
"count": 1,
"is_parallel": true,
"self": 0.0480475800000022
},
"steps_from_proto": {
"total": 0.001556040999957986,
"count": 1,
"is_parallel": true,
"self": 0.00034479300006751146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012112479998904746,
"count": 8,
"is_parallel": true,
"self": 0.0012112479998904746
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1142.4171527239403,
"count": 63422,
"is_parallel": true,
"self": 31.18260735005515,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.463243120928382,
"count": 63422,
"is_parallel": true,
"self": 22.463243120928382
},
"communicator.exchange": {
"total": 995.7341018119748,
"count": 63422,
"is_parallel": true,
"self": 995.7341018119748
},
"steps_from_proto": {
"total": 93.0372004409819,
"count": 63422,
"is_parallel": true,
"self": 18.503335067896387,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.53386537308552,
"count": 507376,
"is_parallel": true,
"self": 74.53386537308552
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 664.8355905680116,
"count": 63423,
"self": 2.5289919580136484,
"children": {
"process_trajectory": {
"total": 123.3249099159998,
"count": 63423,
"self": 123.09534723499928,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22956268100051602,
"count": 2,
"self": 0.22956268100051602
}
}
},
"_update_policy": {
"total": 538.9816886939982,
"count": 455,
"self": 298.0867833819941,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.89490531200408,
"count": 22770,
"self": 240.89490531200408
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1889997040270828e-06,
"count": 1,
"self": 1.1889997040270828e-06
},
"TrainerController._save_models": {
"total": 0.12570533699999942,
"count": 1,
"self": 0.0015936770000735123,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1241116599999259,
"count": 1,
"self": 0.1241116599999259
}
}
}
}
}
}
}