{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4484938085079193,
"min": 0.4484938085079193,
"max": 1.507041573524475,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13361.52734375,
"min": 13361.52734375,
"max": 45717.61328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989890.0,
"min": 29952.0,
"max": 989890.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989890.0,
"min": 29952.0,
"max": 989890.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.24449053406715393,
"min": -0.08303222805261612,
"max": 0.34761524200439453,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 62.34508514404297,
"min": -19.927734375,
"max": 90.37995910644531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -1.879557490348816,
"min": -1.9528666734695435,
"max": 2.180330753326416,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -479.28717041015625,
"min": -509.6982116699219,
"max": 566.885986328125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0711138052494524,
"min": 0.06491783739918554,
"max": 0.07402172435731044,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9955932734923337,
"min": 0.48381534922532143,
"max": 1.0571641896830817,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.523038732927196,
"min": 0.00012919346758292347,
"max": 0.523038732927196,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 7.322542260980745,
"min": 0.0018087085461609286,
"max": 7.322542260980745,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.289090427478574e-06,
"min": 7.289090427478574e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010204726598470004,
"min": 0.00010204726598470004,
"max": 0.0032558954147015994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242966428571428,
"min": 0.10242966428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340152999999998,
"min": 1.3691136000000002,
"max": 2.4431072000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002527234621428573,
"min": 0.0002527234621428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003538128470000002,
"min": 0.003538128470000002,
"max": 0.10855131016000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009125393815338612,
"min": 0.009125393815338612,
"max": 0.41734614968299866,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12775550782680511,
"min": 0.12775550782680511,
"max": 2.9214229583740234,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 571.2307692307693,
"min": 495.52542372881356,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29704.0,
"min": 15984.0,
"max": 32747.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0825345877271433,
"min": -1.0000000521540642,
"max": 1.2685573508993524,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 56.29179856181145,
"min": -32.000001668930054,
"max": 77.3819984048605,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0825345877271433,
"min": -1.0000000521540642,
"max": 1.2685573508993524,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 56.29179856181145,
"min": -32.000001668930054,
"max": 77.3819984048605,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.054566108950754054,
"min": 0.05042585650493284,
"max": 8.928653911687434,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.837437665439211,
"min": 2.837437665439211,
"max": 142.85846258699894,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750681963",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750684028"
},
"total": 2064.788200636,
"count": 1,
"self": 0.5462420210001255,
"children": {
"run_training.setup": {
"total": 0.023352799999997842,
"count": 1,
"self": 0.023352799999997842
},
"TrainerController.start_learning": {
"total": 2064.218605815,
"count": 1,
"self": 1.2372683429771314,
"children": {
"TrainerController._reset_env": {
"total": 3.893654857999991,
"count": 1,
"self": 3.893654857999991
},
"TrainerController.advance": {
"total": 2059.007950086023,
"count": 63415,
"self": 1.3015567399934298,
"children": {
"env_step": {
"total": 1410.5149525910017,
"count": 63415,
"self": 1266.0507895819883,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.72092754401172,
"count": 63415,
"self": 4.353773891001765,
"children": {
"TorchPolicy.evaluate": {
"total": 139.36715365300995,
"count": 62571,
"self": 139.36715365300995
}
}
},
"workers": {
"total": 0.7432354650015895,
"count": 63415,
"self": 0.0,
"children": {
"worker_root": {
"total": 2059.430335816015,
"count": 63415,
"is_parallel": true,
"self": 900.7864853950146,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006355205999966529,
"count": 1,
"is_parallel": true,
"self": 0.00448609399995803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018691120000084993,
"count": 8,
"is_parallel": true,
"self": 0.0018691120000084993
}
}
},
"UnityEnvironment.step": {
"total": 0.046859011000037754,
"count": 1,
"is_parallel": true,
"self": 0.0005552520000264849,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004497239999636804,
"count": 1,
"is_parallel": true,
"self": 0.0004497239999636804
},
"communicator.exchange": {
"total": 0.04431933000000754,
"count": 1,
"is_parallel": true,
"self": 0.04431933000000754
},
"steps_from_proto": {
"total": 0.0015347050000400486,
"count": 1,
"is_parallel": true,
"self": 0.0003312390000473897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001203465999992659,
"count": 8,
"is_parallel": true,
"self": 0.001203465999992659
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.6438504210005,
"count": 63414,
"is_parallel": true,
"self": 31.204137168012494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.797103267958278,
"count": 63414,
"is_parallel": true,
"self": 21.797103267958278
},
"communicator.exchange": {
"total": 1014.5155729170086,
"count": 63414,
"is_parallel": true,
"self": 1014.5155729170086
},
"steps_from_proto": {
"total": 91.12703706802108,
"count": 63414,
"is_parallel": true,
"self": 17.963086770020368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.16395029800071,
"count": 507312,
"is_parallel": true,
"self": 73.16395029800071
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.1914407550277,
"count": 63415,
"self": 2.358597632058718,
"children": {
"process_trajectory": {
"total": 121.70408041196868,
"count": 63415,
"self": 121.42279381996866,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28128659200001493,
"count": 2,
"self": 0.28128659200001493
}
}
},
"_update_policy": {
"total": 523.1287627110003,
"count": 443,
"self": 290.450171928984,
"children": {
"TorchPPOOptimizer.update": {
"total": 232.67859078201627,
"count": 22788,
"self": 232.67859078201627
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0579997251625173e-06,
"count": 1,
"self": 1.0579997251625173e-06
},
"TrainerController._save_models": {
"total": 0.07973147000029712,
"count": 1,
"self": 0.0012926150006933312,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07843885499960379,
"count": 1,
"self": 0.07843885499960379
}
}
}
}
}
}
}