{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.06137547269463539,
"min": 0.050522513687610626,
"max": 1.4060906171798706,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 1838.318115234375,
"min": 1493.9769287109375,
"max": 42655.1640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08396873623132706,
"min": -0.26935309171676636,
"max": -0.03601357713341713,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -20.236465454101562,
"min": -63.8366813659668,
"max": -8.679271697998047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.4639581441879272,
"min": 0.3903111219406128,
"max": 1.49992835521698,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 352.81390380859375,
"min": 92.50373840332031,
"max": 361.48272705078125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07107425820902877,
"min": 0.06510305120041714,
"max": 0.0724374112231863,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9950396149264027,
"min": 0.5070618785623041,
"max": 1.0094984276334333,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007296895039048866,
"min": 0.005370183632630408,
"max": 0.009928878643871996,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10215653054668412,
"min": 0.05564978032781182,
"max": 0.12907542237033595,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.755268843514292e-06,
"min": 7.755268843514292e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001085737638092001,
"min": 0.0001085737638092001,
"max": 0.003507260630913199,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258505714285715,
"min": 0.10258505714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361908,
"min": 1.3886848,
"max": 2.5690868000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002682472085714288,
"min": 0.0002682472085714288,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037554609200000032,
"min": 0.0037554609200000032,
"max": 0.11693177131999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 1.481525182723999,
"min": 0.5790585279464722,
"max": 1.5078221559524536,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 20.741352081298828,
"min": 4.053409576416016,
"max": 21.10951042175293,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 960.7,
"min": 954.4375,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28821.0,
"min": 15984.0,
"max": 32687.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8278400468329589,
"min": -1.0000000521540642,
"max": -0.689110394438793,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -24.835201404988766,
"min": -31.992801636457443,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8278400468329589,
"min": -1.0000000521540642,
"max": -0.689110394438793,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -24.835201404988766,
"min": -31.992801636457443,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 13.982780611515045,
"min": 8.151590535812305,
"max": 14.856133223542322,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 419.48341834545135,
"min": 131.05368986725807,
"max": 469.9335271801101,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753888422",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1.2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753890110"
},
"total": 1687.435682141,
"count": 1,
"self": 0.32523584899990965,
"children": {
"run_training.setup": {
"total": 0.030050496000058047,
"count": 1,
"self": 0.030050496000058047
},
"TrainerController.start_learning": {
"total": 1687.080395796,
"count": 1,
"self": 1.4621887789587618,
"children": {
"TrainerController._reset_env": {
"total": 2.323750393999944,
"count": 1,
"self": 2.323750393999944
},
"TrainerController.advance": {
"total": 1683.2044586260413,
"count": 63153,
"self": 1.4010175761266055,
"children": {
"env_step": {
"total": 1050.020917751944,
"count": 63153,
"self": 895.337604806866,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.79925989404046,
"count": 63153,
"self": 4.632093157054442,
"children": {
"TorchPolicy.evaluate": {
"total": 149.16716673698602,
"count": 62570,
"self": 149.16716673698602
}
}
},
"workers": {
"total": 0.8840530510374265,
"count": 63153,
"self": 0.0,
"children": {
"worker_root": {
"total": 1684.6595662649954,
"count": 63153,
"is_parallel": true,
"self": 885.3223912709818,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002013746999864452,
"count": 1,
"is_parallel": true,
"self": 0.0006444959999498678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001369250999914584,
"count": 8,
"is_parallel": true,
"self": 0.001369250999914584
}
}
},
"UnityEnvironment.step": {
"total": 0.04483826199998475,
"count": 1,
"is_parallel": true,
"self": 0.000355892000015956,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003101449999576289,
"count": 1,
"is_parallel": true,
"self": 0.0003101449999576289
},
"communicator.exchange": {
"total": 0.04310467499999504,
"count": 1,
"is_parallel": true,
"self": 0.04310467499999504
},
"steps_from_proto": {
"total": 0.0010675500000161264,
"count": 1,
"is_parallel": true,
"self": 0.0002616599995235447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008058900004925817,
"count": 8,
"is_parallel": true,
"self": 0.0008058900004925817
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 799.3371749940136,
"count": 63152,
"is_parallel": true,
"self": 20.261309982113062,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.050121337920928,
"count": 63152,
"is_parallel": true,
"self": 14.050121337920928
},
"communicator.exchange": {
"total": 704.265375135963,
"count": 63152,
"is_parallel": true,
"self": 704.265375135963
},
"steps_from_proto": {
"total": 60.760368538016564,
"count": 63152,
"is_parallel": true,
"self": 13.176876046171401,
"children": {
"_process_rank_one_or_two_observation": {
"total": 47.58349249184516,
"count": 505216,
"is_parallel": true,
"self": 47.58349249184516
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.7825232979708,
"count": 63153,
"self": 2.6800112389055357,
"children": {
"process_trajectory": {
"total": 116.37301919906213,
"count": 63153,
"self": 116.16728786006252,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2057313389996125,
"count": 2,
"self": 0.2057313389996125
}
}
},
"_update_policy": {
"total": 512.7294928600031,
"count": 443,
"self": 283.04561531001127,
"children": {
"TorchPPOOptimizer.update": {
"total": 229.68387754999185,
"count": 22767,
"self": 229.68387754999185
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.830000635702163e-07,
"count": 1,
"self": 9.830000635702163e-07
},
"TrainerController._save_models": {
"total": 0.08999701399989135,
"count": 1,
"self": 0.0011957709998569044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08880124300003445,
"count": 1,
"self": 0.08880124300003445
}
}
}
}
}
}
}