{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6080633997917175,
"min": 0.5794950723648071,
"max": 1.3749773502349854,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18261.359375,
"min": 17403.396484375,
"max": 41711.3125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29981.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29981.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5271888971328735,
"min": -0.0967899039387703,
"max": 0.5271888971328735,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 142.34100341796875,
"min": -23.326366424560547,
"max": 142.34100341796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03477299585938454,
"min": 0.0020750879775732756,
"max": 0.39330095052719116,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.38870906829834,
"min": 0.5664989948272705,
"max": 93.95552062988281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06833529743375401,
"min": 0.06421989302239114,
"max": 0.07439208805242031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.956694164072556,
"min": 0.5067249919065734,
"max": 1.0788671257323585,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013019752658440383,
"min": 9.594691497062908e-05,
"max": 0.013210731247382845,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18227653721816536,
"min": 0.001151362979647549,
"max": 0.18227653721816536,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.688883151357148e-06,
"min": 7.688883151357148e-06,
"max": 0.0002952371587304714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010764436411900006,
"min": 0.00010764436411900006,
"max": 0.0033316527894490995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256292857142858,
"min": 0.10256292857142858,
"max": 0.1984123857142857,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358810000000002,
"min": 1.3888867,
"max": 2.3590103,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026603656428571444,
"min": 0.00026603656428571444,
"max": 0.009841397332857143,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037245119000000023,
"min": 0.0037245119000000023,
"max": 0.11106403491000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00990848708897829,
"min": 0.00990848708897829,
"max": 0.5971961617469788,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13871881365776062,
"min": 0.13871881365776062,
"max": 4.180373191833496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 382.8181818181818,
"min": 382.8181818181818,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29477.0,
"min": 16652.0,
"max": 33536.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5418640837455406,
"min": -0.9999500517733395,
"max": 1.5418640837455406,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.26539853215218,
"min": -31.998401656746864,
"max": 120.26539853215218,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5418640837455406,
"min": -0.9999500517733395,
"max": 1.5418640837455406,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.26539853215218,
"min": -31.998401656746864,
"max": 120.26539853215218,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03879514770456948,
"min": 0.03879514770456948,
"max": 11.882982588866177,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0260215209564194,
"min": 3.0260215209564194,
"max": 202.01070401072502,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1775497757",
"python_version": "3.10.12 (main, Mar 3 2026, 11:56:32) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1775500050"
},
"total": 2293.0654471510006,
"count": 1,
"self": 0.7802521339999657,
"children": {
"run_training.setup": {
"total": 0.021837672000401653,
"count": 1,
"self": 0.021837672000401653
},
"TrainerController.start_learning": {
"total": 2292.2633573450003,
"count": 1,
"self": 1.6101357810475747,
"children": {
"TrainerController._reset_env": {
"total": 2.1668558359997405,
"count": 1,
"self": 2.1668558359997405
},
"TrainerController.advance": {
"total": 2288.408423199953,
"count": 63564,
"self": 1.650957412050957,
"children": {
"env_step": {
"total": 1598.2374452760068,
"count": 63564,
"self": 1435.0295568751908,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.28292497788607,
"count": 63564,
"self": 5.043274802073029,
"children": {
"TorchPolicy.evaluate": {
"total": 157.23965017581304,
"count": 62575,
"self": 157.23965017581304
}
}
},
"workers": {
"total": 0.9249634229299772,
"count": 63564,
"self": 0.0,
"children": {
"worker_root": {
"total": 2285.914404635807,
"count": 63564,
"is_parallel": true,
"self": 976.4977078129332,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001891985999463941,
"count": 1,
"is_parallel": true,
"self": 0.0006545079995703418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012374779998935992,
"count": 8,
"is_parallel": true,
"self": 0.0012374779998935992
}
}
},
"UnityEnvironment.step": {
"total": 0.053762614000334,
"count": 1,
"is_parallel": true,
"self": 0.000595501999669068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005065980003564619,
"count": 1,
"is_parallel": true,
"self": 0.0005065980003564619
},
"communicator.exchange": {
"total": 0.05084130399973219,
"count": 1,
"is_parallel": true,
"self": 0.05084130399973219
},
"steps_from_proto": {
"total": 0.0018192100005762768,
"count": 1,
"is_parallel": true,
"self": 0.00041600600252422737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014032039980520494,
"count": 8,
"is_parallel": true,
"self": 0.0014032039980520494
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1309.4166968228737,
"count": 63563,
"is_parallel": true,
"self": 35.55065125681085,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.18237793308799,
"count": 63563,
"is_parallel": true,
"self": 25.18237793308799
},
"communicator.exchange": {
"total": 1129.8040881090246,
"count": 63563,
"is_parallel": true,
"self": 1129.8040881090246
},
"steps_from_proto": {
"total": 118.87957952395027,
"count": 63563,
"is_parallel": true,
"self": 24.95178377085631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.92779575309396,
"count": 508504,
"is_parallel": true,
"self": 93.92779575309396
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 688.5200205118954,
"count": 63564,
"self": 2.7500540260080015,
"children": {
"process_trajectory": {
"total": 132.56792192287776,
"count": 63564,
"self": 132.37545110787778,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19247081499997876,
"count": 2,
"self": 0.19247081499997876
}
}
},
"_update_policy": {
"total": 553.2020445630096,
"count": 441,
"self": 306.96609290804554,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.23595165496408,
"count": 22839,
"self": 246.23595165496408
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.309997039963491e-07,
"count": 1,
"self": 9.309997039963491e-07
},
"TrainerController._save_models": {
"total": 0.07794159700006276,
"count": 1,
"self": 0.0010060080003313487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07693558899973141,
"count": 1,
"self": 0.07693558899973141
}
}
}
}
}
}
}