{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.24668242037296295,
"min": 0.24668242037296295,
"max": 1.2579538822174072,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 12409.1123046875,
"min": 12409.1123046875,
"max": 64346.859375,
"count": 20
},
"Pyramids.Step.mean": {
"value": 999916.0,
"min": 49957.0,
"max": 999916.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 999916.0,
"min": 49957.0,
"max": 999916.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7086575031280518,
"min": -0.07414591312408447,
"max": 0.7315652370452881,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 340.8642578125,
"min": -29.880802154541016,
"max": 355.54071044921875,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.010692160576581955,
"min": -0.010692160576581955,
"max": 0.34117385745048523,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -5.1429290771484375,
"min": -5.1429290771484375,
"max": 136.46954345703125,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0679390475152639,
"min": 0.06626810547153371,
"max": 0.07353501287139605,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.6305371403663336,
"min": 1.0791275430993996,
"max": 1.7622327263627942,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015101047742753662,
"min": 0.0027364381209231793,
"max": 0.016077966317122144,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.36242514582608787,
"min": 0.054728762418463583,
"max": 0.3858711916109314,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.604409965229165e-06,
"min": 7.604409965229165e-06,
"max": 0.0002918289827236733,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00018250583916549996,
"min": 0.00018250583916549996,
"max": 0.0057718694760436,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253477083333334,
"min": 0.10253477083333334,
"max": 0.19727632666666664,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4608345000000003,
"min": 2.4608345000000003,
"max": 4.2617467,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026322360625,
"min": 0.00026322360625,
"max": 0.009727905034000001,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00631736655,
"min": 0.00631736655,
"max": 0.19242324435999997,
"count": 20
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015212714672088623,
"min": 0.015212714672088623,
"max": 0.4177984595298767,
"count": 20
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.36510515213012695,
"min": 0.36510515213012695,
"max": 6.266976833343506,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 271.5614973262032,
"min": 258.11052631578946,
"max": 975.7708333333334,
"count": 20
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 50782.0,
"min": 46837.0,
"max": 51998.0,
"count": 20
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6856416961486964,
"min": -0.8933833842165768,
"max": 1.7318586776761904,
"count": 20
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 315.21499717980623,
"min": -42.88240244239569,
"max": 328.9583965986967,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6856416961486964,
"min": -0.8933833842165768,
"max": 1.7318586776761904,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 315.21499717980623,
"min": -42.88240244239569,
"max": 328.9583965986967,
"count": 20
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04275094057621003,
"min": 0.042744482944008746,
"max": 5.815797447382162,
"count": 20
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.994425887751277,
"min": 7.994425887751277,
"max": 279.1582774743438,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743801802",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743804093"
},
"total": 2290.688320848,
"count": 1,
"self": 0.5261833869994916,
"children": {
"run_training.setup": {
"total": 0.019846672000085164,
"count": 1,
"self": 0.019846672000085164
},
"TrainerController.start_learning": {
"total": 2290.142290789,
"count": 1,
"self": 1.3949358769400533,
"children": {
"TrainerController._reset_env": {
"total": 2.3629358149999007,
"count": 1,
"self": 2.3629358149999007
},
"TrainerController.advance": {
"total": 2286.29330108006,
"count": 64407,
"self": 1.405489437045162,
"children": {
"env_step": {
"total": 1607.2559343209098,
"count": 64407,
"self": 1455.1704075719522,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.29976198798272,
"count": 64407,
"self": 4.5462923869235965,
"children": {
"TorchPolicy.evaluate": {
"total": 146.75346960105912,
"count": 62567,
"self": 146.75346960105912
}
}
},
"workers": {
"total": 0.785764760974871,
"count": 64407,
"self": 0.0,
"children": {
"worker_root": {
"total": 2285.0306836100135,
"count": 64407,
"is_parallel": true,
"self": 942.1802628700536,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002022891000024174,
"count": 1,
"is_parallel": true,
"self": 0.0006682800001271971,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013546109998969769,
"count": 8,
"is_parallel": true,
"self": 0.0013546109998969769
}
}
},
"UnityEnvironment.step": {
"total": 0.075030833000028,
"count": 1,
"is_parallel": true,
"self": 0.0005218080000304326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041435100001763203,
"count": 1,
"is_parallel": true,
"self": 0.00041435100001763203
},
"communicator.exchange": {
"total": 0.07265721299995676,
"count": 1,
"is_parallel": true,
"self": 0.07265721299995676
},
"steps_from_proto": {
"total": 0.001437461000023177,
"count": 1,
"is_parallel": true,
"self": 0.0002982659996177972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011391950004053797,
"count": 8,
"is_parallel": true,
"self": 0.0011391950004053797
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1342.8504207399599,
"count": 64406,
"is_parallel": true,
"self": 31.338438933004227,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.595218572024578,
"count": 64406,
"is_parallel": true,
"self": 22.595218572024578
},
"communicator.exchange": {
"total": 1195.605007430976,
"count": 64406,
"is_parallel": true,
"self": 1195.605007430976
},
"steps_from_proto": {
"total": 93.31175580395507,
"count": 64406,
"is_parallel": true,
"self": 18.458678955874348,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.85307684808072,
"count": 515248,
"is_parallel": true,
"self": 74.85307684808072
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 677.6318773221049,
"count": 64407,
"self": 2.7412890771131515,
"children": {
"process_trajectory": {
"total": 127.80212548799796,
"count": 64407,
"self": 127.60675298999763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1953724980003244,
"count": 2,
"self": 0.1953724980003244
}
}
},
"_update_policy": {
"total": 547.0884627569937,
"count": 459,
"self": 300.78365559697,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.30480716002376,
"count": 22833,
"self": 246.30480716002376
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.660002433520276e-07,
"count": 1,
"self": 9.660002433520276e-07
},
"TrainerController._save_models": {
"total": 0.09111705099985556,
"count": 1,
"self": 0.001316256999871257,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08980079399998431,
"count": 1,
"self": 0.08980079399998431
}
}
}
}
}
}
}