{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.382222980260849,
"min": 0.3685995936393738,
"max": 1.5163438320159912,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11552.3076171875,
"min": 11158.2470703125,
"max": 45999.8046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989925.0,
"min": 29919.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989925.0,
"min": 29919.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6070719361305237,
"min": -0.10329162329435349,
"max": 0.651115894317627,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.37307739257812,
"min": -24.893281936645508,
"max": 186.21914672851562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018805991858243942,
"min": -0.01231059618294239,
"max": 0.2700022757053375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.2468719482421875,
"min": -3.5208306312561035,
"max": 65.07054901123047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06535971821202631,
"min": 0.06503575617140098,
"max": 0.07408791914239364,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9150360549683683,
"min": 0.48723406551757714,
"max": 1.037230867993511,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01479435163006807,
"min": 0.0005748000768456695,
"max": 0.015708520390381595,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20712092282095299,
"min": 0.0068976009221480335,
"max": 0.22244536131220943,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.447168946214288e-06,
"min": 7.447168946214288e-06,
"max": 0.00029523433015998574,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010426036524700002,
"min": 0.00010426036524700002,
"max": 0.0036332527889158,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248235714285715,
"min": 0.10248235714285715,
"max": 0.19841144285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347530000000002,
"min": 1.3888801,
"max": 2.6110841999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025798747857142863,
"min": 0.00025798747857142863,
"max": 0.009841303141428571,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003611824700000001,
"min": 0.003611824700000001,
"max": 0.12112731158,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012114865705370903,
"min": 0.012114865705370903,
"max": 0.46486884355545044,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16960811614990234,
"min": 0.16960811614990234,
"max": 3.254081964492798,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.2022471910112,
"min": 307.2755102040816,
"max": 988.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28765.0,
"min": 16590.0,
"max": 32773.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5868808800417387,
"min": -0.9263250506483018,
"max": 1.6707599868899898,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 141.23239832371473,
"min": -30.566001765429974,
"max": 161.76559845358133,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5868808800417387,
"min": -0.9263250506483018,
"max": 1.6707599868899898,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 141.23239832371473,
"min": -30.566001765429974,
"max": 161.76559845358133,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04002579233855705,
"min": 0.03954708131195916,
"max": 8.807659726809053,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5622955181315774,
"min": 3.5622955181315774,
"max": 149.7302153557539,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749139628",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749141872"
},
"total": 2243.657465064,
"count": 1,
"self": 0.48762975999989067,
"children": {
"run_training.setup": {
"total": 0.019730649999928573,
"count": 1,
"self": 0.019730649999928573
},
"TrainerController.start_learning": {
"total": 2243.150104654,
"count": 1,
"self": 1.386777178025568,
"children": {
"TrainerController._reset_env": {
"total": 2.1813588250001885,
"count": 1,
"self": 2.1813588250001885
},
"TrainerController.advance": {
"total": 2239.496813683975,
"count": 64000,
"self": 1.3988191008534159,
"children": {
"env_step": {
"total": 1576.320627216035,
"count": 64000,
"self": 1427.3780495341148,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.15462456298746,
"count": 64000,
"self": 4.6041927159831175,
"children": {
"TorchPolicy.evaluate": {
"total": 143.55043184700435,
"count": 62555,
"self": 143.55043184700435
}
}
},
"workers": {
"total": 0.787953118932819,
"count": 64000,
"self": 0.0,
"children": {
"worker_root": {
"total": 2238.4020726520334,
"count": 64000,
"is_parallel": true,
"self": 922.9094765210871,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020065749999957916,
"count": 1,
"is_parallel": true,
"self": 0.0006564400000570458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013501349999387458,
"count": 8,
"is_parallel": true,
"self": 0.0013501349999387458
}
}
},
"UnityEnvironment.step": {
"total": 0.052381229999809875,
"count": 1,
"is_parallel": true,
"self": 0.0005662270000357239,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045589599994855234,
"count": 1,
"is_parallel": true,
"self": 0.00045589599994855234
},
"communicator.exchange": {
"total": 0.04969967499982886,
"count": 1,
"is_parallel": true,
"self": 0.04969967499982886
},
"steps_from_proto": {
"total": 0.001659431999996741,
"count": 1,
"is_parallel": true,
"self": 0.000344998999935342,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001314433000061399,
"count": 8,
"is_parallel": true,
"self": 0.001314433000061399
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1315.4925961309464,
"count": 63999,
"is_parallel": true,
"self": 31.395340530777503,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.18149692112729,
"count": 63999,
"is_parallel": true,
"self": 23.18149692112729
},
"communicator.exchange": {
"total": 1165.847895227988,
"count": 63999,
"is_parallel": true,
"self": 1165.847895227988
},
"steps_from_proto": {
"total": 95.06786345105365,
"count": 63999,
"is_parallel": true,
"self": 19.159649037015697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.90821441403796,
"count": 511992,
"is_parallel": true,
"self": 75.90821441403796
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 661.7773673670863,
"count": 64000,
"self": 2.574374621076913,
"children": {
"process_trajectory": {
"total": 128.79938437901637,
"count": 64000,
"self": 128.58598501801544,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21339936100093837,
"count": 2,
"self": 0.21339936100093837
}
}
},
"_update_policy": {
"total": 530.403608366993,
"count": 454,
"self": 294.7804177040357,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.6231906629573,
"count": 22821,
"self": 235.6231906629573
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.389996193931438e-07,
"count": 1,
"self": 9.389996193931438e-07
},
"TrainerController._save_models": {
"total": 0.08515402800003358,
"count": 1,
"self": 0.0018216950002170051,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08333233299981657,
"count": 1,
"self": 0.08333233299981657
}
}
}
}
}
}
}