{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.562525749206543,
"min": 0.562525749206543,
"max": 1.4559292793273926,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16830.76953125,
"min": 16830.76953125,
"max": 44167.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989990.0,
"min": 29952.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989990.0,
"min": 29952.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40472978353500366,
"min": -0.11554054915904999,
"max": 0.436197429895401,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.84866333007812,
"min": -27.845272064208984,
"max": 118.20950317382812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.029258280992507935,
"min": -0.0019323076121509075,
"max": 0.298096626996994,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.724185943603516,
"min": -0.5004676580429077,
"max": 71.5431900024414,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06723367137760569,
"min": 0.06571216490953678,
"max": 0.07371734836651078,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9412713992864797,
"min": 0.5028324511327684,
"max": 1.0382576385528925,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016242393619028385,
"min": 0.0006252622632228618,
"max": 0.016282647733273085,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2273935106663974,
"min": 0.007503147158674342,
"max": 0.23855995883059222,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.421347526250001e-06,
"min": 7.421347526250001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010389886536750002,
"min": 0.00010389886536750002,
"max": 0.0033827861724047004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247375,
"min": 0.10247375,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346325,
"min": 1.3886848,
"max": 2.5275952999999998,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025712762500000006,
"min": 0.00025712762500000006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003599786750000001,
"min": 0.003599786750000001,
"max": 0.11278677046999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01092385221272707,
"min": 0.010473465546965599,
"max": 0.45272132754325867,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15293392539024353,
"min": 0.1466285139322281,
"max": 3.1690492630004883,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 434.44444444444446,
"min": 398.2027027027027,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31280.0,
"min": 15984.0,
"max": 32725.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.398827746613986,
"min": -1.0000000521540642,
"max": 1.576646130818587,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 100.71559775620699,
"min": -30.60900169610977,
"max": 110.32479821145535,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.398827746613986,
"min": -1.0000000521540642,
"max": 1.576646130818587,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 100.71559775620699,
"min": -30.60900169610977,
"max": 110.32479821145535,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04907534867622113,
"min": 0.043942668246479154,
"max": 9.056108340620995,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5334251046879217,
"min": 2.9748629983514547,
"max": 144.8977334499359,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749856762",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749858852"
},
"total": 2089.420731524,
"count": 1,
"self": 0.4755173410003408,
"children": {
"run_training.setup": {
"total": 0.019855641000049218,
"count": 1,
"self": 0.019855641000049218
},
"TrainerController.start_learning": {
"total": 2088.9253585419997,
"count": 1,
"self": 1.2335819049671954,
"children": {
"TrainerController._reset_env": {
"total": 2.120048579000013,
"count": 1,
"self": 2.120048579000013
},
"TrainerController.advance": {
"total": 2085.492948556033,
"count": 63607,
"self": 1.2802605769752518,
"children": {
"env_step": {
"total": 1441.848214070998,
"count": 63607,
"self": 1298.9854345100125,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.10062411490708,
"count": 63607,
"self": 4.476734122826656,
"children": {
"TorchPolicy.evaluate": {
"total": 137.62388999208042,
"count": 62573,
"self": 137.62388999208042
}
}
},
"workers": {
"total": 0.7621554460783955,
"count": 63607,
"self": 0.0,
"children": {
"worker_root": {
"total": 2084.244649999122,
"count": 63607,
"is_parallel": true,
"self": 891.5408599061002,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001797280000118917,
"count": 1,
"is_parallel": true,
"self": 0.0005781610002486559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012191189998702612,
"count": 8,
"is_parallel": true,
"self": 0.0012191189998702612
}
}
},
"UnityEnvironment.step": {
"total": 0.0466224440001497,
"count": 1,
"is_parallel": true,
"self": 0.0005168970001250273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004810270002053585,
"count": 1,
"is_parallel": true,
"self": 0.0004810270002053585
},
"communicator.exchange": {
"total": 0.044020380999882036,
"count": 1,
"is_parallel": true,
"self": 0.044020380999882036
},
"steps_from_proto": {
"total": 0.0016041389999372768,
"count": 1,
"is_parallel": true,
"self": 0.00031790699995326577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001286231999984011,
"count": 8,
"is_parallel": true,
"self": 0.001286231999984011
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1192.7037900930218,
"count": 63606,
"is_parallel": true,
"self": 30.632594557920584,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.78031107113702,
"count": 63606,
"is_parallel": true,
"self": 21.78031107113702
},
"communicator.exchange": {
"total": 1047.935016312927,
"count": 63606,
"is_parallel": true,
"self": 1047.935016312927
},
"steps_from_proto": {
"total": 92.35586815103716,
"count": 63606,
"is_parallel": true,
"self": 18.189192229017408,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.16667592201975,
"count": 508848,
"is_parallel": true,
"self": 74.16667592201975
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 642.3644739080596,
"count": 63607,
"self": 2.477997992978999,
"children": {
"process_trajectory": {
"total": 120.48169768208254,
"count": 63607,
"self": 120.29553334008187,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18616434200066578,
"count": 2,
"self": 0.18616434200066578
}
}
},
"_update_policy": {
"total": 519.4047782329981,
"count": 446,
"self": 292.7043091290575,
"children": {
"TorchPPOOptimizer.update": {
"total": 226.70046910394058,
"count": 22821,
"self": 226.70046910394058
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.329996828455478e-07,
"count": 1,
"self": 9.329996828455478e-07
},
"TrainerController._save_models": {
"total": 0.07877856899995095,
"count": 1,
"self": 0.0013476120002451353,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07743095699970581,
"count": 1,
"self": 0.07743095699970581
}
}
}
}
}
}
}