{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3237864375114441,
"min": 0.31810370087623596,
"max": 1.588344931602478,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3310.392578125,
"min": 3119.961181640625,
"max": 16264.65234375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 999945.0,
"min": 9984.0,
"max": 999945.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 999945.0,
"min": 9984.0,
"max": 999945.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4371621310710907,
"min": -0.14251695573329926,
"max": 0.7079083919525146,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 38.90742874145508,
"min": -11.11632251739502,
"max": 65.12757110595703,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011771360412240028,
"min": 0.000479597772937268,
"max": 0.4647774398326874,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.0476510524749756,
"min": 0.0422046035528183,
"max": 36.25263977050781,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07231817814366272,
"min": 0.06107394570911613,
"max": 0.07849415072679049,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.3615908907183136,
"min": 0.14153848851371245,
"max": 0.37971370184095576,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010330927182803862,
"min": 0.00015641979250648712,
"max": 0.01931714837216229,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.051654635914019316,
"min": 0.0006256791700259485,
"max": 0.0898490241910194,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.6335994554999963e-06,
"min": 1.6335994554999963e-06,
"max": 0.0002981568006144,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 8.167997277499981e-06,
"min": 8.167997277499981e-06,
"max": 0.0013125621624793002,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10054450000000001,
"min": 0.10054450000000001,
"max": 0.1993856,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5027225000000001,
"min": 0.39717119999999995,
"max": 0.9375207000000001,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.439554999999989e-05,
"min": 6.439554999999989e-05,
"max": 0.00993862144,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0003219777499999994,
"min": 0.0003219777499999994,
"max": 0.04375831793,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010893860831856728,
"min": 0.01076428685337305,
"max": 0.8226414918899536,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05446930602192879,
"min": 0.0430571474134922,
"max": 1.6452829837799072,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 403.4,
"min": 309.84375,
"max": 999.0,
"count": 98
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 8068.0,
"min": 999.0,
"max": 16202.0,
"count": 98
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4825684036079205,
"min": -1.0000000521540642,
"max": 1.6639999449253082,
"count": 98
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 28.16879966855049,
"min": -16.000000834465027,
"max": 50.084399193525314,
"count": 98
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4825684036079205,
"min": -1.0000000521540642,
"max": 1.6639999449253082,
"count": 98
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 28.16879966855049,
"min": -16.000000834465027,
"max": 50.084399193525314,
"count": 98
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04826814494414353,
"min": 0.03412850837587569,
"max": 9.804138541221619,
"count": 98
},
"Pyramids.Policy.RndReward.sum": {
"value": 0.9170947539387271,
"min": 0.17582036554813385,
"max": 156.8662166595459,
"count": 98
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1762757995",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1762760323"
},
"total": 2327.7597867570003,
"count": 1,
"self": 1.2822127410004214,
"children": {
"run_training.setup": {
"total": 0.04767040199976691,
"count": 1,
"self": 0.04767040199976691
},
"TrainerController.start_learning": {
"total": 2326.429903614,
"count": 1,
"self": 1.4031294270421313,
"children": {
"TrainerController._reset_env": {
"total": 5.235476812999877,
"count": 1,
"self": 5.235476812999877
},
"TrainerController.advance": {
"total": 2319.5879654469577,
"count": 63892,
"self": 1.4390108628517737,
"children": {
"env_step": {
"total": 1623.6819257289376,
"count": 63892,
"self": 1467.5186215640174,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.3370998918872,
"count": 63892,
"self": 4.812713019759485,
"children": {
"TorchPolicy.evaluate": {
"total": 150.5243868721277,
"count": 62565,
"self": 150.5243868721277
}
}
},
"workers": {
"total": 0.8262042730329995,
"count": 63892,
"self": 0.0,
"children": {
"worker_root": {
"total": 2317.2171337280147,
"count": 63892,
"is_parallel": true,
"self": 976.5192746710459,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018245890000798681,
"count": 1,
"is_parallel": true,
"self": 0.0005894280002394225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012351609998404456,
"count": 8,
"is_parallel": true,
"self": 0.0012351609998404456
}
}
},
"UnityEnvironment.step": {
"total": 0.049218402999940736,
"count": 1,
"is_parallel": true,
"self": 0.0005375400000957598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045722799995928654,
"count": 1,
"is_parallel": true,
"self": 0.00045722799995928654
},
"communicator.exchange": {
"total": 0.0463869209997938,
"count": 1,
"is_parallel": true,
"self": 0.0463869209997938
},
"steps_from_proto": {
"total": 0.001836714000091888,
"count": 1,
"is_parallel": true,
"self": 0.0003844890002255852,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014522249998663028,
"count": 8,
"is_parallel": true,
"self": 0.0014522249998663028
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1340.6978590569688,
"count": 63891,
"is_parallel": true,
"self": 34.071416558218516,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.98546979493267,
"count": 63891,
"is_parallel": true,
"self": 22.98546979493267
},
"communicator.exchange": {
"total": 1170.5119343989454,
"count": 63891,
"is_parallel": true,
"self": 1170.5119343989454
},
"steps_from_proto": {
"total": 113.12903830487221,
"count": 63891,
"is_parallel": true,
"self": 24.451759901785863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.67727840308635,
"count": 511128,
"is_parallel": true,
"self": 88.67727840308635
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 694.4670288551683,
"count": 63892,
"self": 2.643223672091608,
"children": {
"process_trajectory": {
"total": 138.0346776260776,
"count": 63892,
"self": 137.63955811807682,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3951195080007892,
"count": 2,
"self": 0.3951195080007892
}
}
},
"_update_policy": {
"total": 553.7891275569991,
"count": 451,
"self": 310.42613398599724,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.36299357100188,
"count": 22800,
"self": 243.36299357100188
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4470006135525182e-06,
"count": 1,
"self": 1.4470006135525182e-06
},
"TrainerController._save_models": {
"total": 0.2033304799997495,
"count": 1,
"self": 0.037743167000371614,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1655873129993779,
"count": 1,
"self": 0.1655873129993779
}
}
}
}
}
}
}