{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4536501467227936,
"min": 0.4481290876865387,
"max": 1.4486898183822632,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13558.6953125,
"min": 13451.04296875,
"max": 43947.453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.38248369097709656,
"min": -0.10173966735601425,
"max": 0.42480307817459106,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 100.21072387695312,
"min": -24.51926040649414,
"max": 111.72321319580078,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018510952591896057,
"min": 0.015417182818055153,
"max": 0.5189533829689026,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.849869728088379,
"min": 4.054718971252441,
"max": 122.99195861816406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06963493651543327,
"min": 0.06484056078627183,
"max": 0.07312436407246789,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9748891112160659,
"min": 0.4890327725722388,
"max": 1.0454616449646623,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014133235305939514,
"min": 0.0005144786328995766,
"max": 0.014133235305939514,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1978652942831532,
"min": 0.006688222227694496,
"max": 0.1978652942831532,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.634818883664284e-06,
"min": 7.634818883664284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010688746437129998,
"min": 0.00010688746437129998,
"max": 0.0036328138890620986,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254490714285715,
"min": 0.10254490714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356287,
"min": 1.3886848,
"max": 2.6109378999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026423622357142856,
"min": 0.00026423622357142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036993071299999997,
"min": 0.0036993071299999997,
"max": 0.12111269620999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012681729160249233,
"min": 0.012681729160249233,
"max": 0.5785948038101196,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17754420638084412,
"min": 0.17754420638084412,
"max": 4.050163745880127,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 471.4,
"min": 417.05797101449275,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30641.0,
"min": 15984.0,
"max": 32707.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3216787595866304,
"min": -1.0000000521540642,
"max": 1.4326305334559746,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 87.23079813271761,
"min": -29.672001615166664,
"max": 103.14939840883017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3216787595866304,
"min": -1.0000000521540642,
"max": 1.4326305334559746,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 87.23079813271761,
"min": -29.672001615166664,
"max": 103.14939840883017,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06073080798870995,
"min": 0.05868019672861385,
"max": 11.648794915527105,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.008233327254857,
"min": 3.9902533775457414,
"max": 186.38071864843369,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745764369",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745766668"
},
"total": 2299.1279242120004,
"count": 1,
"self": 0.47584278000022096,
"children": {
"run_training.setup": {
"total": 0.020207751000270946,
"count": 1,
"self": 0.020207751000270946
},
"TrainerController.start_learning": {
"total": 2298.631873681,
"count": 1,
"self": 1.728249254962975,
"children": {
"TrainerController._reset_env": {
"total": 2.309385551000105,
"count": 1,
"self": 2.309385551000105
},
"TrainerController.advance": {
"total": 2294.504890766036,
"count": 63527,
"self": 1.749962717042763,
"children": {
"env_step": {
"total": 1601.1431607259606,
"count": 63527,
"self": 1424.2618248699428,
"children": {
"SubprocessEnvManager._take_step": {
"total": 175.8735926189479,
"count": 63527,
"self": 5.2581211779483965,
"children": {
"TorchPolicy.evaluate": {
"total": 170.6154714409995,
"count": 62568,
"self": 170.6154714409995
}
}
},
"workers": {
"total": 1.007743237069917,
"count": 63527,
"self": 0.0,
"children": {
"worker_root": {
"total": 2292.7128097279833,
"count": 63527,
"is_parallel": true,
"self": 996.4291927079212,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020728999998027575,
"count": 1,
"is_parallel": true,
"self": 0.0006981940000514442,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013747059997513134,
"count": 8,
"is_parallel": true,
"self": 0.0013747059997513134
}
}
},
"UnityEnvironment.step": {
"total": 0.05503041199972358,
"count": 1,
"is_parallel": true,
"self": 0.000566637999327213,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047038599996085395,
"count": 1,
"is_parallel": true,
"self": 0.00047038599996085395
},
"communicator.exchange": {
"total": 0.05212779800012868,
"count": 1,
"is_parallel": true,
"self": 0.05212779800012868
},
"steps_from_proto": {
"total": 0.0018655900003068382,
"count": 1,
"is_parallel": true,
"self": 0.00045312200018088333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014124680001259549,
"count": 8,
"is_parallel": true,
"self": 0.0014124680001259549
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1296.2836170200621,
"count": 63526,
"is_parallel": true,
"self": 33.53246748706624,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.48016998103958,
"count": 63526,
"is_parallel": true,
"self": 24.48016998103958
},
"communicator.exchange": {
"total": 1133.9422807490305,
"count": 63526,
"is_parallel": true,
"self": 1133.9422807490305
},
"steps_from_proto": {
"total": 104.32869880292583,
"count": 63526,
"is_parallel": true,
"self": 22.368240735874224,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.9604580670516,
"count": 508208,
"is_parallel": true,
"self": 81.9604580670516
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 691.6117673230328,
"count": 63527,
"self": 3.1357682780039795,
"children": {
"process_trajectory": {
"total": 134.79168214203128,
"count": 63527,
"self": 134.5410315480308,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25065059400048995,
"count": 2,
"self": 0.25065059400048995
}
}
},
"_update_policy": {
"total": 553.6843169029976,
"count": 451,
"self": 305.41758714700154,
"children": {
"TorchPPOOptimizer.update": {
"total": 248.26672975599604,
"count": 22785,
"self": 248.26672975599604
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.22000253922306e-07,
"count": 1,
"self": 9.22000253922306e-07
},
"TrainerController._save_models": {
"total": 0.08934718700038502,
"count": 1,
"self": 0.00141494200033776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08793224500004726,
"count": 1,
"self": 0.08793224500004726
}
}
}
}
}
}
}