{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8967231512069702,
"min": 0.8967231512069702,
"max": 1.4381707906723022,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 26858.65234375,
"min": 26858.65234375,
"max": 43628.34765625,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89897.0,
"min": 29952.0,
"max": 89897.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89897.0,
"min": 29952.0,
"max": 89897.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03941419720649719,
"min": -0.057604577392339706,
"max": 0.036733295768499374,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -9.498821258544922,
"min": -13.9403076171875,
"max": 8.705791473388672,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.21333011984825134,
"min": 0.21333011984825134,
"max": 0.4381527900695801,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 51.412559509277344,
"min": 51.412559509277344,
"max": 103.84220886230469,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06952462810600284,
"min": 0.06931476695628412,
"max": 0.07181932790852992,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9038201653780369,
"min": 0.4852033686939889,
"max": 0.9038201653780369,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0023199747282824524,
"min": 0.0023199747282824524,
"max": 0.008735588810604043,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.030159671467671883,
"min": 0.024844613535071242,
"max": 0.061149121674228306,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.591830546315384e-05,
"min": 7.591830546315384e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000986937971021,
"min": 0.000986937971021,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1253060769230769,
"min": 0.1253060769230769,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.628979,
"min": 1.2868480000000002,
"max": 1.628979,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025380770846153847,
"min": 0.0025380770846153847,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.032995002100000004,
"min": 0.032995002100000004,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.1302146315574646,
"min": 0.1302146315574646,
"max": 0.481036901473999,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.692790150642395,
"min": 1.692790150642395,
"max": 3.367258310317993,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 949.125,
"min": 949.125,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30372.0,
"min": 15984.0,
"max": 33346.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.824950049398467,
"min": -1.0000000521540642,
"max": -0.7250343339783805,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -26.398401580750942,
"min": -26.398401580750942,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.824950049398467,
"min": -1.0000000521540642,
"max": -0.7250343339783805,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -26.398401580750942,
"min": -26.398401580750942,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.4681380621623248,
"min": 1.4681380621623248,
"max": 9.63938254956156,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 46.98041798919439,
"min": 46.98041798919439,
"max": 154.23012079298496,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748406327",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748406532"
},
"total": 205.60459171999992,
"count": 1,
"self": 0.38224859100000685,
"children": {
"run_training.setup": {
"total": 0.02375702199992702,
"count": 1,
"self": 0.02375702199992702
},
"TrainerController.start_learning": {
"total": 205.19858610699998,
"count": 1,
"self": 0.17140446699693257,
"children": {
"TrainerController._reset_env": {
"total": 2.7834807750000436,
"count": 1,
"self": 2.7834807750000436
},
"TrainerController.advance": {
"total": 202.09039486300298,
"count": 6282,
"self": 0.1883734270036257,
"children": {
"env_step": {
"total": 134.038868560997,
"count": 6282,
"self": 115.34716120799226,
"children": {
"SubprocessEnvManager._take_step": {
"total": 18.58670264600164,
"count": 6282,
"self": 0.5487958900024523,
"children": {
"TorchPolicy.evaluate": {
"total": 18.037906755999188,
"count": 6266,
"self": 18.037906755999188
}
}
},
"workers": {
"total": 0.10500470700310416,
"count": 6282,
"self": 0.0,
"children": {
"worker_root": {
"total": 204.7655517700033,
"count": 6282,
"is_parallel": true,
"self": 101.58716862500864,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022378840000101263,
"count": 1,
"is_parallel": true,
"self": 0.0007519550001688913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001485928999841235,
"count": 8,
"is_parallel": true,
"self": 0.001485928999841235
}
}
},
"UnityEnvironment.step": {
"total": 0.042916183999977875,
"count": 1,
"is_parallel": true,
"self": 0.0003911649999963629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034349400004884956,
"count": 1,
"is_parallel": true,
"self": 0.00034349400004884956
},
"communicator.exchange": {
"total": 0.038858984999933455,
"count": 1,
"is_parallel": true,
"self": 0.038858984999933455
},
"steps_from_proto": {
"total": 0.003322539999999208,
"count": 1,
"is_parallel": true,
"self": 0.002346940999814251,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009755990001849568,
"count": 8,
"is_parallel": true,
"self": 0.0009755990001849568
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 103.17838314499465,
"count": 6281,
"is_parallel": true,
"self": 2.8993288939864215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.9574580570023272,
"count": 6281,
"is_parallel": true,
"self": 1.9574580570023272
},
"communicator.exchange": {
"total": 89.6137595739965,
"count": 6281,
"is_parallel": true,
"self": 89.6137595739965
},
"steps_from_proto": {
"total": 8.707836620009402,
"count": 6281,
"is_parallel": true,
"self": 1.9069822340190967,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.800854385990306,
"count": 50248,
"is_parallel": true,
"self": 6.800854385990306
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 67.86315287500236,
"count": 6282,
"self": 0.240244394002616,
"children": {
"process_trajectory": {
"total": 12.911528768000153,
"count": 6282,
"self": 12.911528768000153
},
"_update_policy": {
"total": 54.71137971299959,
"count": 34,
"self": 29.8878879169971,
"children": {
"TorchPPOOptimizer.update": {
"total": 24.823491796002486,
"count": 2313,
"self": 24.823491796002486
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0610000344968284e-06,
"count": 1,
"self": 1.0610000344968284e-06
},
"TrainerController._save_models": {
"total": 0.1533049409999876,
"count": 1,
"self": 0.00213573499991071,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15116920600007688,
"count": 1,
"self": 0.15116920600007688
}
}
}
}
}
}
}