{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3266650438308716,
"min": 0.3266650438308716,
"max": 1.4582947492599487,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9789.498046875,
"min": 9789.498046875,
"max": 44238.828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5490950345993042,
"min": -0.09387529641389847,
"max": 0.6206362247467041,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.197509765625,
"min": -22.530071258544922,
"max": 176.88131713867188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026201320812106133,
"min": 0.006960572209209204,
"max": 0.5513593554496765,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.310168266296387,
"min": 1.8236699104309082,
"max": 130.6721649169922,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06976140725246374,
"min": 0.06438405770706075,
"max": 0.07270294635814242,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9766597015344923,
"min": 0.5052661671873246,
"max": 1.0560003357628982,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016693593044371537,
"min": 0.0007555098931293126,
"max": 0.01753832545523246,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23371030262120152,
"min": 0.009821628610681064,
"max": 0.2614222164653862,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.299311852642861e-06,
"min": 7.299311852642861e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010219036593700004,
"min": 0.00010219036593700004,
"max": 0.0034916785361072,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243307142857143,
"min": 0.10243307142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434063,
"min": 1.3886848,
"max": 2.572588,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025306383571428585,
"min": 0.00025306383571428585,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035428937000000016,
"min": 0.0035428937000000016,
"max": 0.11640289072,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012644675560295582,
"min": 0.01259064394980669,
"max": 0.4685530662536621,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1770254522562027,
"min": 0.1770254522562027,
"max": 3.2798714637756348,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 316.7282608695652,
"min": 295.37254901960785,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29139.0,
"min": 15984.0,
"max": 32889.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6848064374218705,
"min": -1.0000000521540642,
"max": 1.6850137129133822,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.68699868023396,
"min": -29.176201567053795,
"max": 171.871398717165,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6848064374218705,
"min": -1.0000000521540642,
"max": 1.6850137129133822,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.68699868023396,
"min": -29.176201567053795,
"max": 171.871398717165,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04116387796082524,
"min": 0.03991603741023402,
"max": 9.384377058595419,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.828240650356747,
"min": 3.828240650356747,
"max": 150.1500329375267,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746550059",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746552302"
},
"total": 2243.186519125,
"count": 1,
"self": 0.4249235570005112,
"children": {
"run_training.setup": {
"total": 0.020355562999839094,
"count": 1,
"self": 0.020355562999839094
},
"TrainerController.start_learning": {
"total": 2242.741240005,
"count": 1,
"self": 1.2465372099886736,
"children": {
"TrainerController._reset_env": {
"total": 2.458327389000033,
"count": 1,
"self": 2.458327389000033
},
"TrainerController.advance": {
"total": 2238.9551998010115,
"count": 63992,
"self": 1.3739487889183692,
"children": {
"env_step": {
"total": 1564.7346606250567,
"count": 63992,
"self": 1420.898170588977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.1208055060083,
"count": 63992,
"self": 4.405205390954734,
"children": {
"TorchPolicy.evaluate": {
"total": 138.71560011505358,
"count": 62563,
"self": 138.71560011505358
}
}
},
"workers": {
"total": 0.7156845300712575,
"count": 63992,
"self": 0.0,
"children": {
"worker_root": {
"total": 2238.0675776349535,
"count": 63992,
"is_parallel": true,
"self": 925.21445807891,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019860009999774775,
"count": 1,
"is_parallel": true,
"self": 0.0006484669997917081,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013375340001857694,
"count": 8,
"is_parallel": true,
"self": 0.0013375340001857694
}
}
},
"UnityEnvironment.step": {
"total": 0.06980251499999213,
"count": 1,
"is_parallel": true,
"self": 0.0005367019998629985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004681830000663467,
"count": 1,
"is_parallel": true,
"self": 0.0004681830000663467
},
"communicator.exchange": {
"total": 0.06720736700003727,
"count": 1,
"is_parallel": true,
"self": 0.06720736700003727
},
"steps_from_proto": {
"total": 0.0015902630000255158,
"count": 1,
"is_parallel": true,
"self": 0.0003251279999858525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012651350000396633,
"count": 8,
"is_parallel": true,
"self": 0.0012651350000396633
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1312.8531195560436,
"count": 63991,
"is_parallel": true,
"self": 31.009626111055013,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.67272188600714,
"count": 63991,
"is_parallel": true,
"self": 22.67272188600714
},
"communicator.exchange": {
"total": 1166.6878869099612,
"count": 63991,
"is_parallel": true,
"self": 1166.6878869099612
},
"steps_from_proto": {
"total": 92.4828846490202,
"count": 63991,
"is_parallel": true,
"self": 18.095338066972545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.38754658204766,
"count": 511928,
"is_parallel": true,
"self": 74.38754658204766
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 672.8465903870365,
"count": 63992,
"self": 2.5367480440361305,
"children": {
"process_trajectory": {
"total": 126.89783910799974,
"count": 63992,
"self": 126.69576431099972,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20207479700002295,
"count": 2,
"self": 0.20207479700002295
}
}
},
"_update_policy": {
"total": 543.4120032350006,
"count": 455,
"self": 304.84422735097974,
"children": {
"TorchPPOOptimizer.update": {
"total": 238.56777588402088,
"count": 22770,
"self": 238.56777588402088
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0220001058769412e-06,
"count": 1,
"self": 1.0220001058769412e-06
},
"TrainerController._save_models": {
"total": 0.08117458299966529,
"count": 1,
"self": 0.0010861819996534905,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0800884010000118,
"count": 1,
"self": 0.0800884010000118
}
}
}
}
}
}
}