{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3344193994998932,
"min": 0.3344193994998932,
"max": 1.517533302307129,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10048.6337890625,
"min": 10048.6337890625,
"max": 46035.890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989872.0,
"min": 29952.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989872.0,
"min": 29952.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6347144842147827,
"min": -0.0849069356918335,
"max": 0.6347144842147827,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.35476684570312,
"min": -20.37766456604004,
"max": 178.35476684570312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.020774219185113907,
"min": -0.007057444658130407,
"max": 0.24249260127544403,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.837555408477783,
"min": -1.8772802352905273,
"max": 57.47074508666992,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06785614708843198,
"min": 0.06337333972693333,
"max": 0.07385488390459434,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9499860592380477,
"min": 0.4954985445007366,
"max": 1.0741643584721412,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01650496122047549,
"min": 0.0004448396531721008,
"max": 0.017585524037040615,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23106945708665688,
"min": 0.005782915491237311,
"max": 0.2563167963628931,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3084189924642855e-06,
"min": 7.3084189924642855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001023178658945,
"min": 0.0001023178658945,
"max": 0.0035080181306607,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243610714285716,
"min": 0.10243610714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341055000000003,
"min": 1.3886848,
"max": 2.5723189,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025336710357142856,
"min": 0.00025336710357142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00354713945,
"min": 0.00354713945,
"max": 0.11695699607000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008987419307231903,
"min": 0.008987419307231903,
"max": 0.294095516204834,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12582387030124664,
"min": 0.12582387030124664,
"max": 2.058668613433838,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 300.2178217821782,
"min": 287.03921568627453,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30322.0,
"min": 15984.0,
"max": 34024.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6799762247517558,
"min": -1.0000000521540642,
"max": 1.7145922121781747,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 169.67759869992733,
"min": -30.719801746308804,
"max": 176.602997854352,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6799762247517558,
"min": -1.0000000521540642,
"max": 1.7145922121781747,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 169.67759869992733,
"min": -30.719801746308804,
"max": 176.602997854352,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028417634506425173,
"min": 0.028417634506425173,
"max": 5.656275127083063,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8701810851489427,
"min": 2.8701810851489427,
"max": 90.50040203332901,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688642018",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688644283"
},
"total": 2265.1802201969995,
"count": 1,
"self": 0.9422533189995193,
"children": {
"run_training.setup": {
"total": 0.03763628900014737,
"count": 1,
"self": 0.03763628900014737
},
"TrainerController.start_learning": {
"total": 2264.200330589,
"count": 1,
"self": 1.35766573100409,
"children": {
"TrainerController._reset_env": {
"total": 3.9973224850000406,
"count": 1,
"self": 3.9973224850000406
},
"TrainerController.advance": {
"total": 2258.686421476996,
"count": 63978,
"self": 1.453276202966208,
"children": {
"env_step": {
"total": 1611.7017747049913,
"count": 63978,
"self": 1498.7901243000988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 112.07034376099728,
"count": 63978,
"self": 5.031807626063255,
"children": {
"TorchPolicy.evaluate": {
"total": 107.03853613493402,
"count": 62544,
"self": 107.03853613493402
}
}
},
"workers": {
"total": 0.8413066438952228,
"count": 63978,
"self": 0.0,
"children": {
"worker_root": {
"total": 2258.60234315599,
"count": 63978,
"is_parallel": true,
"self": 877.139667264003,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002084183000079065,
"count": 1,
"is_parallel": true,
"self": 0.0006776269997317286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014065560003473365,
"count": 8,
"is_parallel": true,
"self": 0.0014065560003473365
}
}
},
"UnityEnvironment.step": {
"total": 0.048837580000054004,
"count": 1,
"is_parallel": true,
"self": 0.0005827569998473336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000525672000094346,
"count": 1,
"is_parallel": true,
"self": 0.000525672000094346
},
"communicator.exchange": {
"total": 0.045854604999931325,
"count": 1,
"is_parallel": true,
"self": 0.045854604999931325
},
"steps_from_proto": {
"total": 0.0018745460001809988,
"count": 1,
"is_parallel": true,
"self": 0.00038269600099738454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014918499991836143,
"count": 8,
"is_parallel": true,
"self": 0.0014918499991836143
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1381.4626758919871,
"count": 63977,
"is_parallel": true,
"self": 34.29162239916241,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.288059344953126,
"count": 63977,
"is_parallel": true,
"self": 24.288059344953126
},
"communicator.exchange": {
"total": 1213.2067219489663,
"count": 63977,
"is_parallel": true,
"self": 1213.2067219489663
},
"steps_from_proto": {
"total": 109.67627219890528,
"count": 63977,
"is_parallel": true,
"self": 21.373489119985607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.30278307891967,
"count": 511816,
"is_parallel": true,
"self": 88.30278307891967
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.5313705690387,
"count": 63978,
"self": 2.579686524120916,
"children": {
"process_trajectory": {
"total": 114.9027509279199,
"count": 63978,
"self": 114.58225599091975,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32049493700014864,
"count": 2,
"self": 0.32049493700014864
}
}
},
"_update_policy": {
"total": 528.0489331169979,
"count": 451,
"self": 341.8673955229615,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.1815375940364,
"count": 22803,
"self": 186.1815375940364
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3359995136852376e-06,
"count": 1,
"self": 1.3359995136852376e-06
},
"TrainerController._save_models": {
"total": 0.15891955999995844,
"count": 1,
"self": 0.0020138910003879573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15690566899957048,
"count": 1,
"self": 0.15690566899957048
}
}
}
}
}
}
}