{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.501031219959259,
"min": 0.501031219959259,
"max": 1.5037435293197632,
"count": 34
},
"Pyramids.Policy.Entropy.sum": {
"value": 14958.7880859375,
"min": 14958.7880859375,
"max": 45617.5625,
"count": 34
},
"Pyramids.Step.mean": {
"value": 1019939.0,
"min": 29952.0,
"max": 1019939.0,
"count": 34
},
"Pyramids.Step.sum": {
"value": 1019939.0,
"min": 29952.0,
"max": 1019939.0,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5139161944389343,
"min": -0.10858037322759628,
"max": 0.5139161944389343,
"count": 34
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 137.2156219482422,
"min": -26.167869567871094,
"max": 137.2156219482422,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012243442237377167,
"min": 0.002633146708831191,
"max": 0.19686265289783478,
"count": 34
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2689990997314453,
"min": 0.6925175786018372,
"max": 47.24703598022461,
"count": 34
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0665701508973836,
"min": 0.06416201426757498,
"max": 0.07429927225243914,
"count": 34
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9319821125633705,
"min": 0.474579090556269,
"max": 1.0458454094671956,
"count": 34
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01358873882033068,
"min": 0.00019294021824126635,
"max": 0.01358873882033068,
"count": 34
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1902423434846295,
"min": 0.0025082228371364626,
"max": 0.19754163087539686,
"count": 34
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001493077038022107,
"min": 0.0001493077038022107,
"max": 0.0002975753150939428,
"count": 34
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00209030785323095,
"min": 0.0020536704154432,
"max": 0.0035385414704862494,
"count": 34
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14976921785714287,
"min": 0.14976921785714287,
"max": 0.19919177142857142,
"count": 34
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0967690500000002,
"min": 1.3845568000000001,
"max": 2.5795137500000007,
"count": 34
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004981944863928571,
"min": 0.004981944863928571,
"max": 0.009919257965714285,
"count": 34
},
"Pyramids.Policy.Beta.sum": {
"value": 0.069747228095,
"min": 0.06845722432,
"max": 0.11797342362500002,
"count": 34
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008135640062391758,
"min": 0.008135640062391758,
"max": 0.33366864919662476,
"count": 34
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11389896273612976,
"min": 0.11389896273612976,
"max": 2.3356804847717285,
"count": 34
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 374.4805194805195,
"min": 374.4805194805195,
"max": 999.0,
"count": 34
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28835.0,
"min": 15984.0,
"max": 35213.0,
"count": 34
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5735506274096378,
"min": -1.0000000521540642,
"max": 1.5735506274096378,
"count": 34
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 121.1633983105421,
"min": -32.000001668930054,
"max": 121.1633983105421,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5735506274096378,
"min": -1.0000000521540642,
"max": 1.5735506274096378,
"count": 34
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 121.1633983105421,
"min": -32.000001668930054,
"max": 121.1633983105421,
"count": 34
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03179996063805778,
"min": 0.03179996063805778,
"max": 7.494690702762455,
"count": 34
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.448596969130449,
"min": 2.428954181210429,
"max": 119.91505124419928,
"count": 34
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 34
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677134052",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677136433"
},
"total": 2380.41152972,
"count": 1,
"self": 0.5630588170001829,
"children": {
"run_training.setup": {
"total": 0.11822261499992237,
"count": 1,
"self": 0.11822261499992237
},
"TrainerController.start_learning": {
"total": 2379.730248288,
"count": 1,
"self": 1.619764880962066,
"children": {
"TrainerController._reset_env": {
"total": 7.490079941999966,
"count": 1,
"self": 7.490079941999966
},
"TrainerController.advance": {
"total": 2370.305945578038,
"count": 64844,
"self": 1.6772202349243344,
"children": {
"env_step": {
"total": 1601.1260256030575,
"count": 64844,
"self": 1472.8783546641268,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.23112958994489,
"count": 64844,
"self": 5.144986101919585,
"children": {
"TorchPolicy.evaluate": {
"total": 122.0861434880253,
"count": 63925,
"self": 41.603016973104104,
"children": {
"TorchPolicy.sample_actions": {
"total": 80.4831265149212,
"count": 63925,
"self": 80.4831265149212
}
}
}
}
},
"workers": {
"total": 1.016541348985811,
"count": 64843,
"self": 0.0,
"children": {
"worker_root": {
"total": 2374.3600811220813,
"count": 64843,
"is_parallel": true,
"self": 1027.1311618130885,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018457669998497295,
"count": 1,
"is_parallel": true,
"self": 0.0006396390001555119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012061279996942176,
"count": 8,
"is_parallel": true,
"self": 0.0012061279996942176
}
}
},
"UnityEnvironment.step": {
"total": 0.048313515000018015,
"count": 1,
"is_parallel": true,
"self": 0.0005298540002058871,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000671768999836786,
"count": 1,
"is_parallel": true,
"self": 0.000671768999836786
},
"communicator.exchange": {
"total": 0.04547429500007638,
"count": 1,
"is_parallel": true,
"self": 0.04547429500007638
},
"steps_from_proto": {
"total": 0.0016375969998989603,
"count": 1,
"is_parallel": true,
"self": 0.0004240939999817783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001213502999917182,
"count": 8,
"is_parallel": true,
"self": 0.001213502999917182
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1347.2289193089928,
"count": 64842,
"is_parallel": true,
"self": 34.25043011082721,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.773679333072096,
"count": 64842,
"is_parallel": true,
"self": 24.773679333072096
},
"communicator.exchange": {
"total": 1188.0692960910603,
"count": 64842,
"is_parallel": true,
"self": 1188.0692960910603
},
"steps_from_proto": {
"total": 100.13551377403314,
"count": 64842,
"is_parallel": true,
"self": 24.500973461013928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.63454031301922,
"count": 518736,
"is_parallel": true,
"self": 75.63454031301922
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 767.5026997400564,
"count": 64843,
"self": 2.981368128038639,
"children": {
"process_trajectory": {
"total": 168.95087316002173,
"count": 64843,
"self": 168.71271440602186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23815875399986908,
"count": 2,
"self": 0.23815875399986908
}
}
},
"_update_policy": {
"total": 595.570458451996,
"count": 444,
"self": 228.1122524139805,
"children": {
"TorchPPOOptimizer.update": {
"total": 367.45820603801553,
"count": 23322,
"self": 367.45820603801553
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5400000847876072e-06,
"count": 1,
"self": 1.5400000847876072e-06
},
"TrainerController._save_models": {
"total": 0.3144563470000321,
"count": 1,
"self": 0.0030772049999541196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.311379142000078,
"count": 1,
"self": 0.311379142000078
}
}
}
}
}
}
}