{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42411768436431885,
"min": 0.42411768436431885,
"max": 1.487685203552246,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12553.8837890625,
"min": 12553.8837890625,
"max": 45130.41796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29952.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29952.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5534465909004211,
"min": -0.10796164721250534,
"max": 0.6020606756210327,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 151.6443634033203,
"min": -26.018756866455078,
"max": 170.3831787109375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02039172686636448,
"min": -0.00856967642903328,
"max": 0.41094210743904114,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.5873332023620605,
"min": -2.3052430152893066,
"max": 97.39328002929688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07005225118094434,
"min": 0.06527260991528498,
"max": 0.07325448892039152,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9807315165332208,
"min": 0.5127814224427406,
"max": 1.0616215749178082,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01412556606824399,
"min": 0.00034655866457363,
"max": 0.01752259915041022,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19775792495541586,
"min": 0.004505262639457191,
"max": 0.24531638810574308,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.739711705842856e-06,
"min": 7.739711705842856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010835596388179998,
"min": 0.00010835596388179998,
"max": 0.0036087837970721,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257987142857143,
"min": 0.10257987142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361182000000001,
"min": 1.3886848,
"max": 2.5696646000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002677291557142858,
"min": 0.0002677291557142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037482081800000013,
"min": 0.0037482081800000013,
"max": 0.12030249721,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008206339552998543,
"min": 0.008206339552998543,
"max": 0.4939265847206116,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1148887574672699,
"min": 0.1148887574672699,
"max": 3.457486152648926,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.43373493975906,
"min": 316.36170212765956,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28671.0,
"min": 15984.0,
"max": 32850.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5827214067713136,
"min": -1.0000000521540642,
"max": 1.6836382776815841,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 132.94859816879034,
"min": -30.994001604616642,
"max": 158.2619981020689,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5827214067713136,
"min": -1.0000000521540642,
"max": 1.6836382776815841,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 132.94859816879034,
"min": -30.994001604616642,
"max": 158.2619981020689,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029758985963694397,
"min": 0.029758985963694397,
"max": 10.729716904461384,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4997548209503293,
"min": 2.4997548209503293,
"max": 171.67547047138214,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693716773",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693719027"
},
"total": 2253.858343441,
"count": 1,
"self": 0.8893848950006031,
"children": {
"run_training.setup": {
"total": 0.04274361699998508,
"count": 1,
"self": 0.04274361699998508
},
"TrainerController.start_learning": {
"total": 2252.926214929,
"count": 1,
"self": 1.4359639500012236,
"children": {
"TrainerController._reset_env": {
"total": 4.378243999000006,
"count": 1,
"self": 4.378243999000006
},
"TrainerController.advance": {
"total": 2246.9563217229984,
"count": 63925,
"self": 1.4033679249514535,
"children": {
"env_step": {
"total": 1582.6366164500002,
"count": 63925,
"self": 1470.9390388839538,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.82413800700482,
"count": 63925,
"self": 4.800968192968753,
"children": {
"TorchPolicy.evaluate": {
"total": 106.02316981403607,
"count": 62563,
"self": 106.02316981403607
}
}
},
"workers": {
"total": 0.8734395590415147,
"count": 63925,
"self": 0.0,
"children": {
"worker_root": {
"total": 2247.7991108599863,
"count": 63925,
"is_parallel": true,
"self": 893.0206526419863,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051559010000232774,
"count": 1,
"is_parallel": true,
"self": 0.0037342860001103872,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014216149999128902,
"count": 8,
"is_parallel": true,
"self": 0.0014216149999128902
}
}
},
"UnityEnvironment.step": {
"total": 0.05092274499997984,
"count": 1,
"is_parallel": true,
"self": 0.001850885999942875,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005013289999737935,
"count": 1,
"is_parallel": true,
"self": 0.0005013289999737935
},
"communicator.exchange": {
"total": 0.04663789300002463,
"count": 1,
"is_parallel": true,
"self": 0.04663789300002463
},
"steps_from_proto": {
"total": 0.0019326370000385396,
"count": 1,
"is_parallel": true,
"self": 0.00039419900002712893,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015384380000114106,
"count": 8,
"is_parallel": true,
"self": 0.0015384380000114106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1354.778458218,
"count": 63924,
"is_parallel": true,
"self": 35.83692390604233,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.48403979396852,
"count": 63924,
"is_parallel": true,
"self": 23.48403979396852
},
"communicator.exchange": {
"total": 1186.9518569100057,
"count": 63924,
"is_parallel": true,
"self": 1186.9518569100057
},
"steps_from_proto": {
"total": 108.50563760798343,
"count": 63924,
"is_parallel": true,
"self": 21.225185424160145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.28045218382329,
"count": 511392,
"is_parallel": true,
"self": 87.28045218382329
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 662.9163373480468,
"count": 63925,
"self": 2.6964684920220634,
"children": {
"process_trajectory": {
"total": 112.64964723902625,
"count": 63925,
"self": 112.27691896402615,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37272827500009953,
"count": 2,
"self": 0.37272827500009953
}
}
},
"_update_policy": {
"total": 547.5702216169984,
"count": 453,
"self": 358.54527271602103,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.0249489009774,
"count": 22776,
"self": 189.0249489009774
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.395000253978651e-06,
"count": 1,
"self": 1.395000253978651e-06
},
"TrainerController._save_models": {
"total": 0.15568386199993256,
"count": 1,
"self": 0.0018303540000488283,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15385350799988373,
"count": 1,
"self": 0.15385350799988373
}
}
}
}
}
}
}