{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5706479549407959,
"min": 0.5569148063659668,
"max": 1.4732908010482788,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17064.65625,
"min": 16726.60546875,
"max": 44693.75,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41710150241851807,
"min": -0.1098051518201828,
"max": 0.49029165506362915,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 111.783203125,
"min": -26.35323715209961,
"max": 133.84962463378906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.010068372823297977,
"min": -0.032665807753801346,
"max": 0.3671211302280426,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.698323965072632,
"min": -8.950430870056152,
"max": 87.00770568847656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0660440611342589,
"min": 0.06425282879915864,
"max": 0.0722000196990263,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9246168558796246,
"min": 0.4945512807183034,
"max": 1.072795695828402,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01539184606302851,
"min": 0.00011464773926796228,
"max": 0.016791246440757263,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21548584488239914,
"min": 0.0014904206104835096,
"max": 0.2365370108697486,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.307947564049999e-06,
"min": 7.307947564049999e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010231126589669998,
"min": 0.00010231126589669998,
"max": 0.003508095230635,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243595000000003,
"min": 0.10243595000000003,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341033000000003,
"min": 1.3886848,
"max": 2.569365,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000253351405,
"min": 0.000253351405,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00354691967,
"min": 0.00354691967,
"max": 0.11695956349999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007159620523452759,
"min": 0.007159620523452759,
"max": 0.3462769091129303,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10023468732833862,
"min": 0.10023468732833862,
"max": 2.423938274383545,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 445.9130434782609,
"min": 385.41333333333336,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30768.0,
"min": 15984.0,
"max": 32678.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4050440954811432,
"min": -1.0000000521540642,
"max": 1.5612266399463017,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 95.54299849271774,
"min": -29.985001660883427,
"max": 117.09199799597263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4050440954811432,
"min": -1.0000000521540642,
"max": 1.5612266399463017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 95.54299849271774,
"min": -29.985001660883427,
"max": 117.09199799597263,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03332353120370445,
"min": 0.030121950620862967,
"max": 6.777860017493367,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.266000121851903,
"min": 2.2591462965647224,
"max": 108.44576027989388,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714328424",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714330793"
},
"total": 2368.708698303,
"count": 1,
"self": 0.8443999939995592,
"children": {
"run_training.setup": {
"total": 0.06650962399999116,
"count": 1,
"self": 0.06650962399999116
},
"TrainerController.start_learning": {
"total": 2367.7977886850003,
"count": 1,
"self": 1.8403310149064964,
"children": {
"TrainerController._reset_env": {
"total": 2.1499797780002154,
"count": 1,
"self": 2.1499797780002154
},
"TrainerController.advance": {
"total": 2363.675211750093,
"count": 63513,
"self": 1.8201161778847563,
"children": {
"env_step": {
"total": 1705.9606027451368,
"count": 63513,
"self": 1549.341743096163,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.51973191697152,
"count": 63513,
"self": 5.444127072974425,
"children": {
"TorchPolicy.evaluate": {
"total": 150.0756048439971,
"count": 62553,
"self": 150.0756048439971
}
}
},
"workers": {
"total": 1.0991277320022164,
"count": 63513,
"self": 0.0,
"children": {
"worker_root": {
"total": 2361.41158659907,
"count": 63513,
"is_parallel": true,
"self": 956.178787054097,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022164530000736704,
"count": 1,
"is_parallel": true,
"self": 0.0006586420004168758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015578109996567946,
"count": 8,
"is_parallel": true,
"self": 0.0015578109996567946
}
}
},
"UnityEnvironment.step": {
"total": 0.050535034999938944,
"count": 1,
"is_parallel": true,
"self": 0.0006712840004183818,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004539049996310496,
"count": 1,
"is_parallel": true,
"self": 0.0004539049996310496
},
"communicator.exchange": {
"total": 0.04752324699984456,
"count": 1,
"is_parallel": true,
"self": 0.04752324699984456
},
"steps_from_proto": {
"total": 0.0018865990000449528,
"count": 1,
"is_parallel": true,
"self": 0.0003843210001832631,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015022779998616898,
"count": 8,
"is_parallel": true,
"self": 0.0015022779998616898
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1405.2327995449727,
"count": 63512,
"is_parallel": true,
"self": 39.00432505395747,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.209978816948478,
"count": 63512,
"is_parallel": true,
"self": 27.209978816948478
},
"communicator.exchange": {
"total": 1224.8473647030373,
"count": 63512,
"is_parallel": true,
"self": 1224.8473647030373
},
"steps_from_proto": {
"total": 114.17113097102947,
"count": 63512,
"is_parallel": true,
"self": 24.22715978717497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.9439711838545,
"count": 508096,
"is_parallel": true,
"self": 89.9439711838545
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.8944928270712,
"count": 63513,
"self": 3.204140425114929,
"children": {
"process_trajectory": {
"total": 138.9253717389729,
"count": 63513,
"self": 138.68310654197285,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24226519700005156,
"count": 2,
"self": 0.24226519700005156
}
}
},
"_update_policy": {
"total": 513.7649806629834,
"count": 452,
"self": 300.719082884978,
"children": {
"TorchPPOOptimizer.update": {
"total": 213.04589777800538,
"count": 22782,
"self": 213.04589777800538
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4470006135525182e-06,
"count": 1,
"self": 1.4470006135525182e-06
},
"TrainerController._save_models": {
"total": 0.13226469500023086,
"count": 1,
"self": 0.0023537779998150654,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1299109170004158,
"count": 1,
"self": 0.1299109170004158
}
}
}
}
}
}
}