{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34700387716293335,
"min": 0.34180089831352234,
"max": 1.3792285919189453,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10299.0751953125,
"min": 10299.0751953125,
"max": 41840.27734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43152767419815063,
"min": -0.16772426664829254,
"max": 0.43152767419815063,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 116.08094787597656,
"min": -39.75065231323242,
"max": 116.08094787597656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019006283953785896,
"min": -0.010160038247704506,
"max": 0.5549631714820862,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.112690448760986,
"min": -2.6416099071502686,
"max": 131.52627563476562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06547052290292062,
"min": 0.06547052290292062,
"max": 0.07466839108937641,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9165873206408887,
"min": 0.5103641323465586,
"max": 1.063162622843782,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013479220921202878,
"min": 0.0004773754546382908,
"max": 0.01592075733567052,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1887090928968403,
"min": 0.0038190036371063265,
"max": 0.21927461551711214,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.431311808642856e-06,
"min": 7.431311808642856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010403836532099998,
"min": 0.00010403836532099998,
"max": 0.0031434995521669,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247707142857145,
"min": 0.10247707142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346790000000003,
"min": 1.3691136000000002,
"max": 2.3478331,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025745943571428575,
"min": 0.00025745943571428575,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036044321000000003,
"min": 0.0036044321000000003,
"max": 0.10480852669,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016726713627576828,
"min": 0.016726713627576828,
"max": 0.5968450903892517,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.234173983335495,
"min": 0.234173983335495,
"max": 4.177915573120117,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 444.8970588235294,
"min": 376.2564102564103,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30253.0,
"min": 15984.0,
"max": 33555.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4080058558679678,
"min": -1.0000000521540642,
"max": 1.5215569390337678,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 95.74439819902182,
"min": -32.000001668930054,
"max": 120.20299818366766,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4080058558679678,
"min": -1.0000000521540642,
"max": 1.5215569390337678,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 95.74439819902182,
"min": -32.000001668930054,
"max": 120.20299818366766,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07700357998563799,
"min": 0.06590843772665964,
"max": 12.842302318662405,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.236243439023383,
"min": 5.023702655977104,
"max": 205.47683709859848,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679890189",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Traninig --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679892265"
},
"total": 2076.139238699,
"count": 1,
"self": 0.4438965399999688,
"children": {
"run_training.setup": {
"total": 0.10433556800001043,
"count": 1,
"self": 0.10433556800001043
},
"TrainerController.start_learning": {
"total": 2075.591006591,
"count": 1,
"self": 1.4530121850339128,
"children": {
"TrainerController._reset_env": {
"total": 9.593828909999957,
"count": 1,
"self": 9.593828909999957
},
"TrainerController.advance": {
"total": 2064.4514415179665,
"count": 63442,
"self": 1.3956657740177434,
"children": {
"env_step": {
"total": 1445.1524357679587,
"count": 63442,
"self": 1336.9493644039003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.35611200102517,
"count": 63442,
"self": 4.640017256032252,
"children": {
"TorchPolicy.evaluate": {
"total": 102.71609474499292,
"count": 62565,
"self": 102.71609474499292
}
}
},
"workers": {
"total": 0.8469593630331929,
"count": 63442,
"self": 0.0,
"children": {
"worker_root": {
"total": 2070.9416741259906,
"count": 63442,
"is_parallel": true,
"self": 848.0677640339502,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006841789999953107,
"count": 1,
"is_parallel": true,
"self": 0.00465866699983053,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021831230001225777,
"count": 8,
"is_parallel": true,
"self": 0.0021831230001225777
}
}
},
"UnityEnvironment.step": {
"total": 0.0442490310000494,
"count": 1,
"is_parallel": true,
"self": 0.0005436950000330398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004623519999995551,
"count": 1,
"is_parallel": true,
"self": 0.0004623519999995551
},
"communicator.exchange": {
"total": 0.041514281999980085,
"count": 1,
"is_parallel": true,
"self": 0.041514281999980085
},
"steps_from_proto": {
"total": 0.0017287020000367193,
"count": 1,
"is_parallel": true,
"self": 0.00036849500008884206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013602069999478772,
"count": 8,
"is_parallel": true,
"self": 0.0013602069999478772
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1222.8739100920404,
"count": 63441,
"is_parallel": true,
"self": 30.894538918031458,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.219181797991723,
"count": 63441,
"is_parallel": true,
"self": 22.219181797991723
},
"communicator.exchange": {
"total": 1078.4065936439856,
"count": 63441,
"is_parallel": true,
"self": 1078.4065936439856
},
"steps_from_proto": {
"total": 91.35359573203164,
"count": 63441,
"is_parallel": true,
"self": 19.431429184918443,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.92216654711319,
"count": 507528,
"is_parallel": true,
"self": 71.92216654711319
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 617.90333997599,
"count": 63442,
"self": 2.4936698299552518,
"children": {
"process_trajectory": {
"total": 115.66890117003868,
"count": 63442,
"self": 115.46750665003879,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20139451999989433,
"count": 2,
"self": 0.20139451999989433
}
}
},
"_update_policy": {
"total": 499.74076897599605,
"count": 432,
"self": 320.32691189401714,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.4138570819789,
"count": 22899,
"self": 179.4138570819789
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.960000741353724e-07,
"count": 1,
"self": 8.960000741353724e-07
},
"TrainerController._save_models": {
"total": 0.09272308199979307,
"count": 1,
"self": 0.001782173999799852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09094090799999321,
"count": 1,
"self": 0.09094090799999321
}
}
}
}
}
}
}