{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2097523957490921,
"min": 0.2097523957490921,
"max": 1.493598222732544,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 6265.7236328125,
"min": 6265.7236328125,
"max": 45309.796875,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499872.0,
"min": 29952.0,
"max": 1499872.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499872.0,
"min": 29952.0,
"max": 1499872.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7323671579360962,
"min": -0.1035352349281311,
"max": 0.7629164457321167,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 216.04830932617188,
"min": -25.055526733398438,
"max": 228.1120147705078,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.024602683261036873,
"min": -0.017261844128370285,
"max": 0.22436076402664185,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.257791519165039,
"min": -4.988673210144043,
"max": 53.84658432006836,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06738698095848251,
"min": 0.06407559036786918,
"max": 0.07424134721520527,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9434177334187551,
"min": 0.5104810356422751,
"max": 1.058787802117877,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017023766551513084,
"min": 0.0009166898601802243,
"max": 0.017023766551513084,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2383327317211832,
"min": 0.00641682902126157,
"max": 0.24216461378576543,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.160256089471428e-06,
"min": 3.160256089471428e-06,
"max": 0.00029676708679192377,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.4243585252599994e-05,
"min": 4.4243585252599994e-05,
"max": 0.0034729651423449997,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10105338571428572,
"min": 0.10105338571428572,
"max": 0.19892236190476195,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4147474000000002,
"min": 1.3794090666666667,
"max": 2.5343560666666667,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011523323285714286,
"min": 0.00011523323285714286,
"max": 0.009892343954285714,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00161326526,
"min": 0.00161326526,
"max": 0.1157797345,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009697283618152142,
"min": 0.009697283618152142,
"max": 0.36767488718032837,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13576197624206543,
"min": 0.13576197624206543,
"max": 2.5737242698669434,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 252.35593220338984,
"min": 236.8796992481203,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29778.0,
"min": 15984.0,
"max": 33111.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7476440568344067,
"min": -1.0000000521540642,
"max": 1.7476440568344067,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 206.22199870646,
"min": -32.000001668930054,
"max": 228.83399833738804,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7476440568344067,
"min": -1.0000000521540642,
"max": 1.7476440568344067,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 206.22199870646,
"min": -32.000001668930054,
"max": 228.83399833738804,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.025343465129450217,
"min": 0.024217579453485967,
"max": 6.98977246042341,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9905288852751255,
"min": 2.9114665780653013,
"max": 111.83635936677456,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697462316",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697464661"
},
"total": 2344.444770566,
"count": 1,
"self": 0.3218462660001933,
"children": {
"run_training.setup": {
"total": 0.04271605299993553,
"count": 1,
"self": 0.04271605299993553
},
"TrainerController.start_learning": {
"total": 2344.080208247,
"count": 1,
"self": 1.8659408659927976,
"children": {
"TrainerController._reset_env": {
"total": 8.417073597000012,
"count": 1,
"self": 8.417073597000012
},
"TrainerController.advance": {
"total": 2333.728816466007,
"count": 96582,
"self": 1.8694936790971042,
"children": {
"env_step": {
"total": 1533.4879481329328,
"count": 96582,
"self": 1359.780631651915,
"children": {
"SubprocessEnvManager._take_step": {
"total": 172.53304720501353,
"count": 96582,
"self": 6.34598913101695,
"children": {
"TorchPolicy.evaluate": {
"total": 166.18705807399658,
"count": 93806,
"self": 166.18705807399658
}
}
},
"workers": {
"total": 1.174269276004452,
"count": 96582,
"self": 0.0,
"children": {
"worker_root": {
"total": 2341.102420435947,
"count": 96582,
"is_parallel": true,
"self": 1118.400173227919,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051776069999505125,
"count": 1,
"is_parallel": true,
"self": 0.0038569949998645825,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00132061200008593,
"count": 8,
"is_parallel": true,
"self": 0.00132061200008593
}
}
},
"UnityEnvironment.step": {
"total": 0.035204239999984566,
"count": 1,
"is_parallel": true,
"self": 0.000330158999872765,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030958700006067374,
"count": 1,
"is_parallel": true,
"self": 0.00030958700006067374
},
"communicator.exchange": {
"total": 0.033601556000007804,
"count": 1,
"is_parallel": true,
"self": 0.033601556000007804
},
"steps_from_proto": {
"total": 0.000962938000043323,
"count": 1,
"is_parallel": true,
"self": 0.0002212020000342818,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007417360000090412,
"count": 8,
"is_parallel": true,
"self": 0.0007417360000090412
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1222.702247208028,
"count": 96581,
"is_parallel": true,
"self": 30.89930734996142,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.555562532013482,
"count": 96581,
"is_parallel": true,
"self": 21.555562532013482
},
"communicator.exchange": {
"total": 1082.0822710419789,
"count": 96581,
"is_parallel": true,
"self": 1082.0822710419789
},
"steps_from_proto": {
"total": 88.16510628407411,
"count": 96581,
"is_parallel": true,
"self": 19.367069528034335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.79803675603978,
"count": 772648,
"is_parallel": true,
"self": 68.79803675603978
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 798.3713746539771,
"count": 96582,
"self": 3.7253303899469756,
"children": {
"process_trajectory": {
"total": 153.23227832303076,
"count": 96582,
"self": 152.92836154303075,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30391678000000866,
"count": 3,
"self": 0.30391678000000866
}
}
},
"_update_policy": {
"total": 641.4137659409994,
"count": 679,
"self": 373.8677256929635,
"children": {
"TorchPPOOptimizer.update": {
"total": 267.54604024803587,
"count": 34176,
"self": 267.54604024803587
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.160000010713702e-06,
"count": 1,
"self": 1.160000010713702e-06
},
"TrainerController._save_models": {
"total": 0.06837615799986452,
"count": 1,
"self": 0.001238759999978356,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06713739799988616,
"count": 1,
"self": 0.06713739799988616
}
}
}
}
}
}
}