{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4401095509529114,
"min": 0.42719903588294983,
"max": 1.384224772453308,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13175.119140625,
"min": 12638.255859375,
"max": 41991.84375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29959.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29959.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3571150004863739,
"min": -0.09607616066932678,
"max": 0.40010347962379456,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 95.70681762695312,
"min": -22.866127014160156,
"max": 106.82762908935547,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027814822271466255,
"min": -0.002628646558150649,
"max": 0.3108713924884796,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.454372406005859,
"min": -0.6939626932144165,
"max": 73.98738861083984,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06379121950955043,
"min": 0.06379121950955043,
"max": 0.07260322353880232,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8930770731337059,
"min": 0.5761177049408275,
"max": 1.0890483530820347,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017464480543692065,
"min": 0.0013920487489709995,
"max": 0.017464480543692065,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2445027276116889,
"min": 0.016704584987651993,
"max": 0.2445027276116889,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.436004664221429e-06,
"min": 7.436004664221429e-06,
"max": 0.00029501175166275,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010410406529910001,
"min": 0.00010410406529910001,
"max": 0.003492330135889999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024786357142857,
"min": 0.1024786357142857,
"max": 0.19833725,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347008999999997,
"min": 1.4347008999999997,
"max": 2.572383200000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002576157078571429,
"min": 0.0002576157078571429,
"max": 0.009833891275,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036066199100000004,
"min": 0.0036066199100000004,
"max": 0.11642458900000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011781989596784115,
"min": 0.011781989596784115,
"max": 0.4199594259262085,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16494785249233246,
"min": 0.16494785249233246,
"max": 3.359675407409668,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 434.71830985915494,
"min": 431.64179104477614,
"max": 980.1875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30865.0,
"min": 16118.0,
"max": 32598.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4306833122132554,
"min": -0.8568424754070513,
"max": 1.4306833122132554,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 103.00919847935438,
"min": -28.275801688432693,
"max": 103.00919847935438,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4306833122132554,
"min": -0.8568424754070513,
"max": 1.4306833122132554,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 103.00919847935438,
"min": -28.275801688432693,
"max": 103.00919847935438,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05294481567964087,
"min": 0.05294481567964087,
"max": 8.271439988823499,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8120267289341427,
"min": 3.611602952965768,
"max": 140.61447980999947,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691854164",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691856531"
},
"total": 2367.140538169,
"count": 1,
"self": 1.092234687000655,
"children": {
"run_training.setup": {
"total": 0.04754448099993169,
"count": 1,
"self": 0.04754448099993169
},
"TrainerController.start_learning": {
"total": 2366.0007590009996,
"count": 1,
"self": 1.4693136379933094,
"children": {
"TrainerController._reset_env": {
"total": 4.11726319599984,
"count": 1,
"self": 4.11726319599984
},
"TrainerController.advance": {
"total": 2360.2564457400067,
"count": 63620,
"self": 1.4253876988746015,
"children": {
"env_step": {
"total": 1663.3963650271248,
"count": 63620,
"self": 1546.4751360310606,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.02991351905393,
"count": 63620,
"self": 4.973095398084752,
"children": {
"TorchPolicy.evaluate": {
"total": 111.05681812096918,
"count": 62559,
"self": 111.05681812096918
}
}
},
"workers": {
"total": 0.8913154770102665,
"count": 63620,
"self": 0.0,
"children": {
"worker_root": {
"total": 2360.4790948329883,
"count": 63620,
"is_parallel": true,
"self": 934.3837678999507,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016927819997363258,
"count": 1,
"is_parallel": true,
"self": 0.0005164719996173517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011763100001189741,
"count": 8,
"is_parallel": true,
"self": 0.0011763100001189741
}
}
},
"UnityEnvironment.step": {
"total": 0.0460419420001017,
"count": 1,
"is_parallel": true,
"self": 0.0005636020005113096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047328299979199073,
"count": 1,
"is_parallel": true,
"self": 0.00047328299979199073
},
"communicator.exchange": {
"total": 0.04304329399974449,
"count": 1,
"is_parallel": true,
"self": 0.04304329399974449
},
"steps_from_proto": {
"total": 0.0019617630000539066,
"count": 1,
"is_parallel": true,
"self": 0.00042235900082232547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015394039992315811,
"count": 8,
"is_parallel": true,
"self": 0.0015394039992315811
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1426.0953269330375,
"count": 63619,
"is_parallel": true,
"self": 35.49173163098885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.405742976981855,
"count": 63619,
"is_parallel": true,
"self": 23.405742976981855
},
"communicator.exchange": {
"total": 1257.3534896480783,
"count": 63619,
"is_parallel": true,
"self": 1257.3534896480783
},
"steps_from_proto": {
"total": 109.84436267698857,
"count": 63619,
"is_parallel": true,
"self": 21.705387305205022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.13897537178354,
"count": 508952,
"is_parallel": true,
"self": 88.13897537178354
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 695.4346930140073,
"count": 63620,
"self": 2.904138178959329,
"children": {
"process_trajectory": {
"total": 116.04861135804458,
"count": 63620,
"self": 115.79287874304464,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2557326149999426,
"count": 2,
"self": 0.2557326149999426
}
}
},
"_update_policy": {
"total": 576.4819434770034,
"count": 457,
"self": 373.6968971189767,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.78504635802665,
"count": 22761,
"self": 202.78504635802665
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3489998309523799e-06,
"count": 1,
"self": 1.3489998309523799e-06
},
"TrainerController._save_models": {
"total": 0.15773507799985964,
"count": 1,
"self": 0.0018806959997164086,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15585438200014323,
"count": 1,
"self": 0.15585438200014323
}
}
}
}
}
}
}