{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5155686736106873,
"min": 0.5128698945045471,
"max": 1.480726718902588,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15343.32421875,
"min": 15343.32421875,
"max": 44919.32421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989928.0,
"min": 29940.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989928.0,
"min": 29940.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2655923068523407,
"min": -0.0993962213397026,
"max": 0.363382488489151,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 68.78840637207031,
"min": -23.556903839111328,
"max": 96.65974426269531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -1.3531920909881592,
"min": -1.3531920909881592,
"max": 0.4172818064689636,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -350.47674560546875,
"min": -350.47674560546875,
"max": 110.99696350097656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06800666713942995,
"min": 0.06348549001060226,
"max": 0.07338918411350322,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9520933399520193,
"min": 0.5047890518088798,
"max": 1.0706449622036112,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.3558263771812476,
"min": 0.00020599436135512432,
"max": 0.3558263771812476,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 4.9815692805374665,
"min": 0.002677926697616616,
"max": 4.9815692805374665,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.345040408828575e-06,
"min": 7.345040408828575e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010283056572360005,
"min": 0.00010283056572360005,
"max": 0.003508346030551399,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244831428571431,
"min": 0.10244831428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342764000000003,
"min": 1.3886848,
"max": 2.5694486000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002545865971428573,
"min": 0.0002545865971428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035642123600000022,
"min": 0.0035642123600000022,
"max": 0.11696791514,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010971345007419586,
"min": 0.01027912087738514,
"max": 0.4431288242340088,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1535988301038742,
"min": 0.1498798429965973,
"max": 3.1019017696380615,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 543.574074074074,
"min": 452.46153846153845,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29353.0,
"min": 16867.0,
"max": 32542.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0859444124831095,
"min": -0.9999936006722911,
"max": 1.3936461298511578,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 58.640998274087906,
"min": -31.998401656746864,
"max": 90.58699844032526,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0859444124831095,
"min": -0.9999936006722911,
"max": 1.3936461298511578,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 58.640998274087906,
"min": -31.998401656746864,
"max": 90.58699844032526,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06160674647091784,
"min": 0.05156582837651233,
"max": 9.068151594084853,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3267643094295636,
"min": 3.111286702682264,
"max": 154.15857709944248,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681004747",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681006824"
},
"total": 2077.360403674,
"count": 1,
"self": 0.483012795999457,
"children": {
"run_training.setup": {
"total": 0.1141264840000531,
"count": 1,
"self": 0.1141264840000531
},
"TrainerController.start_learning": {
"total": 2076.763264394,
"count": 1,
"self": 1.368847760889821,
"children": {
"TrainerController._reset_env": {
"total": 3.9053872270000056,
"count": 1,
"self": 3.9053872270000056
},
"TrainerController.advance": {
"total": 2071.39178355411,
"count": 63353,
"self": 1.4502579250661256,
"children": {
"env_step": {
"total": 1443.406999009006,
"count": 63353,
"self": 1337.2951103130113,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.31902369003024,
"count": 63353,
"self": 4.759278668032948,
"children": {
"TorchPolicy.evaluate": {
"total": 100.55974502199729,
"count": 62567,
"self": 100.55974502199729
}
}
},
"workers": {
"total": 0.7928650059646998,
"count": 63353,
"self": 0.0,
"children": {
"worker_root": {
"total": 2071.7996064910144,
"count": 63353,
"is_parallel": true,
"self": 844.2455680059734,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017289159999336334,
"count": 1,
"is_parallel": true,
"self": 0.0005661100000224906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011628059999111429,
"count": 8,
"is_parallel": true,
"self": 0.0011628059999111429
}
}
},
"UnityEnvironment.step": {
"total": 0.05158070300001327,
"count": 1,
"is_parallel": true,
"self": 0.0013342060000240963,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005451399999856221,
"count": 1,
"is_parallel": true,
"self": 0.0005451399999856221
},
"communicator.exchange": {
"total": 0.048098138999989715,
"count": 1,
"is_parallel": true,
"self": 0.048098138999989715
},
"steps_from_proto": {
"total": 0.0016032180000138396,
"count": 1,
"is_parallel": true,
"self": 0.00037989500003732246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012233229999765172,
"count": 8,
"is_parallel": true,
"self": 0.0012233229999765172
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1227.554038485041,
"count": 63352,
"is_parallel": true,
"self": 32.942668925014914,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.574204810056244,
"count": 63352,
"is_parallel": true,
"self": 23.574204810056244
},
"communicator.exchange": {
"total": 1075.9845709729962,
"count": 63352,
"is_parallel": true,
"self": 1075.9845709729962
},
"steps_from_proto": {
"total": 95.05259377697382,
"count": 63352,
"is_parallel": true,
"self": 19.70514538996747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.34744838700635,
"count": 506816,
"is_parallel": true,
"self": 75.34744838700635
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 626.5345266200377,
"count": 63353,
"self": 2.5457139629531866,
"children": {
"process_trajectory": {
"total": 104.43931524008565,
"count": 63353,
"self": 104.22606468308561,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2132505570000376,
"count": 2,
"self": 0.2132505570000376
}
}
},
"_update_policy": {
"total": 519.5494974169989,
"count": 449,
"self": 332.6558949279877,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.8936024890112,
"count": 22821,
"self": 186.8936024890112
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.830000635702163e-07,
"count": 1,
"self": 9.830000635702163e-07
},
"TrainerController._save_models": {
"total": 0.097244868999951,
"count": 1,
"self": 0.0014881339998282783,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09575673500012272,
"count": 1,
"self": 0.09575673500012272
}
}
}
}
}
}
}