{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4913412928581238,
"min": 0.4913412928581238,
"max": 1.439706563949585,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14810.9921875,
"min": 14810.9921875,
"max": 43674.9375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 29982.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 29982.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.37251564860343933,
"min": -0.1004050001502037,
"max": 0.4193427562713623,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 98.71664428710938,
"min": -24.19760513305664,
"max": 112.14228820800781,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014797584153711796,
"min": -0.038920387625694275,
"max": 0.45125487446784973,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.9213597774505615,
"min": -9.885778427124023,
"max": 107.3986587524414,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07138498777637606,
"min": 0.06538585452937071,
"max": 0.07294094735663662,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9993898288692649,
"min": 0.5056615078995063,
"max": 1.036158256601387,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013427846303318251,
"min": 0.00013116746736695312,
"max": 0.016707347589015053,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1879898482464555,
"min": 0.0018363445431373439,
"max": 0.23390286624621073,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.621404602421429e-06,
"min": 7.621404602421429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001066996644339,
"min": 0.0001066996644339,
"max": 0.0033820706726432,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254043571428571,
"min": 0.10254043571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355661,
"min": 1.3886848,
"max": 2.5273568000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002637895278571429,
"min": 0.0002637895278571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00369305339,
"min": 0.00369305339,
"max": 0.11276294432,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012463592924177647,
"min": 0.01187005452811718,
"max": 0.39912012219429016,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1744903028011322,
"min": 0.16618075966835022,
"max": 2.7938408851623535,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 440.8888888888889,
"min": 403.7837837837838,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27776.0,
"min": 16781.0,
"max": 33476.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.336844422159687,
"min": -0.9999742455059483,
"max": 1.522850676531523,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 84.22119859606028,
"min": -30.999201610684395,
"max": 112.118398450315,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.336844422159687,
"min": -0.9999742455059483,
"max": 1.522850676531523,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 84.22119859606028,
"min": -30.999201610684395,
"max": 112.118398450315,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05540553152206398,
"min": 0.054392140985598685,
"max": 7.816912961795049,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.490548485890031,
"min": 3.490548485890031,
"max": 132.88752035051584,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743352000",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743354093"
},
"total": 2093.824530574,
"count": 1,
"self": 0.4790714040000239,
"children": {
"run_training.setup": {
"total": 0.01985053900011735,
"count": 1,
"self": 0.01985053900011735
},
"TrainerController.start_learning": {
"total": 2093.325608631,
"count": 1,
"self": 1.2803749259469441,
"children": {
"TrainerController._reset_env": {
"total": 2.8496536909999577,
"count": 1,
"self": 2.8496536909999577
},
"TrainerController.advance": {
"total": 2089.113223817053,
"count": 63463,
"self": 1.3799092570925495,
"children": {
"env_step": {
"total": 1422.3375799709568,
"count": 63463,
"self": 1275.0725231447923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.51980230110598,
"count": 63463,
"self": 4.503664570074761,
"children": {
"TorchPolicy.evaluate": {
"total": 142.01613773103122,
"count": 62552,
"self": 142.01613773103122
}
}
},
"workers": {
"total": 0.745254525058499,
"count": 63463,
"self": 0.0,
"children": {
"worker_root": {
"total": 2088.5943062750334,
"count": 63463,
"is_parallel": true,
"self": 920.9232588840498,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002515399999992951,
"count": 1,
"is_parallel": true,
"self": 0.0007670340003187448,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017483659996742063,
"count": 8,
"is_parallel": true,
"self": 0.0017483659996742063
}
}
},
"UnityEnvironment.step": {
"total": 0.046445869999843126,
"count": 1,
"is_parallel": true,
"self": 0.0005415250000169181,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004546879999907105,
"count": 1,
"is_parallel": true,
"self": 0.0004546879999907105
},
"communicator.exchange": {
"total": 0.043894980999994004,
"count": 1,
"is_parallel": true,
"self": 0.043894980999994004
},
"steps_from_proto": {
"total": 0.0015546759998414927,
"count": 1,
"is_parallel": true,
"self": 0.0003299809993677627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00122469500047373,
"count": 8,
"is_parallel": true,
"self": 0.00122469500047373
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1167.6710473909836,
"count": 63462,
"is_parallel": true,
"self": 30.810180833009554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.346455694029373,
"count": 63462,
"is_parallel": true,
"self": 22.346455694029373
},
"communicator.exchange": {
"total": 1022.9814092159768,
"count": 63462,
"is_parallel": true,
"self": 1022.9814092159768
},
"steps_from_proto": {
"total": 91.53300164796792,
"count": 63462,
"is_parallel": true,
"self": 18.05196841886027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.48103322910765,
"count": 507696,
"is_parallel": true,
"self": 73.48103322910765
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 665.3957345890037,
"count": 63463,
"self": 2.3748772450144315,
"children": {
"process_trajectory": {
"total": 124.00513473099295,
"count": 63463,
"self": 123.80023607199269,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20489865900026416,
"count": 2,
"self": 0.20489865900026416
}
}
},
"_update_policy": {
"total": 539.0157226129963,
"count": 446,
"self": 294.37971499597984,
"children": {
"TorchPPOOptimizer.update": {
"total": 244.6360076170165,
"count": 22869,
"self": 244.6360076170165
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.750001481734216e-07,
"count": 1,
"self": 9.750001481734216e-07
},
"TrainerController._save_models": {
"total": 0.08235522199993284,
"count": 1,
"self": 0.0014555510001628136,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08089967099977002,
"count": 1,
"self": 0.08089967099977002
}
}
}
}
}
}
}