{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5242840647697449,
"min": 0.5080607533454895,
"max": 1.4981200695037842,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15611.08203125,
"min": 15217.435546875,
"max": 45446.96875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.34749481081962585,
"min": -0.20754402875900269,
"max": 0.3903818428516388,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 90.00115966796875,
"min": -49.18793487548828,
"max": 102.28004455566406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007121610920876265,
"min": -0.012895774096250534,
"max": 0.21408753097057343,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8444972038269043,
"min": -3.378692865371704,
"max": 51.595096588134766,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06442877443831614,
"min": 0.06442877443831614,
"max": 0.07293453921758927,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.902002842136426,
"min": 0.5068771559607081,
"max": 1.0503293978433836,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013646016535252175,
"min": 9.778299810958812e-05,
"max": 0.015344665735504238,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19104423149353045,
"min": 0.0011733959773150575,
"max": 0.23016998603256356,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.301326137685717e-06,
"min": 7.301326137685717e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010221856592760005,
"min": 0.00010221856592760005,
"max": 0.0032612438129187996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243374285714286,
"min": 0.10243374285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340724,
"min": 1.3886848,
"max": 2.3870812,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002531309114285715,
"min": 0.0002531309114285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035438327600000015,
"min": 0.0035438327600000015,
"max": 0.10872941187999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007883915677666664,
"min": 0.007883915677666664,
"max": 0.30272242426872253,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.110374815762043,
"min": 0.110374815762043,
"max": 2.1190569400787354,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 475.95,
"min": 423.2068965517241,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28557.0,
"min": 15984.0,
"max": 33144.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3906599731494984,
"min": -1.0000000521540642,
"max": 1.3906599731494984,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 83.4395983889699,
"min": -31.995601668953896,
"max": 94.42539878934622,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3906599731494984,
"min": -1.0000000521540642,
"max": 1.3906599731494984,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 83.4395983889699,
"min": -31.995601668953896,
"max": 94.42539878934622,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03897851447133386,
"min": 0.03719240275551846,
"max": 6.390565377660096,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3387108682800317,
"min": 2.1571593598200707,
"max": 102.24904604256153,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741746654",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741748903"
},
"total": 2249.181806783,
"count": 1,
"self": 0.5890523189996202,
"children": {
"run_training.setup": {
"total": 0.021470989000135887,
"count": 1,
"self": 0.021470989000135887
},
"TrainerController.start_learning": {
"total": 2248.571283475,
"count": 1,
"self": 1.8407774730294477,
"children": {
"TrainerController._reset_env": {
"total": 2.291252269000097,
"count": 1,
"self": 2.291252269000097
},
"TrainerController.advance": {
"total": 2244.346701587971,
"count": 63482,
"self": 1.9068028878787118,
"children": {
"env_step": {
"total": 1532.601012258075,
"count": 63482,
"self": 1350.0843178650393,
"children": {
"SubprocessEnvManager._take_step": {
"total": 181.51924469305277,
"count": 63482,
"self": 5.422844384035443,
"children": {
"TorchPolicy.evaluate": {
"total": 176.09640030901733,
"count": 62563,
"self": 176.09640030901733
}
}
},
"workers": {
"total": 0.9974496999827807,
"count": 63482,
"self": 0.0,
"children": {
"worker_root": {
"total": 2242.7835007169447,
"count": 63482,
"is_parallel": true,
"self": 1022.3409031669544,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021627000000989938,
"count": 1,
"is_parallel": true,
"self": 0.0007396299999982148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001423070000100779,
"count": 8,
"is_parallel": true,
"self": 0.001423070000100779
}
}
},
"UnityEnvironment.step": {
"total": 0.053704851000020426,
"count": 1,
"is_parallel": true,
"self": 0.000559641000108968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005180489999929705,
"count": 1,
"is_parallel": true,
"self": 0.0005180489999929705
},
"communicator.exchange": {
"total": 0.05088570799989611,
"count": 1,
"is_parallel": true,
"self": 0.05088570799989611
},
"steps_from_proto": {
"total": 0.001741453000022375,
"count": 1,
"is_parallel": true,
"self": 0.0003899230000570242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013515299999653507,
"count": 8,
"is_parallel": true,
"self": 0.0013515299999653507
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.4425975499903,
"count": 63481,
"is_parallel": true,
"self": 34.15560580707847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.380075068978385,
"count": 63481,
"is_parallel": true,
"self": 25.380075068978385
},
"communicator.exchange": {
"total": 1055.8688392989914,
"count": 63481,
"is_parallel": true,
"self": 1055.8688392989914
},
"steps_from_proto": {
"total": 105.03807737494208,
"count": 63481,
"is_parallel": true,
"self": 22.446552717793793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.59152465714828,
"count": 507848,
"is_parallel": true,
"self": 82.59152465714828
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 709.8388864420172,
"count": 63482,
"self": 3.3183808340145333,
"children": {
"process_trajectory": {
"total": 136.72170242000448,
"count": 63482,
"self": 136.5042076200043,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21749480000016774,
"count": 2,
"self": 0.21749480000016774
}
}
},
"_update_policy": {
"total": 569.7988031879981,
"count": 443,
"self": 311.1193476370122,
"children": {
"TorchPPOOptimizer.update": {
"total": 258.67945555098595,
"count": 22809,
"self": 258.67945555098595
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.600000001024455e-07,
"count": 1,
"self": 8.600000001024455e-07
},
"TrainerController._save_models": {
"total": 0.09255128499989951,
"count": 1,
"self": 0.001525285999832704,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0910259990000668,
"count": 1,
"self": 0.0910259990000668
}
}
}
}
}
}
}