{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35806092619895935,
"min": 0.339580774307251,
"max": 1.481310248374939,
"count": 37
},
"Pyramids.Policy.Entropy.sum": {
"value": 10701.724609375,
"min": 10226.279296875,
"max": 44937.02734375,
"count": 37
},
"Pyramids.Step.mean": {
"value": 1109999.0,
"min": 29952.0,
"max": 1109999.0,
"count": 37
},
"Pyramids.Step.sum": {
"value": 1109999.0,
"min": 29952.0,
"max": 1109999.0,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6619495153427124,
"min": -0.15645715594291687,
"max": 0.7064971327781677,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 182.6980743408203,
"min": -37.080345153808594,
"max": 203.47117614746094,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013017060235142708,
"min": -0.007959118112921715,
"max": 0.32368749380111694,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.5927085876464844,
"min": -2.1330437660217285,
"max": 76.71393585205078,
"count": 37
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06970367031670026,
"min": 0.06366284428772309,
"max": 0.07284661074633172,
"count": 37
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9758513844338036,
"min": 0.4823735696019407,
"max": 1.065766130041904,
"count": 37
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01336823227942588,
"min": 0.0002890888737953617,
"max": 0.016741622307953157,
"count": 37
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18715525191196233,
"min": 0.003758155359339702,
"max": 0.23438271231134422,
"count": 37
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00025892110744296876,
"min": 0.00025892110744296876,
"max": 0.0002993938287734857,
"count": 37
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0036248955042015624,
"min": 0.0020884176038608003,
"max": 0.004322798834067075,
"count": 37
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.18630703124999998,
"min": 0.18630703124999998,
"max": 0.19979794285714286,
"count": 37
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.6082984375,
"min": 1.3961392000000001,
"max": 2.940932925,
"count": 37
},
"Pyramids.Policy.Beta.mean": {
"value": 0.008632072421875001,
"min": 0.008632072421875001,
"max": 0.009979814491428571,
"count": 37
},
"Pyramids.Policy.Beta.sum": {
"value": 0.12084901390625001,
"min": 0.06961430608,
"max": 0.1440991992075,
"count": 37
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012595540843904018,
"min": 0.01165219396352768,
"max": 0.34735098481178284,
"count": 37
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1763375699520111,
"min": 0.1631307154893875,
"max": 2.4314568042755127,
"count": 37
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 312.44086021505376,
"min": 272.64,
"max": 999.0,
"count": 37
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29057.0,
"min": 15984.0,
"max": 34697.0,
"count": 37
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6445333195790168,
"min": -1.0000000521540642,
"max": 1.7273599864542484,
"count": 37
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 152.94159872084856,
"min": -32.000001668930054,
"max": 177.80679866671562,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6445333195790168,
"min": -1.0000000521540642,
"max": 1.7273599864542484,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 152.94159872084856,
"min": -32.000001668930054,
"max": 177.80679866671562,
"count": 37
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04068384490255016,
"min": 0.038076556946325585,
"max": 6.593175041489303,
"count": 37
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.783597575937165,
"min": 3.687539422826376,
"max": 105.49080066382885,
"count": 37
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746734179",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746738857"
},
"total": 4677.675218562999,
"count": 1,
"self": 1.3604283009999563,
"children": {
"run_training.setup": {
"total": 0.03367517900005623,
"count": 1,
"self": 0.03367517900005623
},
"TrainerController.start_learning": {
"total": 4676.281115082999,
"count": 1,
"self": 3.4943962300958447,
"children": {
"TrainerController._reset_env": {
"total": 4.64862111399998,
"count": 1,
"self": 4.64862111399998
},
"TrainerController.advance": {
"total": 4667.837350419904,
"count": 72049,
"self": 3.900093922744418,
"children": {
"env_step": {
"total": 3140.158064409073,
"count": 72049,
"self": 2895.056847966129,
"children": {
"SubprocessEnvManager._take_step": {
"total": 243.12717542901657,
"count": 72049,
"self": 11.204983215079778,
"children": {
"TorchPolicy.evaluate": {
"total": 231.9221922139368,
"count": 70215,
"self": 231.9221922139368
}
}
},
"workers": {
"total": 1.9740410139272626,
"count": 72048,
"self": 0.0,
"children": {
"worker_root": {
"total": 4664.358516483033,
"count": 72048,
"is_parallel": true,
"self": 2030.563331331055,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006048692000035771,
"count": 1,
"is_parallel": true,
"self": 0.004340985999988334,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017077060000474376,
"count": 8,
"is_parallel": true,
"self": 0.0017077060000474376
}
}
},
"UnityEnvironment.step": {
"total": 0.13653325500001756,
"count": 1,
"is_parallel": true,
"self": 0.0008534310001095946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005332879999286888,
"count": 1,
"is_parallel": true,
"self": 0.0005332879999286888
},
"communicator.exchange": {
"total": 0.12310162500000388,
"count": 1,
"is_parallel": true,
"self": 0.12310162500000388
},
"steps_from_proto": {
"total": 0.0120449109999754,
"count": 1,
"is_parallel": true,
"self": 0.004124887000102717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007920023999872683,
"count": 8,
"is_parallel": true,
"self": 0.007920023999872683
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2633.795185151978,
"count": 72047,
"is_parallel": true,
"self": 64.4391144639194,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 40.403225075992395,
"count": 72047,
"is_parallel": true,
"self": 40.403225075992395
},
"communicator.exchange": {
"total": 2363.6174830059613,
"count": 72047,
"is_parallel": true,
"self": 2363.6174830059613
},
"steps_from_proto": {
"total": 165.33536260610492,
"count": 72047,
"is_parallel": true,
"self": 37.80448978611764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 127.53087281998728,
"count": 576376,
"is_parallel": true,
"self": 127.53087281998728
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1523.7791920880863,
"count": 72048,
"self": 7.298866861093757,
"children": {
"process_trajectory": {
"total": 241.67032660299708,
"count": 72048,
"self": 241.09262229199692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5777043110001614,
"count": 2,
"self": 0.5777043110001614
}
}
},
"_update_policy": {
"total": 1274.8099986239956,
"count": 508,
"self": 489.98250759700113,
"children": {
"TorchPPOOptimizer.update": {
"total": 784.8274910269945,
"count": 25584,
"self": 784.8274910269945
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.101999598380644e-06,
"count": 1,
"self": 2.101999598380644e-06
},
"TrainerController._save_models": {
"total": 0.3007452169995304,
"count": 1,
"self": 0.0048413839995191665,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2959038330000112,
"count": 1,
"self": 0.2959038330000112
}
}
}
}
}
}
}