{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.29347652196884155,
"min": 0.29347652196884155,
"max": 1.5022066831588745,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8743.2529296875,
"min": 8743.2529296875,
"max": 45570.94140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989912.0,
"min": 29931.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989912.0,
"min": 29931.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6002259254455566,
"min": -0.10934381932020187,
"max": 0.6595625281333923,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 169.2637176513672,
"min": -26.35186004638672,
"max": 189.95401000976562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0015624576481059194,
"min": -0.0015624576481059194,
"max": 0.24648569524288177,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.4406130611896515,
"min": -0.4406130611896515,
"max": 59.15656661987305,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06716271334379499,
"min": 0.06442514481438723,
"max": 0.07233393183109459,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0074407001569248,
"min": 0.49283350202664417,
"max": 1.0503861283570728,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0146781556140114,
"min": 0.0005518404154420483,
"max": 0.017098561761529362,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22017233421017102,
"min": 0.006016654179987774,
"max": 0.24150884354101287,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.539017487026663e-06,
"min": 7.539017487026663e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011308526230539994,
"min": 0.00011308526230539994,
"max": 0.0031359227546924997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251297333333334,
"min": 0.10251297333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376946,
"min": 1.3886848,
"max": 2.5273634,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002610460359999999,
"min": 0.0002610460359999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003915690539999998,
"min": 0.003915690539999998,
"max": 0.10455621925,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01058043073862791,
"min": 0.01049977820366621,
"max": 0.3788689076900482,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1587064564228058,
"min": 0.1469969004392624,
"max": 2.6520824432373047,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 316.26,
"min": 275.688679245283,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31626.0,
"min": 16858.0,
"max": 32762.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6637299814075233,
"min": -0.9999806972280625,
"max": 1.686569796980552,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 166.37299814075232,
"min": -30.99940161406994,
"max": 178.7763984799385,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6637299814075233,
"min": -0.9999806972280625,
"max": 1.686569796980552,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 166.37299814075232,
"min": -30.99940161406994,
"max": 178.7763984799385,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0348160317534348,
"min": 0.03058682974561926,
"max": 6.983203866025981,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.48160317534348,
"min": 3.1855211621295894,
"max": 118.71446572244167,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764166903",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764169061"
},
"total": 2158.775338717,
"count": 1,
"self": 0.9097885549999774,
"children": {
"run_training.setup": {
"total": 0.02703091700004734,
"count": 1,
"self": 0.02703091700004734
},
"TrainerController.start_learning": {
"total": 2157.838519245,
"count": 1,
"self": 1.3006092700229601,
"children": {
"TrainerController._reset_env": {
"total": 3.108287515999905,
"count": 1,
"self": 3.108287515999905
},
"TrainerController.advance": {
"total": 2153.3200228879773,
"count": 63890,
"self": 1.3428594089523358,
"children": {
"env_step": {
"total": 1501.502342313047,
"count": 63890,
"self": 1357.0570100970258,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.66729275300713,
"count": 63890,
"self": 4.343767452082602,
"children": {
"TorchPolicy.evaluate": {
"total": 139.32352530092453,
"count": 62557,
"self": 139.32352530092453
}
}
},
"workers": {
"total": 0.7780394630141245,
"count": 63890,
"self": 0.0,
"children": {
"worker_root": {
"total": 2151.479580719987,
"count": 63890,
"is_parallel": true,
"self": 905.9421898040187,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005402501000048687,
"count": 1,
"is_parallel": true,
"self": 0.004058358000065709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001344142999982978,
"count": 8,
"is_parallel": true,
"self": 0.001344142999982978
}
}
},
"UnityEnvironment.step": {
"total": 0.0672274330000846,
"count": 1,
"is_parallel": true,
"self": 0.0005725920002532803,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004550379999272991,
"count": 1,
"is_parallel": true,
"self": 0.0004550379999272991
},
"communicator.exchange": {
"total": 0.062473771999975725,
"count": 1,
"is_parallel": true,
"self": 0.062473771999975725
},
"steps_from_proto": {
"total": 0.003726030999928298,
"count": 1,
"is_parallel": true,
"self": 0.0003481990002001112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003377831999728187,
"count": 8,
"is_parallel": true,
"self": 0.003377831999728187
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1245.5373909159684,
"count": 63889,
"is_parallel": true,
"self": 33.44459041577329,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.3293146840723,
"count": 63889,
"is_parallel": true,
"self": 22.3293146840723
},
"communicator.exchange": {
"total": 1088.7754206010482,
"count": 63889,
"is_parallel": true,
"self": 1088.7754206010482
},
"steps_from_proto": {
"total": 100.98806521507458,
"count": 63889,
"is_parallel": true,
"self": 20.853978268138462,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.13408694693612,
"count": 511112,
"is_parallel": true,
"self": 80.13408694693612
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.4748211659781,
"count": 63890,
"self": 2.4683614989886564,
"children": {
"process_trajectory": {
"total": 120.06988337598409,
"count": 63890,
"self": 119.81654965898406,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25333371700003227,
"count": 2,
"self": 0.25333371700003227
}
}
},
"_update_policy": {
"total": 527.9365762910054,
"count": 445,
"self": 293.42228434597985,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.51429194502555,
"count": 22839,
"self": 234.51429194502555
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1560000530153047e-06,
"count": 1,
"self": 1.1560000530153047e-06
},
"TrainerController._save_models": {
"total": 0.10959841499970935,
"count": 1,
"self": 0.001973135999378428,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10762527900033092,
"count": 1,
"self": 0.10762527900033092
}
}
}
}
}
}
}