{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3727378249168396,
"min": 0.3500407934188843,
"max": 1.448949933052063,
"count": 40
},
"Pyramids.Policy.Entropy.sum": {
"value": 10991.29296875,
"min": 10568.431640625,
"max": 43955.34375,
"count": 40
},
"Pyramids.Step.mean": {
"value": 1199969.0,
"min": 29952.0,
"max": 1199969.0,
"count": 40
},
"Pyramids.Step.sum": {
"value": 1199969.0,
"min": 29952.0,
"max": 1199969.0,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6842027306556702,
"min": -0.10395296663045883,
"max": 0.7050032615661621,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 196.36618041992188,
"min": -24.948711395263672,
"max": 202.3359375,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.021814122796058655,
"min": 0.004625443369150162,
"max": 0.22501064836978912,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.260653495788574,
"min": 1.2673715353012085,
"max": 54.00255584716797,
"count": 40
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06820339822055152,
"min": 0.06575424749912206,
"max": 0.0728266060399446,
"count": 40
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9548475750877212,
"min": 0.4942122602719217,
"max": 1.0799974787137385,
"count": 40
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01770647350626643,
"min": 0.0001677883654474058,
"max": 0.01770647350626643,
"count": 40
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24789062908773,
"min": 0.0020134603853688695,
"max": 0.26198252593918503,
"count": 40
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00018156871090568096,
"min": 0.00018156871090568096,
"max": 0.00029838354339596195,
"count": 40
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0025419619526795335,
"min": 0.0020886848037717336,
"max": 0.003927308090897333,
"count": 40
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16052289047619048,
"min": 0.16052289047619048,
"max": 0.19946118095238097,
"count": 40
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.247320466666667,
"min": 1.3962282666666668,
"max": 2.7525654333333343,
"count": 40
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006056236758571429,
"min": 0.006056236758571429,
"max": 0.009946171977142856,
"count": 40
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08478731462000001,
"min": 0.06962320384,
"max": 0.13091935640000002,
"count": 40
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008340010419487953,
"min": 0.008340010419487953,
"max": 0.3860357403755188,
"count": 40
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11676014959812164,
"min": 0.11676014959812164,
"max": 2.7022502422332764,
"count": 40
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 258.87719298245617,
"min": 258.87719298245617,
"max": 999.0,
"count": 40
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29512.0,
"min": 15984.0,
"max": 32470.0,
"count": 40
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7420265284259762,
"min": -1.0000000521540642,
"max": 1.7420265284259762,
"count": 40
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 196.84899771213531,
"min": -31.998401671648026,
"max": 196.84899771213531,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7420265284259762,
"min": -1.0000000521540642,
"max": 1.7420265284259762,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 196.84899771213531,
"min": -31.998401671648026,
"max": 196.84899771213531,
"count": 40
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.022404033346081036,
"min": 0.022404033346081036,
"max": 7.82018069177866,
"count": 40
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.531655768107157,
"min": 2.531655768107157,
"max": 125.12289106845856,
"count": 40
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677140563",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677143258"
},
"total": 2694.732394388,
"count": 1,
"self": 0.3392702930004816,
"children": {
"run_training.setup": {
"total": 0.10602526399998169,
"count": 1,
"self": 0.10602526399998169
},
"TrainerController.start_learning": {
"total": 2694.2870988309996,
"count": 1,
"self": 1.6445071220905447,
"children": {
"TrainerController._reset_env": {
"total": 6.4523384210001495,
"count": 1,
"self": 6.4523384210001495
},
"TrainerController.advance": {
"total": 2686.048270196909,
"count": 77792,
"self": 1.7915910298170274,
"children": {
"env_step": {
"total": 1787.0869560109522,
"count": 77792,
"self": 1646.9183388157953,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.14214180304816,
"count": 77792,
"self": 5.741004645988596,
"children": {
"TorchPolicy.evaluate": {
"total": 133.40113715705957,
"count": 76061,
"self": 44.82925655898316,
"children": {
"TorchPolicy.sample_actions": {
"total": 88.5718805980764,
"count": 76061,
"self": 88.5718805980764
}
}
}
}
},
"workers": {
"total": 1.0264753921087504,
"count": 77791,
"self": 0.0,
"children": {
"worker_root": {
"total": 2688.4260324488782,
"count": 77791,
"is_parallel": true,
"self": 1184.705385187906,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019777120000981085,
"count": 1,
"is_parallel": true,
"self": 0.0007212129994513816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001256499000646727,
"count": 8,
"is_parallel": true,
"self": 0.001256499000646727
}
}
},
"UnityEnvironment.step": {
"total": 0.08285423999996055,
"count": 1,
"is_parallel": true,
"self": 0.0005538749996958359,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005083540004306997,
"count": 1,
"is_parallel": true,
"self": 0.0005083540004306997
},
"communicator.exchange": {
"total": 0.08002825999983543,
"count": 1,
"is_parallel": true,
"self": 0.08002825999983543
},
"steps_from_proto": {
"total": 0.0017637509999985923,
"count": 1,
"is_parallel": true,
"self": 0.0004401500000312808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013236009999673115,
"count": 8,
"is_parallel": true,
"self": 0.0013236009999673115
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1503.7206472609723,
"count": 77790,
"is_parallel": true,
"self": 37.4480420421437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.668153776905456,
"count": 77790,
"is_parallel": true,
"self": 28.668153776905456
},
"communicator.exchange": {
"total": 1322.148492987008,
"count": 77790,
"is_parallel": true,
"self": 1322.148492987008
},
"steps_from_proto": {
"total": 115.45595845491516,
"count": 77790,
"is_parallel": true,
"self": 27.878334041896778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.57762441301838,
"count": 622320,
"is_parallel": true,
"self": 87.57762441301838
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 897.1697231561398,
"count": 77791,
"self": 3.0829499581718665,
"children": {
"process_trajectory": {
"total": 199.4822866009572,
"count": 77791,
"self": 199.29614934395704,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18613725700015493,
"count": 2,
"self": 0.18613725700015493
}
}
},
"_update_policy": {
"total": 694.6044865970107,
"count": 549,
"self": 267.5196682960159,
"children": {
"TorchPPOOptimizer.update": {
"total": 427.0848183009948,
"count": 27726,
"self": 427.0848183009948
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3960006981506012e-06,
"count": 1,
"self": 1.3960006981506012e-06
},
"TrainerController._save_models": {
"total": 0.14198169499923097,
"count": 1,
"self": 0.0018931889999294071,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14008850599930156,
"count": 1,
"self": 0.14008850599930156
}
}
}
}
}
}
}