{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.26649004220962524,
"min": 0.26649004220962524,
"max": 1.5135557651519775,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7973.38232421875,
"min": 7973.38232421875,
"max": 45915.2265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5051019787788391,
"min": -0.17653048038482666,
"max": 0.6611797213554382,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 137.3877410888672,
"min": -41.83772277832031,
"max": 193.06448364257812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022577596828341484,
"min": -0.04366803169250488,
"max": 0.35309648513793945,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.141106128692627,
"min": -11.659364700317383,
"max": 83.68386840820312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06906367288902401,
"min": 0.06387797875749861,
"max": 0.07273810475612401,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0359550933353603,
"min": 0.5091667332928681,
"max": 1.0492193176126903,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015242325414955203,
"min": 0.00037106482315939275,
"max": 0.01708136777354715,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22863488122432804,
"min": 0.004452777877912713,
"max": 0.2391391488296601,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.620477459873331e-06,
"min": 7.620477459873331e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011430716189809998,
"min": 0.00011430716189809998,
"max": 0.0033311520896159995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254012666666666,
"min": 0.10254012666666666,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5381019,
"min": 1.3886848,
"max": 2.4843195,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026375865399999994,
"min": 0.00026375865399999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003956379809999999,
"min": 0.003956379809999999,
"max": 0.11104736160000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01127812173217535,
"min": 0.01127812173217535,
"max": 0.41321805119514465,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16917182505130768,
"min": 0.16466549038887024,
"max": 2.892526388168335,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 360.6904761904762,
"min": 279.0373831775701,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30298.0,
"min": 15984.0,
"max": 33084.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4709156351276191,
"min": -1.0000000521540642,
"max": 1.6835644667811482,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.08599771559238,
"min": -29.995601549744606,
"max": 180.14139794558287,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4709156351276191,
"min": -1.0000000521540642,
"max": 1.6835644667811482,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.08599771559238,
"min": -29.995601549744606,
"max": 180.14139794558287,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04244995389185588,
"min": 0.03658826292975749,
"max": 8.311285859905183,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.523346173024038,
"min": 3.523346173024038,
"max": 132.98057375848293,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682812876",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682815108"
},
"total": 2232.110722017,
"count": 1,
"self": 0.9289113670006373,
"children": {
"run_training.setup": {
"total": 0.03660877699985576,
"count": 1,
"self": 0.03660877699985576
},
"TrainerController.start_learning": {
"total": 2231.1452018729997,
"count": 1,
"self": 1.3155358589274329,
"children": {
"TrainerController._reset_env": {
"total": 3.8610737270000755,
"count": 1,
"self": 3.8610737270000755
},
"TrainerController.advance": {
"total": 2225.875774834073,
"count": 64005,
"self": 1.3523520021781223,
"children": {
"env_step": {
"total": 1591.9768877009728,
"count": 64005,
"self": 1483.3077816811065,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.89719774087553,
"count": 64005,
"self": 4.725693300847524,
"children": {
"TorchPolicy.evaluate": {
"total": 103.171504440028,
"count": 62567,
"self": 103.171504440028
}
}
},
"workers": {
"total": 0.7719082789908498,
"count": 64005,
"self": 0.0,
"children": {
"worker_root": {
"total": 2226.299219842935,
"count": 64005,
"is_parallel": true,
"self": 855.4931650939234,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0015549939998891205,
"count": 1,
"is_parallel": true,
"self": 0.00041579299977456685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011392010001145536,
"count": 8,
"is_parallel": true,
"self": 0.0011392010001145536
}
}
},
"UnityEnvironment.step": {
"total": 0.044578513000033126,
"count": 1,
"is_parallel": true,
"self": 0.0004971179996573483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004745430001094064,
"count": 1,
"is_parallel": true,
"self": 0.0004745430001094064
},
"communicator.exchange": {
"total": 0.041888282999934745,
"count": 1,
"is_parallel": true,
"self": 0.041888282999934745
},
"steps_from_proto": {
"total": 0.0017185690003316267,
"count": 1,
"is_parallel": true,
"self": 0.0003644059993348492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013541630009967776,
"count": 8,
"is_parallel": true,
"self": 0.0013541630009967776
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1370.8060547490118,
"count": 64004,
"is_parallel": true,
"self": 29.709298047051107,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.61486570998659,
"count": 64004,
"is_parallel": true,
"self": 21.61486570998659
},
"communicator.exchange": {
"total": 1225.0163940020193,
"count": 64004,
"is_parallel": true,
"self": 1225.0163940020193
},
"steps_from_proto": {
"total": 94.46549698995477,
"count": 64004,
"is_parallel": true,
"self": 19.383735204194636,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.08176178576014,
"count": 512032,
"is_parallel": true,
"self": 75.08176178576014
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 632.5465351309222,
"count": 64005,
"self": 2.5616451688188135,
"children": {
"process_trajectory": {
"total": 107.78708269911158,
"count": 64005,
"self": 107.569841469111,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2172412300005817,
"count": 2,
"self": 0.2172412300005817
}
}
},
"_update_policy": {
"total": 522.1978072629918,
"count": 449,
"self": 334.95471420098374,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.24309306200803,
"count": 22785,
"self": 187.24309306200803
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.469995347899385e-07,
"count": 1,
"self": 9.469995347899385e-07
},
"TrainerController._save_models": {
"total": 0.09281650599950808,
"count": 1,
"self": 0.0014039559991942951,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09141255000031379,
"count": 1,
"self": 0.09141255000031379
}
}
}
}
}
}
}