{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4080785810947418,
"min": 0.4080785810947418,
"max": 1.2320975065231323,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12222.76953125,
"min": 12222.76953125,
"max": 37376.91015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7012405395507812,
"min": -0.19191142916679382,
"max": 0.7012405395507812,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 190.0361785888672,
"min": -46.25065612792969,
"max": 190.0361785888672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.201072558760643,
"min": -0.05749780312180519,
"max": 0.4325537383556366,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 54.49066162109375,
"min": -14.834433555603027,
"max": 102.5152359008789,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06627741552143736,
"min": 0.0650135630111377,
"max": 0.07875053462263103,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.927883817300123,
"min": 0.5512537423584172,
"max": 1.0543325000908226,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014260766986851756,
"min": 0.0006256178160617287,
"max": 0.020137641515161488,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19965073781592457,
"min": 0.008133031608802473,
"max": 0.28192698121226084,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.59948318115714e-06,
"min": 7.59948318115714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010639276453619997,
"min": 0.00010639276453619997,
"max": 0.0035081387306205004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10506625714285715,
"min": 0.10506625714285715,
"max": 0.2967670857142858,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4709276000000002,
"min": 1.4709276000000002,
"max": 3.7387589999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026305954428571427,
"min": 0.00026305954428571427,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036828336199999996,
"min": 0.0036828336199999996,
"max": 0.11696101205,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010993401519954205,
"min": 0.010993401519954205,
"max": 0.463376522064209,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1539076268672943,
"min": 0.1539076268672943,
"max": 3.243635654449463,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 387.8082191780822,
"min": 377.2345679012346,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28310.0,
"min": 15984.0,
"max": 34310.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.475175324878464,
"min": -1.0000000521540642,
"max": 1.4992715818462548,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.68779871612787,
"min": -29.934601679444313,
"max": 121.44099812954664,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.475175324878464,
"min": -1.0000000521540642,
"max": 1.4992715818462548,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.68779871612787,
"min": -29.934601679444313,
"max": 121.44099812954664,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044458862496555895,
"min": 0.044458862496555895,
"max": 9.41978757083416,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2454969622485805,
"min": 3.2454969622485805,
"max": 150.71660113334656,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700224280",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700226501"
},
"total": 2221.3547279859995,
"count": 1,
"self": 0.4754672979993302,
"children": {
"run_training.setup": {
"total": 0.04142081000009057,
"count": 1,
"self": 0.04142081000009057
},
"TrainerController.start_learning": {
"total": 2220.837839878,
"count": 1,
"self": 1.38578868796003,
"children": {
"TrainerController._reset_env": {
"total": 3.3689679430001434,
"count": 1,
"self": 3.3689679430001434
},
"TrainerController.advance": {
"total": 2216.000478661041,
"count": 63577,
"self": 1.4560039290408895,
"children": {
"env_step": {
"total": 1581.4895533920323,
"count": 63577,
"self": 1448.03114958308,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.60081954796715,
"count": 63577,
"self": 4.59733608792385,
"children": {
"TorchPolicy.evaluate": {
"total": 128.0034834600433,
"count": 62549,
"self": 128.0034834600433
}
}
},
"workers": {
"total": 0.8575842609852771,
"count": 63577,
"self": 0.0,
"children": {
"worker_root": {
"total": 2216.0080060800124,
"count": 63577,
"is_parallel": true,
"self": 891.4087300660201,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018634230000316165,
"count": 1,
"is_parallel": true,
"self": 0.000591125000255488,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012722979997761286,
"count": 8,
"is_parallel": true,
"self": 0.0012722979997761286
}
}
},
"UnityEnvironment.step": {
"total": 0.045073990999981106,
"count": 1,
"is_parallel": true,
"self": 0.0005590480000137177,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047381900003529154,
"count": 1,
"is_parallel": true,
"self": 0.00047381900003529154
},
"communicator.exchange": {
"total": 0.04230647099984708,
"count": 1,
"is_parallel": true,
"self": 0.04230647099984708
},
"steps_from_proto": {
"total": 0.001734653000085018,
"count": 1,
"is_parallel": true,
"self": 0.0003675709999697574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013670820001152606,
"count": 8,
"is_parallel": true,
"self": 0.0013670820001152606
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1324.5992760139923,
"count": 63576,
"is_parallel": true,
"self": 34.63257074698254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.324120327986975,
"count": 63576,
"is_parallel": true,
"self": 25.324120327986975
},
"communicator.exchange": {
"total": 1162.7083945480326,
"count": 63576,
"is_parallel": true,
"self": 1162.7083945480326
},
"steps_from_proto": {
"total": 101.93419039099012,
"count": 63576,
"is_parallel": true,
"self": 20.4955127049966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.43867768599353,
"count": 508608,
"is_parallel": true,
"self": 81.43867768599353
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 633.0549213399679,
"count": 63577,
"self": 2.5803982169070423,
"children": {
"process_trajectory": {
"total": 126.01360010105577,
"count": 63577,
"self": 125.84986732805487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16373277300090194,
"count": 2,
"self": 0.16373277300090194
}
}
},
"_update_policy": {
"total": 504.4609230220051,
"count": 448,
"self": 301.98452709802064,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.47639592398446,
"count": 22791,
"self": 202.47639592398446
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.929991963668726e-07,
"count": 1,
"self": 8.929991963668726e-07
},
"TrainerController._save_models": {
"total": 0.0826036929993279,
"count": 1,
"self": 0.001286749999053427,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08131694300027448,
"count": 1,
"self": 0.08131694300027448
}
}
}
}
}
}
}