{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5402947068214417,
"min": 0.5402947068214417,
"max": 1.4567368030548096,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16096.4599609375,
"min": 16096.4599609375,
"max": 44191.56640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48381948471069336,
"min": -0.11222109943628311,
"max": 0.516444981098175,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 131.11508178710938,
"min": -26.933063507080078,
"max": 143.5717010498047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002095951233059168,
"min": -0.0030446392484009266,
"max": 0.41008633375167847,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5680027604103088,
"min": -0.8068293929100037,
"max": 98.83080291748047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0657983328469234,
"min": 0.06422752184850399,
"max": 0.07664175539790084,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9211766598569275,
"min": 0.5364922877853059,
"max": 1.0563879498307975,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01657337732037532,
"min": 0.00011156696450884036,
"max": 0.01657337732037532,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23202728248525448,
"min": 0.0014503705386149247,
"max": 0.23202728248525448,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.595111754042859e-06,
"min": 7.595111754042859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010633156455660002,
"min": 0.00010633156455660002,
"max": 0.0033762124745958994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253167142857142,
"min": 0.10253167142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354433999999998,
"min": 1.3886848,
"max": 2.4254041,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002629139757142857,
"min": 0.0002629139757142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036807956599999997,
"min": 0.0036807956599999997,
"max": 0.11255786959000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009117186069488525,
"min": 0.009117186069488525,
"max": 0.6731635928153992,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12764060497283936,
"min": 0.12764060497283936,
"max": 4.7121453285217285,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 349.9259259259259,
"min": 349.9259259259259,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28344.0,
"min": 15984.0,
"max": 33054.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5317949797026813,
"min": -1.0000000521540642,
"max": 1.5856328897083862,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.5435983762145,
"min": -29.892801702022552,
"max": 125.26499828696251,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5317949797026813,
"min": -1.0000000521540642,
"max": 1.5856328897083862,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.5435983762145,
"min": -29.892801702022552,
"max": 125.26499828696251,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.032540400068683084,
"min": 0.032540400068683084,
"max": 13.512209890410304,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6032320054946467,
"min": 2.6032320054946467,
"max": 216.19535824656487,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685503595",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685507049"
},
"total": 3454.329192923,
"count": 1,
"self": 0.6271926839999651,
"children": {
"run_training.setup": {
"total": 0.08290582000017821,
"count": 1,
"self": 0.08290582000017821
},
"TrainerController.start_learning": {
"total": 3453.619094419,
"count": 1,
"self": 2.379310211013035,
"children": {
"TrainerController._reset_env": {
"total": 1.4022995929999524,
"count": 1,
"self": 1.4022995929999524
},
"TrainerController.advance": {
"total": 3449.727839599987,
"count": 63629,
"self": 2.311375003944704,
"children": {
"env_step": {
"total": 2259.611601991033,
"count": 63629,
"self": 2116.738596180985,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.47448493409638,
"count": 63629,
"self": 7.261908253080037,
"children": {
"TorchPolicy.evaluate": {
"total": 134.21257668101634,
"count": 62561,
"self": 134.21257668101634
}
}
},
"workers": {
"total": 1.3985208759513625,
"count": 63629,
"self": 0.0,
"children": {
"worker_root": {
"total": 3446.622361202999,
"count": 63629,
"is_parallel": true,
"self": 1504.75343406816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002207785000337026,
"count": 1,
"is_parallel": true,
"self": 0.0006573270002263598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001550458000110666,
"count": 8,
"is_parallel": true,
"self": 0.001550458000110666
}
}
},
"UnityEnvironment.step": {
"total": 0.07521966900003463,
"count": 1,
"is_parallel": true,
"self": 0.0011329810004099272,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000381746999664756,
"count": 1,
"is_parallel": true,
"self": 0.000381746999664756
},
"communicator.exchange": {
"total": 0.06330328000012742,
"count": 1,
"is_parallel": true,
"self": 0.06330328000012742
},
"steps_from_proto": {
"total": 0.01040166099983253,
"count": 1,
"is_parallel": true,
"self": 0.0005143589992258057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.009887302000606724,
"count": 8,
"is_parallel": true,
"self": 0.009887302000606724
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1941.868927134839,
"count": 63628,
"is_parallel": true,
"self": 46.360143823896124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.909887424945737,
"count": 63628,
"is_parallel": true,
"self": 26.909887424945737
},
"communicator.exchange": {
"total": 1725.4528307489659,
"count": 63628,
"is_parallel": true,
"self": 1725.4528307489659
},
"steps_from_proto": {
"total": 143.1460651370312,
"count": 63628,
"is_parallel": true,
"self": 29.85303827701182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 113.29302686001938,
"count": 509024,
"is_parallel": true,
"self": 113.29302686001938
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1187.8048626050095,
"count": 63629,
"self": 4.397731097893484,
"children": {
"process_trajectory": {
"total": 149.42335614911826,
"count": 63629,
"self": 149.11963096911768,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30372518000058335,
"count": 2,
"self": 0.30372518000058335
}
}
},
"_update_policy": {
"total": 1033.9837753579977,
"count": 444,
"self": 420.432722890097,
"children": {
"TorchPPOOptimizer.update": {
"total": 613.5510524679007,
"count": 22800,
"self": 613.5510524679007
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.002999852062203e-06,
"count": 1,
"self": 1.002999852062203e-06
},
"TrainerController._save_models": {
"total": 0.10964401199998974,
"count": 1,
"self": 0.001869048999651568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10777496300033818,
"count": 1,
"self": 0.10777496300033818
}
}
}
}
}
}
}