{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.370951384305954,
"min": 0.36033034324645996,
"max": 1.460603952407837,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11158.2177734375,
"min": 10807.451171875,
"max": 44308.8828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5080922245979309,
"min": -0.11320599168539047,
"max": 0.6237267255783081,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.20108032226562,
"min": -27.282644271850586,
"max": 182.12820434570312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.08365298062562943,
"min": -0.13568054139614105,
"max": 0.39471274614334106,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 22.753610610961914,
"min": -36.90510559082031,
"max": 93.54692077636719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06509611582095245,
"min": 0.06509611582095245,
"max": 0.0734572583320029,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9113456214933344,
"min": 0.47610657875129725,
"max": 1.0500176363857463,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015794922219529755,
"min": 0.00033058679439091363,
"max": 0.01756318785677043,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22112891107341656,
"min": 0.0023141075607363954,
"max": 0.2561092455822897,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.431504665721428e-06,
"min": 7.431504665721428e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001040410653201,
"min": 0.0001040410653201,
"max": 0.0033831587722805,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247713571428571,
"min": 0.10247713571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346799,
"min": 1.327104,
"max": 2.5277194999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025746585785714287,
"min": 0.00025746585785714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00360452201,
"min": 0.00360452201,
"max": 0.11279917805,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01184157095849514,
"min": 0.01184157095849514,
"max": 0.47674569487571716,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16578198969364166,
"min": 0.16578198969364166,
"max": 3.3372199535369873,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 347.5487804878049,
"min": 325.23655913978496,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28499.0,
"min": 15984.0,
"max": 32475.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6284264796469585,
"min": -1.0000000521540642,
"max": 1.6758695528235124,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 135.15939781069756,
"min": -32.000001668930054,
"max": 154.17999885976315,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6284264796469585,
"min": -1.0000000521540642,
"max": 1.6758695528235124,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 135.15939781069756,
"min": -32.000001668930054,
"max": 154.17999885976315,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043146331430411425,
"min": 0.042458049374387054,
"max": 10.38443098589778,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5811455087241484,
"min": 3.5811455087241484,
"max": 166.15089577436447,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693744561",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693746808"
},
"total": 2246.6704688709997,
"count": 1,
"self": 0.4755684889996701,
"children": {
"run_training.setup": {
"total": 0.05907389999993029,
"count": 1,
"self": 0.05907389999993029
},
"TrainerController.start_learning": {
"total": 2246.135826482,
"count": 1,
"self": 1.5695940369596428,
"children": {
"TrainerController._reset_env": {
"total": 5.05291871999998,
"count": 1,
"self": 5.05291871999998
},
"TrainerController.advance": {
"total": 2239.40776575104,
"count": 63708,
"self": 1.5103896959926715,
"children": {
"env_step": {
"total": 1583.9088696790307,
"count": 63708,
"self": 1468.2431237319704,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.72219476201963,
"count": 63708,
"self": 4.909115589026669,
"children": {
"TorchPolicy.evaluate": {
"total": 109.81307917299296,
"count": 62557,
"self": 109.81307917299296
}
}
},
"workers": {
"total": 0.9435511850406328,
"count": 63708,
"self": 0.0,
"children": {
"worker_root": {
"total": 2240.8566284768713,
"count": 63708,
"is_parallel": true,
"self": 891.618221425821,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026419190000979142,
"count": 1,
"is_parallel": true,
"self": 0.000739044000283684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019028749998142303,
"count": 8,
"is_parallel": true,
"self": 0.0019028749998142303
}
}
},
"UnityEnvironment.step": {
"total": 0.04926708199991481,
"count": 1,
"is_parallel": true,
"self": 0.0005573319997438375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048631100003149186,
"count": 1,
"is_parallel": true,
"self": 0.00048631100003149186
},
"communicator.exchange": {
"total": 0.04629582100005791,
"count": 1,
"is_parallel": true,
"self": 0.04629582100005791
},
"steps_from_proto": {
"total": 0.0019276180000815657,
"count": 1,
"is_parallel": true,
"self": 0.0003779040000608802,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015497140000206855,
"count": 8,
"is_parallel": true,
"self": 0.0015497140000206855
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1349.2384070510502,
"count": 63707,
"is_parallel": true,
"self": 34.60348134797073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.12589989403955,
"count": 63707,
"is_parallel": true,
"self": 23.12589989403955
},
"communicator.exchange": {
"total": 1183.307322025048,
"count": 63707,
"is_parallel": true,
"self": 1183.307322025048
},
"steps_from_proto": {
"total": 108.20170378399189,
"count": 63707,
"is_parallel": true,
"self": 21.605117519099394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.5965862648925,
"count": 509656,
"is_parallel": true,
"self": 86.5965862648925
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 653.9885063760169,
"count": 63708,
"self": 2.817985639053177,
"children": {
"process_trajectory": {
"total": 111.96548317395536,
"count": 63708,
"self": 111.75257897895472,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21290419500064672,
"count": 2,
"self": 0.21290419500064672
}
}
},
"_update_policy": {
"total": 539.2050375630083,
"count": 433,
"self": 352.26552520305745,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.93951235995087,
"count": 22857,
"self": 186.93951235995087
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.44300020110677e-06,
"count": 1,
"self": 1.44300020110677e-06
},
"TrainerController._save_models": {
"total": 0.10554653100007272,
"count": 1,
"self": 0.0017096780002248124,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1038368529998479,
"count": 1,
"self": 0.1038368529998479
}
}
}
}
}
}
}