{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36294257640838623,
"min": 0.36294257640838623,
"max": 1.4506468772888184,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10911.505859375,
"min": 10911.505859375,
"max": 44006.82421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5275699496269226,
"min": -0.18417397141456604,
"max": 0.6649685502052307,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 146.13687133789062,
"min": -43.64923095703125,
"max": 188.18609619140625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03552060201764107,
"min": -0.025781646370887756,
"max": 0.28118646144866943,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.83920669555664,
"min": -6.935262680053711,
"max": 66.64118957519531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07127303847491497,
"min": 0.06344443568813905,
"max": 0.07306796977383212,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9978225386488095,
"min": 0.484442425395152,
"max": 1.0407982482865918,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013435025654741495,
"min": 0.0008379345040933509,
"max": 0.015341344526839546,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18809035916638092,
"min": 0.007271818532627685,
"max": 0.23012016790259318,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.370776114535718e-06,
"min": 7.370776114535718e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010319086560350006,
"min": 0.00010319086560350006,
"max": 0.0033800297733235,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245689285714286,
"min": 0.10245689285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343965,
"min": 1.3691136000000002,
"max": 2.5266765000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025544359642857157,
"min": 0.00025544359642857157,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003576210350000002,
"min": 0.003576210350000002,
"max": 0.11269498234999997,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009307344444096088,
"min": 0.009307344444096088,
"max": 0.342073917388916,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1303028166294098,
"min": 0.1303028166294098,
"max": 2.394517421722412,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 349.98809523809524,
"min": 309.77777777777777,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29399.0,
"min": 15984.0,
"max": 32862.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5522626319743065,
"min": -1.0000000521540642,
"max": 1.6617113450034098,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 128.83779845386744,
"min": -32.000001668930054,
"max": 165.16039817780256,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5522626319743065,
"min": -1.0000000521540642,
"max": 1.6617113450034098,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 128.83779845386744,
"min": -32.000001668930054,
"max": 165.16039817780256,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.034119358636373186,
"min": 0.032897544274803917,
"max": 6.553097438067198,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8319067668189746,
"min": 2.8319067668189746,
"max": 104.84955900907516,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674121480",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674123559"
},
"total": 2079.672300736,
"count": 1,
"self": 0.43824554500042723,
"children": {
"run_training.setup": {
"total": 0.10615037600018695,
"count": 1,
"self": 0.10615037600018695
},
"TrainerController.start_learning": {
"total": 2079.1279048149995,
"count": 1,
"self": 1.1655877949524438,
"children": {
"TrainerController._reset_env": {
"total": 6.087503411999933,
"count": 1,
"self": 6.087503411999933
},
"TrainerController.advance": {
"total": 2071.7936006260475,
"count": 64139,
"self": 1.1570579520503088,
"children": {
"env_step": {
"total": 1429.831025229008,
"count": 64139,
"self": 1333.1188601079589,
"children": {
"SubprocessEnvManager._take_step": {
"total": 96.00557914202182,
"count": 64139,
"self": 4.10894005907403,
"children": {
"TorchPolicy.evaluate": {
"total": 91.89663908294779,
"count": 62570,
"self": 31.363026755957435,
"children": {
"TorchPolicy.sample_actions": {
"total": 60.53361232699035,
"count": 62570,
"self": 60.53361232699035
}
}
}
}
},
"workers": {
"total": 0.7065859790272953,
"count": 64139,
"self": 0.0,
"children": {
"worker_root": {
"total": 2075.121054828942,
"count": 64139,
"is_parallel": true,
"self": 835.6394470789792,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019353149996277352,
"count": 1,
"is_parallel": true,
"self": 0.0006868189998385787,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012484959997891565,
"count": 8,
"is_parallel": true,
"self": 0.0012484959997891565
}
}
},
"UnityEnvironment.step": {
"total": 0.046480992999931914,
"count": 1,
"is_parallel": true,
"self": 0.0006309650002549461,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042325200001869234,
"count": 1,
"is_parallel": true,
"self": 0.00042325200001869234
},
"communicator.exchange": {
"total": 0.04374660399980712,
"count": 1,
"is_parallel": true,
"self": 0.04374660399980712
},
"steps_from_proto": {
"total": 0.0016801719998511544,
"count": 1,
"is_parallel": true,
"self": 0.0004423479995239177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012378240003272367,
"count": 8,
"is_parallel": true,
"self": 0.0012378240003272367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.4816077499627,
"count": 64138,
"is_parallel": true,
"self": 27.2880592378674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.614151576971835,
"count": 64138,
"is_parallel": true,
"self": 21.614151576971835
},
"communicator.exchange": {
"total": 1094.1258006210742,
"count": 64138,
"is_parallel": true,
"self": 1094.1258006210742
},
"steps_from_proto": {
"total": 96.45359631404926,
"count": 64138,
"is_parallel": true,
"self": 20.695645612175667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.7579507018736,
"count": 513104,
"is_parallel": true,
"self": 75.7579507018736
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 640.8055174449892,
"count": 64139,
"self": 2.1292097989598915,
"children": {
"process_trajectory": {
"total": 142.16351680503067,
"count": 64139,
"self": 141.98513321303108,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17838359199959086,
"count": 2,
"self": 0.17838359199959086
}
}
},
"_update_policy": {
"total": 496.5127908409986,
"count": 451,
"self": 185.57491433003997,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.93787651095863,
"count": 22812,
"self": 310.93787651095863
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.739997040014714e-07,
"count": 1,
"self": 9.739997040014714e-07
},
"TrainerController._save_models": {
"total": 0.08121200799996586,
"count": 1,
"self": 0.0013904809993618983,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07982152700060396,
"count": 1,
"self": 0.07982152700060396
}
}
}
}
}
}
}