{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3369864821434021,
"min": 0.3369864821434021,
"max": 1.4255772829055786,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10125.76953125,
"min": 10125.76953125,
"max": 43246.3125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.571969747543335,
"min": -0.09670498222112656,
"max": 0.5902937650680542,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.867431640625,
"min": -23.30590057373047,
"max": 167.05313110351562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0003978550957981497,
"min": -0.012243017554283142,
"max": 0.30374136567115784,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.11259299516677856,
"min": -3.4035589694976807,
"max": 73.20166778564453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07224562679416127,
"min": 0.06418935314001169,
"max": 0.07394406515093233,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0114387751182579,
"min": 0.4904311826217106,
"max": 1.0569209165890547,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01620872739570151,
"min": 0.00038326661446603654,
"max": 0.017580594809586313,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2269221835398211,
"min": 0.004215932759126402,
"max": 0.24612832733420836,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.65423316290714e-06,
"min": 7.65423316290714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010715926428069997,
"min": 0.00010715926428069997,
"max": 0.0032603156132281997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255137857142858,
"min": 0.10255137857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357193000000001,
"min": 1.3886848,
"max": 2.4437452000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002648827192857143,
"min": 0.0002648827192857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00370835807,
"min": 0.00370835807,
"max": 0.10869850281999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011723242700099945,
"min": 0.011704780161380768,
"max": 0.4499180018901825,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16412539780139923,
"min": 0.16386692225933075,
"max": 3.149425983428955,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 342.1123595505618,
"min": 335.20238095238096,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30448.0,
"min": 15984.0,
"max": 32730.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.613808869322141,
"min": -1.0000000521540642,
"max": 1.613808869322141,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.2427982389927,
"min": -31.996801659464836,
"max": 147.93919824063778,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.613808869322141,
"min": -1.0000000521540642,
"max": 1.613808869322141,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.2427982389927,
"min": -31.996801659464836,
"max": 147.93919824063778,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04129651653412212,
"min": 0.041107160606603514,
"max": 8.68898061197251,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.716686488070991,
"min": 3.5763229727745056,
"max": 139.02368979156017,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675764870",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675767225"
},
"total": 2355.473320643,
"count": 1,
"self": 0.4800105480003367,
"children": {
"run_training.setup": {
"total": 0.10679358700008379,
"count": 1,
"self": 0.10679358700008379
},
"TrainerController.start_learning": {
"total": 2354.8865165079997,
"count": 1,
"self": 1.60636151304152,
"children": {
"TrainerController._reset_env": {
"total": 6.151862176000122,
"count": 1,
"self": 6.151862176000122
},
"TrainerController.advance": {
"total": 2347.040954792959,
"count": 63920,
"self": 1.6566444918248635,
"children": {
"env_step": {
"total": 1588.2469839510386,
"count": 63920,
"self": 1460.0569028600676,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.1748961460462,
"count": 63920,
"self": 5.139217148047464,
"children": {
"TorchPolicy.evaluate": {
"total": 122.03567899799873,
"count": 62568,
"self": 40.599583920993155,
"children": {
"TorchPolicy.sample_actions": {
"total": 81.43609507700558,
"count": 62568,
"self": 81.43609507700558
}
}
}
}
},
"workers": {
"total": 1.0151849449248402,
"count": 63920,
"self": 0.0,
"children": {
"worker_root": {
"total": 2348.870186385817,
"count": 63920,
"is_parallel": true,
"self": 1017.1737908208333,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019313200000397046,
"count": 1,
"is_parallel": true,
"self": 0.000702570000612468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012287499994272366,
"count": 8,
"is_parallel": true,
"self": 0.0012287499994272366
}
}
},
"UnityEnvironment.step": {
"total": 0.10217383300005167,
"count": 1,
"is_parallel": true,
"self": 0.0005509830002665694,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004986489998373145,
"count": 1,
"is_parallel": true,
"self": 0.0004986489998373145
},
"communicator.exchange": {
"total": 0.0992353980000189,
"count": 1,
"is_parallel": true,
"self": 0.0992353980000189
},
"steps_from_proto": {
"total": 0.0018888029999288847,
"count": 1,
"is_parallel": true,
"self": 0.0005245930005912669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013642099993376178,
"count": 8,
"is_parallel": true,
"self": 0.0013642099993376178
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1331.6963955649835,
"count": 63919,
"is_parallel": true,
"self": 32.752466958849254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.32895019908574,
"count": 63919,
"is_parallel": true,
"self": 25.32895019908574
},
"communicator.exchange": {
"total": 1161.6084107010697,
"count": 63919,
"is_parallel": true,
"self": 1161.6084107010697
},
"steps_from_proto": {
"total": 112.0065677059788,
"count": 63919,
"is_parallel": true,
"self": 25.879246797146152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.12732090883264,
"count": 511352,
"is_parallel": true,
"self": 86.12732090883264
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 757.1373263500955,
"count": 63920,
"self": 3.023136538233757,
"children": {
"process_trajectory": {
"total": 175.6339810128652,
"count": 63920,
"self": 175.43753098686557,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19645002599963846,
"count": 2,
"self": 0.19645002599963846
}
}
},
"_update_policy": {
"total": 578.4802087989965,
"count": 448,
"self": 222.86723063498903,
"children": {
"TorchPPOOptimizer.update": {
"total": 355.6129781640075,
"count": 22815,
"self": 355.6129781640075
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1209995136596262e-06,
"count": 1,
"self": 1.1209995136596262e-06
},
"TrainerController._save_models": {
"total": 0.08733690499957447,
"count": 1,
"self": 0.0017186259992740816,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08561827900030039,
"count": 1,
"self": 0.08561827900030039
}
}
}
}
}
}
}