{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.12530042231082916,
"min": 0.11758170276880264,
"max": 1.4623968601226807,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3795.09912109375,
"min": 3544.3828125,
"max": 44363.26953125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999935.0,
"min": 29952.0,
"max": 2999935.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999935.0,
"min": 29952.0,
"max": 2999935.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7794694900512695,
"min": -0.12581132352352142,
"max": 0.8871976137161255,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 233.06138610839844,
"min": -30.57215118408203,
"max": 269.70806884765625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013617660850286484,
"min": -0.05305878072977066,
"max": 0.38107696175575256,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.071680545806885,
"min": -14.325870513916016,
"max": 90.31523895263672,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06668864378296128,
"min": 0.06332928630818009,
"max": 0.07527718086370218,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.933641012961458,
"min": 0.48843737269919657,
"max": 1.0776327850859766,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015113021432472973,
"min": 0.0003899888091238052,
"max": 0.018621311025483357,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2115823000546216,
"min": 0.004289876900361857,
"max": 0.26069835435676697,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4063709598142915e-06,
"min": 1.4063709598142915e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 1.968919343740008e-05,
"min": 1.968919343740008e-05,
"max": 0.004027250757583133,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10046875714285715,
"min": 0.10046875714285715,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4065626,
"min": 1.3962282666666668,
"max": 2.842416866666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.682883857142878e-05,
"min": 5.682883857142878e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0007956037400000029,
"min": 0.0007956037400000029,
"max": 0.13425744498,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009158189408481121,
"min": 0.008886491879820824,
"max": 0.5615325570106506,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12821465730667114,
"min": 0.12441088259220123,
"max": 3.930727958679199,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 233.84297520661158,
"min": 202.21333333333334,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28295.0,
"min": 15984.0,
"max": 33385.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7496247829730847,
"min": -1.0000000521540642,
"max": 1.784449318051338,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 211.70459873974323,
"min": -31.998801663517952,
"max": 267.66739770770073,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7496247829730847,
"min": -1.0000000521540642,
"max": 1.784449318051338,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 211.70459873974323,
"min": -31.998801663517952,
"max": 267.66739770770073,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.022597352704966503,
"min": 0.020750649035678057,
"max": 11.320017603226006,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.734279677300947,
"min": 2.734279677300947,
"max": 181.1202816516161,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673611398",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673622172"
},
"total": 10773.972994929001,
"count": 1,
"self": 0.7415345670015085,
"children": {
"run_training.setup": {
"total": 0.14499570099997072,
"count": 1,
"self": 0.14499570099997072
},
"TrainerController.start_learning": {
"total": 10773.086464660999,
"count": 1,
"self": 7.441989713466683,
"children": {
"TrainerController._reset_env": {
"total": 4.440407903999585,
"count": 1,
"self": 4.440407903999585
},
"TrainerController.advance": {
"total": 10761.080140587532,
"count": 195464,
"self": 6.910499891524523,
"children": {
"env_step": {
"total": 7218.539935791099,
"count": 195464,
"self": 6813.396876323788,
"children": {
"SubprocessEnvManager._take_step": {
"total": 400.39649747449675,
"count": 195464,
"self": 19.26538600265485,
"children": {
"TorchPolicy.evaluate": {
"total": 381.1311114718419,
"count": 187564,
"self": 84.6899272079936,
"children": {
"TorchPolicy.sample_actions": {
"total": 296.4411842638483,
"count": 187564,
"self": 296.4411842638483
}
}
}
}
},
"workers": {
"total": 4.74656199281435,
"count": 195464,
"self": 0.0,
"children": {
"worker_root": {
"total": 10753.83121954867,
"count": 195464,
"is_parallel": true,
"self": 4427.332200703235,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029062350004096515,
"count": 1,
"is_parallel": true,
"self": 0.0010541669998929137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018520680005167378,
"count": 8,
"is_parallel": true,
"self": 0.0018520680005167378
}
}
},
"UnityEnvironment.step": {
"total": 0.06552483300038148,
"count": 1,
"is_parallel": true,
"self": 0.0006672150011581834,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005307530000209226,
"count": 1,
"is_parallel": true,
"self": 0.0005307530000209226
},
"communicator.exchange": {
"total": 0.0622894769994673,
"count": 1,
"is_parallel": true,
"self": 0.0622894769994673
},
"steps_from_proto": {
"total": 0.002037387999735074,
"count": 1,
"is_parallel": true,
"self": 0.0004959360003340407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015414519994010334,
"count": 8,
"is_parallel": true,
"self": 0.0015414519994010334
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6326.499018845436,
"count": 195463,
"is_parallel": true,
"self": 128.74677668654658,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.80999634612635,
"count": 195463,
"is_parallel": true,
"self": 78.80999634612635
},
"communicator.exchange": {
"total": 5730.876744883976,
"count": 195463,
"is_parallel": true,
"self": 5730.876744883976
},
"steps_from_proto": {
"total": 388.0655009287866,
"count": 195463,
"is_parallel": true,
"self": 97.15597599610737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 290.90952493267923,
"count": 1563704,
"is_parallel": true,
"self": 290.90952493267923
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3535.629704904909,
"count": 195464,
"self": 14.978971290142908,
"children": {
"process_trajectory": {
"total": 620.741367486773,
"count": 195464,
"self": 620.1138219647719,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6275455220011281,
"count": 6,
"self": 0.6275455220011281
}
}
},
"_update_policy": {
"total": 2899.909366127993,
"count": 1401,
"self": 734.9283789987876,
"children": {
"TorchPPOOptimizer.update": {
"total": 2164.9809871292055,
"count": 68385,
"self": 2164.9809871292055
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8060000002151355e-06,
"count": 1,
"self": 1.8060000002151355e-06
},
"TrainerController._save_models": {
"total": 0.12392465000084485,
"count": 1,
"self": 0.0032138450023921905,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12071080499845266,
"count": 1,
"self": 0.12071080499845266
}
}
}
}
}
}
}