{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.15071994066238403,
"min": 0.14803652465343475,
"max": 1.5077581405639648,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4511.9521484375,
"min": 4448.20166015625,
"max": 45739.3515625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999996.0,
"min": 29952.0,
"max": 2999996.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999996.0,
"min": 29952.0,
"max": 2999996.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7497345805168152,
"min": -0.09831540286540985,
"max": 0.9040558934211731,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 227.16958618164062,
"min": -23.694011688232422,
"max": 281.161376953125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01719963736832142,
"min": -0.02216150425374508,
"max": 0.22051340341567993,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.211490154266357,
"min": -6.493320941925049,
"max": 52.26167678833008,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06911255511872963,
"min": 0.06332173679816062,
"max": 0.07434474569785698,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9675757716622149,
"min": 0.4831407433839036,
"max": 1.0876180005337421,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01555315397593554,
"min": 3.5719992444883146e-05,
"max": 0.016945322503387514,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21774415566309757,
"min": 0.000500079894228364,
"max": 0.2541798375508127,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4486709457142882e-06,
"min": 1.4486709457142882e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0281393240000036e-05,
"min": 2.0281393240000036e-05,
"max": 0.0039275934908022,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048285714285717,
"min": 0.10048285714285717,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067600000000002,
"min": 1.3962282666666668,
"max": 2.7091978000000005,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.823742857142865e-05,
"min": 5.823742857142865e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008153240000000011,
"min": 0.0008153240000000011,
"max": 0.13092886022000003,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0050439960323274136,
"min": 0.0050439960323274136,
"max": 0.29111671447753906,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07061594724655151,
"min": 0.07061594724655151,
"max": 2.0378170013427734,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 228.7348484848485,
"min": 187.25806451612902,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30193.0,
"min": 15984.0,
"max": 33303.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7404180338844322,
"min": -1.0000000521540642,
"max": 1.8127179357867975,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 231.47559850662947,
"min": -31.99080166220665,
"max": 282.7839979827404,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7404180338844322,
"min": -1.0000000521540642,
"max": 1.8127179357867975,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 231.47559850662947,
"min": -31.99080166220665,
"max": 282.7839979827404,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012189188642782699,
"min": 0.010497263342865782,
"max": 5.702630968764424,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6211620894900989,
"min": 1.5237018227489898,
"max": 91.24209550023079,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674299953",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674306611"
},
"total": 6657.8570329350005,
"count": 1,
"self": 0.4925036260010529,
"children": {
"run_training.setup": {
"total": 0.09791167499997755,
"count": 1,
"self": 0.09791167499997755
},
"TrainerController.start_learning": {
"total": 6657.266617633999,
"count": 1,
"self": 3.765038232880215,
"children": {
"TrainerController._reset_env": {
"total": 10.296631672000103,
"count": 1,
"self": 10.296631672000103
},
"TrainerController.advance": {
"total": 6643.121927204118,
"count": 195118,
"self": 4.015972863999195,
"children": {
"env_step": {
"total": 4701.885779841092,
"count": 195118,
"self": 4391.480737600168,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.0487785609198,
"count": 195118,
"self": 12.710043596010564,
"children": {
"TorchPolicy.evaluate": {
"total": 295.33873496490924,
"count": 187564,
"self": 100.17143457984548,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.16730038506375,
"count": 187564,
"self": 195.16730038506375
}
}
}
}
},
"workers": {
"total": 2.3562636800040764,
"count": 195118,
"self": 0.0,
"children": {
"worker_root": {
"total": 6646.502543591864,
"count": 195118,
"is_parallel": true,
"self": 2549.988485565814,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051770489999398706,
"count": 1,
"is_parallel": true,
"self": 0.0030662109998047526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002110838000135118,
"count": 8,
"is_parallel": true,
"self": 0.002110838000135118
}
}
},
"UnityEnvironment.step": {
"total": 0.04564486200001738,
"count": 1,
"is_parallel": true,
"self": 0.00047227799996107933,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004117720000067493,
"count": 1,
"is_parallel": true,
"self": 0.0004117720000067493
},
"communicator.exchange": {
"total": 0.04285201700008656,
"count": 1,
"is_parallel": true,
"self": 0.04285201700008656
},
"steps_from_proto": {
"total": 0.0019087949999629927,
"count": 1,
"is_parallel": true,
"self": 0.0006888450000133162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012199499999496766,
"count": 8,
"is_parallel": true,
"self": 0.0012199499999496766
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4096.51405802605,
"count": 195117,
"is_parallel": true,
"self": 82.86133542811422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 63.37426435800808,
"count": 195117,
"is_parallel": true,
"self": 63.37426435800808
},
"communicator.exchange": {
"total": 3682.352384369153,
"count": 195117,
"is_parallel": true,
"self": 3682.352384369153
},
"steps_from_proto": {
"total": 267.92607387077555,
"count": 195117,
"is_parallel": true,
"self": 62.71980149411661,
"children": {
"_process_rank_one_or_two_observation": {
"total": 205.20627237665894,
"count": 1560936,
"is_parallel": true,
"self": 205.20627237665894
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1937.2201744990266,
"count": 195118,
"self": 7.507366441980366,
"children": {
"process_trajectory": {
"total": 430.03568984804383,
"count": 195118,
"self": 429.46806292204496,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5676269259988658,
"count": 6,
"self": 0.5676269259988658
}
}
},
"_update_policy": {
"total": 1499.6771182090024,
"count": 1392,
"self": 560.5464127579185,
"children": {
"TorchPPOOptimizer.update": {
"total": 939.1307054510839,
"count": 68355,
"self": 939.1307054510839
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.401000190526247e-06,
"count": 1,
"self": 1.401000190526247e-06
},
"TrainerController._save_models": {
"total": 0.08301912400020228,
"count": 1,
"self": 0.0014329440000437899,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08158618000015849,
"count": 1,
"self": 0.08158618000015849
}
}
}
}
}
}
}