{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7615676522254944,
"min": 0.7366476655006409,
"max": 1.4750863313674927,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 23041.990234375,
"min": 22170.1484375,
"max": 44748.21875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29952.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1887703686952591,
"min": -0.11251546442508698,
"max": 0.1887703686952591,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.75890350341797,
"min": -27.003711700439453,
"max": 47.75890350341797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.35893896222114563,
"min": -0.03557216748595238,
"max": 0.3744964599609375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 90.81155395507812,
"min": -8.893041610717773,
"max": 90.81155395507812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07127098914032674,
"min": 0.06421439810208167,
"max": 0.07406754818855793,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0690648371049012,
"min": 0.4993229856267679,
"max": 1.0690648371049012,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.051294526947434735,
"min": 0.0004945802782409725,
"max": 0.051294526947434735,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.769417904211521,
"min": 0.004451222504168752,
"max": 0.769417904211521,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5077174974599986e-06,
"min": 7.5077174974599986e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011261576246189998,
"min": 0.00011261576246189998,
"max": 0.0029053566315479,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250253999999999,
"min": 0.10250253999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375381,
"min": 1.3886848,
"max": 2.2750547,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600037459999999,
"min": 0.0002600037459999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003900056189999999,
"min": 0.003900056189999999,
"max": 0.09687836479,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010838523507118225,
"min": 0.010409833863377571,
"max": 0.5958119034767151,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16257785260677338,
"min": 0.1457376778125763,
"max": 4.17068338394165,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 612.6666666666666,
"min": 612.6666666666666,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27570.0,
"min": 15984.0,
"max": 32633.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9872266377011935,
"min": -1.0000000521540642,
"max": 0.9872266377011935,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 44.42519869655371,
"min": -32.000001668930054,
"max": 44.42519869655371,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9872266377011935,
"min": -1.0000000521540642,
"max": 0.9872266377011935,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 44.42519869655371,
"min": -32.000001668930054,
"max": 44.42519869655371,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06893579374947068,
"min": 0.06893579374947068,
"max": 12.16698963008821,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1021107187261805,
"min": 3.1021107187261805,
"max": 194.67183408141136,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683801402",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683803364"
},
"total": 1962.370647868,
"count": 1,
"self": 0.894356026999958,
"children": {
"run_training.setup": {
"total": 0.03774204199999076,
"count": 1,
"self": 0.03774204199999076
},
"TrainerController.start_learning": {
"total": 1961.4385497990002,
"count": 1,
"self": 1.2229321371137303,
"children": {
"TrainerController._reset_env": {
"total": 4.282748813000126,
"count": 1,
"self": 4.282748813000126
},
"TrainerController.advance": {
"total": 1955.7949178448866,
"count": 63070,
"self": 1.2859526439888214,
"children": {
"env_step": {
"total": 1333.3671643509765,
"count": 63070,
"self": 1229.526667190859,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.09123560611351,
"count": 63070,
"self": 4.589528344928112,
"children": {
"TorchPolicy.evaluate": {
"total": 98.5017072611854,
"count": 62562,
"self": 98.5017072611854
}
}
},
"workers": {
"total": 0.7492615540040788,
"count": 63070,
"self": 0.0,
"children": {
"worker_root": {
"total": 1956.6825003117774,
"count": 63070,
"is_parallel": true,
"self": 834.1446016106838,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002035150999745383,
"count": 1,
"is_parallel": true,
"self": 0.0006153979993541725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014197530003912107,
"count": 8,
"is_parallel": true,
"self": 0.0014197530003912107
}
}
},
"UnityEnvironment.step": {
"total": 0.07122674499987625,
"count": 1,
"is_parallel": true,
"self": 0.000540603999979794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047536900001432514,
"count": 1,
"is_parallel": true,
"self": 0.00047536900001432514
},
"communicator.exchange": {
"total": 0.0685423830000218,
"count": 1,
"is_parallel": true,
"self": 0.0685423830000218
},
"steps_from_proto": {
"total": 0.0016683889998603263,
"count": 1,
"is_parallel": true,
"self": 0.00036165499932394596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013067340005363803,
"count": 8,
"is_parallel": true,
"self": 0.0013067340005363803
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1122.5378987010936,
"count": 63069,
"is_parallel": true,
"self": 30.845594963044732,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.414371044051677,
"count": 63069,
"is_parallel": true,
"self": 22.414371044051677
},
"communicator.exchange": {
"total": 975.3357322690545,
"count": 63069,
"is_parallel": true,
"self": 975.3357322690545
},
"steps_from_proto": {
"total": 93.94220042494271,
"count": 63069,
"is_parallel": true,
"self": 18.936389694984427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.00581072995828,
"count": 504552,
"is_parallel": true,
"self": 75.00581072995828
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.1418008499213,
"count": 63070,
"self": 2.2230571949667137,
"children": {
"process_trajectory": {
"total": 103.50253521395598,
"count": 63070,
"self": 103.24785130895634,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2546839049996379,
"count": 2,
"self": 0.2546839049996379
}
}
},
"_update_policy": {
"total": 515.4162084409986,
"count": 430,
"self": 332.1626408939819,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.2535675470167,
"count": 22875,
"self": 183.2535675470167
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3190001482143998e-06,
"count": 1,
"self": 1.3190001482143998e-06
},
"TrainerController._save_models": {
"total": 0.1379496849995121,
"count": 1,
"self": 0.0019190089997209725,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13603067599979113,
"count": 1,
"self": 0.13603067599979113
}
}
}
}
}
}
}