{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41876494884490967,
"min": 0.4149459898471832,
"max": 1.4618172645568848,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12643.3515625,
"min": 12355.431640625,
"max": 44345.6875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40510398149490356,
"min": -0.10476311296224594,
"max": 0.43668830394744873,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 108.16276550292969,
"min": -25.143146514892578,
"max": 116.59577941894531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01292134914547205,
"min": 0.007899358868598938,
"max": 0.2508623003959656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.450000286102295,
"min": 2.030135154724121,
"max": 60.20695495605469,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06986299122218043,
"min": 0.0648454232765029,
"max": 0.07450793748752015,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.978081877110526,
"min": 0.5055740482773332,
"max": 1.0500078292194908,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013377986960265497,
"min": 0.00012043791461638646,
"max": 0.013417518230672621,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18729181744371695,
"min": 0.0014452549753966376,
"max": 0.1878452552294167,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.753961701092856e-06,
"min": 7.753961701092856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010855546381529999,
"min": 0.00010855546381529999,
"max": 0.0032253478248840993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258462142857143,
"min": 0.10258462142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361847,
"min": 1.3691136000000002,
"max": 2.3176183999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002682036807142857,
"min": 0.0002682036807142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037548515299999996,
"min": 0.0037548515299999996,
"max": 0.10752407841,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011600017547607422,
"min": 0.011399288661777973,
"max": 0.39281731843948364,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1624002456665039,
"min": 0.15959003567695618,
"max": 2.7497212886810303,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 453.95384615384614,
"min": 435.27941176470586,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29507.0,
"min": 15984.0,
"max": 32753.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3233030996052548,
"min": -1.0000000521540642,
"max": 1.477203150826787,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 84.69139837473631,
"min": -32.000001668930054,
"max": 96.39759847521782,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3233030996052548,
"min": -1.0000000521540642,
"max": 1.477203150826787,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 84.69139837473631,
"min": -32.000001668930054,
"max": 96.39759847521782,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05518667281330636,
"min": 0.05222232431327621,
"max": 7.411740625277162,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.531947060051607,
"min": 3.2900064317364013,
"max": 118.58785000443459,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698560317",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698562522"
},
"total": 2205.5481747780004,
"count": 1,
"self": 0.8391638840002997,
"children": {
"run_training.setup": {
"total": 0.0420756660000734,
"count": 1,
"self": 0.0420756660000734
},
"TrainerController.start_learning": {
"total": 2204.666935228,
"count": 1,
"self": 1.5258462659749057,
"children": {
"TrainerController._reset_env": {
"total": 3.5338060110000242,
"count": 1,
"self": 3.5338060110000242
},
"TrainerController.advance": {
"total": 2199.482544122025,
"count": 63450,
"self": 1.5864850140751514,
"children": {
"env_step": {
"total": 1569.2506224899676,
"count": 63450,
"self": 1426.2316820489827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.06768462393643,
"count": 63450,
"self": 4.829183049854919,
"children": {
"TorchPolicy.evaluate": {
"total": 137.2385015740815,
"count": 62555,
"self": 137.2385015740815
}
}
},
"workers": {
"total": 0.9512558170483771,
"count": 63450,
"self": 0.0,
"children": {
"worker_root": {
"total": 2199.801969698969,
"count": 63450,
"is_parallel": true,
"self": 896.901941788934,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020000139998046507,
"count": 1,
"is_parallel": true,
"self": 0.0006833049997112539,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013167090000933968,
"count": 8,
"is_parallel": true,
"self": 0.0013167090000933968
}
}
},
"UnityEnvironment.step": {
"total": 0.0802530690000367,
"count": 1,
"is_parallel": true,
"self": 0.0006221530002221698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004965759999322472,
"count": 1,
"is_parallel": true,
"self": 0.0004965759999322472
},
"communicator.exchange": {
"total": 0.07743072399989614,
"count": 1,
"is_parallel": true,
"self": 0.07743072399989614
},
"steps_from_proto": {
"total": 0.001703615999986141,
"count": 1,
"is_parallel": true,
"self": 0.00036749699984284234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013361190001432988,
"count": 8,
"is_parallel": true,
"self": 0.0013361190001432988
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1302.900027910035,
"count": 63449,
"is_parallel": true,
"self": 36.105914641911795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.79160279203188,
"count": 63449,
"is_parallel": true,
"self": 26.79160279203188
},
"communicator.exchange": {
"total": 1132.4091527530438,
"count": 63449,
"is_parallel": true,
"self": 1132.4091527530438
},
"steps_from_proto": {
"total": 107.59335772304757,
"count": 63449,
"is_parallel": true,
"self": 22.413546863942656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.17981085910492,
"count": 507592,
"is_parallel": true,
"self": 85.17981085910492
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 628.6454366179823,
"count": 63450,
"self": 2.7926854970062323,
"children": {
"process_trajectory": {
"total": 124.36549070897581,
"count": 63450,
"self": 124.09871779397554,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2667729150002742,
"count": 2,
"self": 0.2667729150002742
}
}
},
"_update_policy": {
"total": 501.4872604120003,
"count": 435,
"self": 294.04544809299887,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.44181231900143,
"count": 22788,
"self": 207.44181231900143
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5360001270892099e-06,
"count": 1,
"self": 1.5360001270892099e-06
},
"TrainerController._save_models": {
"total": 0.12473729299972547,
"count": 1,
"self": 0.00206398599948443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12267330700024104,
"count": 1,
"self": 0.12267330700024104
}
}
}
}
}
}
}