{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.353591650724411,
"min": 0.34824860095977783,
"max": 1.460567593574524,
"count": 56
},
"Pyramids.Policy.Entropy.sum": {
"value": 10624.7216796875,
"min": 10402.8818359375,
"max": 44307.77734375,
"count": 56
},
"Pyramids.Step.mean": {
"value": 1679936.0,
"min": 29952.0,
"max": 1679936.0,
"count": 56
},
"Pyramids.Step.sum": {
"value": 1679936.0,
"min": 29952.0,
"max": 1679936.0,
"count": 56
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6895836591720581,
"min": -0.11628881096839905,
"max": 0.767508864402771,
"count": 56
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 195.84176635742188,
"min": -27.90931510925293,
"max": 221.81005859375,
"count": 56
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012621568515896797,
"min": -0.031149884685873985,
"max": 0.23430316150188446,
"count": 56
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.5845253467559814,
"min": -8.53506851196289,
"max": 55.529850006103516,
"count": 56
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0687060652755045,
"min": 0.06586742103986797,
"max": 0.07400308160382943,
"count": 56
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.961884913857063,
"min": 0.4948795660386857,
"max": 1.0948977358365322,
"count": 56
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01424026587008367,
"min": 0.0006663407392315117,
"max": 0.016218521269740233,
"count": 56
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19936372218117138,
"min": 0.008662429610009651,
"max": 0.22705929777636327,
"count": 56
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00025002954379968137,
"min": 0.00025002954379968137,
"max": 0.0002995150630187886,
"count": 56
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0035004136131955394,
"min": 0.00209660544113152,
"max": 0.004169216960261049,
"count": 56
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.18334317571428568,
"min": 0.18334317571428568,
"max": 0.1998383542857143,
"count": 56
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.5668044599999997,
"min": 1.3988684800000002,
"max": 2.889738950000001,
"count": 56
},
"Pyramids.Policy.Beta.mean": {
"value": 0.008335983253857144,
"min": 0.008335983253857144,
"max": 0.009983851593142858,
"count": 56
},
"Pyramids.Policy.Beta.sum": {
"value": 0.116703765554,
"min": 0.06988696115200001,
"max": 0.138984921105,
"count": 56
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00815605092793703,
"min": 0.007685383781790733,
"max": 0.36510565876960754,
"count": 56
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11418470740318298,
"min": 0.11103454977273941,
"max": 2.555739641189575,
"count": 56
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 291.18446601941747,
"min": 257.77777777777777,
"max": 999.0,
"count": 56
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29992.0,
"min": 15984.0,
"max": 32982.0,
"count": 56
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6707115229219198,
"min": -1.0000000521540642,
"max": 1.7251264798080819,
"count": 56
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 173.75399838387966,
"min": -30.49400170147419,
"max": 201.83979813754559,
"count": 56
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6707115229219198,
"min": -1.0000000521540642,
"max": 1.7251264798080819,
"count": 56
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 173.75399838387966,
"min": -30.49400170147419,
"max": 201.83979813754559,
"count": 56
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.024493022927624855,
"min": 0.02134973293756887,
"max": 7.382361877709627,
"count": 56
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.547274384472985,
"min": 2.2203722255071625,
"max": 118.11779004335403,
"count": 56
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 56
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 56
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693823363",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693827061"
},
"total": 3698.533238058,
"count": 1,
"self": 0.5815155339992089,
"children": {
"run_training.setup": {
"total": 0.0405830300001071,
"count": 1,
"self": 0.0405830300001071
},
"TrainerController.start_learning": {
"total": 3697.9111394940005,
"count": 1,
"self": 2.180277634005506,
"children": {
"TrainerController._reset_env": {
"total": 4.018690837000122,
"count": 1,
"self": 4.018690837000122
},
"TrainerController.advance": {
"total": 3691.415373041995,
"count": 107607,
"self": 2.2184552468925176,
"children": {
"env_step": {
"total": 2596.8048594829415,
"count": 107607,
"self": 2419.117078891062,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.33687775006547,
"count": 107607,
"self": 7.782326285007912,
"children": {
"TorchPolicy.evaluate": {
"total": 168.55455146505756,
"count": 105184,
"self": 168.55455146505756
}
}
},
"workers": {
"total": 1.3509028418138769,
"count": 107607,
"self": 0.0,
"children": {
"worker_root": {
"total": 3689.944686289052,
"count": 107607,
"is_parallel": true,
"self": 1457.3822174250977,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017570739998973295,
"count": 1,
"is_parallel": true,
"self": 0.0005461169998852711,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012109570000120584,
"count": 8,
"is_parallel": true,
"self": 0.0012109570000120584
}
}
},
"UnityEnvironment.step": {
"total": 0.04616919499994765,
"count": 1,
"is_parallel": true,
"self": 0.0005645289998028602,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004904840000108379,
"count": 1,
"is_parallel": true,
"self": 0.0004904840000108379
},
"communicator.exchange": {
"total": 0.04298331400013922,
"count": 1,
"is_parallel": true,
"self": 0.04298331400013922
},
"steps_from_proto": {
"total": 0.0021308679999947344,
"count": 1,
"is_parallel": true,
"self": 0.0005569869999817456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015738810000129888,
"count": 8,
"is_parallel": true,
"self": 0.0015738810000129888
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2232.562468863954,
"count": 107606,
"is_parallel": true,
"self": 56.06648272397479,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 37.790391623057985,
"count": 107606,
"is_parallel": true,
"self": 37.790391623057985
},
"communicator.exchange": {
"total": 1965.960767741927,
"count": 107606,
"is_parallel": true,
"self": 1965.960767741927
},
"steps_from_proto": {
"total": 172.7448267749944,
"count": 107606,
"is_parallel": true,
"self": 33.357563551233625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 139.38726322376078,
"count": 860848,
"is_parallel": true,
"self": 139.38726322376078
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1092.3920583121605,
"count": 107607,
"self": 4.301352635175817,
"children": {
"process_trajectory": {
"total": 183.98929780698995,
"count": 107607,
"self": 183.68874121799013,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3005565889998252,
"count": 3,
"self": 0.3005565889998252
}
}
},
"_update_policy": {
"total": 904.1014078699948,
"count": 771,
"self": 587.1496411399585,
"children": {
"TorchPPOOptimizer.update": {
"total": 316.9517667300363,
"count": 38373,
"self": 316.9517667300363
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.282999619434122e-06,
"count": 1,
"self": 1.282999619434122e-06
},
"TrainerController._save_models": {
"total": 0.29679669800043484,
"count": 1,
"self": 0.0039019360001475434,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2928947620002873,
"count": 1,
"self": 0.2928947620002873
}
}
}
}
}
}
}