{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4514184296131134,
"min": 0.4514184296131134,
"max": 1.5583761930465698,
"count": 36
},
"Pyramids.Policy.Entropy.sum": {
"value": 13564.220703125,
"min": 13564.220703125,
"max": 47274.8984375,
"count": 36
},
"Pyramids.Step.mean": {
"value": 1079937.0,
"min": 29922.0,
"max": 1079937.0,
"count": 36
},
"Pyramids.Step.sum": {
"value": 1079937.0,
"min": 29922.0,
"max": 1079937.0,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5719555616378784,
"min": -0.11679188907146454,
"max": 0.5856527090072632,
"count": 36
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.8597412109375,
"min": -28.1468448638916,
"max": 161.64015197753906,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007849574089050293,
"min": -0.017095467075705528,
"max": 0.2694181799888611,
"count": 36
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.166482448577881,
"min": -4.615776062011719,
"max": 63.85211181640625,
"count": 36
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06120021041275683,
"min": 0.06120021041275683,
"max": 0.07368334999668341,
"count": 36
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.4284014728892978,
"min": 0.3444846692215714,
"max": 0.55275172322096,
"count": 36
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015315190891016716,
"min": 0.0003247945840249398,
"max": 0.017797986171289155,
"count": 36
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10720633623711702,
"min": 0.0022735620881745786,
"max": 0.1286224854979297,
"count": 36
},
"Pyramids.Policy.LearningRate.mean": {
"value": 9.915330461155846e-06,
"min": 9.915330461155846e-06,
"max": 0.00029514229252832716,
"count": 36
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.940731322809092e-05,
"min": 6.940731322809092e-05,
"max": 0.0018445569033295453,
"count": 36
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10330507792207791,
"min": 0.10330507792207791,
"max": 0.19838076363636362,
"count": 36
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7231355454545454,
"min": 0.7231355454545454,
"max": 1.4145963636363639,
"count": 36
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00034017728441558455,
"min": 0.00034017728441558455,
"max": 0.009838238287272727,
"count": 36
},
"Pyramids.Policy.Beta.sum": {
"value": 0.002381240990909092,
"min": 0.002381240990909092,
"max": 0.061493742045454554,
"count": 36
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008311551064252853,
"min": 0.008311551064252853,
"max": 0.33355727791786194,
"count": 36
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.058180857449769974,
"min": 0.058180857449769974,
"max": 1.6677863597869873,
"count": 36
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 361.2183908045977,
"min": 319.7590361445783,
"max": 999.0,
"count": 36
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31426.0,
"min": 16849.0,
"max": 32959.0,
"count": 36
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6157838839224015,
"min": -0.9999375520274043,
"max": 1.6561421536537537,
"count": 36
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 140.57319790124893,
"min": -31.998001664876938,
"max": 146.08919881284237,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6157838839224015,
"min": -0.9999375520274043,
"max": 1.6561421536537537,
"count": 36
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 140.57319790124893,
"min": -31.998001664876938,
"max": 146.08919881284237,
"count": 36
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03137038925601753,
"min": 0.03060568171421547,
"max": 7.917204534306245,
"count": 36
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.729223865273525,
"min": 2.545566466404125,
"max": 134.59247708320618,
"count": 36
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730100352",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730102906"
},
"total": 2554.1297229869997,
"count": 1,
"self": 0.4295359199995801,
"children": {
"run_training.setup": {
"total": 0.07431983999958902,
"count": 1,
"self": 0.07431983999958902
},
"TrainerController.start_learning": {
"total": 2553.6258672270005,
"count": 1,
"self": 1.5909967890511325,
"children": {
"TrainerController._reset_env": {
"total": 2.7573871260001397,
"count": 1,
"self": 2.7573871260001397
},
"TrainerController.advance": {
"total": 2549.1881253249494,
"count": 70210,
"self": 1.6380512678006198,
"children": {
"env_step": {
"total": 1805.224631284119,
"count": 70210,
"self": 1632.8224390892374,
"children": {
"SubprocessEnvManager._take_step": {
"total": 171.44006528389673,
"count": 70210,
"self": 5.40600388087114,
"children": {
"TorchPolicy.evaluate": {
"total": 166.0340614030256,
"count": 68806,
"self": 166.0340614030256
}
}
},
"workers": {
"total": 0.9621269109848072,
"count": 70210,
"self": 0.0,
"children": {
"worker_root": {
"total": 2547.68743062686,
"count": 70210,
"is_parallel": true,
"self": 1049.4520835637995,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028856190001533832,
"count": 1,
"is_parallel": true,
"self": 0.0007951550023790332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00209046399777435,
"count": 8,
"is_parallel": true,
"self": 0.00209046399777435
}
}
},
"UnityEnvironment.step": {
"total": 0.05555313599961664,
"count": 1,
"is_parallel": true,
"self": 0.0007187099990915158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005548090002776007,
"count": 1,
"is_parallel": true,
"self": 0.0005548090002776007
},
"communicator.exchange": {
"total": 0.052297407999503775,
"count": 1,
"is_parallel": true,
"self": 0.052297407999503775
},
"steps_from_proto": {
"total": 0.001982209000743751,
"count": 1,
"is_parallel": true,
"self": 0.0004866959998253151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014955130009184359,
"count": 8,
"is_parallel": true,
"self": 0.0014955130009184359
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1498.2353470630605,
"count": 70209,
"is_parallel": true,
"self": 37.904958780251945,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.171803152820758,
"count": 70209,
"is_parallel": true,
"self": 27.171803152820758
},
"communicator.exchange": {
"total": 1321.4065988800385,
"count": 70209,
"is_parallel": true,
"self": 1321.4065988800385
},
"steps_from_proto": {
"total": 111.75198624994937,
"count": 70209,
"is_parallel": true,
"self": 23.243654741371756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.50833150857761,
"count": 561672,
"is_parallel": true,
"self": 88.50833150857761
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 742.3254427730299,
"count": 70210,
"self": 3.031253361900781,
"children": {
"process_trajectory": {
"total": 152.78918633012654,
"count": 70210,
"self": 152.52523560012742,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2639507299991237,
"count": 2,
"self": 0.2639507299991237
}
}
},
"_update_policy": {
"total": 586.5050030810025,
"count": 257,
"self": 343.0075790620367,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.49742401896583,
"count": 22485,
"self": 243.49742401896583
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.22000253922306e-07,
"count": 1,
"self": 9.22000253922306e-07
},
"TrainerController._save_models": {
"total": 0.08935706499960361,
"count": 1,
"self": 0.0017563369992785738,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08760072800032503,
"count": 1,
"self": 0.08760072800032503
}
}
}
}
}
}
}