{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.435935914516449,
"min": 0.4213733673095703,
"max": 1.390051007270813,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13161.77734375,
"min": 12715.3623046875,
"max": 42168.5859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989984.0,
"min": 29952.0,
"max": 989984.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989984.0,
"min": 29952.0,
"max": 989984.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3185103237628937,
"min": -0.0947466641664505,
"max": 0.45888981223106384,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 83.44970703125,
"min": -22.928691864013672,
"max": 122.98246765136719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.030679401010274887,
"min": -0.03833285719156265,
"max": 0.3219869136810303,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.038002967834473,
"min": -10.388204574584961,
"max": 77.59884643554688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06979774181163316,
"min": 0.06623267750200328,
"max": 0.07352059617569197,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9771683853628643,
"min": 0.5146441732298438,
"max": 1.0854656835629914,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011916276192949566,
"min": 0.0009966640639475918,
"max": 0.013191068361715328,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16682786670129393,
"min": 0.012956632831318694,
"max": 0.19786602542572992,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.282340429728572e-06,
"min": 7.282340429728572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001019527660162,
"min": 0.0001019527660162,
"max": 0.0035071196309602,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242741428571431,
"min": 0.10242741428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339838000000003,
"min": 1.3886848,
"max": 2.5690398,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002524986871428572,
"min": 0.0002524986871428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035349816200000005,
"min": 0.0035349816200000005,
"max": 0.11692707601999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007213735021650791,
"min": 0.007059289142489433,
"max": 0.3894733786582947,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10099229216575623,
"min": 0.10099229216575623,
"max": 2.726313591003418,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 468.84615384615387,
"min": 434.5217391304348,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30475.0,
"min": 15984.0,
"max": 32902.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0684605789455501,
"min": -1.0000000521540642,
"max": 1.4157617486575071,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 70.5183982104063,
"min": -29.997601568698883,
"max": 98.51399824023247,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0684605789455501,
"min": -1.0000000521540642,
"max": 1.4157617486575071,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 70.5183982104063,
"min": -29.997601568698883,
"max": 98.51399824023247,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03525093159546624,
"min": 0.034974775920896456,
"max": 7.668156085535884,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.326561485300772,
"min": 2.1767208746168762,
"max": 122.69049736857414,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724515309",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724517564"
},
"total": 2255.441398202,
"count": 1,
"self": 0.476132029999917,
"children": {
"run_training.setup": {
"total": 0.05323798499989607,
"count": 1,
"self": 0.05323798499989607
},
"TrainerController.start_learning": {
"total": 2254.912028187,
"count": 1,
"self": 1.5806336990176533,
"children": {
"TrainerController._reset_env": {
"total": 2.0885575190000054,
"count": 1,
"self": 2.0885575190000054
},
"TrainerController.advance": {
"total": 2251.1569353709824,
"count": 63674,
"self": 1.680537942934734,
"children": {
"env_step": {
"total": 1593.7604527870237,
"count": 63674,
"self": 1448.775508080011,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.01518980801347,
"count": 63674,
"self": 5.024245855046956,
"children": {
"TorchPolicy.evaluate": {
"total": 138.99094395296652,
"count": 62547,
"self": 138.99094395296652
}
}
},
"workers": {
"total": 0.9697548989993265,
"count": 63674,
"self": 0.0,
"children": {
"worker_root": {
"total": 2249.452297531949,
"count": 63674,
"is_parallel": true,
"self": 931.0677311609898,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002127320999989024,
"count": 1,
"is_parallel": true,
"self": 0.0007352200000241282,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013921009999648959,
"count": 8,
"is_parallel": true,
"self": 0.0013921009999648959
}
}
},
"UnityEnvironment.step": {
"total": 0.07180084900005568,
"count": 1,
"is_parallel": true,
"self": 0.0006310430001121858,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046396199991249887,
"count": 1,
"is_parallel": true,
"self": 0.00046396199991249887
},
"communicator.exchange": {
"total": 0.0688994699999057,
"count": 1,
"is_parallel": true,
"self": 0.0688994699999057
},
"steps_from_proto": {
"total": 0.0018063740001252881,
"count": 1,
"is_parallel": true,
"self": 0.000375905000282728,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014304689998425602,
"count": 8,
"is_parallel": true,
"self": 0.0014304689998425602
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1318.3845663709594,
"count": 63673,
"is_parallel": true,
"self": 35.01980330987681,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.757310351068554,
"count": 63673,
"is_parallel": true,
"self": 24.757310351068554
},
"communicator.exchange": {
"total": 1154.0818827359622,
"count": 63673,
"is_parallel": true,
"self": 1154.0818827359622
},
"steps_from_proto": {
"total": 104.5255699740519,
"count": 63673,
"is_parallel": true,
"self": 21.971425594896118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.55414437915579,
"count": 509384,
"is_parallel": true,
"self": 82.55414437915579
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.7159446410242,
"count": 63674,
"self": 2.9970947040508236,
"children": {
"process_trajectory": {
"total": 135.11948154497077,
"count": 63674,
"self": 134.90320173397095,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21627981099982208,
"count": 2,
"self": 0.21627981099982208
}
}
},
"_update_policy": {
"total": 517.5993683920026,
"count": 452,
"self": 306.53042027901597,
"children": {
"TorchPPOOptimizer.update": {
"total": 211.06894811298662,
"count": 22734,
"self": 211.06894811298662
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.570000318286475e-07,
"count": 1,
"self": 8.570000318286475e-07
},
"TrainerController._save_models": {
"total": 0.08590074099993217,
"count": 1,
"self": 0.0012907669997730409,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08460997400015913,
"count": 1,
"self": 0.08460997400015913
}
}
}
}
}
}
}