{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48379194736480713,
"min": 0.48379194736480713,
"max": 1.4188684225082397,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14436.3515625,
"min": 14436.3515625,
"max": 43042.79296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2871297299861908,
"min": -0.1082937940955162,
"max": 0.2871297299861908,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 73.79234313964844,
"min": -26.098804473876953,
"max": 74.86961364746094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012709363363683224,
"min": -0.015038547106087208,
"max": 0.305647075176239,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2663064002990723,
"min": -3.819791078567505,
"max": 73.35530090332031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0704256998031356,
"min": 0.0650896433937244,
"max": 0.07321692797761468,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9859597972438983,
"min": 0.507143614908022,
"max": 1.0747438535521117,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015386342804482585,
"min": 0.00015941712284968967,
"max": 0.016267696638923464,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2154087992627562,
"min": 0.001913005474196276,
"max": 0.2154087992627562,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.46328322655714e-06,
"min": 7.46328322655714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010448596517179996,
"min": 0.00010448596517179996,
"max": 0.0034929307356897994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248772857142859,
"min": 0.10248772857142859,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348282,
"min": 1.3691136000000002,
"max": 2.5275118,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002585240842857142,
"min": 0.0002585240842857142,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003619337179999999,
"min": 0.003619337179999999,
"max": 0.11644458897999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01102800015360117,
"min": 0.01102800015360117,
"max": 0.5263792276382446,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15439200401306152,
"min": 0.15439200401306152,
"max": 3.684654474258423,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 525.1090909090909,
"min": 525.1090909090909,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28881.0,
"min": 15984.0,
"max": 32526.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1838945181532339,
"min": -1.0000000521540642,
"max": 1.1838945181532339,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 65.11419849842787,
"min": -32.000001668930054,
"max": 65.7153983861208,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1838945181532339,
"min": -1.0000000521540642,
"max": 1.1838945181532339,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 65.11419849842787,
"min": -32.000001668930054,
"max": 65.7153983861208,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0596144320062277,
"min": 0.0596144320062277,
"max": 10.240615317597985,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2787937603425235,
"min": 3.1664377196575515,
"max": 163.84984508156776,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744707272",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744709416"
},
"total": 2144.453138808,
"count": 1,
"self": 0.4761590659995818,
"children": {
"run_training.setup": {
"total": 0.02058423399989806,
"count": 1,
"self": 0.02058423399989806
},
"TrainerController.start_learning": {
"total": 2143.9563955080002,
"count": 1,
"self": 1.4712903360054952,
"children": {
"TrainerController._reset_env": {
"total": 2.103301040000133,
"count": 1,
"self": 2.103301040000133
},
"TrainerController.advance": {
"total": 2140.295352301994,
"count": 63360,
"self": 1.5650157579984807,
"children": {
"env_step": {
"total": 1455.1577125119986,
"count": 63360,
"self": 1295.0736453200282,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.2363002579998,
"count": 63360,
"self": 4.795404632996224,
"children": {
"TorchPolicy.evaluate": {
"total": 154.44089562500358,
"count": 62561,
"self": 154.44089562500358
}
}
},
"workers": {
"total": 0.8477669339706608,
"count": 63360,
"self": 0.0,
"children": {
"worker_root": {
"total": 2139.1640401640398,
"count": 63360,
"is_parallel": true,
"self": 957.8612570309672,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020115160000386823,
"count": 1,
"is_parallel": true,
"self": 0.0006671319997622049,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013443840002764773,
"count": 8,
"is_parallel": true,
"self": 0.0013443840002764773
}
}
},
"UnityEnvironment.step": {
"total": 0.04637463199992453,
"count": 1,
"is_parallel": true,
"self": 0.0005450900000596448,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004684909999923548,
"count": 1,
"is_parallel": true,
"self": 0.0004684909999923548
},
"communicator.exchange": {
"total": 0.04379681799991886,
"count": 1,
"is_parallel": true,
"self": 0.04379681799991886
},
"steps_from_proto": {
"total": 0.0015642329999536742,
"count": 1,
"is_parallel": true,
"self": 0.0003316960001029656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012325369998507085,
"count": 8,
"is_parallel": true,
"self": 0.0012325369998507085
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1181.3027831330726,
"count": 63359,
"is_parallel": true,
"self": 31.905697707952868,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.023310856005537,
"count": 63359,
"is_parallel": true,
"self": 23.023310856005537
},
"communicator.exchange": {
"total": 1029.1221493700637,
"count": 63359,
"is_parallel": true,
"self": 1029.1221493700637
},
"steps_from_proto": {
"total": 97.25162519905052,
"count": 63359,
"is_parallel": true,
"self": 19.951814660913442,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.29981053813708,
"count": 506872,
"is_parallel": true,
"self": 77.29981053813708
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 683.5726240319973,
"count": 63360,
"self": 2.744036913961054,
"children": {
"process_trajectory": {
"total": 128.95145754004307,
"count": 63360,
"self": 128.7410575250433,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21040001499977734,
"count": 2,
"self": 0.21040001499977734
}
}
},
"_update_policy": {
"total": 551.8771295779932,
"count": 445,
"self": 302.57382373997984,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.30330583801333,
"count": 22824,
"self": 249.30330583801333
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.090000847005285e-07,
"count": 1,
"self": 8.090000847005285e-07
},
"TrainerController._save_models": {
"total": 0.08645102100035729,
"count": 1,
"self": 0.001533769000616303,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08491725199974098,
"count": 1,
"self": 0.08491725199974098
}
}
}
}
}
}
}