{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30416736006736755,
"min": 0.30275237560272217,
"max": 1.479612946510315,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9042.287109375,
"min": 9019.5986328125,
"max": 44885.5390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5716589093208313,
"min": -0.08118095248937607,
"max": 0.6175125241279602,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.49282836914062,
"min": -19.56460952758789,
"max": 171.66848754882812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01039412897080183,
"min": 0.007501782849431038,
"max": 0.26723921298980713,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.8999619483947754,
"min": 1.852940320968628,
"max": 64.40464782714844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07004273266093583,
"min": 0.06456896775581153,
"max": 0.0733892018736451,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9805982572531017,
"min": 0.49355158257320253,
"max": 1.0472979241964857,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016665813418512698,
"min": 0.0009456961514945661,
"max": 0.016665813418512698,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23332138785917778,
"min": 0.01171671236803984,
"max": 0.23332138785917778,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.633426026985714e-06,
"min": 7.633426026985714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001068679643778,
"min": 0.0001068679643778,
"max": 0.0035092067302645,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254444285714286,
"min": 0.10254444285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356222,
"min": 1.3886848,
"max": 2.5697355,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002641898414285716,
"min": 0.0002641898414285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003698657780000002,
"min": 0.003698657780000002,
"max": 0.11699657645,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014323187991976738,
"min": 0.014323187991976738,
"max": 0.3910225033760071,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20052462816238403,
"min": 0.20052462816238403,
"max": 2.7371575832366943,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 314.80434782608694,
"min": 314.80434782608694,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28962.0,
"min": 15984.0,
"max": 32514.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.663447803615228,
"min": -1.0000000521540642,
"max": 1.663447803615228,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.03719793260098,
"min": -30.215001687407494,
"max": 153.03719793260098,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.663447803615228,
"min": -1.0000000521540642,
"max": 1.663447803615228,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.03719793260098,
"min": -30.215001687407494,
"max": 153.03719793260098,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04692120766539238,
"min": 0.04692120766539238,
"max": 7.4878272684291005,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.316751105216099,
"min": 4.267883414286189,
"max": 119.80523629486561,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697151665",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697153927"
},
"total": 2262.433822276,
"count": 1,
"self": 0.4934116719996382,
"children": {
"run_training.setup": {
"total": 0.04281772399917827,
"count": 1,
"self": 0.04281772399917827
},
"TrainerController.start_learning": {
"total": 2261.897592880001,
"count": 1,
"self": 1.399870953811842,
"children": {
"TrainerController._reset_env": {
"total": 3.0901478759988095,
"count": 1,
"self": 3.0901478759988095
},
"TrainerController.advance": {
"total": 2257.330663273189,
"count": 63994,
"self": 1.4326668191824865,
"children": {
"env_step": {
"total": 1622.7186875731313,
"count": 63994,
"self": 1497.1508321805377,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.74149527874215,
"count": 63994,
"self": 4.770742512815559,
"children": {
"TorchPolicy.evaluate": {
"total": 119.97075276592659,
"count": 62555,
"self": 119.97075276592659
}
}
},
"workers": {
"total": 0.8263601138514787,
"count": 63994,
"self": 0.0,
"children": {
"worker_root": {
"total": 2257.1516210859063,
"count": 63994,
"is_parallel": true,
"self": 880.1478878741545,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002026785001362441,
"count": 1,
"is_parallel": true,
"self": 0.0006583210033568321,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001368463998005609,
"count": 8,
"is_parallel": true,
"self": 0.001368463998005609
}
}
},
"UnityEnvironment.step": {
"total": 0.04984591999891563,
"count": 1,
"is_parallel": true,
"self": 0.000610960996709764,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004952900017087813,
"count": 1,
"is_parallel": true,
"self": 0.0004952900017087813
},
"communicator.exchange": {
"total": 0.0470267460004834,
"count": 1,
"is_parallel": true,
"self": 0.0470267460004834
},
"steps_from_proto": {
"total": 0.0017129230000136886,
"count": 1,
"is_parallel": true,
"self": 0.0003516320011840435,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001361290998829645,
"count": 8,
"is_parallel": true,
"self": 0.001361290998829645
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1377.0037332117518,
"count": 63993,
"is_parallel": true,
"self": 34.86118684739631,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.683152167070148,
"count": 63993,
"is_parallel": true,
"self": 24.683152167070148
},
"communicator.exchange": {
"total": 1218.531810514105,
"count": 63993,
"is_parallel": true,
"self": 1218.531810514105
},
"steps_from_proto": {
"total": 98.92758368318027,
"count": 63993,
"is_parallel": true,
"self": 19.854489011720943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.07309467145933,
"count": 511944,
"is_parallel": true,
"self": 79.07309467145933
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 633.1793088808754,
"count": 63994,
"self": 2.642631363172768,
"children": {
"process_trajectory": {
"total": 121.07478625171461,
"count": 63994,
"self": 120.89975906071595,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17502719099866226,
"count": 2,
"self": 0.17502719099866226
}
}
},
"_update_policy": {
"total": 509.461891265988,
"count": 452,
"self": 306.3452739119166,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.11661735407142,
"count": 22815,
"self": 203.11661735407142
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0360017768107355e-06,
"count": 1,
"self": 1.0360017768107355e-06
},
"TrainerController._save_models": {
"total": 0.07690974099932646,
"count": 1,
"self": 0.0013725669996347278,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07553717399969173,
"count": 1,
"self": 0.07553717399969173
}
}
}
}
}
}
}