{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7528964281082153,
"min": 0.7528964281082153,
"max": 2.8509700298309326,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7155.52783203125,
"min": 7155.52783203125,
"max": 29102.703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.956405639648438,
"min": 0.29787591099739075,
"max": 12.956405639648438,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2526.4990234375,
"min": 57.787925720214844,
"max": 2640.55517578125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0706908948757736,
"min": 0.06312227231603317,
"max": 0.07285380519162715,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2827635795030944,
"min": 0.2557500552445394,
"max": 0.3531862621089471,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1779409669266612,
"min": 0.12655679031512607,
"max": 0.28504161676939793,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7117638677066448,
"min": 0.5062271612605043,
"max": 1.4252080838469896,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.613636363636363,
"min": 3.590909090909091,
"max": 25.89090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1127.0,
"min": 158.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.613636363636363,
"min": 3.590909090909091,
"max": 25.89090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1127.0,
"min": 158.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756042176",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756042919"
},
"total": 743.43093095,
"count": 1,
"self": 0.692404420999992,
"children": {
"run_training.setup": {
"total": 0.05675258299993402,
"count": 1,
"self": 0.05675258299993402
},
"TrainerController.start_learning": {
"total": 742.681773946,
"count": 1,
"self": 0.908091301991135,
"children": {
"TrainerController._reset_env": {
"total": 5.091291355000067,
"count": 1,
"self": 5.091291355000067
},
"TrainerController.advance": {
"total": 736.5825437700089,
"count": 18192,
"self": 0.9466520380024122,
"children": {
"env_step": {
"total": 522.316609806976,
"count": 18192,
"self": 443.059950774983,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.70692454199889,
"count": 18192,
"self": 3.5555905599931066,
"children": {
"TorchPolicy.evaluate": {
"total": 75.15133398200578,
"count": 18192,
"self": 75.15133398200578
}
}
},
"workers": {
"total": 0.5497344899940799,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 739.5420104680068,
"count": 18192,
"is_parallel": true,
"self": 361.1152129420086,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008963831999835747,
"count": 1,
"is_parallel": true,
"self": 0.005890254999940225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003073576999895522,
"count": 10,
"is_parallel": true,
"self": 0.003073576999895522
}
}
},
"UnityEnvironment.step": {
"total": 0.1300518049999937,
"count": 1,
"is_parallel": true,
"self": 0.0008425529999840364,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005747320001319167,
"count": 1,
"is_parallel": true,
"self": 0.0005747320001319167
},
"communicator.exchange": {
"total": 0.12612553600001775,
"count": 1,
"is_parallel": true,
"self": 0.12612553600001775
},
"steps_from_proto": {
"total": 0.0025089839998599928,
"count": 1,
"is_parallel": true,
"self": 0.00048591900008432276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00202306499977567,
"count": 10,
"is_parallel": true,
"self": 0.00202306499977567
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 378.42679752599815,
"count": 18191,
"is_parallel": true,
"self": 17.365708915024925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.253897432972508,
"count": 18191,
"is_parallel": true,
"self": 9.253897432972508
},
"communicator.exchange": {
"total": 300.10134320500265,
"count": 18191,
"is_parallel": true,
"self": 300.10134320500265
},
"steps_from_proto": {
"total": 51.70584797299807,
"count": 18191,
"is_parallel": true,
"self": 10.591154011963454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.11469396103462,
"count": 181910,
"is_parallel": true,
"self": 41.11469396103462
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 213.3192819250305,
"count": 18192,
"self": 1.2155570840138807,
"children": {
"process_trajectory": {
"total": 43.26120213201739,
"count": 18192,
"self": 42.60721722101766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6539849109997249,
"count": 4,
"self": 0.6539849109997249
}
}
},
"_update_policy": {
"total": 168.84252270899924,
"count": 90,
"self": 57.879228728990256,
"children": {
"TorchPPOOptimizer.update": {
"total": 110.96329398000898,
"count": 4587,
"self": 110.96329398000898
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.651000275160186e-06,
"count": 1,
"self": 1.651000275160186e-06
},
"TrainerController._save_models": {
"total": 0.09984586799964745,
"count": 1,
"self": 0.001943943999322073,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09790192400032538,
"count": 1,
"self": 0.09790192400032538
}
}
}
}
}
}
}