{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9693571925163269,
"min": 0.9693571925163269,
"max": 2.850846529006958,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9212.7705078125,
"min": 9212.7705078125,
"max": 29101.44140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.11558723449707,
"min": 0.31932133436203003,
"max": 13.11558723449707,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2557.53955078125,
"min": 61.948341369628906,
"max": 2670.13330078125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06964090305165668,
"min": 0.05701588993343096,
"max": 0.07507025217802912,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27856361220662673,
"min": 0.22806355973372383,
"max": 0.3753512608901456,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21432905534611027,
"min": 0.13605169292685448,
"max": 0.2939279655615489,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8573162213844411,
"min": 0.5442067717074179,
"max": 1.4696398278077445,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.636363636363637,
"min": 3.659090909090909,
"max": 25.836363636363636,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1128.0,
"min": 161.0,
"max": 1421.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.636363636363637,
"min": 3.659090909090909,
"max": 25.836363636363636,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1128.0,
"min": 161.0,
"max": 1421.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749130789",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749131206"
},
"total": 417.087426606,
"count": 1,
"self": 0.43832693800004563,
"children": {
"run_training.setup": {
"total": 0.0230886819999796,
"count": 1,
"self": 0.0230886819999796
},
"TrainerController.start_learning": {
"total": 416.626010986,
"count": 1,
"self": 0.3445621200175992,
"children": {
"TrainerController._reset_env": {
"total": 3.4970720240003175,
"count": 1,
"self": 3.4970720240003175
},
"TrainerController.advance": {
"total": 412.70017037698244,
"count": 18192,
"self": 0.34747715804724066,
"children": {
"env_step": {
"total": 290.1963189809535,
"count": 18192,
"self": 220.68855383191567,
"children": {
"SubprocessEnvManager._take_step": {
"total": 69.31105237100837,
"count": 18192,
"self": 1.2051165860034416,
"children": {
"TorchPolicy.evaluate": {
"total": 68.10593578500493,
"count": 18192,
"self": 68.10593578500493
}
}
},
"workers": {
"total": 0.19671277802945042,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 415.3742345950245,
"count": 18192,
"is_parallel": true,
"self": 221.32249860800948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005289345000164758,
"count": 1,
"is_parallel": true,
"self": 0.0037362019997999596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015531430003647984,
"count": 10,
"is_parallel": true,
"self": 0.0015531430003647984
}
}
},
"UnityEnvironment.step": {
"total": 0.03561513800013927,
"count": 1,
"is_parallel": true,
"self": 0.0006261670000640152,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037656199992852635,
"count": 1,
"is_parallel": true,
"self": 0.00037656199992852635
},
"communicator.exchange": {
"total": 0.032872345999749086,
"count": 1,
"is_parallel": true,
"self": 0.032872345999749086
},
"steps_from_proto": {
"total": 0.0017400630003976403,
"count": 1,
"is_parallel": true,
"self": 0.0003308500004095549,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014092129999880854,
"count": 10,
"is_parallel": true,
"self": 0.0014092129999880854
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 194.05173598701504,
"count": 18191,
"is_parallel": true,
"self": 9.449950699041437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.182944166956986,
"count": 18191,
"is_parallel": true,
"self": 5.182944166956986
},
"communicator.exchange": {
"total": 149.61518464498477,
"count": 18191,
"is_parallel": true,
"self": 149.61518464498477
},
"steps_from_proto": {
"total": 29.803656476031847,
"count": 18191,
"is_parallel": true,
"self": 5.1902249359463895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.613431540085458,
"count": 181910,
"is_parallel": true,
"self": 24.613431540085458
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 122.15637423798171,
"count": 18192,
"self": 0.3985493270097322,
"children": {
"process_trajectory": {
"total": 27.145626110975172,
"count": 18192,
"self": 26.67712529797518,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46850081299999147,
"count": 4,
"self": 0.46850081299999147
}
}
},
"_update_policy": {
"total": 94.6121987999968,
"count": 90,
"self": 37.94580041101426,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.66639838898254,
"count": 4587,
"self": 56.66639838898254
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.919999683916103e-07,
"count": 1,
"self": 9.919999683916103e-07
},
"TrainerController._save_models": {
"total": 0.0842054729996562,
"count": 1,
"self": 0.0008880959994712612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08331737700018493,
"count": 1,
"self": 0.08331737700018493
}
}
}
}
}
}
}