{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0924500226974487,
"min": 1.0924500226974487,
"max": 2.871011257171631,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10442.7294921875,
"min": 10442.7294921875,
"max": 29433.607421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.969993591308594,
"min": 0.41508492827415466,
"max": 11.969993591308594,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2334.148681640625,
"min": 80.52647399902344,
"max": 2377.607421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06915072155813916,
"min": 0.06360901765041418,
"max": 0.07945232800648584,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27660288623255663,
"min": 0.25443607060165674,
"max": 0.3972616400324292,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20598531799281344,
"min": 0.1303618755716575,
"max": 0.2789942549621739,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8239412719712538,
"min": 0.52144750228663,
"max": 1.3453191404833513,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.272727272727273,
"min": 3.590909090909091,
"max": 24.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1068.0,
"min": 158.0,
"max": 1288.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.272727272727273,
"min": 3.590909090909091,
"max": 24.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1068.0,
"min": 158.0,
"max": 1288.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735217579",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735218191"
},
"total": 611.137156926,
"count": 1,
"self": 0.54370644599976,
"children": {
"run_training.setup": {
"total": 0.08339410700000371,
"count": 1,
"self": 0.08339410700000371
},
"TrainerController.start_learning": {
"total": 610.5100563730002,
"count": 1,
"self": 0.9243355600674477,
"children": {
"TrainerController._reset_env": {
"total": 8.263597371000287,
"count": 1,
"self": 8.263597371000287
},
"TrainerController.advance": {
"total": 601.2343053449331,
"count": 18202,
"self": 0.43965686288765937,
"children": {
"env_step": {
"total": 600.7946484820454,
"count": 18202,
"self": 457.89449106317215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.46761517491223,
"count": 18202,
"self": 2.6545951199113915,
"children": {
"TorchPolicy.evaluate": {
"total": 139.81302005500083,
"count": 18202,
"self": 139.81302005500083
}
}
},
"workers": {
"total": 0.432542243961052,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 608.633767935044,
"count": 18202,
"is_parallel": true,
"self": 294.2518986990872,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008058956000240869,
"count": 1,
"is_parallel": true,
"self": 0.005350487999749021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002708468000491848,
"count": 10,
"is_parallel": true,
"self": 0.002708468000491848
}
}
},
"UnityEnvironment.step": {
"total": 0.04633580700010498,
"count": 1,
"is_parallel": true,
"self": 0.0010587100000520877,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004977799999323906,
"count": 1,
"is_parallel": true,
"self": 0.0004977799999323906
},
"communicator.exchange": {
"total": 0.042038175000016054,
"count": 1,
"is_parallel": true,
"self": 0.042038175000016054
},
"steps_from_proto": {
"total": 0.002741142000104446,
"count": 1,
"is_parallel": true,
"self": 0.0005249400001048343,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002216201999999612,
"count": 10,
"is_parallel": true,
"self": 0.002216201999999612
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 314.38186923595686,
"count": 18201,
"is_parallel": true,
"self": 15.49595522489335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.628161766124322,
"count": 18201,
"is_parallel": true,
"self": 7.628161766124322
},
"communicator.exchange": {
"total": 246.06817856895123,
"count": 18201,
"is_parallel": true,
"self": 246.06817856895123
},
"steps_from_proto": {
"total": 45.18957367598796,
"count": 18201,
"is_parallel": true,
"self": 9.017220031999386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.17235364398857,
"count": 182010,
"is_parallel": true,
"self": 36.17235364398857
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002049329996225424,
"count": 1,
"self": 0.0002049329996225424,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 593.9315898012565,
"count": 783621,
"is_parallel": true,
"self": 18.28126973317694,
"children": {
"process_trajectory": {
"total": 320.88957979507995,
"count": 783621,
"is_parallel": true,
"self": 320.14646681307886,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7431129820010938,
"count": 4,
"is_parallel": true,
"self": 0.7431129820010938
}
}
},
"_update_policy": {
"total": 254.76074027299956,
"count": 90,
"is_parallel": true,
"self": 71.8510801080065,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.90966016499306,
"count": 4584,
"is_parallel": true,
"self": 182.90966016499306
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08761316399977659,
"count": 1,
"self": 0.0017287769996983116,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08588438700007828,
"count": 1,
"self": 0.08588438700007828
}
}
}
}
}
}
}