{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7979820966720581,
"min": 0.7979820966720581,
"max": 2.8125789165496826,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7584.02197265625,
"min": 7584.02197265625,
"max": 28710.8046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.988581657409668,
"min": 0.44115740060806274,
"max": 12.988581657409668,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2532.7734375,
"min": 85.58453369140625,
"max": 2627.951171875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06721604479596363,
"min": 0.05982717842405192,
"max": 0.07531327150137776,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2688641791838545,
"min": 0.23930871369620768,
"max": 0.37656635750688877,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20015758779995582,
"min": 0.130256018014716,
"max": 0.3244797846265868,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8006303511998233,
"min": 0.521024072058864,
"max": 1.6223989231329339,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 4.090909090909091,
"max": 25.6,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 180.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 4.090909090909091,
"max": 25.6,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 180.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749018529",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749018972"
},
"total": 443.26285516099995,
"count": 1,
"self": 0.43851974400001836,
"children": {
"run_training.setup": {
"total": 0.024953573000061624,
"count": 1,
"self": 0.024953573000061624
},
"TrainerController.start_learning": {
"total": 442.79938184399987,
"count": 1,
"self": 0.34805257101083953,
"children": {
"TrainerController._reset_env": {
"total": 3.6585985670000127,
"count": 1,
"self": 3.6585985670000127
},
"TrainerController.advance": {
"total": 438.71093341398887,
"count": 18192,
"self": 0.3787118819942634,
"children": {
"env_step": {
"total": 308.6036714229898,
"count": 18192,
"self": 234.0071981769837,
"children": {
"SubprocessEnvManager._take_step": {
"total": 74.38495105901029,
"count": 18192,
"self": 1.2735717950035905,
"children": {
"TorchPolicy.evaluate": {
"total": 73.1113792640067,
"count": 18192,
"self": 73.1113792640067
}
}
},
"workers": {
"total": 0.21152218699580771,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 441.4283448960132,
"count": 18192,
"is_parallel": true,
"self": 235.97115521601404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006067588000064461,
"count": 1,
"is_parallel": true,
"self": 0.004328487999941899,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001739100000122562,
"count": 10,
"is_parallel": true,
"self": 0.001739100000122562
}
}
},
"UnityEnvironment.step": {
"total": 0.05648844600000302,
"count": 1,
"is_parallel": true,
"self": 0.0005485300000600546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045783099994878285,
"count": 1,
"is_parallel": true,
"self": 0.00045783099994878285
},
"communicator.exchange": {
"total": 0.0538243670000611,
"count": 1,
"is_parallel": true,
"self": 0.0538243670000611
},
"steps_from_proto": {
"total": 0.0016577179999330838,
"count": 1,
"is_parallel": true,
"self": 0.0003162839998367417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001341434000096342,
"count": 10,
"is_parallel": true,
"self": 0.001341434000096342
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 205.45718967999915,
"count": 18191,
"is_parallel": true,
"self": 9.829208403979578,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.725924539998914,
"count": 18191,
"is_parallel": true,
"self": 5.725924539998914
},
"communicator.exchange": {
"total": 158.16650834000768,
"count": 18191,
"is_parallel": true,
"self": 158.16650834000768
},
"steps_from_proto": {
"total": 31.735548396012973,
"count": 18191,
"is_parallel": true,
"self": 5.759096720993398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.976451675019575,
"count": 181910,
"is_parallel": true,
"self": 25.976451675019575
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 129.7285501090048,
"count": 18192,
"self": 0.4528101570116405,
"children": {
"process_trajectory": {
"total": 28.18175630299379,
"count": 18192,
"self": 27.71212173499373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4696345680000604,
"count": 4,
"self": 0.4696345680000604
}
}
},
"_update_policy": {
"total": 101.09398364899937,
"count": 90,
"self": 40.1389781510037,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.95500549799567,
"count": 4587,
"self": 60.95500549799567
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.690000529692043e-07,
"count": 1,
"self": 7.690000529692043e-07
},
"TrainerController._save_models": {
"total": 0.08179652300009366,
"count": 1,
"self": 0.0008274429999346467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08096908000015901,
"count": 1,
"self": 0.08096908000015901
}
}
}
}
}
}
}