{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5034646391868591,
"min": 0.44306880235671997,
"max": 2.796967029571533,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4818.15673828125,
"min": 4537.09521484375,
"max": 28674.505859375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.44078254699707,
"min": 0.4161001741886139,
"max": 12.44078254699707,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2425.95263671875,
"min": 80.72343444824219,
"max": 2518.72705078125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07514223079802011,
"min": 0.06294433847187794,
"max": 0.07514223079802011,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.30056892319208045,
"min": 0.25342130232014576,
"max": 0.36119170095908504,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18795302649046858,
"min": 0.13101567421222615,
"max": 0.3141874353967461,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7518121059618743,
"min": 0.5240626968489046,
"max": 1.5008130079390956,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.110776,
"min": 0.110776,
"max": 0.489176,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.443104,
"min": 0.443104,
"max": 2.34688,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.704545454545453,
"min": 3.3636363636363638,
"max": 24.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1087.0,
"min": 148.0,
"max": 1341.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.704545454545453,
"min": 3.3636363636363638,
"max": 24.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1087.0,
"min": 148.0,
"max": 1341.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682274666",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682275146"
},
"total": 479.67850082100006,
"count": 1,
"self": 0.5774259090001124,
"children": {
"run_training.setup": {
"total": 0.11175315999997792,
"count": 1,
"self": 0.11175315999997792
},
"TrainerController.start_learning": {
"total": 478.98932175199997,
"count": 1,
"self": 0.5649310840085491,
"children": {
"TrainerController._reset_env": {
"total": 4.7006287529999895,
"count": 1,
"self": 4.7006287529999895
},
"TrainerController.advance": {
"total": 473.4794798539915,
"count": 18214,
"self": 0.27893431098522115,
"children": {
"env_step": {
"total": 473.2005455430063,
"count": 18214,
"self": 342.87203616201714,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.04834699399538,
"count": 18214,
"self": 1.6586528759960402,
"children": {
"TorchPolicy.evaluate": {
"total": 128.38969411799934,
"count": 18214,
"self": 128.38969411799934
}
}
},
"workers": {
"total": 0.2801623869937657,
"count": 18214,
"self": 0.0,
"children": {
"worker_root": {
"total": 477.4673537560134,
"count": 18214,
"is_parallel": true,
"self": 225.56550512501252,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004670622999981333,
"count": 1,
"is_parallel": true,
"self": 0.003204200000027413,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014664229999539202,
"count": 10,
"is_parallel": true,
"self": 0.0014664229999539202
}
}
},
"UnityEnvironment.step": {
"total": 0.07468854199998987,
"count": 1,
"is_parallel": true,
"self": 0.0006517609999718843,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004276679999861699,
"count": 1,
"is_parallel": true,
"self": 0.0004276679999861699
},
"communicator.exchange": {
"total": 0.06621936999999889,
"count": 1,
"is_parallel": true,
"self": 0.06621936999999889
},
"steps_from_proto": {
"total": 0.007389743000032922,
"count": 1,
"is_parallel": true,
"self": 0.0006579100000294602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006731833000003462,
"count": 10,
"is_parallel": true,
"self": 0.006731833000003462
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.90184863100086,
"count": 18213,
"is_parallel": true,
"self": 10.023365329007902,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.836196204992291,
"count": 18213,
"is_parallel": true,
"self": 5.836196204992291
},
"communicator.exchange": {
"total": 202.52322733799593,
"count": 18213,
"is_parallel": true,
"self": 202.52322733799593
},
"steps_from_proto": {
"total": 33.51905975900473,
"count": 18213,
"is_parallel": true,
"self": 6.599159476023317,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.919900282981416,
"count": 182130,
"is_parallel": true,
"self": 26.919900282981416
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019125400001485104,
"count": 1,
"self": 0.00019125400001485104,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 469.6557142709946,
"count": 428465,
"is_parallel": true,
"self": 10.552271645006442,
"children": {
"process_trajectory": {
"total": 259.98662806698843,
"count": 428465,
"is_parallel": true,
"self": 258.7754690819883,
"children": {
"RLTrainer._checkpoint": {
"total": 1.211158985000111,
"count": 4,
"is_parallel": true,
"self": 1.211158985000111
}
}
},
"_update_policy": {
"total": 199.11681455899975,
"count": 90,
"is_parallel": true,
"self": 70.83796451400099,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.27885004499876,
"count": 4584,
"is_parallel": true,
"self": 128.27885004499876
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24409080699990682,
"count": 1,
"self": 0.0015469369999436822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24254386999996314,
"count": 1,
"self": 0.24254386999996314
}
}
}
}
}
}
}