{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9282233119010925,
"min": 0.9282233119010925,
"max": 2.8571064472198486,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8872.88671875,
"min": 8872.88671875,
"max": 29291.0546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.746694564819336,
"min": 0.354756623506546,
"max": 12.746694564819336,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2485.60546875,
"min": 68.82278442382812,
"max": 2591.01708984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0674808266327697,
"min": 0.060206140000624214,
"max": 0.07240911743761613,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2699233065310788,
"min": 0.24082456000249686,
"max": 0.3586490299077887,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2002700192890331,
"min": 0.12406513871460716,
"max": 0.28250086599705265,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8010800771561324,
"min": 0.4962605548584286,
"max": 1.4125043299852633,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.954545454545453,
"min": 3.340909090909091,
"max": 25.054545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1098.0,
"min": 147.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.954545454545453,
"min": 3.340909090909091,
"max": 25.054545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1098.0,
"min": 147.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729300622",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729301195"
},
"total": 573.0838532140006,
"count": 1,
"self": 0.5404820090006979,
"children": {
"run_training.setup": {
"total": 0.08223738800006686,
"count": 1,
"self": 0.08223738800006686
},
"TrainerController.start_learning": {
"total": 572.4611338169998,
"count": 1,
"self": 0.8968332049807941,
"children": {
"TrainerController._reset_env": {
"total": 2.054462130000502,
"count": 1,
"self": 2.054462130000502
},
"TrainerController.advance": {
"total": 569.4189443550185,
"count": 18203,
"self": 0.43013124091612553,
"children": {
"env_step": {
"total": 568.9888131141024,
"count": 18203,
"self": 438.292979754151,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.29416343905177,
"count": 18203,
"self": 2.298808769999596,
"children": {
"TorchPolicy.evaluate": {
"total": 127.99535466905218,
"count": 18203,
"self": 127.99535466905218
}
}
},
"workers": {
"total": 0.4016699208996215,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 570.6115949860714,
"count": 18203,
"is_parallel": true,
"self": 266.88652347299194,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003393527999833168,
"count": 1,
"is_parallel": true,
"self": 0.0011108759990747785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022826520007583895,
"count": 10,
"is_parallel": true,
"self": 0.0022826520007583895
}
}
},
"UnityEnvironment.step": {
"total": 0.04615061799995601,
"count": 1,
"is_parallel": true,
"self": 0.000915324000743567,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047664599969721166,
"count": 1,
"is_parallel": true,
"self": 0.00047664599969721166
},
"communicator.exchange": {
"total": 0.04207351999957609,
"count": 1,
"is_parallel": true,
"self": 0.04207351999957609
},
"steps_from_proto": {
"total": 0.002685127999939141,
"count": 1,
"is_parallel": true,
"self": 0.0004993579987058183,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021857700012333225,
"count": 10,
"is_parallel": true,
"self": 0.0021857700012333225
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 303.72507151307946,
"count": 18202,
"is_parallel": true,
"self": 14.63684295802068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.397761070020351,
"count": 18202,
"is_parallel": true,
"self": 7.397761070020351
},
"communicator.exchange": {
"total": 238.48928855608574,
"count": 18202,
"is_parallel": true,
"self": 238.48928855608574
},
"steps_from_proto": {
"total": 43.201178928952686,
"count": 18202,
"is_parallel": true,
"self": 8.431372530801127,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.76980639815156,
"count": 182020,
"is_parallel": true,
"self": 34.76980639815156
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002099090006595361,
"count": 1,
"self": 0.0002099090006595361,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 562.6710339836636,
"count": 774614,
"is_parallel": true,
"self": 17.5894138624235,
"children": {
"process_trajectory": {
"total": 306.28731542324203,
"count": 774614,
"is_parallel": true,
"self": 305.6523715372423,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6349438859997463,
"count": 4,
"is_parallel": true,
"self": 0.6349438859997463
}
}
},
"_update_policy": {
"total": 238.7943046979981,
"count": 90,
"is_parallel": true,
"self": 69.2997793920058,
"children": {
"TorchPPOOptimizer.update": {
"total": 169.49452530599228,
"count": 4584,
"is_parallel": true,
"self": 169.49452530599228
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09068421799929638,
"count": 1,
"self": 0.0013885809994462761,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0892956369998501,
"count": 1,
"self": 0.0892956369998501
}
}
}
}
}
}
}