{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8163177371025085,
"min": 0.8163177371025085,
"max": 2.8668289184570312,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7794.20166015625,
"min": 7794.20166015625,
"max": 29359.1953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.971254348754883,
"min": 0.31895139813423157,
"max": 12.971254348754883,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2529.39453125,
"min": 61.87657165527344,
"max": 2634.77001953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06569671035343644,
"min": 0.061652117720305065,
"max": 0.07618785057228723,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26278684141374575,
"min": 0.24660847088122026,
"max": 0.36162513928081585,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21155375030403045,
"min": 0.10002276273649734,
"max": 0.3242036015087483,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8462150012161218,
"min": 0.40009105094598935,
"max": 1.4983233420872222,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.613636363636363,
"min": 2.9545454545454546,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1127.0,
"min": 130.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.613636363636363,
"min": 2.9545454545454546,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1127.0,
"min": 130.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677753265",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677753714"
},
"total": 449.2378048139999,
"count": 1,
"self": 0.3949806999999055,
"children": {
"run_training.setup": {
"total": 0.11738848599998164,
"count": 1,
"self": 0.11738848599998164
},
"TrainerController.start_learning": {
"total": 448.725435628,
"count": 1,
"self": 0.5289636570065568,
"children": {
"TrainerController._reset_env": {
"total": 8.736609970999893,
"count": 1,
"self": 8.736609970999893
},
"TrainerController.advance": {
"total": 439.33959096999365,
"count": 18202,
"self": 0.2799476069924367,
"children": {
"env_step": {
"total": 439.0596433630012,
"count": 18202,
"self": 304.1439733840018,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.64623393899592,
"count": 18202,
"self": 1.67297308399543,
"children": {
"TorchPolicy.evaluate": {
"total": 132.9732608550005,
"count": 18202,
"self": 29.354850838009384,
"children": {
"TorchPolicy.sample_actions": {
"total": 103.6184100169911,
"count": 18202,
"self": 103.6184100169911
}
}
}
}
},
"workers": {
"total": 0.26943604000348387,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 447.22619531400426,
"count": 18202,
"is_parallel": true,
"self": 215.6201404810073,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006014012000036928,
"count": 1,
"is_parallel": true,
"self": 0.004046711000114556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019673009999223723,
"count": 10,
"is_parallel": true,
"self": 0.0019673009999223723
}
}
},
"UnityEnvironment.step": {
"total": 0.04345821499998692,
"count": 1,
"is_parallel": true,
"self": 0.0004665379999551078,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031553899998471024,
"count": 1,
"is_parallel": true,
"self": 0.00031553899998471024
},
"communicator.exchange": {
"total": 0.04071378100002221,
"count": 1,
"is_parallel": true,
"self": 0.04071378100002221
},
"steps_from_proto": {
"total": 0.0019623570000248947,
"count": 1,
"is_parallel": true,
"self": 0.00042850900013036153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015338479998945331,
"count": 10,
"is_parallel": true,
"self": 0.0015338479998945331
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 231.60605483299696,
"count": 18201,
"is_parallel": true,
"self": 9.464126110010966,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.1024821109919,
"count": 18201,
"is_parallel": true,
"self": 5.1024821109919
},
"communicator.exchange": {
"total": 186.86733237999397,
"count": 18201,
"is_parallel": true,
"self": 186.86733237999397
},
"steps_from_proto": {
"total": 30.172114232000126,
"count": 18201,
"is_parallel": true,
"self": 6.646199816001399,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.525914415998727,
"count": 182010,
"is_parallel": true,
"self": 23.525914415998727
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010385199993834249,
"count": 1,
"self": 0.00010385199993834249,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 436.1773764059718,
"count": 395243,
"is_parallel": true,
"self": 9.331125815977089,
"children": {
"process_trajectory": {
"total": 249.67759738599523,
"count": 395243,
"is_parallel": true,
"self": 248.99611939799524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6814779879999833,
"count": 4,
"is_parallel": true,
"self": 0.6814779879999833
}
}
},
"_update_policy": {
"total": 177.1686532039995,
"count": 90,
"is_parallel": true,
"self": 62.24593924800001,
"children": {
"TorchPPOOptimizer.update": {
"total": 114.92271395599948,
"count": 4587,
"is_parallel": true,
"self": 114.92271395599948
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12016717799997423,
"count": 1,
"self": 0.0008414549999997689,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11932572299997446,
"count": 1,
"self": 0.11932572299997446
}
}
}
}
}
}
}