{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0011041164398193,
"min": 0.9854599833488464,
"max": 2.8815367221832275,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9569.5546875,
"min": 9569.5546875,
"max": 29509.818359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.578500747680664,
"min": 0.19108004868030548,
"max": 12.578500747680664,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2452.8076171875,
"min": 37.06953048706055,
"max": 2524.880859375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07006441180762041,
"min": 0.06301057839386083,
"max": 0.07316058867373595,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28025764723048163,
"min": 0.2520423135754433,
"max": 0.36580294336867974,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12994428041080633,
"min": 0.057764980759398614,
"max": 0.18183373160221997,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5197771216432253,
"min": 0.23105992303759446,
"max": 0.9091686580110998,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.3999999999999999,
"max": 0.4999999999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.320060000000001e-05,
"min": 2.320060000000001e-05,
"max": 0.00048674060000000003,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 9.280240000000004e-05,
"min": 9.280240000000004e-05,
"max": 0.002312428,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 2.75,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 121.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 2.75,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 121.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697022379",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget3 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697022838"
},
"total": 459.29682413800003,
"count": 1,
"self": 0.42865565100009917,
"children": {
"run_training.setup": {
"total": 0.07023954599998206,
"count": 1,
"self": 0.07023954599998206
},
"TrainerController.start_learning": {
"total": 458.79792894099995,
"count": 1,
"self": 0.5331379969945829,
"children": {
"TrainerController._reset_env": {
"total": 8.022404402999996,
"count": 1,
"self": 8.022404402999996
},
"TrainerController.advance": {
"total": 450.1529573780053,
"count": 18201,
"self": 0.27231736501255455,
"children": {
"env_step": {
"total": 449.8806400129927,
"count": 18201,
"self": 308.3696353230033,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.23643191399898,
"count": 18201,
"self": 1.3901599339927486,
"children": {
"TorchPolicy.evaluate": {
"total": 139.84627198000624,
"count": 18201,
"self": 139.84627198000624
}
}
},
"workers": {
"total": 0.2745727759904355,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 457.5795183950001,
"count": 18201,
"is_parallel": true,
"self": 221.65946265499912,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006454921999988983,
"count": 1,
"is_parallel": true,
"self": 0.0043369920000202455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021179299999687373,
"count": 10,
"is_parallel": true,
"self": 0.0021179299999687373
}
}
},
"UnityEnvironment.step": {
"total": 0.046302088999993884,
"count": 1,
"is_parallel": true,
"self": 0.0007049269999868102,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041053000001056716,
"count": 1,
"is_parallel": true,
"self": 0.00041053000001056716
},
"communicator.exchange": {
"total": 0.0430446409999945,
"count": 1,
"is_parallel": true,
"self": 0.0430446409999945
},
"steps_from_proto": {
"total": 0.002141991000002008,
"count": 1,
"is_parallel": true,
"self": 0.0005320419999748083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016099490000271999,
"count": 10,
"is_parallel": true,
"self": 0.0016099490000271999
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.92005574000098,
"count": 18200,
"is_parallel": true,
"self": 10.346040903989376,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.280943166008768,
"count": 18200,
"is_parallel": true,
"self": 5.280943166008768
},
"communicator.exchange": {
"total": 187.50584912600237,
"count": 18200,
"is_parallel": true,
"self": 187.50584912600237
},
"steps_from_proto": {
"total": 32.78722254400046,
"count": 18200,
"is_parallel": true,
"self": 6.083201813000926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.70402073099953,
"count": 182000,
"is_parallel": true,
"self": 26.70402073099953
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011632400003236398,
"count": 1,
"self": 0.00011632400003236398,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 446.50971218698254,
"count": 461109,
"is_parallel": true,
"self": 9.826753579985109,
"children": {
"process_trajectory": {
"total": 252.41192288799775,
"count": 461109,
"is_parallel": true,
"self": 251.5467383439977,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8651845440000443,
"count": 4,
"is_parallel": true,
"self": 0.8651845440000443
}
}
},
"_update_policy": {
"total": 184.27103571899968,
"count": 90,
"is_parallel": true,
"self": 57.27244771899555,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.99858800000413,
"count": 4584,
"is_parallel": true,
"self": 126.99858800000413
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08931283900005837,
"count": 1,
"self": 0.0011601680000694614,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08815267099998891,
"count": 1,
"self": 0.08815267099998891
}
}
}
}
}
}
}