{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.925213098526001,
"min": 0.925213098526001,
"max": 2.8585891723632812,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8864.466796875,
"min": 8864.466796875,
"max": 29274.8125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.746957778930664,
"min": 0.3209478259086609,
"max": 12.746957778930664,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2485.65673828125,
"min": 62.263877868652344,
"max": 2564.00048828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06899184023367363,
"min": 0.06156330414837701,
"max": 0.07614557366183612,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27596736093469454,
"min": 0.24625321659350805,
"max": 0.3657402845266523,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20289672093064176,
"min": 0.14027985467292442,
"max": 0.3101334621508916,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.811586883722567,
"min": 0.5611194186916977,
"max": 1.4683130973694372,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 3.8181818181818183,
"max": 25.381818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 168.0,
"max": 1396.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 3.8181818181818183,
"max": 25.381818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 168.0,
"max": 1396.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676583028",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676583497"
},
"total": 469.23009892100004,
"count": 1,
"self": 0.7396982379999599,
"children": {
"run_training.setup": {
"total": 0.1311603169999671,
"count": 1,
"self": 0.1311603169999671
},
"TrainerController.start_learning": {
"total": 468.3592403660001,
"count": 1,
"self": 0.5607264649980834,
"children": {
"TrainerController._reset_env": {
"total": 9.844563205999975,
"count": 1,
"self": 9.844563205999975
},
"TrainerController.advance": {
"total": 457.757250982002,
"count": 18212,
"self": 0.2797503879976375,
"children": {
"env_step": {
"total": 457.47750059400437,
"count": 18212,
"self": 315.8496789570238,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.35347992698576,
"count": 18212,
"self": 1.4908530629877532,
"children": {
"TorchPolicy.evaluate": {
"total": 139.862626863998,
"count": 18212,
"self": 31.123123798996176,
"children": {
"TorchPolicy.sample_actions": {
"total": 108.73950306500183,
"count": 18212,
"self": 108.73950306500183
}
}
}
}
},
"workers": {
"total": 0.27434170999481466,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.5379312129885,
"count": 18212,
"is_parallel": true,
"self": 224.1913957739897,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00724834599998303,
"count": 1,
"is_parallel": true,
"self": 0.004184386000019913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030639599999631173,
"count": 10,
"is_parallel": true,
"self": 0.0030639599999631173
}
}
},
"UnityEnvironment.step": {
"total": 0.04587427599994953,
"count": 1,
"is_parallel": true,
"self": 0.0004622859999017237,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000315169000032256,
"count": 1,
"is_parallel": true,
"self": 0.000315169000032256
},
"communicator.exchange": {
"total": 0.043145910000021104,
"count": 1,
"is_parallel": true,
"self": 0.043145910000021104
},
"steps_from_proto": {
"total": 0.0019509109999944485,
"count": 1,
"is_parallel": true,
"self": 0.00043188200015720213,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015190289998372464,
"count": 10,
"is_parallel": true,
"self": 0.0015190289998372464
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 242.34653543899879,
"count": 18211,
"is_parallel": true,
"self": 9.71718103799708,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.170183440006667,
"count": 18211,
"is_parallel": true,
"self": 5.170183440006667
},
"communicator.exchange": {
"total": 193.7007465749938,
"count": 18211,
"is_parallel": true,
"self": 193.7007465749938
},
"steps_from_proto": {
"total": 33.75842438600125,
"count": 18211,
"is_parallel": true,
"self": 7.057424245977927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.701000140023325,
"count": 182110,
"is_parallel": true,
"self": 26.701000140023325
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001509120002083364,
"count": 1,
"self": 0.0001509120002083364,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 454.38028372799954,
"count": 416848,
"is_parallel": true,
"self": 10.172590864040671,
"children": {
"process_trajectory": {
"total": 260.3283215629591,
"count": 416848,
"is_parallel": true,
"self": 259.0100915969589,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3182299660002172,
"count": 4,
"is_parallel": true,
"self": 1.3182299660002172
}
}
},
"_update_policy": {
"total": 183.87937130099976,
"count": 90,
"is_parallel": true,
"self": 66.22974741999735,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.64962388100241,
"count": 4587,
"is_parallel": true,
"self": 117.64962388100241
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1965488009998353,
"count": 1,
"self": 0.001176705999796468,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19537209500003883,
"count": 1,
"self": 0.19537209500003883
}
}
}
}
}
}
}