{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8024298548698425,
"min": 0.8024298548698425,
"max": 2.872267484664917,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7670.4267578125,
"min": 7670.4267578125,
"max": 29446.486328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.83403205871582,
"min": 0.44975194334983826,
"max": 12.868099212646484,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2502.63623046875,
"min": 87.25187683105469,
"max": 2625.09228515625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06275991762955599,
"min": 0.06275991762955599,
"max": 0.07571067692022589,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25103967051822396,
"min": 0.25103967051822396,
"max": 0.3785533846011294,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20971538944571624,
"min": 0.1111070458131258,
"max": 0.3079967078040628,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.838861557782865,
"min": 0.4444281832525032,
"max": 1.539983539020314,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.068181818181817,
"min": 3.2045454545454546,
"max": 25.327272727272728,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1103.0,
"min": 141.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.068181818181817,
"min": 3.2045454545454546,
"max": 25.327272727272728,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1103.0,
"min": 141.0,
"max": 1393.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678778243",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678778929"
},
"total": 685.1998027309999,
"count": 1,
"self": 1.0426031269996656,
"children": {
"run_training.setup": {
"total": 0.28005446000003076,
"count": 1,
"self": 0.28005446000003076
},
"TrainerController.start_learning": {
"total": 683.8771451440001,
"count": 1,
"self": 0.928548115001945,
"children": {
"TrainerController._reset_env": {
"total": 7.320756281000001,
"count": 1,
"self": 7.320756281000001
},
"TrainerController.advance": {
"total": 675.3509058069982,
"count": 18217,
"self": 0.49072839299128646,
"children": {
"env_step": {
"total": 674.8601774140069,
"count": 18217,
"self": 536.8130689689926,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.39733837100556,
"count": 18217,
"self": 3.254553738006223,
"children": {
"TorchPolicy.evaluate": {
"total": 134.14278463299934,
"count": 18217,
"self": 134.14278463299934
}
}
},
"workers": {
"total": 0.6497700740086998,
"count": 18217,
"self": 0.0,
"children": {
"worker_root": {
"total": 681.2276135809878,
"count": 18217,
"is_parallel": true,
"self": 312.96270214297823,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006094259000008151,
"count": 1,
"is_parallel": true,
"self": 0.004178212999988773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001916046000019378,
"count": 10,
"is_parallel": true,
"self": 0.001916046000019378
}
}
},
"UnityEnvironment.step": {
"total": 0.045776802999967,
"count": 1,
"is_parallel": true,
"self": 0.0007210969999391637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004915700000083234,
"count": 1,
"is_parallel": true,
"self": 0.0004915700000083234
},
"communicator.exchange": {
"total": 0.0417535370000337,
"count": 1,
"is_parallel": true,
"self": 0.0417535370000337
},
"steps_from_proto": {
"total": 0.002810598999985814,
"count": 1,
"is_parallel": true,
"self": 0.0007562599999459962,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020543390000398176,
"count": 10,
"is_parallel": true,
"self": 0.0020543390000398176
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 368.2649114380096,
"count": 18216,
"is_parallel": true,
"self": 14.655143977017985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.492039156994963,
"count": 18216,
"is_parallel": true,
"self": 8.492039156994963
},
"communicator.exchange": {
"total": 291.13803694098743,
"count": 18216,
"is_parallel": true,
"self": 291.13803694098743
},
"steps_from_proto": {
"total": 53.97969136300924,
"count": 18216,
"is_parallel": true,
"self": 12.026628389992197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.95306297301704,
"count": 182160,
"is_parallel": true,
"self": 41.95306297301704
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017853300005299388,
"count": 1,
"self": 0.00017853300005299388,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 667.6194189090231,
"count": 623580,
"is_parallel": true,
"self": 21.38768757002913,
"children": {
"process_trajectory": {
"total": 361.43850817899533,
"count": 623580,
"is_parallel": true,
"self": 359.1163004379953,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3222077410000566,
"count": 4,
"is_parallel": true,
"self": 2.3222077410000566
}
}
},
"_update_policy": {
"total": 284.7932231599987,
"count": 90,
"is_parallel": true,
"self": 101.82104081299303,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.97218234700568,
"count": 4587,
"is_parallel": true,
"self": 182.97218234700568
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2767564079999829,
"count": 1,
"self": 0.0015134939999370545,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27524291400004586,
"count": 1,
"self": 0.27524291400004586
}
}
}
}
}
}
}