{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6584024429321289,
"min": 0.6377054452896118,
"max": 2.8660054206848145,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6358.8505859375,
"min": 6204.9140625,
"max": 29350.76171875,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.596480369567871,
"min": 0.18912027776241302,
"max": 13.782879829406738,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2787.278564453125,
"min": 36.689334869384766,
"max": 2813.719482421875,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06713951977363186,
"min": 0.06061871705862679,
"max": 0.07890735516397684,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3356975988681593,
"min": 0.24247486823450717,
"max": 0.3726857754620997,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17987366701458013,
"min": 0.13786501033395968,
"max": 0.28235779001432304,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8993683350729007,
"min": 0.5514600413358387,
"max": 1.3884966259493547,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.963636363636365,
"min": 3.477272727272727,
"max": 27.272727272727273,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1483.0,
"min": 153.0,
"max": 1490.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.963636363636365,
"min": 3.477272727272727,
"max": 27.272727272727273,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1483.0,
"min": 153.0,
"max": 1490.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678106945",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget4 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678108089"
},
"total": 1144.4999620620001,
"count": 1,
"self": 0.43354562199965585,
"children": {
"run_training.setup": {
"total": 0.11154366100026891,
"count": 1,
"self": 0.11154366100026891
},
"TrainerController.start_learning": {
"total": 1143.9548727790002,
"count": 1,
"self": 1.3863385830363768,
"children": {
"TrainerController._reset_env": {
"total": 6.490910432999954,
"count": 1,
"self": 6.490910432999954
},
"TrainerController.advance": {
"total": 1135.954435625964,
"count": 45475,
"self": 0.6751686299667199,
"children": {
"env_step": {
"total": 1135.2792669959972,
"count": 45475,
"self": 778.530197312994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 356.079699092983,
"count": 45475,
"self": 3.8413285930319034,
"children": {
"TorchPolicy.evaluate": {
"total": 352.2383704999511,
"count": 45475,
"self": 80.03025011101818,
"children": {
"TorchPolicy.sample_actions": {
"total": 272.2081203889329,
"count": 45475,
"self": 272.2081203889329
}
}
}
}
},
"workers": {
"total": 0.6693705900202076,
"count": 45475,
"self": 0.0,
"children": {
"worker_root": {
"total": 1140.2167437700073,
"count": 45475,
"is_parallel": true,
"self": 546.1317151500266,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022660869999526767,
"count": 1,
"is_parallel": true,
"self": 0.0008068400002230192,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014592469997296575,
"count": 10,
"is_parallel": true,
"self": 0.0014592469997296575
}
}
},
"UnityEnvironment.step": {
"total": 0.06560465499978818,
"count": 1,
"is_parallel": true,
"self": 0.0005528659994524787,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043381400018915883,
"count": 1,
"is_parallel": true,
"self": 0.00043381400018915883
},
"communicator.exchange": {
"total": 0.0627294549999533,
"count": 1,
"is_parallel": true,
"self": 0.0627294549999533
},
"steps_from_proto": {
"total": 0.001888520000193239,
"count": 1,
"is_parallel": true,
"self": 0.0004475799996725982,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014409400005206408,
"count": 10,
"is_parallel": true,
"self": 0.0014409400005206408
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 594.0850286199807,
"count": 45474,
"is_parallel": true,
"self": 23.958175236846728,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.079113124082596,
"count": 45474,
"is_parallel": true,
"self": 13.079113124082596
},
"communicator.exchange": {
"total": 479.3195172059727,
"count": 45474,
"is_parallel": true,
"self": 479.3195172059727
},
"steps_from_proto": {
"total": 77.72822305307864,
"count": 45474,
"is_parallel": true,
"self": 16.88363547904555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.844587574033085,
"count": 454740,
"is_parallel": true,
"self": 60.844587574033085
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.7427999814099167e-05,
"count": 1,
"self": 3.7427999814099167e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1126.826872669791,
"count": 992910,
"is_parallel": true,
"self": 25.790201262689607,
"children": {
"process_trajectory": {
"total": 642.3771378911047,
"count": 992910,
"is_parallel": true,
"self": 639.7736844831052,
"children": {
"RLTrainer._checkpoint": {
"total": 2.6034534079994955,
"count": 10,
"is_parallel": true,
"self": 2.6034534079994955
}
}
},
"_update_policy": {
"total": 458.6595335159968,
"count": 227,
"is_parallel": true,
"self": 161.34020498499376,
"children": {
"TorchPPOOptimizer.update": {
"total": 297.31932853100307,
"count": 11574,
"is_parallel": true,
"self": 297.31932853100307
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1231507090001287,
"count": 1,
"self": 0.0008405850003327942,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1223101239997959,
"count": 1,
"self": 0.1223101239997959
}
}
}
}
}
}
}