{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7911876440048218,
"min": 0.791079580783844,
"max": 2.878732681274414,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7641.2900390625,
"min": 7614.18505859375,
"max": 29576.099609375,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.68270492553711,
"min": 0.2713659107685089,
"max": 13.90966796875,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2804.95458984375,
"min": 52.64498519897461,
"max": 2832.61181640625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06880285770326451,
"min": 0.06177866245099508,
"max": 0.08080066400953893,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34401428851632254,
"min": 0.25509035301294325,
"max": 0.38174097984230293,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.207539245836875,
"min": 0.0991173353703583,
"max": 0.27265834165554426,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.037696229184375,
"min": 0.3964693414814332,
"max": 1.3632917082777212,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.581818181818182,
"min": 2.6136363636363638,
"max": 27.418181818181818,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1462.0,
"min": 115.0,
"max": 1508.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.581818181818182,
"min": 2.6136363636363638,
"max": 27.418181818181818,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1462.0,
"min": 115.0,
"max": 1508.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679212330",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679213511"
},
"total": 1180.9618246950001,
"count": 1,
"self": 0.792702558000201,
"children": {
"run_training.setup": {
"total": 0.11277025799995499,
"count": 1,
"self": 0.11277025799995499
},
"TrainerController.start_learning": {
"total": 1180.056351879,
"count": 1,
"self": 1.6021399749877219,
"children": {
"TrainerController._reset_env": {
"total": 9.400693646000036,
"count": 1,
"self": 9.400693646000036
},
"TrainerController.advance": {
"total": 1168.8404656980122,
"count": 45495,
"self": 0.7857115080175845,
"children": {
"env_step": {
"total": 1168.0547541899946,
"count": 45495,
"self": 845.7856628180228,
"children": {
"SubprocessEnvManager._take_step": {
"total": 321.50887300298535,
"count": 45495,
"self": 5.428258705960957,
"children": {
"TorchPolicy.evaluate": {
"total": 316.0806142970244,
"count": 45495,
"self": 316.0806142970244
}
}
},
"workers": {
"total": 0.7602183689863864,
"count": 45495,
"self": 0.0,
"children": {
"worker_root": {
"total": 1176.0252198950252,
"count": 45495,
"is_parallel": true,
"self": 547.3755070960176,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0065710769999896,
"count": 1,
"is_parallel": true,
"self": 0.005060961000026509,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015101159999630909,
"count": 10,
"is_parallel": true,
"self": 0.0015101159999630909
}
}
},
"UnityEnvironment.step": {
"total": 0.03844393200000695,
"count": 1,
"is_parallel": true,
"self": 0.0004239360000042325,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029376899999533634,
"count": 1,
"is_parallel": true,
"self": 0.00029376899999533634
},
"communicator.exchange": {
"total": 0.035835655000028055,
"count": 1,
"is_parallel": true,
"self": 0.035835655000028055
},
"steps_from_proto": {
"total": 0.0018905719999793291,
"count": 1,
"is_parallel": true,
"self": 0.00039798200015184193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014925899998274872,
"count": 10,
"is_parallel": true,
"self": 0.0014925899998274872
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 628.6497127990076,
"count": 45494,
"is_parallel": true,
"self": 24.206100386006142,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.059098923993531,
"count": 45494,
"is_parallel": true,
"self": 13.059098923993531
},
"communicator.exchange": {
"total": 513.5904641040077,
"count": 45494,
"is_parallel": true,
"self": 513.5904641040077
},
"steps_from_proto": {
"total": 77.79404938500022,
"count": 45494,
"is_parallel": true,
"self": 15.913811704942532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.88023768005769,
"count": 454940,
"is_parallel": true,
"self": 61.88023768005769
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015494400008719822,
"count": 1,
"self": 0.00015494400008719822,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1160.167762879997,
"count": 1070462,
"is_parallel": true,
"self": 25.950263198022185,
"children": {
"process_trajectory": {
"total": 645.1009657049754,
"count": 1070462,
"is_parallel": true,
"self": 641.8716238609754,
"children": {
"RLTrainer._checkpoint": {
"total": 3.229341843999862,
"count": 10,
"is_parallel": true,
"self": 3.229341843999862
}
}
},
"_update_policy": {
"total": 489.11653397699945,
"count": 227,
"is_parallel": true,
"self": 172.52183155699635,
"children": {
"TorchPPOOptimizer.update": {
"total": 316.5947024200031,
"count": 11574,
"is_parallel": true,
"self": 316.5947024200031
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21289761599996382,
"count": 1,
"self": 0.0012409769999521814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21165663900001164,
"count": 1,
"self": 0.21165663900001164
}
}
}
}
}
}
}