{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8021188974380493,
"min": 0.7979788780212402,
"max": 2.86203670501709,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7746.8642578125,
"min": 7635.1201171875,
"max": 29341.599609375,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.746527671813965,
"min": 0.45988598465919495,
"max": 13.851004600524902,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2818.0380859375,
"min": 89.21788024902344,
"max": 2833.871826171875,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06539639475910614,
"min": 0.05843579287394794,
"max": 0.07708618770926014,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3269819737955307,
"min": 0.23374317149579177,
"max": 0.3772654923993875,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19366922358087466,
"min": 0.18219471459879596,
"max": 0.3492299100347594,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9683461179043733,
"min": 0.7287788583951839,
"max": 1.746149550173797,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.130624000000007e-05,
"min": 9.130624000000007e-05,
"max": 0.00791351624,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004565312000000003,
"min": 0.0004565312000000003,
"max": 0.0387765712,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.145454545454545,
"min": 3.8636363636363638,
"max": 27.42222222222222,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1493.0,
"min": 170.0,
"max": 1496.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.145454545454545,
"min": 3.8636363636363638,
"max": 27.42222222222222,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1493.0,
"min": 170.0,
"max": 1496.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679808088",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679809242"
},
"total": 1153.944886152,
"count": 1,
"self": 0.4371777500000462,
"children": {
"run_training.setup": {
"total": 0.11569500400000265,
"count": 1,
"self": 0.11569500400000265
},
"TrainerController.start_learning": {
"total": 1153.392013398,
"count": 1,
"self": 1.3017439060085962,
"children": {
"TrainerController._reset_env": {
"total": 8.523067112999996,
"count": 1,
"self": 8.523067112999996
},
"TrainerController.advance": {
"total": 1143.3948394979914,
"count": 45479,
"self": 0.7018928100028461,
"children": {
"env_step": {
"total": 1142.6929466879885,
"count": 45479,
"self": 820.827743987005,
"children": {
"SubprocessEnvManager._take_step": {
"total": 321.0255710809923,
"count": 45479,
"self": 4.893861480997941,
"children": {
"TorchPolicy.evaluate": {
"total": 316.13170959999434,
"count": 45479,
"self": 316.13170959999434
}
}
},
"workers": {
"total": 0.8396316199912803,
"count": 45479,
"self": 0.0,
"children": {
"worker_root": {
"total": 1149.8573008339997,
"count": 45479,
"is_parallel": true,
"self": 545.1469834310024,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005578036999992264,
"count": 1,
"is_parallel": true,
"self": 0.004013977999989038,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015640590000032262,
"count": 10,
"is_parallel": true,
"self": 0.0015640590000032262
}
}
},
"UnityEnvironment.step": {
"total": 0.03847815199998195,
"count": 1,
"is_parallel": true,
"self": 0.000431289999994533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004413050000096064,
"count": 1,
"is_parallel": true,
"self": 0.0004413050000096064
},
"communicator.exchange": {
"total": 0.03578582699998378,
"count": 1,
"is_parallel": true,
"self": 0.03578582699998378
},
"steps_from_proto": {
"total": 0.0018197299999940242,
"count": 1,
"is_parallel": true,
"self": 0.00035780700000032084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014619229999937033,
"count": 10,
"is_parallel": true,
"self": 0.0014619229999937033
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 604.7103174029974,
"count": 45478,
"is_parallel": true,
"self": 24.278713210045566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.874303270982665,
"count": 45478,
"is_parallel": true,
"self": 12.874303270982665
},
"communicator.exchange": {
"total": 489.465483264977,
"count": 45478,
"is_parallel": true,
"self": 489.465483264977
},
"steps_from_proto": {
"total": 78.09181765699216,
"count": 45478,
"is_parallel": true,
"self": 15.39810408193324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.69371357505892,
"count": 454780,
"is_parallel": true,
"self": 62.69371357505892
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011668499996630999,
"count": 1,
"self": 0.00011668499996630999,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1134.725633848029,
"count": 1025778,
"is_parallel": true,
"self": 26.85586667699272,
"children": {
"process_trajectory": {
"total": 628.0888675020374,
"count": 1025778,
"is_parallel": true,
"self": 625.4638992430375,
"children": {
"RLTrainer._checkpoint": {
"total": 2.624968258999843,
"count": 10,
"is_parallel": true,
"self": 2.624968258999843
}
}
},
"_update_policy": {
"total": 479.78089966899887,
"count": 227,
"is_parallel": true,
"self": 176.58907831199514,
"children": {
"TorchPPOOptimizer.update": {
"total": 303.19182135700373,
"count": 11574,
"is_parallel": true,
"self": 303.19182135700373
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17224619600006008,
"count": 1,
"self": 0.0019318440001825365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17031435199987754,
"count": 1,
"self": 0.17031435199987754
}
}
}
}
}
}
}