{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7075538635253906,
"min": 0.7075538635253906,
"max": 2.8043413162231445,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14017.349609375,
"min": 14017.349609375,
"max": 57592.7578125,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 19992.0,
"max": 499976.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 19992.0,
"max": 499976.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.579208374023438,
"min": 1.1244392395019531,
"max": 13.686137199401855,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5418.10400390625,
"min": 448.6512451171875,
"max": 5581.6015625,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 21890.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06655429399821883,
"min": 0.06279135271432568,
"max": 0.07476437832904678,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.5989886459839695,
"min": 0.5651221744289311,
"max": 0.7450655834080524,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1880288576962901,
"min": 0.16108783148279848,
"max": 0.26527705795105244,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.692259719266611,
"min": 1.4497904833451862,
"max": 2.387493521559472,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.6928981024000006e-06,
"min": 5.6928981024000006e-06,
"max": 0.00029345280218239997,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.123608292160001e-05,
"min": 5.123608292160001e-05,
"max": 0.0026410752196415997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1018976,
"min": 0.1018976,
"max": 0.1978176,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9170784000000001,
"min": 0.9170784000000001,
"max": 1.7803584000000001,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010469024000000005,
"min": 0.00010469024000000005,
"max": 0.004891098240000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0009422121600000004,
"min": 0.0009422121600000004,
"max": 0.04401988416000001,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.626262626262626,
"min": 5.212121212121212,
"max": 27.161616161616163,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2636.0,
"min": 516.0,
"max": 2893.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.626262626262626,
"min": 5.212121212121212,
"max": 27.161616161616163,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2636.0,
"min": 516.0,
"max": 2893.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680774537",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680775722"
},
"total": 1184.8593959159998,
"count": 1,
"self": 0.4378732719999334,
"children": {
"run_training.setup": {
"total": 0.1117448229999809,
"count": 1,
"self": 0.1117448229999809
},
"TrainerController.start_learning": {
"total": 1184.309777821,
"count": 1,
"self": 1.416391119026457,
"children": {
"TrainerController._reset_env": {
"total": 3.882526287000019,
"count": 1,
"self": 3.882526287000019
},
"TrainerController.advance": {
"total": 1178.8773426619734,
"count": 45475,
"self": 0.7272312859788599,
"children": {
"env_step": {
"total": 1178.1501113759946,
"count": 45475,
"self": 860.90843273299,
"children": {
"SubprocessEnvManager._take_step": {
"total": 316.46908275998385,
"count": 45475,
"self": 4.756226260997437,
"children": {
"TorchPolicy.evaluate": {
"total": 311.7128564989864,
"count": 45475,
"self": 311.7128564989864
}
}
},
"workers": {
"total": 0.7725958830206991,
"count": 45475,
"self": 0.0,
"children": {
"worker_root": {
"total": 1180.401751632,
"count": 45475,
"is_parallel": true,
"self": 540.8452850389951,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005205272000011973,
"count": 1,
"is_parallel": true,
"self": 0.0034120699999675708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017932020000444027,
"count": 10,
"is_parallel": true,
"self": 0.0017932020000444027
}
}
},
"UnityEnvironment.step": {
"total": 0.05259403000002294,
"count": 1,
"is_parallel": true,
"self": 0.0005784550000953459,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003251949999594217,
"count": 1,
"is_parallel": true,
"self": 0.0003251949999594217
},
"communicator.exchange": {
"total": 0.049638737000009314,
"count": 1,
"is_parallel": true,
"self": 0.049638737000009314
},
"steps_from_proto": {
"total": 0.002051642999958858,
"count": 1,
"is_parallel": true,
"self": 0.00040490699996098556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016467359999978726,
"count": 10,
"is_parallel": true,
"self": 0.0016467359999978726
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 639.5564665930049,
"count": 45474,
"is_parallel": true,
"self": 25.48882078899544,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.29706779398748,
"count": 45474,
"is_parallel": true,
"self": 14.29706779398748
},
"communicator.exchange": {
"total": 515.1173508060162,
"count": 45474,
"is_parallel": true,
"self": 515.1173508060162
},
"steps_from_proto": {
"total": 84.6532272040057,
"count": 45474,
"is_parallel": true,
"self": 17.020486588969447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.63274061503625,
"count": 454740,
"is_parallel": true,
"self": 67.63274061503625
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015471500000785454,
"count": 1,
"self": 0.00015471500000785454,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1169.491580247015,
"count": 1052660,
"is_parallel": true,
"self": 25.97469250304698,
"children": {
"process_trajectory": {
"total": 645.9768183949662,
"count": 1052660,
"is_parallel": true,
"self": 643.3743421099663,
"children": {
"RLTrainer._checkpoint": {
"total": 2.602476284999966,
"count": 10,
"is_parallel": true,
"self": 2.602476284999966
}
}
},
"_update_policy": {
"total": 497.5400693490018,
"count": 227,
"is_parallel": true,
"self": 183.34636095000155,
"children": {
"TorchPPOOptimizer.update": {
"total": 314.19370839900023,
"count": 11574,
"is_parallel": true,
"self": 314.19370839900023
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1333630379999704,
"count": 1,
"self": 0.0010004899997966277,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13236254800017377,
"count": 1,
"self": 0.13236254800017377
}
}
}
}
}
}
}