{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9332401752471924,
"min": 0.9332401752471924,
"max": 2.8715546131134033,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8920.8427734375,
"min": 8920.8427734375,
"max": 29502.3515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.810624122619629,
"min": 0.2395886927843094,
"max": 12.810624122619629,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2498.07177734375,
"min": 46.48020553588867,
"max": 2579.550048828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07382602529658328,
"min": 0.061060596631803354,
"max": 0.07792719563169122,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2953041011863331,
"min": 0.24424238652721342,
"max": 0.374795091576137,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18856658578357277,
"min": 0.11116058849866557,
"max": 0.32007426490970686,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7542663431342911,
"min": 0.4446423539946623,
"max": 1.4437842302426112,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.34090909090909,
"min": 3.4545454545454546,
"max": 25.472727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1115.0,
"min": 152.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.34090909090909,
"min": 3.4545454545454546,
"max": 25.472727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1115.0,
"min": 152.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684460089",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684460553"
},
"total": 464.210370977,
"count": 1,
"self": 0.43416197600004125,
"children": {
"run_training.setup": {
"total": 0.04136653300000148,
"count": 1,
"self": 0.04136653300000148
},
"TrainerController.start_learning": {
"total": 463.734842468,
"count": 1,
"self": 0.5211108710026338,
"children": {
"TrainerController._reset_env": {
"total": 3.889043003000012,
"count": 1,
"self": 3.889043003000012
},
"TrainerController.advance": {
"total": 459.1817235869973,
"count": 18202,
"self": 0.2586054779966389,
"children": {
"env_step": {
"total": 458.92311810900065,
"count": 18202,
"self": 339.1782648640111,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.4878140989986,
"count": 18202,
"self": 1.6633931000022528,
"children": {
"TorchPolicy.evaluate": {
"total": 117.82442099899635,
"count": 18202,
"self": 117.82442099899635
}
}
},
"workers": {
"total": 0.2570391459909729,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 462.197416274001,
"count": 18202,
"is_parallel": true,
"self": 217.47619036600182,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005228436000010106,
"count": 1,
"is_parallel": true,
"self": 0.0038088770000115346,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014195589999985714,
"count": 10,
"is_parallel": true,
"self": 0.0014195589999985714
}
}
},
"UnityEnvironment.step": {
"total": 0.038007772000014484,
"count": 1,
"is_parallel": true,
"self": 0.0006655550000118637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003100109999820688,
"count": 1,
"is_parallel": true,
"self": 0.0003100109999820688
},
"communicator.exchange": {
"total": 0.03483055300000615,
"count": 1,
"is_parallel": true,
"self": 0.03483055300000615
},
"steps_from_proto": {
"total": 0.002201653000014403,
"count": 1,
"is_parallel": true,
"self": 0.0003789250000068023,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018227280000076007,
"count": 10,
"is_parallel": true,
"self": 0.0018227280000076007
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.72122590799918,
"count": 18201,
"is_parallel": true,
"self": 9.745855553003594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.244891422001388,
"count": 18201,
"is_parallel": true,
"self": 5.244891422001388
},
"communicator.exchange": {
"total": 196.58718713299544,
"count": 18201,
"is_parallel": true,
"self": 196.58718713299544
},
"steps_from_proto": {
"total": 33.143291799998764,
"count": 18201,
"is_parallel": true,
"self": 6.342996493011185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.80029530698758,
"count": 182010,
"is_parallel": true,
"self": 26.80029530698758
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001326390000713218,
"count": 1,
"self": 0.0001326390000713218,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 455.66796367094196,
"count": 427771,
"is_parallel": true,
"self": 9.61764841797492,
"children": {
"process_trajectory": {
"total": 247.1015149669672,
"count": 427771,
"is_parallel": true,
"self": 245.55280152196704,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5487134450001463,
"count": 4,
"is_parallel": true,
"self": 1.5487134450001463
}
}
},
"_update_policy": {
"total": 198.94880028599982,
"count": 90,
"is_parallel": true,
"self": 80.15929752299846,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.78950276300137,
"count": 4584,
"is_parallel": true,
"self": 118.78950276300137
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14283236799997212,
"count": 1,
"self": 0.000818410000078984,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14201395799989314,
"count": 1,
"self": 0.14201395799989314
}
}
}
}
}
}
}