{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0821439027786255,
"min": 1.0821439027786255,
"max": 2.8646936416625977,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10368.0205078125,
"min": 10368.0205078125,
"max": 29337.328125,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.131266593933105,
"min": 0.4599519968032837,
"max": 12.131266593933105,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2365.596923828125,
"min": 89.2306900024414,
"max": 2414.58447265625,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06497834889730666,
"min": 0.06319415741446016,
"max": 0.07435261581079376,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25991339558922666,
"min": 0.25277662965784065,
"max": 0.3670477070663051,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19505600287925964,
"min": 0.1286619952925499,
"max": 0.2551515409138565,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7802240115170386,
"min": 0.5146479811701996,
"max": 1.2757577045692825,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291906002698,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19730199999999998,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004865369800000001,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.0,
"min": 3.772727272727273,
"max": 24.381818181818183,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1056.0,
"min": 166.0,
"max": 1341.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.0,
"min": 3.772727272727273,
"max": 24.381818181818183,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1056.0,
"min": 166.0,
"max": 1341.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678644731",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678645222"
},
"total": 490.7671269570001,
"count": 1,
"self": 0.43302493000021514,
"children": {
"run_training.setup": {
"total": 0.1797794909999766,
"count": 1,
"self": 0.1797794909999766
},
"TrainerController.start_learning": {
"total": 490.1543225359999,
"count": 1,
"self": 0.6180219950070978,
"children": {
"TrainerController._reset_env": {
"total": 10.888456376000022,
"count": 1,
"self": 10.888456376000022
},
"TrainerController.advance": {
"total": 478.5292816759928,
"count": 18216,
"self": 0.3118605890110757,
"children": {
"env_step": {
"total": 478.21742108698174,
"count": 18216,
"self": 337.0039272349816,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.76531941099256,
"count": 18216,
"self": 1.904945231995157,
"children": {
"TorchPolicy.evaluate": {
"total": 138.8603741789974,
"count": 18216,
"self": 138.8603741789974
}
}
},
"workers": {
"total": 0.44817444100760895,
"count": 18216,
"self": 0.0,
"children": {
"worker_root": {
"total": 488.2552073989974,
"count": 18216,
"is_parallel": true,
"self": 236.66358961999777,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002789744999972754,
"count": 1,
"is_parallel": true,
"self": 0.0007507049998594084,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020390400001133457,
"count": 10,
"is_parallel": true,
"self": 0.0020390400001133457
}
}
},
"UnityEnvironment.step": {
"total": 0.07656992500005799,
"count": 1,
"is_parallel": true,
"self": 0.0006211400001348011,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040462200001911697,
"count": 1,
"is_parallel": true,
"self": 0.00040462200001911697
},
"communicator.exchange": {
"total": 0.06718655299994225,
"count": 1,
"is_parallel": true,
"self": 0.06718655299994225
},
"steps_from_proto": {
"total": 0.008357609999961824,
"count": 1,
"is_parallel": true,
"self": 0.0004024520000029952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007955157999958828,
"count": 10,
"is_parallel": true,
"self": 0.007955157999958828
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.59161777899965,
"count": 18215,
"is_parallel": true,
"self": 9.792848131001847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.176156897993906,
"count": 18215,
"is_parallel": true,
"self": 5.176156897993906
},
"communicator.exchange": {
"total": 204.3602397150055,
"count": 18215,
"is_parallel": true,
"self": 204.3602397150055
},
"steps_from_proto": {
"total": 32.262373034998404,
"count": 18215,
"is_parallel": true,
"self": 6.53333437498236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.729038660016045,
"count": 182150,
"is_parallel": true,
"self": 25.729038660016045
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001655300000038551,
"count": 1,
"self": 0.0001655300000038551,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 474.6668227181093,
"count": 452101,
"is_parallel": true,
"self": 11.091373714068936,
"children": {
"process_trajectory": {
"total": 265.95055973504043,
"count": 452101,
"is_parallel": true,
"self": 264.4282893930407,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5222703419997288,
"count": 4,
"is_parallel": true,
"self": 1.5222703419997288
}
}
},
"_update_policy": {
"total": 197.62488926899994,
"count": 90,
"is_parallel": true,
"self": 75.86693473801336,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.75795453098658,
"count": 4581,
"is_parallel": true,
"self": 121.75795453098658
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11839695899993785,
"count": 1,
"self": 0.0009528099999442929,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11744414899999356,
"count": 1,
"self": 0.11744414899999356
}
}
}
}
}
}
}