{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.8903605937957764,
"min": 2.890360116958618,
"max": 2.8903610706329346,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 27660.751953125,
"min": 27597.162109375,
"max": 29790.94921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03437237814068794,
"min": 0.03437237814068794,
"max": 0.038156554102897644,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 6.702613830566406,
"min": 6.702613830566406,
"max": 7.44052791595459,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 2.75,
"min": 2.5681818181818183,
"max": 3.1454545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 121.0,
"min": 113.0,
"max": 173.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 2.75,
"min": 2.5681818181818183,
"max": 3.1454545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 121.0,
"min": 113.0,
"max": 173.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690401988",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690402427"
},
"total": 439.6489580749999,
"count": 1,
"self": 0.8115195700002005,
"children": {
"run_training.setup": {
"total": 0.033248616999799196,
"count": 1,
"self": 0.033248616999799196
},
"TrainerController.start_learning": {
"total": 438.8041898879999,
"count": 1,
"self": 0.6500964990293596,
"children": {
"TrainerController._reset_env": {
"total": 3.8579211819999273,
"count": 1,
"self": 3.8579211819999273
},
"TrainerController.advance": {
"total": 434.02176600897087,
"count": 18204,
"self": 0.3331430579744392,
"children": {
"env_step": {
"total": 433.68862295099643,
"count": 18204,
"self": 316.5329062470055,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.83524732901128,
"count": 18204,
"self": 3.193932956001163,
"children": {
"TorchPolicy.evaluate": {
"total": 113.64131437301012,
"count": 18204,
"self": 113.64131437301012
}
}
},
"workers": {
"total": 0.3204693749796661,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 436.93223228998,
"count": 18204,
"is_parallel": true,
"self": 166.97159389298372,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020721360001516587,
"count": 1,
"is_parallel": true,
"self": 0.0006205710001268017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001451565000024857,
"count": 10,
"is_parallel": true,
"self": 0.001451565000024857
}
}
},
"UnityEnvironment.step": {
"total": 0.04227552899988041,
"count": 1,
"is_parallel": true,
"self": 0.0007010739998349891,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039399699994646653,
"count": 1,
"is_parallel": true,
"self": 0.00039399699994646653
},
"communicator.exchange": {
"total": 0.037604858000122476,
"count": 1,
"is_parallel": true,
"self": 0.037604858000122476
},
"steps_from_proto": {
"total": 0.0035755999999764754,
"count": 1,
"is_parallel": true,
"self": 0.0005768639998677827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029987360001086927,
"count": 10,
"is_parallel": true,
"self": 0.0029987360001086927
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 269.9606383969963,
"count": 18203,
"is_parallel": true,
"self": 11.382147324018433,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.93830372398611,
"count": 18203,
"is_parallel": true,
"self": 5.93830372398611
},
"communicator.exchange": {
"total": 212.5445939769861,
"count": 18203,
"is_parallel": true,
"self": 212.5445939769861
},
"steps_from_proto": {
"total": 40.095593372005624,
"count": 18203,
"is_parallel": true,
"self": 7.822980359963594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.27261301204203,
"count": 182030,
"is_parallel": true,
"self": 32.27261301204203
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.843399986180884e-05,
"count": 1,
"self": 3.843399986180884e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 427.0077443700659,
"count": 750328,
"is_parallel": true,
"self": 17.759405850136545,
"children": {
"process_trajectory": {
"total": 409.24833851992935,
"count": 750328,
"is_parallel": true,
"self": 407.84690788292914,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4014306370002032,
"count": 4,
"is_parallel": true,
"self": 1.4014306370002032
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2743677639998623,
"count": 1,
"self": 0.0056401139997888095,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2687276500000735,
"count": 1,
"self": 0.2687276500000735
}
}
}
}
}
}
}