{
"name": "root",
"gauges": {
"ShooterBasic.Policy.Entropy.mean": {
"value": 1.4824821949005127,
"min": 1.0895097255706787,
"max": 2.638326644897461,
"count": 250
},
"ShooterBasic.Policy.Entropy.sum": {
"value": 2963.48193359375,
"min": 2180.10888671875,
"max": 5279.29150390625,
"count": 250
},
"ShooterBasic.Step.mean": {
"value": 499998.0,
"min": 1998.0,
"max": 499998.0,
"count": 250
},
"ShooterBasic.Step.sum": {
"value": 499998.0,
"min": 1998.0,
"max": 499998.0,
"count": 250
},
"ShooterBasic.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.006963093765079975,
"min": -0.21738854050636292,
"max": 0.023092469200491905,
"count": 250
},
"ShooterBasic.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.644383430480957,
"min": -145.86770629882812,
"max": 15.402676582336426,
"count": 250
},
"ShooterBasic.Losses.PolicyLoss.mean": {
"value": 0.12747625884852218,
"min": 0.10908348486797573,
"max": 0.15455300048051868,
"count": 250
},
"ShooterBasic.Losses.PolicyLoss.sum": {
"value": 1.0198100707881774,
"min": 0.786331330309622,
"max": 1.2364240038441494,
"count": 250
},
"ShooterBasic.Losses.ValueLoss.mean": {
"value": 9.136862847934173e-07,
"min": 5.960540469066948e-08,
"max": 0.02674982081483298,
"count": 250
},
"ShooterBasic.Losses.ValueLoss.sum": {
"value": 7.309490278347338e-06,
"min": 4.768432375253558e-07,
"max": 0.21399856651866384,
"count": 250
},
"ShooterBasic.Policy.LearningRate.mean": {
"value": 5.972498009499977e-07,
"min": 5.972498009499977e-07,
"max": 0.00029938080020639997,
"count": 250
},
"ShooterBasic.Policy.LearningRate.sum": {
"value": 4.777998407599982e-06,
"min": 4.777998407599982e-06,
"max": 0.0023857572047476,
"count": 250
},
"ShooterBasic.Policy.Epsilon.mean": {
"value": 0.10019905000000001,
"min": 0.10019905000000001,
"max": 0.19979360000000002,
"count": 250
},
"ShooterBasic.Policy.Epsilon.sum": {
"value": 0.8015924000000001,
"min": 0.7069943999999999,
"max": 1.5952524000000001,
"count": 250
},
"ShooterBasic.Policy.Beta.mean": {
"value": 1.9932594999999962e-05,
"min": 1.9932594999999962e-05,
"max": 0.00498970064,
"count": 250
},
"ShooterBasic.Policy.Beta.sum": {
"value": 0.0001594607599999997,
"min": 0.0001594607599999997,
"max": 0.039763094760000006,
"count": 250
},
"ShooterBasic.Environment.EpisodeLength.mean": {
"value": 2499.0,
"min": 31.161290322580644,
"max": 2499.0,
"count": 205
},
"ShooterBasic.Environment.EpisodeLength.sum": {
"value": 2499.0,
"min": 146.0,
"max": 4250.0,
"count": 205
},
"ShooterBasic.Environment.CumulativeReward.mean": {
"value": -0.9995999505044892,
"min": -2.879599778330885,
"max": 1.8279999308288097,
"count": 205
},
"ShooterBasic.Environment.CumulativeReward.sum": {
"value": -0.9995999505044892,
"min": -63.19960029050708,
"max": 2.3895999547094107,
"count": 205
},
"ShooterBasic.Policy.ExtrinsicReward.mean": {
"value": -0.9995999505044892,
"min": -2.879599778330885,
"max": 1.8279999308288097,
"count": 205
},
"ShooterBasic.Policy.ExtrinsicReward.sum": {
"value": -0.9995999505044892,
"min": -63.19960029050708,
"max": 2.3895999547094107,
"count": 205
},
"ShooterBasic.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 250
},
"ShooterBasic.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 250
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692209791",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/ShooterOrange.yaml --env=./training-envs-executables/linux/Shooter_00/Shooter_00 --run-id=Shooter01 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692212778"
},
"total": 2987.562962117,
"count": 1,
"self": 0.5939823600001546,
"children": {
"run_training.setup": {
"total": 0.0525785629999973,
"count": 1,
"self": 0.0525785629999973
},
"TrainerController.start_learning": {
"total": 2986.916401194,
"count": 1,
"self": 11.806434428058765,
"children": {
"TrainerController._reset_env": {
"total": 1.626698766000004,
"count": 1,
"self": 1.626698766000004
},
"TrainerController.advance": {
"total": 2973.3964364819412,
"count": 500309,
"self": 10.706079446877084,
"children": {
"env_step": {
"total": 2362.8686963399596,
"count": 500309,
"self": 1719.2106472249543,
"children": {
"SubprocessEnvManager._take_step": {
"total": 636.6136659698503,
"count": 500309,
"self": 33.50798116086196,
"children": {
"TorchPolicy.evaluate": {
"total": 603.1056848089884,
"count": 500001,
"self": 603.1056848089884
}
}
},
"workers": {
"total": 7.044383145154939,
"count": 500309,
"self": 0.0,
"children": {
"worker_root": {
"total": 2965.624904697787,
"count": 500309,
"is_parallel": true,
"self": 1816.9000337049486,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005608940999991319,
"count": 1,
"is_parallel": true,
"self": 0.0048137390000420055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007952019999493132,
"count": 4,
"is_parallel": true,
"self": 0.0007952019999493132
}
}
},
"UnityEnvironment.step": {
"total": 0.026568327000006775,
"count": 1,
"is_parallel": true,
"self": 0.0001840620000166382,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024291500000117594,
"count": 1,
"is_parallel": true,
"self": 0.00024291500000117594
},
"communicator.exchange": {
"total": 0.025493911000012304,
"count": 1,
"is_parallel": true,
"self": 0.025493911000012304
},
"steps_from_proto": {
"total": 0.0006474389999766572,
"count": 1,
"is_parallel": true,
"self": 0.0003238440000359333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00032359499994072394,
"count": 4,
"is_parallel": true,
"self": 0.00032359499994072394
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1148.7248709928383,
"count": 500308,
"is_parallel": true,
"self": 53.21501695778875,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 42.855336293951666,
"count": 500308,
"is_parallel": true,
"self": 42.855336293951666
},
"communicator.exchange": {
"total": 823.2242927070469,
"count": 500308,
"is_parallel": true,
"self": 823.2242927070469
},
"steps_from_proto": {
"total": 229.43022503405103,
"count": 500308,
"is_parallel": true,
"self": 126.35356219302241,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.07666284102862,
"count": 2001232,
"is_parallel": true,
"self": 103.07666284102862
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 599.8216606951044,
"count": 500309,
"self": 12.822596677169713,
"children": {
"process_trajectory": {
"total": 259.8214377189371,
"count": 500309,
"self": 259.6711001989371,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15033751999999367,
"count": 1,
"self": 0.15033751999999367
}
}
},
"_update_policy": {
"total": 327.1776262989976,
"count": 1937,
"self": 126.58520726297724,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.59241903602037,
"count": 46488,
"self": 200.59241903602037
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1980000635958277e-06,
"count": 1,
"self": 1.1980000635958277e-06
},
"TrainerController._save_models": {
"total": 0.08683031999999002,
"count": 1,
"self": 0.0005780699998467753,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08625225000014325,
"count": 1,
"self": 0.08625225000014325
}
}
}
}
}
}
}