{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6387420296669006,
"min": 0.6327793002128601,
"max": 1.0834821462631226,
"count": 81
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6084.65673828125,
"min": 6062.658203125,
"max": 10170.32421875,
"count": 81
},
"SnowballTarget.Step.mean": {
"value": 999968.0,
"min": 199992.0,
"max": 999968.0,
"count": 81
},
"SnowballTarget.Step.sum": {
"value": 999968.0,
"min": 199992.0,
"max": 999968.0,
"count": 81
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.081706047058105,
"min": 12.850728988647461,
"max": 14.137797355651855,
"count": 81
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2745.9326171875,
"min": 1659.88916015625,
"max": 2893.48779296875,
"count": 81
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 81
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 6567.0,
"max": 10945.0,
"count": 81
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0703826310534222,
"min": 0.06245207467901648,
"max": 0.07407977656146292,
"count": 81
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2815305242136888,
"min": 0.2182903964227686,
"max": 0.3703988828073146,
"count": 81
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15891914109827257,
"min": 0.15105286941808813,
"max": 0.213871904374922,
"count": 81
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6356765643930903,
"min": 0.5644757202552522,
"max": 1.0519777287714882,
"count": 81
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4292995235999971e-06,
"min": 1.4292995235999971e-06,
"max": 0.0002406792197736,
"count": 81
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.717198094399989e-06,
"min": 5.717198094399989e-06,
"max": 0.001176996107668,
"count": 81
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1004764,
"min": 0.1004764,
"max": 0.1802264,
"count": 81
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4019056,
"min": 0.4019056,
"max": 0.8923320000000001,
"count": 81
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.377235999999996e-05,
"min": 3.377235999999996e-05,
"max": 0.00401329736,
"count": 81
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013508943999999983,
"min": 0.00013508943999999983,
"max": 0.0196273668,
"count": 81
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.818181818181817,
"min": 25.6,
"max": 28.163636363636364,
"count": 81
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1224.0,
"min": 752.0,
"max": 1549.0,
"count": 81
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.818181818181817,
"min": 25.6,
"max": 28.163636363636364,
"count": 81
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1224.0,
"min": 752.0,
"max": 1549.0,
"count": 81
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 81
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 81
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696787465",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696789911"
},
"total": 2445.7466050200005,
"count": 1,
"self": 0.5262043780007843,
"children": {
"run_training.setup": {
"total": 0.1078088299996125,
"count": 1,
"self": 0.1078088299996125
},
"TrainerController.start_learning": {
"total": 2445.112591812,
"count": 1,
"self": 3.538825564038234,
"children": {
"TrainerController._reset_env": {
"total": 1.6163898869999684,
"count": 1,
"self": 1.6163898869999684
},
"TrainerController.advance": {
"total": 2439.866838730962,
"count": 73338,
"self": 1.6385281669158758,
"children": {
"env_step": {
"total": 2438.2283105640463,
"count": 73338,
"self": 1928.7964934030601,
"children": {
"SubprocessEnvManager._take_step": {
"total": 507.7968274750001,
"count": 73338,
"self": 9.076216953976655,
"children": {
"TorchPolicy.evaluate": {
"total": 498.72061052102345,
"count": 73338,
"self": 498.72061052102345
}
}
},
"workers": {
"total": 1.6349896859860564,
"count": 73338,
"self": 0.0,
"children": {
"worker_root": {
"total": 2436.716611641107,
"count": 73338,
"is_parallel": true,
"self": 1136.0442708630117,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032841679999364715,
"count": 1,
"is_parallel": true,
"self": 0.0008225539991144615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00246161400082201,
"count": 10,
"is_parallel": true,
"self": 0.00246161400082201
}
}
},
"UnityEnvironment.step": {
"total": 0.13225638999983858,
"count": 1,
"is_parallel": true,
"self": 0.0007449060003636987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005100240000501799,
"count": 1,
"is_parallel": true,
"self": 0.0005100240000501799
},
"communicator.exchange": {
"total": 0.1286362329997246,
"count": 1,
"is_parallel": true,
"self": 0.1286362329997246
},
"steps_from_proto": {
"total": 0.0023652269997000985,
"count": 1,
"is_parallel": true,
"self": 0.0004277669991097355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001937460000590363,
"count": 10,
"is_parallel": true,
"self": 0.001937460000590363
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1300.6723407780955,
"count": 73337,
"is_parallel": true,
"self": 57.34887038916622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.22861080296161,
"count": 73337,
"is_parallel": true,
"self": 31.22861080296161
},
"communicator.exchange": {
"total": 1015.1126084040106,
"count": 73337,
"is_parallel": true,
"self": 1015.1126084040106
},
"steps_from_proto": {
"total": 196.98225118195705,
"count": 73337,
"is_parallel": true,
"self": 38.846725553930355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 158.1355256280267,
"count": 733370,
"is_parallel": true,
"self": 158.1355256280267
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016458899972349172,
"count": 1,
"self": 0.00016458899972349172,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 2418.7039180392794,
"count": 2444408,
"is_parallel": true,
"self": 58.017053019495506,
"children": {
"process_trajectory": {
"total": 1285.300306515785,
"count": 2444408,
"is_parallel": true,
"self": 1281.1163833047872,
"children": {
"RLTrainer._checkpoint": {
"total": 4.183923210997818,
"count": 21,
"is_parallel": true,
"self": 4.183923210997818
}
}
},
"_update_policy": {
"total": 1075.386558503999,
"count": 366,
"is_parallel": true,
"self": 376.61259414306323,
"children": {
"TorchPPOOptimizer.update": {
"total": 698.7739643609357,
"count": 18660,
"is_parallel": true,
"self": 698.7739643609357
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09037304100002075,
"count": 1,
"self": 0.0014845739997326746,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08888846700028807,
"count": 1,
"self": 0.08888846700028807
}
}
}
}
}
}
}