{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.866123616695404,
"min": 0.866123616695404,
"max": 2.8659348487854004,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8288.802734375,
"min": 8288.802734375,
"max": 29507.6640625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.560389518737793,
"min": 0.2627295255661011,
"max": 12.560389518737793,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2449.27587890625,
"min": 50.96952819824219,
"max": 2557.24609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07042061783431564,
"min": 0.06446436146307308,
"max": 0.07739384441069264,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28168247133726254,
"min": 0.2578574458522923,
"max": 0.3869692220534632,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20534481239669464,
"min": 0.128920767529338,
"max": 0.2781420468115339,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8213792495867785,
"min": 0.515683070117352,
"max": 1.3907102340576696,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.318181818181817,
"min": 2.977272727272727,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1070.0,
"min": 131.0,
"max": 1354.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.318181818181817,
"min": 2.977272727272727,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1070.0,
"min": 131.0,
"max": 1354.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695907124",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695907593"
},
"total": 468.778285795,
"count": 1,
"self": 0.4355016660001638,
"children": {
"run_training.setup": {
"total": 0.06884354399994663,
"count": 1,
"self": 0.06884354399994663
},
"TrainerController.start_learning": {
"total": 468.2739405849999,
"count": 1,
"self": 0.5412101499945265,
"children": {
"TrainerController._reset_env": {
"total": 4.297778610000023,
"count": 1,
"self": 4.297778610000023
},
"TrainerController.advance": {
"total": 463.29005085400536,
"count": 18203,
"self": 0.2544272620021957,
"children": {
"env_step": {
"total": 463.03562359200316,
"count": 18203,
"self": 337.2515586609977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.50442170999361,
"count": 18203,
"self": 1.745852411003284,
"children": {
"TorchPolicy.evaluate": {
"total": 123.75856929899032,
"count": 18203,
"self": 123.75856929899032
}
}
},
"workers": {
"total": 0.27964322101183825,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.58232459500937,
"count": 18203,
"is_parallel": true,
"self": 218.56494221000844,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005825275999995938,
"count": 1,
"is_parallel": true,
"self": 0.004407393000178672,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001417882999817266,
"count": 10,
"is_parallel": true,
"self": 0.001417882999817266
}
}
},
"UnityEnvironment.step": {
"total": 0.03656646000001729,
"count": 1,
"is_parallel": true,
"self": 0.000605662999987544,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044972700004564103,
"count": 1,
"is_parallel": true,
"self": 0.00044972700004564103
},
"communicator.exchange": {
"total": 0.03212499599999319,
"count": 1,
"is_parallel": true,
"self": 0.03212499599999319
},
"steps_from_proto": {
"total": 0.003386073999990913,
"count": 1,
"is_parallel": true,
"self": 0.0014838089999784643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019022650000124486,
"count": 10,
"is_parallel": true,
"self": 0.0019022650000124486
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 248.01738238500093,
"count": 18202,
"is_parallel": true,
"self": 10.47625961599067,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.305245245001629,
"count": 18202,
"is_parallel": true,
"self": 5.305245245001629
},
"communicator.exchange": {
"total": 196.33579053201117,
"count": 18202,
"is_parallel": true,
"self": 196.33579053201117
},
"steps_from_proto": {
"total": 35.900086991997455,
"count": 18202,
"is_parallel": true,
"self": 6.483015639994278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.417071352003177,
"count": 182020,
"is_parallel": true,
"self": 29.417071352003177
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011381900003470946,
"count": 1,
"self": 0.00011381900003470946,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 459.8164071630482,
"count": 437977,
"is_parallel": true,
"self": 9.377139709099765,
"children": {
"process_trajectory": {
"total": 250.32700651494872,
"count": 437977,
"is_parallel": true,
"self": 249.1457047459487,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1813017690000152,
"count": 4,
"is_parallel": true,
"self": 1.1813017690000152
}
}
},
"_update_policy": {
"total": 200.11226093899973,
"count": 90,
"is_parallel": true,
"self": 80.99257709299684,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.11968384600289,
"count": 4584,
"is_parallel": true,
"self": 119.11968384600289
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1447871519999353,
"count": 1,
"self": 0.0008884690000741102,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1438986829998612,
"count": 1,
"self": 0.1438986829998612
}
}
}
}
}
}
}