{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8470109701156616,
"min": 0.8470109701156616,
"max": 2.8587684631347656,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8087.2607421875,
"min": 8087.2607421875,
"max": 29276.6484375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.138153076171875,
"min": 0.41456520557403564,
"max": 13.138153076171875,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2561.93994140625,
"min": 80.42565155029297,
"max": 2643.594970703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06910309731442704,
"min": 0.06355704139166525,
"max": 0.07385587483369425,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27641238925770817,
"min": 0.254228165566661,
"max": 0.3653181085554773,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19853222961811456,
"min": 0.1186050335926862,
"max": 0.29642294610247893,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7941289184724583,
"min": 0.4744201343707448,
"max": 1.3792628754003375,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 3.3181818181818183,
"max": 26.054545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 146.0,
"max": 1433.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 3.3181818181818183,
"max": 26.054545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 146.0,
"max": 1433.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684355584",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684356078"
},
"total": 493.947006634,
"count": 1,
"self": 0.4328935519999959,
"children": {
"run_training.setup": {
"total": 0.04097433499987346,
"count": 1,
"self": 0.04097433499987346
},
"TrainerController.start_learning": {
"total": 493.4731387470001,
"count": 1,
"self": 0.5492519390106736,
"children": {
"TrainerController._reset_env": {
"total": 3.8136252639999384,
"count": 1,
"self": 3.8136252639999384
},
"TrainerController.advance": {
"total": 488.95723147198964,
"count": 18201,
"self": 0.2879244100042797,
"children": {
"env_step": {
"total": 488.66930706198536,
"count": 18201,
"self": 355.76230014199655,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.6250976459985,
"count": 18201,
"self": 1.7889291880071596,
"children": {
"TorchPolicy.evaluate": {
"total": 130.83616845799133,
"count": 18201,
"self": 130.83616845799133
}
}
},
"workers": {
"total": 0.2819092739903226,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 491.8466093369898,
"count": 18201,
"is_parallel": true,
"self": 233.49197164297902,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005717032999882576,
"count": 1,
"is_parallel": true,
"self": 0.004033792999962316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016832399999202607,
"count": 10,
"is_parallel": true,
"self": 0.0016832399999202607
}
}
},
"UnityEnvironment.step": {
"total": 0.04620794100014791,
"count": 1,
"is_parallel": true,
"self": 0.0005749450001530931,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031853399991632614,
"count": 1,
"is_parallel": true,
"self": 0.00031853399991632614
},
"communicator.exchange": {
"total": 0.0431669619999866,
"count": 1,
"is_parallel": true,
"self": 0.0431669619999866
},
"steps_from_proto": {
"total": 0.0021475000000918953,
"count": 1,
"is_parallel": true,
"self": 0.00040566200004832353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017418380000435718,
"count": 10,
"is_parallel": true,
"self": 0.0017418380000435718
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 258.35463769401076,
"count": 18200,
"is_parallel": true,
"self": 10.147949872979552,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.768337832027328,
"count": 18200,
"is_parallel": true,
"self": 5.768337832027328
},
"communicator.exchange": {
"total": 206.20778164400053,
"count": 18200,
"is_parallel": true,
"self": 206.20778164400053
},
"steps_from_proto": {
"total": 36.23056834500335,
"count": 18200,
"is_parallel": true,
"self": 6.8796493700253905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.350918974977958,
"count": 182000,
"is_parallel": true,
"self": 29.350918974977958
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013928999987911084,
"count": 1,
"self": 0.00013928999987911084,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 484.81816672909395,
"count": 465050,
"is_parallel": true,
"self": 11.080338316066218,
"children": {
"process_trajectory": {
"total": 267.0622961060269,
"count": 465050,
"is_parallel": true,
"self": 265.92067656802715,
"children": {
"RLTrainer._checkpoint": {
"total": 1.141619537999759,
"count": 4,
"is_parallel": true,
"self": 1.141619537999759
}
}
},
"_update_policy": {
"total": 206.67553230700082,
"count": 90,
"is_parallel": true,
"self": 81.18629333300441,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.48923897399641,
"count": 4587,
"is_parallel": true,
"self": 125.48923897399641
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15289078199998585,
"count": 1,
"self": 0.0009133399998972891,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15197744200008856,
"count": 1,
"self": 0.15197744200008856
}
}
}
}
}
}
}