{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9062545895576477,
"min": 0.9062545895576477,
"max": 2.8662357330322266,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8662.8876953125,
"min": 8662.8876953125,
"max": 29384.6484375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.95397663116455,
"min": 0.39465582370758057,
"max": 12.95397663116455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2526.025390625,
"min": 76.563232421875,
"max": 2625.048095703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07240058024706064,
"min": 0.06323740775452262,
"max": 0.07651685997780969,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28960232098824257,
"min": 0.2529496310180905,
"max": 0.36873490000075165,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2056419721201939,
"min": 0.11756701732748279,
"max": 0.27299668879120376,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8225678884807756,
"min": 0.47026806930993115,
"max": 1.3649834439560187,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.75,
"min": 3.4318181818181817,
"max": 25.75,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1133.0,
"min": 151.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.75,
"min": 3.4318181818181817,
"max": 25.75,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1133.0,
"min": 151.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704159441",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704159873"
},
"total": 431.1565656790001,
"count": 1,
"self": 0.7354853830001957,
"children": {
"run_training.setup": {
"total": 0.05072718399992482,
"count": 1,
"self": 0.05072718399992482
},
"TrainerController.start_learning": {
"total": 430.370353112,
"count": 1,
"self": 0.5268770639893319,
"children": {
"TrainerController._reset_env": {
"total": 3.517438179999999,
"count": 1,
"self": 3.517438179999999
},
"TrainerController.advance": {
"total": 426.19596026401064,
"count": 18211,
"self": 0.24890683702687966,
"children": {
"env_step": {
"total": 425.94705342698376,
"count": 18211,
"self": 278.8246891389492,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.75413611902218,
"count": 18211,
"self": 1.4020522160228666,
"children": {
"TorchPolicy.evaluate": {
"total": 145.3520839029993,
"count": 18211,
"self": 145.3520839029993
}
}
},
"workers": {
"total": 0.36822816901235456,
"count": 18211,
"self": 0.0,
"children": {
"worker_root": {
"total": 429.18078413599574,
"count": 18211,
"is_parallel": true,
"self": 213.20650706400534,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004669726999964041,
"count": 1,
"is_parallel": true,
"self": 0.0032227370002146927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014469899997493485,
"count": 10,
"is_parallel": true,
"self": 0.0014469899997493485
}
}
},
"UnityEnvironment.step": {
"total": 0.07336522099990361,
"count": 1,
"is_parallel": true,
"self": 0.0007331389999762905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003641929999957938,
"count": 1,
"is_parallel": true,
"self": 0.0003641929999957938
},
"communicator.exchange": {
"total": 0.07048882499998399,
"count": 1,
"is_parallel": true,
"self": 0.07048882499998399
},
"steps_from_proto": {
"total": 0.0017790639999475388,
"count": 1,
"is_parallel": true,
"self": 0.00031679300013820466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014622709998093342,
"count": 10,
"is_parallel": true,
"self": 0.0014622709998093342
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 215.9742770719904,
"count": 18210,
"is_parallel": true,
"self": 10.48487153596193,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.056249928015177,
"count": 18210,
"is_parallel": true,
"self": 5.056249928015177
},
"communicator.exchange": {
"total": 168.15148970800942,
"count": 18210,
"is_parallel": true,
"self": 168.15148970800942
},
"steps_from_proto": {
"total": 32.28166590000387,
"count": 18210,
"is_parallel": true,
"self": 5.865290694049236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.416375205954637,
"count": 182100,
"is_parallel": true,
"self": 26.416375205954637
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019001000009666313,
"count": 1,
"self": 0.00019001000009666313,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 420.9134108250787,
"count": 649833,
"is_parallel": true,
"self": 13.29238614284509,
"children": {
"process_trajectory": {
"total": 233.21080846323161,
"count": 649833,
"is_parallel": true,
"self": 231.6674853172317,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5433231459999206,
"count": 4,
"is_parallel": true,
"self": 1.5433231459999206
}
}
},
"_update_policy": {
"total": 174.410216219002,
"count": 90,
"is_parallel": true,
"self": 52.20494983800904,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.20526638099295,
"count": 4584,
"is_parallel": true,
"self": 122.20526638099295
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12988759399991068,
"count": 1,
"self": 0.0012794290000783803,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1286081649998323,
"count": 1,
"self": 0.1286081649998323
}
}
}
}
}
}
}