{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8403100371360779,
"min": 0.8263494372367859,
"max": 2.556847095489502,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 41946.59765625,
"min": 41240.62109375,
"max": 129151.4609375,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.724784851074219,
"min": 2.451233148574829,
"max": 13.724784851074219,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 13765.958984375,
"min": 2431.623291015625,
"max": 13765.958984375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07151354829555123,
"min": 0.06421296256795843,
"max": 0.07151354829555123,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 1.6448116107976785,
"min": 1.4126851764950854,
"max": 1.6448116107976785,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.171875410817265,
"min": 0.171875410817265,
"max": 0.27297027942622104,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 3.9531344487970954,
"min": 3.845575377491175,
"max": 6.278316426803084,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4932895022400003e-05,
"min": 1.4932895022400003e-05,
"max": 0.00028487280504239996,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003434565855152001,
"min": 0.0003434565855152001,
"max": 0.006267201710932799,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10497759999999999,
"min": 0.10497759999999999,
"max": 0.19495760000000004,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 2.4144848,
"min": 2.4144848,
"max": 4.289067200000001,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002583822400000001,
"min": 0.0002583822400000001,
"max": 0.0047483842399999995,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0059427915200000025,
"min": 0.0059427915200000025,
"max": 0.10446445328,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.889328063241106,
"min": 8.210743801652892,
"max": 26.889328063241106,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6803.0,
"min": 1987.0,
"max": 6803.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.889328063241106,
"min": 8.210743801652892,
"max": 26.889328063241106,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6803.0,
"min": 1987.0,
"max": 6803.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681724768",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681725863"
},
"total": 1095.6582530249998,
"count": 1,
"self": 0.3869021969994719,
"children": {
"run_training.setup": {
"total": 0.11017046000006303,
"count": 1,
"self": 0.11017046000006303
},
"TrainerController.start_learning": {
"total": 1095.1611803680003,
"count": 1,
"self": 1.2609349130057126,
"children": {
"TrainerController._reset_env": {
"total": 4.211399405000066,
"count": 1,
"self": 4.211399405000066
},
"TrainerController.advance": {
"total": 1089.5590087799942,
"count": 45475,
"self": 0.6641217049959778,
"children": {
"env_step": {
"total": 1088.8948870749982,
"count": 45475,
"self": 795.516571918939,
"children": {
"SubprocessEnvManager._take_step": {
"total": 292.75431477702114,
"count": 45475,
"self": 3.926513818928015,
"children": {
"TorchPolicy.evaluate": {
"total": 288.8278009580931,
"count": 45475,
"self": 288.8278009580931
}
}
},
"workers": {
"total": 0.6240003790380797,
"count": 45475,
"self": 0.0,
"children": {
"worker_root": {
"total": 1091.685549399022,
"count": 45475,
"is_parallel": true,
"self": 508.1682289429946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005813861000206089,
"count": 1,
"is_parallel": true,
"self": 0.0041252470000472385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00168861400015885,
"count": 10,
"is_parallel": true,
"self": 0.00168861400015885
}
}
},
"UnityEnvironment.step": {
"total": 0.08429478599964568,
"count": 1,
"is_parallel": true,
"self": 0.0005969879994154326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004047420002279978,
"count": 1,
"is_parallel": true,
"self": 0.0004047420002279978
},
"communicator.exchange": {
"total": 0.0816162890000669,
"count": 1,
"is_parallel": true,
"self": 0.0816162890000669
},
"steps_from_proto": {
"total": 0.001676766999935353,
"count": 1,
"is_parallel": true,
"self": 0.00034214500010421034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013346219998311426,
"count": 10,
"is_parallel": true,
"self": 0.0013346219998311426
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 583.5173204560274,
"count": 45474,
"is_parallel": true,
"self": 24.263216632022704,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.244547802052239,
"count": 45474,
"is_parallel": true,
"self": 13.244547802052239
},
"communicator.exchange": {
"total": 469.14776076697353,
"count": 45474,
"is_parallel": true,
"self": 469.14776076697353
},
"steps_from_proto": {
"total": 76.86179525497892,
"count": 45474,
"is_parallel": true,
"self": 14.645028023956002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.216767231022914,
"count": 454740,
"is_parallel": true,
"self": 62.216767231022914
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015730500035715522,
"count": 1,
"self": 0.00015730500035715522,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1081.3479028990432,
"count": 961017,
"is_parallel": true,
"self": 24.042134057207477,
"children": {
"process_trajectory": {
"total": 594.276238944838,
"count": 961017,
"is_parallel": true,
"self": 594.0767484148382,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19949052999982086,
"count": 1,
"is_parallel": true,
"self": 0.19949052999982086
}
}
},
"_update_policy": {
"total": 463.0295298969977,
"count": 227,
"is_parallel": true,
"self": 166.3484731920189,
"children": {
"TorchPPOOptimizer.update": {
"total": 296.6810567049788,
"count": 11574,
"is_parallel": true,
"self": 296.6810567049788
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12967996499992296,
"count": 1,
"self": 0.0008751529999244667,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1288048119999985,
"count": 1,
"self": 0.1288048119999985
}
}
}
}
}
}
}