{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0021921396255493,
"min": 1.0021921396255493,
"max": 2.856139898300171,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9602.0029296875,
"min": 9602.0029296875,
"max": 29312.564453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.36907958984375,
"min": 0.3789811134338379,
"max": 13.36907958984375,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2606.970458984375,
"min": 73.5223388671875,
"max": 2710.776611328125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.060274857009633205,
"min": 0.060274857009633205,
"max": 0.07401087750636803,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24109942803853282,
"min": 0.24109942803853282,
"max": 0.36115960359593907,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18278574370139952,
"min": 0.1325587125417466,
"max": 0.2685516300154667,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7311429748055981,
"min": 0.5302348501669863,
"max": 1.3427581500773336,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.6940097306000015e-05,
"min": 2.6940097306000015e-05,
"max": 0.000972940002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00010776038922400006,
"min": 0.00010776038922400006,
"max": 0.004617200038280001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.363636363636363,
"min": 3.6818181818181817,
"max": 26.381818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1160.0,
"min": 162.0,
"max": 1451.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.363636363636363,
"min": 3.6818181818181817,
"max": 26.381818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1160.0,
"min": 162.0,
"max": 1451.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681978052",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681978659"
},
"total": 607.1494858819999,
"count": 1,
"self": 0.5448061489998963,
"children": {
"run_training.setup": {
"total": 0.14229666700009602,
"count": 1,
"self": 0.14229666700009602
},
"TrainerController.start_learning": {
"total": 606.4623830659999,
"count": 1,
"self": 0.8669223429874364,
"children": {
"TrainerController._reset_env": {
"total": 1.1251950289999968,
"count": 1,
"self": 1.1251950289999968
},
"TrainerController.advance": {
"total": 604.3065561400125,
"count": 18219,
"self": 0.4727914920171088,
"children": {
"env_step": {
"total": 603.8337646479954,
"count": 18219,
"self": 488.5744498209966,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.74360645099637,
"count": 18219,
"self": 3.1584199729886677,
"children": {
"TorchPolicy.evaluate": {
"total": 111.5851864780077,
"count": 18219,
"self": 111.5851864780077
}
}
},
"workers": {
"total": 0.5157083760024079,
"count": 18219,
"self": 0.0,
"children": {
"worker_root": {
"total": 604.0579600969976,
"count": 18219,
"is_parallel": true,
"self": 264.0842073920031,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0080187080000087,
"count": 1,
"is_parallel": true,
"self": 0.005666873999985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023518340000237004,
"count": 10,
"is_parallel": true,
"self": 0.0023518340000237004
}
}
},
"UnityEnvironment.step": {
"total": 0.048483002000011766,
"count": 1,
"is_parallel": true,
"self": 0.0008277580001276874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005408470000247689,
"count": 1,
"is_parallel": true,
"self": 0.0005408470000247689
},
"communicator.exchange": {
"total": 0.04395661999990352,
"count": 1,
"is_parallel": true,
"self": 0.04395661999990352
},
"steps_from_proto": {
"total": 0.003157776999955786,
"count": 1,
"is_parallel": true,
"self": 0.0008198619998438517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002337915000111934,
"count": 10,
"is_parallel": true,
"self": 0.002337915000111934
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 339.97375270499447,
"count": 18218,
"is_parallel": true,
"self": 14.468753163994506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.707607290987539,
"count": 18218,
"is_parallel": true,
"self": 7.707607290987539
},
"communicator.exchange": {
"total": 272.3970119050108,
"count": 18218,
"is_parallel": true,
"self": 272.3970119050108
},
"steps_from_proto": {
"total": 45.40038034500162,
"count": 18218,
"is_parallel": true,
"self": 9.438870183973336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.961510161028286,
"count": 182180,
"is_parallel": true,
"self": 35.961510161028286
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002293659999850206,
"count": 1,
"self": 0.0002293659999850206,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 598.5402126321495,
"count": 604501,
"is_parallel": true,
"self": 16.539424808215927,
"children": {
"process_trajectory": {
"total": 323.93127487193306,
"count": 604501,
"is_parallel": true,
"self": 322.2869383229331,
"children": {
"RLTrainer._checkpoint": {
"total": 1.644336548999945,
"count": 4,
"is_parallel": true,
"self": 1.644336548999945
}
}
},
"_update_policy": {
"total": 258.06951295200054,
"count": 90,
"is_parallel": true,
"self": 87.3400163740099,
"children": {
"TorchPPOOptimizer.update": {
"total": 170.72949657799063,
"count": 4587,
"is_parallel": true,
"self": 170.72949657799063
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1634801879999941,
"count": 1,
"self": 0.0015275700000074721,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16195261799998661,
"count": 1,
"self": 0.16195261799998661
}
}
}
}
}
}
}