{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0591418743133545,
"min": 1.0591418743133545,
"max": 2.864712715148926,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10205.890625,
"min": 10205.890625,
"max": 29337.5234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.073869705200195,
"min": 0.3839539587497711,
"max": 12.073869705200195,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2354.404541015625,
"min": 74.48706817626953,
"max": 2426.869873046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06646537197786316,
"min": 0.061501281660086685,
"max": 0.07729435361500955,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26586148791145264,
"min": 0.26364241908167435,
"max": 0.36283532822990844,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20401351449682434,
"min": 0.13215004675222708,
"max": 0.2682116824183978,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8160540579872974,
"min": 0.5286001870089083,
"max": 1.3369165208994174,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.15909090909091,
"min": 3.4318181818181817,
"max": 24.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1063.0,
"min": 151.0,
"max": 1325.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.15909090909091,
"min": 3.4318181818181817,
"max": 24.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1063.0,
"min": 151.0,
"max": 1325.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703515103",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703515577"
},
"total": 473.791866251,
"count": 1,
"self": 0.6339675919998626,
"children": {
"run_training.setup": {
"total": 0.05127992200004883,
"count": 1,
"self": 0.05127992200004883
},
"TrainerController.start_learning": {
"total": 473.1066187370001,
"count": 1,
"self": 0.6244801569957872,
"children": {
"TrainerController._reset_env": {
"total": 3.467802487999961,
"count": 1,
"self": 3.467802487999961
},
"TrainerController.advance": {
"total": 468.87878358200453,
"count": 18215,
"self": 0.29205099800742573,
"children": {
"env_step": {
"total": 468.5867325839971,
"count": 18215,
"self": 309.4357022999808,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.85094266099475,
"count": 18215,
"self": 1.558314215999303,
"children": {
"TorchPolicy.evaluate": {
"total": 157.29262844499544,
"count": 18215,
"self": 157.29262844499544
}
}
},
"workers": {
"total": 0.3000876230215681,
"count": 18215,
"self": 0.0,
"children": {
"worker_root": {
"total": 471.67992350799045,
"count": 18215,
"is_parallel": true,
"self": 232.39732348799532,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005212222000068323,
"count": 1,
"is_parallel": true,
"self": 0.0031697779999149134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020424440001534094,
"count": 10,
"is_parallel": true,
"self": 0.0020424440001534094
}
}
},
"UnityEnvironment.step": {
"total": 0.046039331999963906,
"count": 1,
"is_parallel": true,
"self": 0.0006478619999370494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044607799998175324,
"count": 1,
"is_parallel": true,
"self": 0.00044607799998175324
},
"communicator.exchange": {
"total": 0.04278746099998898,
"count": 1,
"is_parallel": true,
"self": 0.04278746099998898
},
"steps_from_proto": {
"total": 0.0021579310000561236,
"count": 1,
"is_parallel": true,
"self": 0.00042819499992674537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017297360001293782,
"count": 10,
"is_parallel": true,
"self": 0.0017297360001293782
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.28260001999513,
"count": 18214,
"is_parallel": true,
"self": 11.016970321012423,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.684162869991724,
"count": 18214,
"is_parallel": true,
"self": 5.684162869991724
},
"communicator.exchange": {
"total": 186.65125855799283,
"count": 18214,
"is_parallel": true,
"self": 186.65125855799283
},
"steps_from_proto": {
"total": 35.93020827099815,
"count": 18214,
"is_parallel": true,
"self": 6.679311237018624,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.250897033979527,
"count": 182140,
"is_parallel": true,
"self": 29.250897033979527
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012070899992977502,
"count": 1,
"self": 0.00012070899992977502,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 463.21933048406277,
"count": 679195,
"is_parallel": true,
"self": 14.380588106021378,
"children": {
"process_trajectory": {
"total": 255.5627697700421,
"count": 679195,
"is_parallel": true,
"self": 254.83835440904204,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7244153610000694,
"count": 4,
"is_parallel": true,
"self": 0.7244153610000694
}
}
},
"_update_policy": {
"total": 193.27597260799928,
"count": 90,
"is_parallel": true,
"self": 60.68561321700372,
"children": {
"TorchPPOOptimizer.update": {
"total": 132.59035939099556,
"count": 4584,
"is_parallel": true,
"self": 132.59035939099556
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13543180099986785,
"count": 1,
"self": 0.0012936010000430542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1341381999998248,
"count": 1,
"self": 0.1341381999998248
}
}
}
}
}
}
}