{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7455326318740845,
"min": 0.6947506070137024,
"max": 2.856491804122925,
"count": 57
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8168.0556640625,
"min": 6167.30126953125,
"max": 31578.515625,
"count": 57
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 57
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 57
},
"SnowballTarget.Step.mean": {
"value": 569800.0,
"min": 9800.0,
"max": 569800.0,
"count": 57
},
"SnowballTarget.Step.sum": {
"value": 569800.0,
"min": 9800.0,
"max": 569800.0,
"count": 57
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.652848243713379,
"min": 0.523383378982544,
"max": 13.960793495178223,
"count": 57
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 682.6423950195312,
"min": 25.645784378051758,
"max": 698.0396728515625,
"count": 57
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.94,
"min": 4.3061224489795915,
"max": 27.94,
"count": 57
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1397.0,
"min": 211.0,
"max": 1397.0,
"count": 57
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.94,
"min": 4.3061224489795915,
"max": 27.94,
"count": 57
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1397.0,
"min": 211.0,
"max": 1397.0,
"count": 57
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0692216797985762,
"min": 0.05952989333389126,
"max": 0.07566596837540263,
"count": 57
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.346108398992881,
"min": 0.2476057921274903,
"max": 0.37081778616575056,
"count": 57
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1814915816281356,
"min": 0.15723642129816262,
"max": 0.29019320135315263,
"count": 57
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9074579081406781,
"min": 0.6289456851926505,
"max": 1.3134892965648688,
"count": 57
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00026607601130799997,
"min": 0.00026607601130799997,
"max": 0.00029967000010999994,
"count": 57
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0013303800565399998,
"min": 0.00106932004356,
"max": 0.00149538000154,
"count": 57
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.18869200000000003,
"min": 0.18869200000000003,
"max": 0.19989,
"count": 57
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9434600000000001,
"min": 0.7564400000000002,
"max": 0.9984600000000003,
"count": 57
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.004435730800000001,
"min": 0.004435730800000001,
"max": 0.004994511,
"count": 57
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.022178654000000003,
"min": 0.017826355999999998,
"max": 0.024923153999999996,
"count": 57
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 57
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 57
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706274862",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706276086"
},
"total": 1223.224189303,
"count": 1,
"self": 0.2607851780001056,
"children": {
"run_training.setup": {
"total": 0.05205427900000359,
"count": 1,
"self": 0.05205427900000359
},
"TrainerController.start_learning": {
"total": 1222.911349846,
"count": 1,
"self": 1.5524370540385917,
"children": {
"TrainerController._reset_env": {
"total": 3.4645792039999606,
"count": 1,
"self": 3.4645792039999606
},
"TrainerController.advance": {
"total": 1217.7482878109613,
"count": 52295,
"self": 0.7853480379671964,
"children": {
"env_step": {
"total": 1216.962939772994,
"count": 52295,
"self": 763.3916380819993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 452.7889390070019,
"count": 52295,
"self": 4.067349219001699,
"children": {
"TorchPolicy.evaluate": {
"total": 448.7215897880002,
"count": 52295,
"self": 448.7215897880002
}
}
},
"workers": {
"total": 0.7823626839929148,
"count": 52294,
"self": 0.0,
"children": {
"worker_root": {
"total": 1219.4185270639878,
"count": 52294,
"is_parallel": true,
"self": 582.9643639289817,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004336449999982506,
"count": 1,
"is_parallel": true,
"self": 0.0030386449999468823,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012978050000356234,
"count": 10,
"is_parallel": true,
"self": 0.0012978050000356234
}
}
},
"UnityEnvironment.step": {
"total": 0.06717407799999364,
"count": 1,
"is_parallel": true,
"self": 0.0007510849999334823,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004051290000006702,
"count": 1,
"is_parallel": true,
"self": 0.0004051290000006702
},
"communicator.exchange": {
"total": 0.06355860200005736,
"count": 1,
"is_parallel": true,
"self": 0.06355860200005736
},
"steps_from_proto": {
"total": 0.002459262000002127,
"count": 1,
"is_parallel": true,
"self": 0.0006287819999215571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018304800000805699,
"count": 10,
"is_parallel": true,
"self": 0.0018304800000805699
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 636.4541631350061,
"count": 52293,
"is_parallel": true,
"self": 30.213903139048398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.223697234998554,
"count": 52293,
"is_parallel": true,
"self": 15.223697234998554
},
"communicator.exchange": {
"total": 496.0269671799722,
"count": 52293,
"is_parallel": true,
"self": 496.0269671799722
},
"steps_from_proto": {
"total": 94.98959558098693,
"count": 52293,
"is_parallel": true,
"self": 17.44154770700709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.54804787397984,
"count": 522930,
"is_parallel": true,
"self": 77.54804787397984
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001902669998798956,
"count": 1,
"self": 0.0001902669998798956,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1205.3195555667915,
"count": 1533255,
"is_parallel": true,
"self": 33.43555758165735,
"children": {
"process_trajectory": {
"total": 549.632392468136,
"count": 1533255,
"is_parallel": true,
"self": 548.287163627136,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3452288409999937,
"count": 11,
"is_parallel": true,
"self": 1.3452288409999937
}
}
},
"_update_policy": {
"total": 622.251605516998,
"count": 261,
"is_parallel": true,
"self": 155.37658699601184,
"children": {
"TorchPPOOptimizer.update": {
"total": 466.87501852098615,
"count": 13311,
"is_parallel": true,
"self": 466.87501852098615
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1458555100000467,
"count": 1,
"self": 0.001421006999862584,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14443450300018412,
"count": 1,
"self": 0.14443450300018412
}
}
}
}
}
}
}