{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0622516870498657,
"min": 1.0622516870498657,
"max": 1.7195435762405396,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10901.888671875,
"min": 10674.796875,
"max": 17647.67578125,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 109976.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 109976.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.550299644470215,
"min": 8.74144172668457,
"max": 11.550299644470215,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2367.8115234375,
"min": 1678.3568115234375,
"max": 2367.8115234375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06668230118361237,
"min": 0.06311503924414008,
"max": 0.07331826372800268,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3334115059180619,
"min": 0.2524601569765603,
"max": 0.3665913186400134,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20397974054018655,
"min": 0.2004963494106835,
"max": 0.29280298711805075,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0198987027009327,
"min": 0.801985397642734,
"max": 1.2914691792399275,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.004097332000004e-06,
"min": 8.004097332000004e-06,
"max": 0.000141654052782,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.002048666000002e-05,
"min": 4.002048666000002e-05,
"max": 0.00063402028866,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10266800000000002,
"min": 0.10266800000000002,
"max": 0.14721800000000002,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5133400000000001,
"min": 0.430472,
"max": 0.7113400000000001,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014313320000000008,
"min": 0.00014313320000000008,
"max": 0.0023661782,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0007156660000000004,
"min": 0.0007156660000000004,
"max": 0.010595866000000002,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.436363636363637,
"min": 18.454545454545453,
"max": 22.818181818181817,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1234.0,
"min": 812.0,
"max": 1251.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.436363636363637,
"min": 18.454545454545453,
"max": 22.818181818181817,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1234.0,
"min": 812.0,
"max": 1251.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678195628",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678195866"
},
"total": 237.99495000299987,
"count": 1,
"self": 0.6418777099999033,
"children": {
"run_training.setup": {
"total": 0.11312870500000827,
"count": 1,
"self": 0.11312870500000827
},
"TrainerController.start_learning": {
"total": 237.23994358799996,
"count": 1,
"self": 0.34016296200366014,
"children": {
"TrainerController._reset_env": {
"total": 7.13693175800006,
"count": 1,
"self": 7.13693175800006
},
"TrainerController.advance": {
"total": 229.6486705449962,
"count": 9135,
"self": 0.1452942349960722,
"children": {
"env_step": {
"total": 229.50337631000014,
"count": 9135,
"self": 157.64262083499648,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.71309445000293,
"count": 9135,
"self": 0.9491048070010493,
"children": {
"TorchPolicy.evaluate": {
"total": 70.76398964300188,
"count": 9135,
"self": 15.995086332012306,
"children": {
"TorchPolicy.sample_actions": {
"total": 54.76890331098957,
"count": 9135,
"self": 54.76890331098957
}
}
}
}
},
"workers": {
"total": 0.14766102500072975,
"count": 9135,
"self": 0.0,
"children": {
"worker_root": {
"total": 236.39901941300332,
"count": 9135,
"is_parallel": true,
"self": 113.93852676299866,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002187408000054347,
"count": 1,
"is_parallel": true,
"self": 0.0007914070000651918,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013960009999891554,
"count": 10,
"is_parallel": true,
"self": 0.0013960009999891554
}
}
},
"UnityEnvironment.step": {
"total": 0.03376759699995091,
"count": 1,
"is_parallel": true,
"self": 0.0005362899998999637,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000373188000025948,
"count": 1,
"is_parallel": true,
"self": 0.000373188000025948
},
"communicator.exchange": {
"total": 0.030980238999973153,
"count": 1,
"is_parallel": true,
"self": 0.030980238999973153
},
"steps_from_proto": {
"total": 0.0018778800000518459,
"count": 1,
"is_parallel": true,
"self": 0.0004343849999486338,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001443495000103212,
"count": 10,
"is_parallel": true,
"self": 0.001443495000103212
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 122.46049265000465,
"count": 9134,
"is_parallel": true,
"self": 4.792471791006392,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.6153288339952496,
"count": 9134,
"is_parallel": true,
"self": 2.6153288339952496
},
"communicator.exchange": {
"total": 99.40110669900571,
"count": 9134,
"is_parallel": true,
"self": 99.40110669900571
},
"steps_from_proto": {
"total": 15.651585325997303,
"count": 9134,
"is_parallel": true,
"self": 3.4859827090164117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.165602616980891,
"count": 91340,
"is_parallel": true,
"self": 12.165602616980891
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0004002030000265222,
"count": 1,
"self": 0.0004002030000265222,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 227.98558070497995,
"count": 206526,
"is_parallel": true,
"self": 5.128718028015328,
"children": {
"process_trajectory": {
"total": 129.96635526596503,
"count": 206526,
"is_parallel": true,
"self": 129.5551147529651,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4112405129999388,
"count": 2,
"is_parallel": true,
"self": 0.4112405129999388
}
}
},
"_update_policy": {
"total": 92.89050741099959,
"count": 45,
"is_parallel": true,
"self": 31.511478425999826,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.37902898499976,
"count": 2292,
"is_parallel": true,
"self": 61.37902898499976
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11377812000000631,
"count": 1,
"self": 0.0012125089999699412,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11256561100003637,
"count": 1,
"self": 0.11256561100003637
}
}
}
}
}
}
}