{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4264087677001953,
"min": 1.419917345046997,
"max": 1.4264087677001953,
"count": 5
},
"Huggy.Policy.Entropy.sum": {
"value": 70582.984375,
"min": 69511.0546875,
"max": 77365.6171875,
"count": 5
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 196.2687747035573,
"min": 196.2687747035573,
"max": 397.58730158730157,
"count": 5
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49656.0,
"min": 49656.0,
"max": 50096.0,
"count": 5
},
"Huggy.Step.mean": {
"value": 249846.0,
"min": 49966.0,
"max": 249846.0,
"count": 5
},
"Huggy.Step.sum": {
"value": 249846.0,
"min": 49966.0,
"max": 249846.0,
"count": 5
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.1906222105026245,
"min": 0.13595494627952576,
"max": 1.1906222105026245,
"count": 5
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 301.2274169921875,
"min": 16.994367599487305,
"max": 301.2274169921875,
"count": 5
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6464908360963753,
"min": 1.6980733127593994,
"max": 3.6464908360963753,
"count": 5
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 922.562181532383,
"min": 212.25916409492493,
"max": 922.562181532383,
"count": 5
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6464908360963753,
"min": 1.6980733127593994,
"max": 3.6464908360963753,
"count": 5
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 922.562181532383,
"min": 212.25916409492493,
"max": 922.562181532383,
"count": 5
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01811859828303568,
"min": 0.012848905237721434,
"max": 0.01811859828303568,
"count": 5
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05435579484910704,
"min": 0.025697810475442867,
"max": 0.05435579484910704,
"count": 5
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.029815138089987966,
"min": 0.02045394352947672,
"max": 0.029815138089987966,
"count": 5
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.0894454142699639,
"min": 0.04090788705895344,
"max": 0.0894454142699639,
"count": 5
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.00026582036139321657,
"min": 0.00026582036139321657,
"max": 0.00029533417655527497,
"count": 5
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.0007974610841796498,
"min": 0.0005471232176255999,
"max": 0.0008440206186597999,
"count": 5
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.18860678333333344,
"min": 0.18860678333333344,
"max": 0.198444725,
"count": 5
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.5658203500000003,
"min": 0.38237439999999995,
"max": 0.5813402000000001,
"count": 5
},
"Huggy.Policy.Beta.mean": {
"value": 0.004431478488333334,
"min": 0.004431478488333334,
"max": 0.004922391777500001,
"count": 5
},
"Huggy.Policy.Beta.sum": {
"value": 0.013294435465000002,
"min": 0.009120482559999999,
"max": 0.014068875979999998,
"count": 5
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673381774",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673382076"
},
"total": 301.537530989,
"count": 1,
"self": 0.19832968599996548,
"children": {
"run_training.setup": {
"total": 0.12034416900002043,
"count": 1,
"self": 0.12034416900002043
},
"TrainerController.start_learning": {
"total": 301.218857134,
"count": 1,
"self": 0.5126490449882226,
"children": {
"TrainerController._reset_env": {
"total": 9.30041789500001,
"count": 1,
"self": 9.30041789500001
},
"TrainerController.advance": {
"total": 291.2181661600118,
"count": 28918,
"self": 0.5635716290152573,
"children": {
"env_step": {
"total": 234.95501514099124,
"count": 28918,
"self": 196.22451774100375,
"children": {
"SubprocessEnvManager._take_step": {
"total": 38.379471486,
"count": 28918,
"self": 1.9602081490000387,
"children": {
"TorchPolicy.evaluate": {
"total": 36.41926333699996,
"count": 28467,
"self": 9.076978616007125,
"children": {
"TorchPolicy.sample_actions": {
"total": 27.342284720992836,
"count": 28467,
"self": 27.342284720992836
}
}
}
}
},
"workers": {
"total": 0.35102591398748473,
"count": 28917,
"self": 0.0,
"children": {
"worker_root": {
"total": 300.1083008200015,
"count": 28917,
"is_parallel": true,
"self": 140.03663003599877,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002005072000031305,
"count": 1,
"is_parallel": true,
"self": 0.0003923979999740368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016126740000572681,
"count": 2,
"is_parallel": true,
"self": 0.0016126740000572681
}
}
},
"UnityEnvironment.step": {
"total": 0.02980478699998912,
"count": 1,
"is_parallel": true,
"self": 0.00029331299998602844,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001984440000342147,
"count": 1,
"is_parallel": true,
"self": 0.0001984440000342147
},
"communicator.exchange": {
"total": 0.02840744799999584,
"count": 1,
"is_parallel": true,
"self": 0.02840744799999584
},
"steps_from_proto": {
"total": 0.0009055819999730375,
"count": 1,
"is_parallel": true,
"self": 0.0004197949999706907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00048578700000234676,
"count": 2,
"is_parallel": true,
"self": 0.00048578700000234676
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 160.07167078400272,
"count": 28916,
"is_parallel": true,
"self": 4.5144605780114375,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.63715646200177,
"count": 28916,
"is_parallel": true,
"self": 10.63715646200177
},
"communicator.exchange": {
"total": 132.34510055699428,
"count": 28916,
"is_parallel": true,
"self": 132.34510055699428
},
"steps_from_proto": {
"total": 12.57495318699523,
"count": 28916,
"is_parallel": true,
"self": 5.474137778007275,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.100815408987955,
"count": 57832,
"is_parallel": true,
"self": 7.100815408987955
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 55.69957939000528,
"count": 28917,
"self": 0.8723082410051575,
"children": {
"process_trajectory": {
"total": 16.09007579800027,
"count": 28917,
"self": 15.957282285000247,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13279351300002418,
"count": 1,
"self": 0.13279351300002418
}
}
},
"_update_policy": {
"total": 38.737195350999855,
"count": 12,
"self": 32.15757900800054,
"children": {
"TorchPPOOptimizer.update": {
"total": 6.579616342999316,
"count": 360,
"self": 6.579616342999316
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4749999763807864e-06,
"count": 1,
"self": 1.4749999763807864e-06
},
"TrainerController._save_models": {
"total": 0.1876225590000331,
"count": 1,
"self": 0.0028714180000406486,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18475114099999246,
"count": 1,
"self": 0.18475114099999246
}
}
}
}
}
}
}