{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4059021472930908,
"min": 1.4059021472930908,
"max": 1.4281151294708252,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70659.234375,
"min": 68832.484375,
"max": 78399.7265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 75.01369863013699,
"min": 69.31697054698458,
"max": 438.00877192982455,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49284.0,
"min": 48856.0,
"max": 49933.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999926.0,
"min": 49297.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999926.0,
"min": 49297.0,
"max": 1999926.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.490791082382202,
"min": 0.11372275650501251,
"max": 2.5769548416137695,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1636.44970703125,
"min": 12.850671768188477,
"max": 1734.801513671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.827007929846939,
"min": 1.865167474060987,
"max": 4.0487529831522835,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2514.344209909439,
"min": 210.76392456889153,
"max": 2789.666543662548,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.827007929846939,
"min": 1.865167474060987,
"max": 4.0487529831522835,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2514.344209909439,
"min": 210.76392456889153,
"max": 2789.666543662548,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017186777402013024,
"min": 0.01305256654504774,
"max": 0.020378608367173,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051560332206039075,
"min": 0.02610513309009548,
"max": 0.05506816798636767,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0593686715596252,
"min": 0.020538847831388315,
"max": 0.0664119141176343,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1781060146788756,
"min": 0.04107769566277663,
"max": 0.18165834484001003,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.673448775550008e-06,
"min": 3.673448775550008e-06,
"max": 0.00029534257655247496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1020346326650024e-05,
"min": 1.1020346326650024e-05,
"max": 0.0008439924186691998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122445000000001,
"min": 0.10122445000000001,
"max": 0.198447525,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30367335,
"min": 0.20757880000000006,
"max": 0.5813308000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.110005500000017e-05,
"min": 7.110005500000017e-05,
"max": 0.004922531497500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021330016500000051,
"min": 0.00021330016500000051,
"max": 0.014068406920000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673796878",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673799075"
},
"total": 2197.205023922,
"count": 1,
"self": 0.38940362099992853,
"children": {
"run_training.setup": {
"total": 0.11108902900002704,
"count": 1,
"self": 0.11108902900002704
},
"TrainerController.start_learning": {
"total": 2196.7045312719997,
"count": 1,
"self": 3.836709724997945,
"children": {
"TrainerController._reset_env": {
"total": 10.30257045299993,
"count": 1,
"self": 10.30257045299993
},
"TrainerController.advance": {
"total": 2182.4522775710016,
"count": 233346,
"self": 3.8680380630148647,
"children": {
"env_step": {
"total": 1714.2058023169757,
"count": 233346,
"self": 1445.359350871946,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.2969483410586,
"count": 233346,
"self": 13.857610688950558,
"children": {
"TorchPolicy.evaluate": {
"total": 252.43933765210807,
"count": 222948,
"self": 63.24619329994346,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.1931443521646,
"count": 222948,
"self": 189.1931443521646
}
}
}
}
},
"workers": {
"total": 2.5495031039710057,
"count": 233346,
"self": 0.0,
"children": {
"worker_root": {
"total": 2186.6556323530444,
"count": 233346,
"is_parallel": true,
"self": 995.4577468391221,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0037780499999371386,
"count": 1,
"is_parallel": true,
"self": 0.00033876599991344847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00343928400002369,
"count": 2,
"is_parallel": true,
"self": 0.00343928400002369
}
}
},
"UnityEnvironment.step": {
"total": 0.027847413000017696,
"count": 1,
"is_parallel": true,
"self": 0.00028343699989363813,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022130400009245932,
"count": 1,
"is_parallel": true,
"self": 0.00022130400009245932
},
"communicator.exchange": {
"total": 0.02647282999998879,
"count": 1,
"is_parallel": true,
"self": 0.02647282999998879
},
"steps_from_proto": {
"total": 0.0008698420000428086,
"count": 1,
"is_parallel": true,
"self": 0.00024323700006334548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006266049999794632,
"count": 2,
"is_parallel": true,
"self": 0.0006266049999794632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1191.1978855139223,
"count": 233345,
"is_parallel": true,
"self": 35.36548255609978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.51591081692595,
"count": 233345,
"is_parallel": true,
"self": 77.51591081692595
},
"communicator.exchange": {
"total": 981.6695352769444,
"count": 233345,
"is_parallel": true,
"self": 981.6695352769444
},
"steps_from_proto": {
"total": 96.64695686395214,
"count": 233345,
"is_parallel": true,
"self": 38.998213818052704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.64874304589944,
"count": 466690,
"is_parallel": true,
"self": 57.64874304589944
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.37843719101113,
"count": 233346,
"self": 5.908522043923313,
"children": {
"process_trajectory": {
"total": 147.98361985608813,
"count": 233346,
"self": 146.79760435908906,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1860154969990617,
"count": 10,
"self": 1.1860154969990617
}
}
},
"_update_policy": {
"total": 310.4862952909997,
"count": 97,
"self": 257.6271300289942,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.85916526200549,
"count": 2910,
"self": 52.85916526200549
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.490001164318528e-07,
"count": 1,
"self": 8.490001164318528e-07
},
"TrainerController._save_models": {
"total": 0.11297267400004785,
"count": 1,
"self": 0.004024979999940115,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10894769400010773,
"count": 1,
"self": 0.10894769400010773
}
}
}
}
}
}
}