{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4007903337478638,
"min": 1.4007903337478638,
"max": 1.4246126413345337,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69587.0625,
"min": 66873.828125,
"max": 78514.8515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.76923076923077,
"min": 75.74159021406727,
"max": 387.28682170542635,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49350.0,
"min": 48855.0,
"max": 50064.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999928.0,
"min": 49850.0,
"max": 1999928.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999928.0,
"min": 49850.0,
"max": 1999928.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4779748916625977,
"min": 0.0008318515028804541,
"max": 2.5057716369628906,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1514.042724609375,
"min": 0.10647699236869812,
"max": 1597.867919921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9048165178533076,
"min": 1.8307078869547695,
"max": 4.017097489344768,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2385.842892408371,
"min": 234.3306095302105,
"max": 2485.3912789821625,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9048165178533076,
"min": 1.8307078869547695,
"max": 4.017097489344768,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2385.842892408371,
"min": 234.3306095302105,
"max": 2485.3912789821625,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01518778050679733,
"min": 0.013823646286457209,
"max": 0.021031479294874266,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04556334152039199,
"min": 0.02797427335268973,
"max": 0.0630944378846228,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05935809624691805,
"min": 0.022962432789305848,
"max": 0.06008724750330051,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17807428874075415,
"min": 0.045924865578611695,
"max": 0.17807428874075415,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6832987722666745e-06,
"min": 3.6832987722666745e-06,
"max": 0.0002952992265669249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1049896316800024e-05,
"min": 1.1049896316800024e-05,
"max": 0.0008442033185988998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122773333333335,
"min": 0.10122773333333335,
"max": 0.19843307500000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30368320000000004,
"min": 0.20760210000000007,
"max": 0.5814011,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.12638933333335e-05,
"min": 7.12638933333335e-05,
"max": 0.0049218104425000015,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002137916800000005,
"min": 0.0002137916800000005,
"max": 0.01407191489,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670678235",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670680607"
},
"total": 2371.755040972,
"count": 1,
"self": 0.43797935900011,
"children": {
"run_training.setup": {
"total": 0.11385134700003618,
"count": 1,
"self": 0.11385134700003618
},
"TrainerController.start_learning": {
"total": 2371.203210266,
"count": 1,
"self": 3.9947984689324585,
"children": {
"TrainerController._reset_env": {
"total": 10.411376589999918,
"count": 1,
"self": 10.411376589999918
},
"TrainerController.advance": {
"total": 2356.6832206870677,
"count": 232816,
"self": 4.375101168981928,
"children": {
"env_step": {
"total": 1867.4350756711144,
"count": 232816,
"self": 1568.8108930690275,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.8008867059398,
"count": 232816,
"self": 15.257952610803613,
"children": {
"TorchPolicy.evaluate": {
"total": 280.54293409513616,
"count": 222902,
"self": 69.78448718009213,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.75844691504403,
"count": 222902,
"self": 210.75844691504403
}
}
}
}
},
"workers": {
"total": 2.8232958961471013,
"count": 232816,
"self": 0.0,
"children": {
"worker_root": {
"total": 2362.6940766759585,
"count": 232816,
"is_parallel": true,
"self": 1075.1563932508743,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0038894070000878855,
"count": 1,
"is_parallel": true,
"self": 0.0003454899999724148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035439170001154707,
"count": 2,
"is_parallel": true,
"self": 0.0035439170001154707
}
}
},
"UnityEnvironment.step": {
"total": 0.02809561100002611,
"count": 1,
"is_parallel": true,
"self": 0.0002680439999949158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021973799994157162,
"count": 1,
"is_parallel": true,
"self": 0.00021973799994157162
},
"communicator.exchange": {
"total": 0.026867334000144183,
"count": 1,
"is_parallel": true,
"self": 0.026867334000144183
},
"steps_from_proto": {
"total": 0.0007404949999454402,
"count": 1,
"is_parallel": true,
"self": 0.00026647300001059193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004740219999348483,
"count": 2,
"is_parallel": true,
"self": 0.0004740219999348483
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1287.5376834250842,
"count": 232815,
"is_parallel": true,
"self": 35.85035995190924,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.12622844693965,
"count": 232815,
"is_parallel": true,
"self": 84.12622844693965
},
"communicator.exchange": {
"total": 1068.1955870061656,
"count": 232815,
"is_parallel": true,
"self": 1068.1955870061656
},
"steps_from_proto": {
"total": 99.36550802006968,
"count": 232815,
"is_parallel": true,
"self": 43.21193969004162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.153568330028065,
"count": 465630,
"is_parallel": true,
"self": 56.153568330028065
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 484.87304384697154,
"count": 232816,
"self": 6.28011608296174,
"children": {
"process_trajectory": {
"total": 159.7056481900131,
"count": 232816,
"self": 159.21711505401345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48853313599965986,
"count": 4,
"self": 0.48853313599965986
}
}
},
"_update_policy": {
"total": 318.8872795739967,
"count": 97,
"self": 264.9396178859997,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.94766168799697,
"count": 2910,
"self": 53.94766168799697
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1909996828762814e-06,
"count": 1,
"self": 1.1909996828762814e-06
},
"TrainerController._save_models": {
"total": 0.11381332900009511,
"count": 1,
"self": 0.0026611009998305235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11115222800026459,
"count": 1,
"self": 0.11115222800026459
}
}
}
}
}
}
}