{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4063880443572998,
"min": 1.4063880443572998,
"max": 1.4274165630340576,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71385.4453125,
"min": 68178.1953125,
"max": 77415.75,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.40642722117202,
"min": 80.25,
"max": 387.15503875968994,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48883.0,
"min": 48883.0,
"max": 50031.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999939.0,
"min": 49414.0,
"max": 1999939.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999939.0,
"min": 49414.0,
"max": 1999939.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3981385231018066,
"min": 0.010816328227519989,
"max": 2.455244779586792,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1268.615234375,
"min": 1.427755355834961,
"max": 1470.07080078125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.70932038497384,
"min": 1.6683800534316988,
"max": 3.934551328573996,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1962.2304836511612,
"min": 220.22616705298424,
"max": 2304.004391312599,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.70932038497384,
"min": 1.6683800534316988,
"max": 3.934551328573996,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1962.2304836511612,
"min": 220.22616705298424,
"max": 2304.004391312599,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016224897988771168,
"min": 0.012550815612080947,
"max": 0.019629943114705384,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0486746939663135,
"min": 0.02733689018447573,
"max": 0.056413890188317356,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.050697999654544725,
"min": 0.023626550007611512,
"max": 0.06283940045783917,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15209399896363418,
"min": 0.047253100015223024,
"max": 0.1815347863982121,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.535548821516661e-06,
"min": 3.535548821516661e-06,
"max": 0.00029533455155514993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0606646464549984e-05,
"min": 1.0606646464549984e-05,
"max": 0.0008438631187122999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117848333333335,
"min": 0.10117848333333335,
"max": 0.19844485,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30353545000000004,
"min": 0.20749010000000007,
"max": 0.5812877000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.880631833333326e-05,
"min": 6.880631833333326e-05,
"max": 0.004922398014999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020641895499999978,
"min": 0.00020641895499999978,
"max": 0.014066256229999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678120666",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678123076"
},
"total": 2410.174478085,
"count": 1,
"self": 0.44229590399982044,
"children": {
"run_training.setup": {
"total": 0.10720988100001705,
"count": 1,
"self": 0.10720988100001705
},
"TrainerController.start_learning": {
"total": 2409.6249723,
"count": 1,
"self": 4.291351102040153,
"children": {
"TrainerController._reset_env": {
"total": 10.334928362000028,
"count": 1,
"self": 10.334928362000028
},
"TrainerController.advance": {
"total": 2394.8866850619597,
"count": 232354,
"self": 4.4984767538526285,
"children": {
"env_step": {
"total": 1869.593785184036,
"count": 232354,
"self": 1562.4123013960102,
"children": {
"SubprocessEnvManager._take_step": {
"total": 304.3270606500595,
"count": 232354,
"self": 16.067807352095315,
"children": {
"TorchPolicy.evaluate": {
"total": 288.25925329796416,
"count": 223084,
"self": 73.47018492504321,
"children": {
"TorchPolicy.sample_actions": {
"total": 214.78906837292095,
"count": 223084,
"self": 214.78906837292095
}
}
}
}
},
"workers": {
"total": 2.8544231379663643,
"count": 232354,
"self": 0.0,
"children": {
"worker_root": {
"total": 2401.0619282429325,
"count": 232354,
"is_parallel": true,
"self": 1134.3591888069827,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000939028999994207,
"count": 1,
"is_parallel": true,
"self": 0.0003122550000398405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006267739999543664,
"count": 2,
"is_parallel": true,
"self": 0.0006267739999543664
}
}
},
"UnityEnvironment.step": {
"total": 0.04090987600000062,
"count": 1,
"is_parallel": true,
"self": 0.00031139899988374964,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020467800004553283,
"count": 1,
"is_parallel": true,
"self": 0.00020467800004553283
},
"communicator.exchange": {
"total": 0.03969434100008584,
"count": 1,
"is_parallel": true,
"self": 0.03969434100008584
},
"steps_from_proto": {
"total": 0.0006994579999854977,
"count": 1,
"is_parallel": true,
"self": 0.00022888499995588063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004705730000296171,
"count": 2,
"is_parallel": true,
"self": 0.0004705730000296171
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.7027394359498,
"count": 232353,
"is_parallel": true,
"self": 39.121518741038926,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.4032684829358,
"count": 232353,
"is_parallel": true,
"self": 79.4032684829358
},
"communicator.exchange": {
"total": 1055.7206888349776,
"count": 232353,
"is_parallel": true,
"self": 1055.7206888349776
},
"steps_from_proto": {
"total": 92.45726337699739,
"count": 232353,
"is_parallel": true,
"self": 36.978624876104845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.478638500892544,
"count": 464706,
"is_parallel": true,
"self": 55.478638500892544
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 520.7944231240713,
"count": 232354,
"self": 6.468276947085087,
"children": {
"process_trajectory": {
"total": 166.66414087698638,
"count": 232354,
"self": 165.42682632598655,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2373145509998267,
"count": 10,
"self": 1.2373145509998267
}
}
},
"_update_policy": {
"total": 347.6620052999998,
"count": 97,
"self": 290.29297510700144,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.369030192998366,
"count": 2910,
"self": 57.369030192998366
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0700000530050602e-06,
"count": 1,
"self": 1.0700000530050602e-06
},
"TrainerController._save_models": {
"total": 0.11200670400012314,
"count": 1,
"self": 0.002156972000193491,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10984973199992965,
"count": 1,
"self": 0.10984973199992965
}
}
}
}
}
}
}