{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4011021852493286,
"min": 1.4011021852493286,
"max": 1.427593469619751,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70726.234375,
"min": 69316.078125,
"max": 77762.1171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 72.83431952662721,
"min": 69.91926345609065,
"max": 392.9765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49236.0,
"min": 49213.0,
"max": 50301.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999955.0,
"min": 49679.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999955.0,
"min": 49679.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.489666700363159,
"min": 0.1069381833076477,
"max": 2.5103800296783447,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1683.0146484375,
"min": 13.581149101257324,
"max": 1710.056884765625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.903237691116051,
"min": 1.8229602227999469,
"max": 4.003725035616078,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2638.5886791944504,
"min": 231.51594829559326,
"max": 2714.5255741477013,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.903237691116051,
"min": 1.8229602227999469,
"max": 4.003725035616078,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2638.5886791944504,
"min": 231.51594829559326,
"max": 2714.5255741477013,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01804963985729652,
"min": 0.01384294024319388,
"max": 0.019541528277720016,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05414891957188956,
"min": 0.02768588048638776,
"max": 0.055142752786559876,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05547264019648234,
"min": 0.02219648069391648,
"max": 0.058931495311359564,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16641792058944702,
"min": 0.04439296138783296,
"max": 0.16804981902241706,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.881748706116669e-06,
"min": 3.881748706116669e-06,
"max": 0.00029527830157389997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1645246118350009e-05,
"min": 1.1645246118350009e-05,
"max": 0.0008437156687614501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10129388333333333,
"min": 0.10129388333333333,
"max": 0.19842610000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30388165,
"min": 0.20772684999999996,
"max": 0.58123855,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.456477833333341e-05,
"min": 7.456477833333341e-05,
"max": 0.00492146239,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022369433500000024,
"min": 0.00022369433500000024,
"max": 0.014063803645000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670968004",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670970142"
},
"total": 2137.5470137989996,
"count": 1,
"self": 0.39196494499992696,
"children": {
"run_training.setup": {
"total": 0.1185692909999716,
"count": 1,
"self": 0.1185692909999716
},
"TrainerController.start_learning": {
"total": 2137.036479563,
"count": 1,
"self": 3.6727689309923335,
"children": {
"TrainerController._reset_env": {
"total": 10.056882420999955,
"count": 1,
"self": 10.056882420999955
},
"TrainerController.advance": {
"total": 2123.1966547340076,
"count": 233285,
"self": 3.8479915850225552,
"children": {
"env_step": {
"total": 1661.7595732219402,
"count": 233285,
"self": 1393.487231886998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 265.7785721650498,
"count": 233285,
"self": 14.057743435057773,
"children": {
"TorchPolicy.evaluate": {
"total": 251.72082872999204,
"count": 222953,
"self": 64.25939168703474,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.4614370429573,
"count": 222953,
"self": 187.4614370429573
}
}
}
}
},
"workers": {
"total": 2.4937691698925164,
"count": 233285,
"self": 0.0,
"children": {
"worker_root": {
"total": 2129.3497934469888,
"count": 233285,
"is_parallel": true,
"self": 986.2176610249917,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0034224419999873135,
"count": 1,
"is_parallel": true,
"self": 0.00032954299990706204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0030928990000802514,
"count": 2,
"is_parallel": true,
"self": 0.0030928990000802514
}
}
},
"UnityEnvironment.step": {
"total": 0.026919498999973257,
"count": 1,
"is_parallel": true,
"self": 0.0002701610000030996,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018289199999799166,
"count": 1,
"is_parallel": true,
"self": 0.00018289199999799166
},
"communicator.exchange": {
"total": 0.025750199999947654,
"count": 1,
"is_parallel": true,
"self": 0.025750199999947654
},
"steps_from_proto": {
"total": 0.0007162460000245119,
"count": 1,
"is_parallel": true,
"self": 0.00023813400002836715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047811199999614473,
"count": 2,
"is_parallel": true,
"self": 0.00047811199999614473
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1143.132132421997,
"count": 233284,
"is_parallel": true,
"self": 34.06326960901265,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.10255893201156,
"count": 233284,
"is_parallel": true,
"self": 75.10255893201156
},
"communicator.exchange": {
"total": 942.7954661710199,
"count": 233284,
"is_parallel": true,
"self": 942.7954661710199
},
"steps_from_proto": {
"total": 91.17083770995293,
"count": 233284,
"is_parallel": true,
"self": 37.321931021089654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.84890668886328,
"count": 466568,
"is_parallel": true,
"self": 53.84890668886328
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.5890899270447,
"count": 233285,
"self": 5.6370865449766825,
"children": {
"process_trajectory": {
"total": 145.12266899806696,
"count": 233285,
"self": 144.66335307306736,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4593159249996006,
"count": 4,
"self": 0.4593159249996006
}
}
},
"_update_policy": {
"total": 306.8293343840011,
"count": 97,
"self": 254.0074407749978,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.82189360900327,
"count": 2910,
"self": 52.82189360900327
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.720000212430023e-07,
"count": 1,
"self": 7.720000212430023e-07
},
"TrainerController._save_models": {
"total": 0.11017270499996812,
"count": 1,
"self": 0.001926249000007374,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10824645599996074,
"count": 1,
"self": 0.10824645599996074
}
}
}
}
}
}
}