{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3967071771621704,
"min": 1.3966951370239258,
"max": 1.425831913948059,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70378.6796875,
"min": 68282.609375,
"max": 77154.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.78693623639191,
"min": 76.53798449612403,
"max": 386.7751937984496,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49374.0,
"min": 48742.0,
"max": 50104.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49477.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49477.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4579060077667236,
"min": 0.15624961256980896,
"max": 2.473602056503296,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1580.43359375,
"min": 19.999950408935547,
"max": 1580.43359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8728064155875246,
"min": 1.953639132436365,
"max": 3.93675138871001,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2490.2145252227783,
"min": 250.0658089518547,
"max": 2490.2145252227783,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8728064155875246,
"min": 1.953639132436365,
"max": 3.93675138871001,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2490.2145252227783,
"min": 250.0658089518547,
"max": 2490.2145252227783,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016223519104985623,
"min": 0.013178020065727955,
"max": 0.02184499734527587,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04867055731495687,
"min": 0.02635604013145591,
"max": 0.056708887012791816,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05925985636810462,
"min": 0.02045456785708666,
"max": 0.060315503180027014,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17777956910431386,
"min": 0.04090913571417332,
"max": 0.17777956910431386,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.75119874963334e-06,
"min": 3.75119874963334e-06,
"max": 0.00029534977655007493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.125359624890002e-05,
"min": 1.125359624890002e-05,
"max": 0.0008441388186203999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125036666666666,
"min": 0.10125036666666666,
"max": 0.19844992500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037511,
"min": 0.20763940000000003,
"max": 0.5813796000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.23932966666668e-05,
"min": 7.23932966666668e-05,
"max": 0.004922651257499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021717989000000042,
"min": 0.00021717989000000042,
"max": 0.014070842039999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677816276",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677818668"
},
"total": 2391.2768237620003,
"count": 1,
"self": 0.4070396450001681,
"children": {
"run_training.setup": {
"total": 0.10788574600019274,
"count": 1,
"self": 0.10788574600019274
},
"TrainerController.start_learning": {
"total": 2390.761898371,
"count": 1,
"self": 4.193768496906614,
"children": {
"TrainerController._reset_env": {
"total": 10.498090590999936,
"count": 1,
"self": 10.498090590999936
},
"TrainerController.advance": {
"total": 2375.959882493093,
"count": 232567,
"self": 4.682270565087947,
"children": {
"env_step": {
"total": 1850.3326427630134,
"count": 232567,
"self": 1548.1352594009845,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.36744780399636,
"count": 232567,
"self": 16.24814711888621,
"children": {
"TorchPolicy.evaluate": {
"total": 283.11930068511015,
"count": 222914,
"self": 71.26546890912846,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.8538317759817,
"count": 222914,
"self": 211.8538317759817
}
}
}
}
},
"workers": {
"total": 2.8299355580325027,
"count": 232567,
"self": 0.0,
"children": {
"worker_root": {
"total": 2382.3713998730104,
"count": 232567,
"is_parallel": true,
"self": 1123.1986846480681,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009688709999409184,
"count": 1,
"is_parallel": true,
"self": 0.0003687969999646157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006000739999763027,
"count": 2,
"is_parallel": true,
"self": 0.0006000739999763027
}
}
},
"UnityEnvironment.step": {
"total": 0.05061247799994817,
"count": 1,
"is_parallel": true,
"self": 0.000325199999679171,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019889500003955618,
"count": 1,
"is_parallel": true,
"self": 0.00019889500003955618
},
"communicator.exchange": {
"total": 0.04854468300004555,
"count": 1,
"is_parallel": true,
"self": 0.04854468300004555
},
"steps_from_proto": {
"total": 0.0015437000001838896,
"count": 1,
"is_parallel": true,
"self": 0.00029777500003547175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001245925000148418,
"count": 2,
"is_parallel": true,
"self": 0.001245925000148418
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1259.1727152249423,
"count": 232566,
"is_parallel": true,
"self": 37.94280733180722,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.86930318908162,
"count": 232566,
"is_parallel": true,
"self": 79.86930318908162
},
"communicator.exchange": {
"total": 1049.3403662050932,
"count": 232566,
"is_parallel": true,
"self": 1049.3403662050932
},
"steps_from_proto": {
"total": 92.02023849896023,
"count": 232566,
"is_parallel": true,
"self": 39.21483183489977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.805406664060456,
"count": 465132,
"is_parallel": true,
"self": 52.805406664060456
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 520.9449691649918,
"count": 232567,
"self": 6.403382396956204,
"children": {
"process_trajectory": {
"total": 169.97830335903518,
"count": 232567,
"self": 168.82053631503572,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1577670439994563,
"count": 10,
"self": 1.1577670439994563
}
}
},
"_update_policy": {
"total": 344.5632834090004,
"count": 97,
"self": 287.53753941000764,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.025743998992766,
"count": 2910,
"self": 57.025743998992766
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.440000212634914e-07,
"count": 1,
"self": 9.440000212634914e-07
},
"TrainerController._save_models": {
"total": 0.11015584599999784,
"count": 1,
"self": 0.0019183650001650676,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10823748099983277,
"count": 1,
"self": 0.10823748099983277
}
}
}
}
}
}
}