{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4052847623825073,
"min": 1.4052742719650269,
"max": 1.4258182048797607,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70418.8203125,
"min": 68111.0390625,
"max": 77366.6171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.8202479338843,
"min": 77.44984326018809,
"max": 396.86614173228344,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49281.0,
"min": 48908.0,
"max": 50402.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999942.0,
"min": 49949.0,
"max": 1999942.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999942.0,
"min": 49949.0,
"max": 1999942.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4176509380340576,
"min": 0.15952855348587036,
"max": 2.48333477973938,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1170.14306640625,
"min": 20.100597381591797,
"max": 1555.603271484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.64106819366128,
"min": 1.8307220268817175,
"max": 4.014205108715009,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1762.2770057320595,
"min": 230.6709753870964,
"max": 2439.6273444890976,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.64106819366128,
"min": 1.8307220268817175,
"max": 4.014205108715009,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1762.2770057320595,
"min": 230.6709753870964,
"max": 2439.6273444890976,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01820059107193528,
"min": 0.013962933419194693,
"max": 0.02117094323427106,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05460177321580584,
"min": 0.027925866838389386,
"max": 0.05865944854643507,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0512968444161945,
"min": 0.026138291570047538,
"max": 0.06627051023145517,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15389053324858348,
"min": 0.052276583140095076,
"max": 0.18663031508525213,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4771488409833255e-06,
"min": 3.4771488409833255e-06,
"max": 0.000295261726579425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0431446522949977e-05,
"min": 1.0431446522949977e-05,
"max": 0.0008439564186811999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10127491833333334,
"min": 0.10127491833333334,
"max": 0.20826263250000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303824755,
"min": 0.20824934000000003,
"max": 0.60945068,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.783493166666655e-05,
"min": 6.783493166666655e-05,
"max": 0.004921186692500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020350479499999964,
"min": 0.00020350479499999964,
"max": 0.014067808120000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675345859",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675348114"
},
"total": 2255.3583877809997,
"count": 1,
"self": 0.3877145970000129,
"children": {
"run_training.setup": {
"total": 0.1158044810000547,
"count": 1,
"self": 0.1158044810000547
},
"TrainerController.start_learning": {
"total": 2254.8548687029997,
"count": 1,
"self": 4.219255436998992,
"children": {
"TrainerController._reset_env": {
"total": 9.57792181100001,
"count": 1,
"self": 9.57792181100001
},
"TrainerController.advance": {
"total": 2240.947063936001,
"count": 232618,
"self": 4.260886193903389,
"children": {
"env_step": {
"total": 1784.4518190260444,
"count": 232618,
"self": 1494.3360272199081,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.42611903706177,
"count": 232618,
"self": 15.014345198003411,
"children": {
"TorchPolicy.evaluate": {
"total": 272.41177383905836,
"count": 223028,
"self": 67.79043333313052,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.62134050592783,
"count": 223028,
"self": 204.62134050592783
}
}
}
}
},
"workers": {
"total": 2.689672769074491,
"count": 232618,
"self": 0.0,
"children": {
"worker_root": {
"total": 2246.9088971221186,
"count": 232618,
"is_parallel": true,
"self": 1015.690721870134,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001631370000040988,
"count": 1,
"is_parallel": true,
"self": 0.0003308490000790698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013005209999619183,
"count": 2,
"is_parallel": true,
"self": 0.0013005209999619183
}
}
},
"UnityEnvironment.step": {
"total": 0.0306850489999988,
"count": 1,
"is_parallel": true,
"self": 0.0006168679999518645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001892029999908118,
"count": 1,
"is_parallel": true,
"self": 0.0001892029999908118
},
"communicator.exchange": {
"total": 0.02913209900009406,
"count": 1,
"is_parallel": true,
"self": 0.02913209900009406
},
"steps_from_proto": {
"total": 0.000746878999962064,
"count": 1,
"is_parallel": true,
"self": 0.0002511389999426683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004957400000193957,
"count": 2,
"is_parallel": true,
"self": 0.0004957400000193957
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1231.2181752519846,
"count": 232617,
"is_parallel": true,
"self": 34.26908798789327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.24956455500762,
"count": 232617,
"is_parallel": true,
"self": 79.24956455500762
},
"communicator.exchange": {
"total": 1024.4324434989442,
"count": 232617,
"is_parallel": true,
"self": 1024.4324434989442
},
"steps_from_proto": {
"total": 93.26707921013951,
"count": 232617,
"is_parallel": true,
"self": 40.50665913814555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.76042007199396,
"count": 465234,
"is_parallel": true,
"self": 52.76042007199396
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 452.2343587160533,
"count": 232618,
"self": 6.34753571508611,
"children": {
"process_trajectory": {
"total": 153.01313321497025,
"count": 232618,
"self": 152.0402532479701,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9728799670001536,
"count": 8,
"self": 0.9728799670001536
}
}
},
"_update_policy": {
"total": 292.87368978599693,
"count": 97,
"self": 239.93270510698687,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.94098467901006,
"count": 2910,
"self": 52.94098467901006
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0139997357327957e-06,
"count": 1,
"self": 1.0139997357327957e-06
},
"TrainerController._save_models": {
"total": 0.11062650499980009,
"count": 1,
"self": 0.0020255199997336604,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10860098500006643,
"count": 1,
"self": 0.10860098500006643
}
}
}
}
}
}
}