{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4027799367904663,
"min": 1.4027799367904663,
"max": 1.4279792308807373,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70980.6640625,
"min": 68494.8046875,
"max": 77107.3828125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.29879101899827,
"min": 74.18318318318319,
"max": 407.390243902439,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49388.0,
"min": 48730.0,
"max": 50109.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49483.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49483.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.501983404159546,
"min": 0.018589092418551445,
"max": 2.5056278705596924,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1448.6484375,
"min": 2.267869234085083,
"max": 1610.464111328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.798884309635261,
"min": 1.7921017810213762,
"max": 3.9927816803560776,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2199.554015278816,
"min": 218.6364172846079,
"max": 2521.3891892433167,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.798884309635261,
"min": 1.7921017810213762,
"max": 3.9927816803560776,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2199.554015278816,
"min": 218.6364172846079,
"max": 2521.3891892433167,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017685548528162245,
"min": 0.012925414739099021,
"max": 0.01863324107252993,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.053056645584486736,
"min": 0.025850829478198042,
"max": 0.05413314502123588,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055533656064007016,
"min": 0.023367206876476608,
"max": 0.06471130469193061,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16660096819202105,
"min": 0.046734413752953216,
"max": 0.18836656883358957,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6008987997333362e-06,
"min": 3.6008987997333362e-06,
"max": 0.0002953297515567499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0802696399200009e-05,
"min": 1.0802696399200009e-05,
"max": 0.0008440233186588999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120026666666666,
"min": 0.10120026666666666,
"max": 0.19844325000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036008,
"min": 0.2075416,
"max": 0.5813410999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.989330666666675e-05,
"min": 6.989330666666675e-05,
"max": 0.004922318174999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020967992000000025,
"min": 0.00020967992000000025,
"max": 0.01406892089,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671536649",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671538911"
},
"total": 2262.0999978789996,
"count": 1,
"self": 0.39180696899938994,
"children": {
"run_training.setup": {
"total": 0.11222137999999404,
"count": 1,
"self": 0.11222137999999404
},
"TrainerController.start_learning": {
"total": 2261.5959695300003,
"count": 1,
"self": 4.104167705032069,
"children": {
"TrainerController._reset_env": {
"total": 9.01790024799999,
"count": 1,
"self": 9.01790024799999
},
"TrainerController.advance": {
"total": 2248.362498050968,
"count": 233234,
"self": 4.122891230981622,
"children": {
"env_step": {
"total": 1778.0022696619828,
"count": 233234,
"self": 1493.0955773120186,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.2670818450356,
"count": 233234,
"self": 15.010181414965075,
"children": {
"TorchPolicy.evaluate": {
"total": 267.2569004300705,
"count": 223070,
"self": 66.41487815910409,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.84202227096642,
"count": 223070,
"self": 200.84202227096642
}
}
}
}
},
"workers": {
"total": 2.639610504928669,
"count": 233234,
"self": 0.0,
"children": {
"worker_root": {
"total": 2253.395918651128,
"count": 233234,
"is_parallel": true,
"self": 1025.6420580040171,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023672120000242103,
"count": 1,
"is_parallel": true,
"self": 0.00036154099996110745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002005671000063103,
"count": 2,
"is_parallel": true,
"self": 0.002005671000063103
}
}
},
"UnityEnvironment.step": {
"total": 0.026711880999982895,
"count": 1,
"is_parallel": true,
"self": 0.0002179610000325738,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001769299999523355,
"count": 1,
"is_parallel": true,
"self": 0.0001769299999523355
},
"communicator.exchange": {
"total": 0.02569440300004544,
"count": 1,
"is_parallel": true,
"self": 0.02569440300004544
},
"steps_from_proto": {
"total": 0.0006225869999525457,
"count": 1,
"is_parallel": true,
"self": 0.0002817359999198743,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00034085100003267144,
"count": 2,
"is_parallel": true,
"self": 0.00034085100003267144
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1227.7538606471107,
"count": 233233,
"is_parallel": true,
"self": 35.13481204216396,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.13619438296712,
"count": 233233,
"is_parallel": true,
"self": 80.13619438296712
},
"communicator.exchange": {
"total": 1016.0454691099517,
"count": 233233,
"is_parallel": true,
"self": 1016.0454691099517
},
"steps_from_proto": {
"total": 96.43738511202798,
"count": 233233,
"is_parallel": true,
"self": 41.821430368979804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.61595474304818,
"count": 466466,
"is_parallel": true,
"self": 54.61595474304818
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 466.2373371580036,
"count": 233234,
"self": 6.322322013078633,
"children": {
"process_trajectory": {
"total": 153.9137000039247,
"count": 233234,
"self": 152.72107475992476,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1926252439999416,
"count": 10,
"self": 1.1926252439999416
}
}
},
"_update_policy": {
"total": 306.00131514100025,
"count": 97,
"self": 253.22598786599775,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.775327275002496,
"count": 2910,
"self": 52.775327275002496
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.150003279501107e-07,
"count": 1,
"self": 9.150003279501107e-07
},
"TrainerController._save_models": {
"total": 0.11140261099990312,
"count": 1,
"self": 0.001930030000039551,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10947258099986357,
"count": 1,
"self": 0.10947258099986357
}
}
}
}
}
}
}