{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4049432277679443,
"min": 1.4049432277679443,
"max": 1.4290472269058228,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71872.6796875,
"min": 67285.46875,
"max": 75499.1953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 110.62331838565022,
"min": 93.61036468330134,
"max": 403.81451612903226,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49338.0,
"min": 48771.0,
"max": 50270.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999391.0,
"min": 49902.0,
"max": 1999391.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999391.0,
"min": 49902.0,
"max": 1999391.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.302351236343384,
"min": 0.12877270579338074,
"max": 2.3553266525268555,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1026.8486328125,
"min": 15.839042663574219,
"max": 1217.752197265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6316061142848746,
"min": 1.8370518839456202,
"max": 3.8342227865737746,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1619.696326971054,
"min": 225.95738172531128,
"max": 1925.1926447153091,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6316061142848746,
"min": 1.8370518839456202,
"max": 3.8342227865737746,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1619.696326971054,
"min": 225.95738172531128,
"max": 1925.1926447153091,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016288907486078745,
"min": 0.012015179206840306,
"max": 0.019930540691226878,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048866722458236235,
"min": 0.024030358413680612,
"max": 0.056781532024130374,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04980718456208707,
"min": 0.020167529955506323,
"max": 0.06615642060836156,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1494215536862612,
"min": 0.04033505991101265,
"max": 0.19846926182508468,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2455489181833397e-06,
"min": 3.2455489181833397e-06,
"max": 0.0002953209015597,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.73664675455002e-06,
"min": 9.73664675455002e-06,
"max": 0.00084413416862195,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108181666666667,
"min": 0.10108181666666667,
"max": 0.19844030000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30324545,
"min": 0.20729505000000006,
"max": 0.5813780500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.398265166666674e-05,
"min": 6.398265166666674e-05,
"max": 0.0049221709700000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019194795500000023,
"min": 0.00019194795500000023,
"max": 0.014070764695000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673428055",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673430379"
},
"total": 2324.001176915,
"count": 1,
"self": 0.43729416400037735,
"children": {
"run_training.setup": {
"total": 0.12167619600006674,
"count": 1,
"self": 0.12167619600006674
},
"TrainerController.start_learning": {
"total": 2323.4422065549998,
"count": 1,
"self": 4.278911149038322,
"children": {
"TrainerController._reset_env": {
"total": 8.82697779199998,
"count": 1,
"self": 8.82697779199998
},
"TrainerController.advance": {
"total": 2310.211731435961,
"count": 231307,
"self": 4.443828154057883,
"children": {
"env_step": {
"total": 1836.617488366946,
"count": 231307,
"self": 1547.150544830993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 286.69127758999537,
"count": 231307,
"self": 15.391000771950075,
"children": {
"TorchPolicy.evaluate": {
"total": 271.3002768180453,
"count": 223002,
"self": 67.49888218210538,
"children": {
"TorchPolicy.sample_actions": {
"total": 203.80139463593991,
"count": 223002,
"self": 203.80139463593991
}
}
}
}
},
"workers": {
"total": 2.775665945957371,
"count": 231307,
"self": 0.0,
"children": {
"worker_root": {
"total": 2315.178703911994,
"count": 231307,
"is_parallel": true,
"self": 1043.154885027969,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004258840999909808,
"count": 1,
"is_parallel": true,
"self": 0.0003625800000008894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003896260999908918,
"count": 2,
"is_parallel": true,
"self": 0.003896260999908918
}
}
},
"UnityEnvironment.step": {
"total": 0.05610727900000256,
"count": 1,
"is_parallel": true,
"self": 0.00035807099993689917,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001837500000192449,
"count": 1,
"is_parallel": true,
"self": 0.0001837500000192449
},
"communicator.exchange": {
"total": 0.05473784000002979,
"count": 1,
"is_parallel": true,
"self": 0.05473784000002979
},
"steps_from_proto": {
"total": 0.0008276180000166278,
"count": 1,
"is_parallel": true,
"self": 0.0002773049999404975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005503130000761303,
"count": 2,
"is_parallel": true,
"self": 0.0005503130000761303
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1272.023818884025,
"count": 231306,
"is_parallel": true,
"self": 35.36914308802511,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.50481042693355,
"count": 231306,
"is_parallel": true,
"self": 83.50481042693355
},
"communicator.exchange": {
"total": 1051.3523334950205,
"count": 231306,
"is_parallel": true,
"self": 1051.3523334950205
},
"steps_from_proto": {
"total": 101.79753187404594,
"count": 231306,
"is_parallel": true,
"self": 42.496108167840475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.30142370620547,
"count": 462612,
"is_parallel": true,
"self": 59.30142370620547
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 469.1504149149572,
"count": 231307,
"self": 6.155249815988213,
"children": {
"process_trajectory": {
"total": 150.80935006496918,
"count": 231307,
"self": 149.6177383149685,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1916117500006749,
"count": 10,
"self": 1.1916117500006749
}
}
},
"_update_policy": {
"total": 312.1858150339998,
"count": 97,
"self": 258.33064346499157,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.855171569008235,
"count": 2910,
"self": 53.855171569008235
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.419998837576713e-07,
"count": 1,
"self": 7.419998837576713e-07
},
"TrainerController._save_models": {
"total": 0.12458543600041594,
"count": 1,
"self": 0.0019803830005002965,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12260505299991564,
"count": 1,
"self": 0.12260505299991564
}
}
}
}
}
}
}