{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404130458831787,
"min": 1.404130458831787,
"max": 1.4275075197219849,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70616.53125,
"min": 68816.7265625,
"max": 78302.140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.26475849731663,
"min": 80.91914191419141,
"max": 409.4390243902439,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49340.0,
"min": 48903.0,
"max": 50361.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999994.0,
"min": 49734.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999994.0,
"min": 49734.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.461986541748047,
"min": 0.1937098205089569,
"max": 2.555201768875122,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1376.25048828125,
"min": 23.632598876953125,
"max": 1504.2508544921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8297871994204513,
"min": 1.7499277007872942,
"max": 4.075398928835883,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2140.8510444760323,
"min": 213.49117949604988,
"max": 2323.3448847532272,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8297871994204513,
"min": 1.7499277007872942,
"max": 4.075398928835883,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2140.8510444760323,
"min": 213.49117949604988,
"max": 2323.3448847532272,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016259028484111898,
"min": 0.012961801526883695,
"max": 0.020370850048493594,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.048777085452335694,
"min": 0.02592360305376739,
"max": 0.0537760739554263,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05298023741278384,
"min": 0.020173827620844048,
"max": 0.06032352981468042,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15894071223835152,
"min": 0.040347655241688096,
"max": 0.18018413471678893,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.433798855433328e-06,
"min": 3.433798855433328e-06,
"max": 0.00029528595157134993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0301396566299984e-05,
"min": 1.0301396566299984e-05,
"max": 0.0008440929186357,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114456666666666,
"min": 0.10114456666666666,
"max": 0.19842864999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034337,
"min": 0.20741564999999995,
"max": 0.5813643000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.71138766666666e-05,
"min": 6.71138766666666e-05,
"max": 0.004921589635000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020134162999999977,
"min": 0.00020134162999999977,
"max": 0.014070078570000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682533993",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682535893"
},
"total": 1899.2947608259997,
"count": 1,
"self": 0.2744469029996708,
"children": {
"run_training.setup": {
"total": 0.11660787699997854,
"count": 1,
"self": 0.11660787699997854
},
"TrainerController.start_learning": {
"total": 1898.903706046,
"count": 1,
"self": 3.66681658402058,
"children": {
"TrainerController._reset_env": {
"total": 3.9446108040000354,
"count": 1,
"self": 3.9446108040000354
},
"TrainerController.advance": {
"total": 1891.1866645809798,
"count": 232803,
"self": 3.985469159953709,
"children": {
"env_step": {
"total": 1464.786518842981,
"count": 232803,
"self": 1219.7849045101204,
"children": {
"SubprocessEnvManager._take_step": {
"total": 242.46649483395834,
"count": 232803,
"self": 15.000596323991545,
"children": {
"TorchPolicy.evaluate": {
"total": 227.4658985099668,
"count": 223055,
"self": 227.4658985099668
}
}
},
"workers": {
"total": 2.5351194989021906,
"count": 232803,
"self": 0.0,
"children": {
"worker_root": {
"total": 1891.136218703004,
"count": 232803,
"is_parallel": true,
"self": 899.975120195843,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009509900000921334,
"count": 1,
"is_parallel": true,
"self": 0.00029717400002482464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006538160000673088,
"count": 2,
"is_parallel": true,
"self": 0.0006538160000673088
}
}
},
"UnityEnvironment.step": {
"total": 0.0256441530000302,
"count": 1,
"is_parallel": true,
"self": 0.00024163400007637392,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021528199999920616,
"count": 1,
"is_parallel": true,
"self": 0.00021528199999920616
},
"communicator.exchange": {
"total": 0.02473883899995144,
"count": 1,
"is_parallel": true,
"self": 0.02473883899995144
},
"steps_from_proto": {
"total": 0.00044839800000318064,
"count": 1,
"is_parallel": true,
"self": 0.00012921999996251543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0003191780000406652,
"count": 2,
"is_parallel": true,
"self": 0.0003191780000406652
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 991.161098507161,
"count": 232802,
"is_parallel": true,
"self": 30.537990838958763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 58.870111140045196,
"count": 232802,
"is_parallel": true,
"self": 58.870111140045196
},
"communicator.exchange": {
"total": 834.2119451830365,
"count": 232802,
"is_parallel": true,
"self": 834.2119451830365
},
"steps_from_proto": {
"total": 67.54105134512054,
"count": 232802,
"is_parallel": true,
"self": 25.083515061104094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.45753628401644,
"count": 465604,
"is_parallel": true,
"self": 42.45753628401644
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 422.41467657804515,
"count": 232803,
"self": 5.669125812038487,
"children": {
"process_trajectory": {
"total": 112.06686650800873,
"count": 232803,
"self": 110.9398901090093,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1269763989994317,
"count": 10,
"self": 1.1269763989994317
}
}
},
"_update_policy": {
"total": 304.67868425799793,
"count": 97,
"self": 250.5751122820135,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.10357197598444,
"count": 2910,
"self": 54.10357197598444
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.639999578008428e-07,
"count": 1,
"self": 8.639999578008428e-07
},
"TrainerController._save_models": {
"total": 0.10561321299974225,
"count": 1,
"self": 0.002039736999449815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10357347600029243,
"count": 1,
"self": 0.10357347600029243
}
}
}
}
}
}
}