{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4037503004074097,
"min": 1.4037503004074097,
"max": 1.4269293546676636,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70159.4375,
"min": 68077.0078125,
"max": 77132.9765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.14814814814815,
"min": 72.04227405247813,
"max": 390.8359375,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49390.0,
"min": 49237.0,
"max": 50027.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999950.0,
"min": 49947.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999950.0,
"min": 49947.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4526896476745605,
"min": 0.085988849401474,
"max": 2.541956663131714,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1456.897705078125,
"min": 10.920583724975586,
"max": 1707.5582275390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7661159887458338,
"min": 1.837625716380247,
"max": 4.018910626600442,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2237.0728973150253,
"min": 233.37846598029137,
"max": 2656.499924182892,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7661159887458338,
"min": 1.837625716380247,
"max": 4.018910626600442,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2237.0728973150253,
"min": 233.37846598029137,
"max": 2656.499924182892,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015332284343134639,
"min": 0.012713989450033599,
"max": 0.01962770004241368,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045996853029403914,
"min": 0.025427978900067198,
"max": 0.05627362289233133,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055398261671264964,
"min": 0.021676912841697534,
"max": 0.06550354808568955,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1661947850137949,
"min": 0.04335382568339507,
"max": 0.1906773068010807,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.664298778600006e-06,
"min": 3.664298778600006e-06,
"max": 0.00029527260157580004,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0992896335800019e-05,
"min": 1.0992896335800019e-05,
"max": 0.00084391156869615,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10122140000000002,
"min": 0.10122140000000002,
"max": 0.19842420000000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30366420000000005,
"min": 0.20758680000000002,
"max": 0.5813038500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.094786000000014e-05,
"min": 7.094786000000014e-05,
"max": 0.00492136758,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021284358000000041,
"min": 0.00021284358000000041,
"max": 0.014067062115,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672733258",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672735422"
},
"total": 2163.6632797019997,
"count": 1,
"self": 0.3827759659993717,
"children": {
"run_training.setup": {
"total": 0.12793950299999324,
"count": 1,
"self": 0.12793950299999324
},
"TrainerController.start_learning": {
"total": 2163.1525642330002,
"count": 1,
"self": 3.608502471002339,
"children": {
"TrainerController._reset_env": {
"total": 7.942472914000007,
"count": 1,
"self": 7.942472914000007
},
"TrainerController.advance": {
"total": 2151.4728317979975,
"count": 233700,
"self": 3.967706454944164,
"children": {
"env_step": {
"total": 1688.8684051110054,
"count": 233700,
"self": 1418.5597455180878,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.8738567238967,
"count": 233700,
"self": 13.7926482638768,
"children": {
"TorchPolicy.evaluate": {
"total": 254.08120846001992,
"count": 223048,
"self": 63.34780024397861,
"children": {
"TorchPolicy.sample_actions": {
"total": 190.7334082160413,
"count": 223048,
"self": 190.7334082160413
}
}
}
}
},
"workers": {
"total": 2.4348028690208707,
"count": 233700,
"self": 0.0,
"children": {
"worker_root": {
"total": 2155.2266913001254,
"count": 233700,
"is_parallel": true,
"self": 988.2566338831132,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021092590000080236,
"count": 1,
"is_parallel": true,
"self": 0.0002957500000206892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018135089999873344,
"count": 2,
"is_parallel": true,
"self": 0.0018135089999873344
}
}
},
"UnityEnvironment.step": {
"total": 0.025953243999992992,
"count": 1,
"is_parallel": true,
"self": 0.00025245699998777127,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017593799998394388,
"count": 1,
"is_parallel": true,
"self": 0.00017593799998394388
},
"communicator.exchange": {
"total": 0.024839960999997857,
"count": 1,
"is_parallel": true,
"self": 0.024839960999997857
},
"steps_from_proto": {
"total": 0.0006848880000234203,
"count": 1,
"is_parallel": true,
"self": 0.00023649299998851347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044839500003490684,
"count": 2,
"is_parallel": true,
"self": 0.00044839500003490684
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1166.9700574170122,
"count": 233699,
"is_parallel": true,
"self": 34.06580510889057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.38075782505734,
"count": 233699,
"is_parallel": true,
"self": 74.38075782505734
},
"communicator.exchange": {
"total": 967.4125263780845,
"count": 233699,
"is_parallel": true,
"self": 967.4125263780845
},
"steps_from_proto": {
"total": 91.11096810497986,
"count": 233699,
"is_parallel": true,
"self": 37.491433099001654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.61953500597821,
"count": 467398,
"is_parallel": true,
"self": 53.61953500597821
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 458.6367202320481,
"count": 233700,
"self": 5.922413963184738,
"children": {
"process_trajectory": {
"total": 147.6452921478646,
"count": 233700,
"self": 146.50511843686428,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1401737110003296,
"count": 10,
"self": 1.1401737110003296
}
}
},
"_update_policy": {
"total": 305.06901412099876,
"count": 97,
"self": 252.81874803800042,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.25026608299834,
"count": 2910,
"self": 52.25026608299834
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.58000327955233e-07,
"count": 1,
"self": 9.58000327955233e-07
},
"TrainerController._save_models": {
"total": 0.12875609200000326,
"count": 1,
"self": 0.00211193999984971,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12664415200015355,
"count": 1,
"self": 0.12664415200015355
}
}
}
}
}
}
}