{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4023973941802979,
"min": 1.4023828506469727,
"max": 1.4277911186218262,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70211.0234375,
"min": 68987.9453125,
"max": 77313.171875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.64958677685951,
"min": 79.27652733118971,
"max": 406.4390243902439,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49398.0,
"min": 48840.0,
"max": 50090.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49764.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49764.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4604721069335938,
"min": 0.20990444719791412,
"max": 2.484107255935669,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1488.585693359375,
"min": 25.60834312438965,
"max": 1488.585693359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.880130095816841,
"min": 1.930128927846424,
"max": 3.880130095816841,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2347.4787079691887,
"min": 235.47572919726372,
"max": 2347.4787079691887,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.880130095816841,
"min": 1.930128927846424,
"max": 3.880130095816841,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2347.4787079691887,
"min": 235.47572919726372,
"max": 2347.4787079691887,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01742317447400031,
"min": 0.013733680191702055,
"max": 0.019766374190415565,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05226952342200093,
"min": 0.02746736038340411,
"max": 0.05482692109532461,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054870248627331523,
"min": 0.019770208342621724,
"max": 0.059198234726985294,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16461074588199456,
"min": 0.03954041668524345,
"max": 0.17759470418095588,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.346448884550009e-06,
"min": 3.346448884550009e-06,
"max": 0.000295357276547575,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0039346653650028e-05,
"min": 1.0039346653650028e-05,
"max": 0.0008442163685945501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111545,
"min": 0.10111545,
"max": 0.19845242500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30334635,
"min": 0.20736324999999997,
"max": 0.5814054500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.566095500000016e-05,
"min": 6.566095500000016e-05,
"max": 0.004922776007500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019698286500000048,
"min": 0.00019698286500000048,
"max": 0.014072131955,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670978387",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670980529"
},
"total": 2142.197655036,
"count": 1,
"self": 0.39438902199981385,
"children": {
"run_training.setup": {
"total": 0.10122951099998545,
"count": 1,
"self": 0.10122951099998545
},
"TrainerController.start_learning": {
"total": 2141.7020365030003,
"count": 1,
"self": 3.5824251430144614,
"children": {
"TrainerController._reset_env": {
"total": 10.065243643999963,
"count": 1,
"self": 10.065243643999963
},
"TrainerController.advance": {
"total": 2127.9396072929862,
"count": 232664,
"self": 3.9180553459318617,
"children": {
"env_step": {
"total": 1668.6683733050631,
"count": 232664,
"self": 1401.9288663971263,
"children": {
"SubprocessEnvManager._take_step": {
"total": 264.24190320291336,
"count": 232664,
"self": 14.055262152935882,
"children": {
"TorchPolicy.evaluate": {
"total": 250.18664104997748,
"count": 222934,
"self": 62.509993800893994,
"children": {
"TorchPolicy.sample_actions": {
"total": 187.6766472490835,
"count": 222934,
"self": 187.6766472490835
}
}
}
}
},
"workers": {
"total": 2.4976037050234368,
"count": 232664,
"self": 0.0,
"children": {
"worker_root": {
"total": 2134.1453410400313,
"count": 232664,
"is_parallel": true,
"self": 981.696058098036,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00392558599997983,
"count": 1,
"is_parallel": true,
"self": 0.00030985399996552587,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0036157320000143045,
"count": 2,
"is_parallel": true,
"self": 0.0036157320000143045
}
}
},
"UnityEnvironment.step": {
"total": 0.02619575400001395,
"count": 1,
"is_parallel": true,
"self": 0.0002624850000643164,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017906899995523418,
"count": 1,
"is_parallel": true,
"self": 0.00017906899995523418
},
"communicator.exchange": {
"total": 0.025078146999987894,
"count": 1,
"is_parallel": true,
"self": 0.025078146999987894
},
"steps_from_proto": {
"total": 0.0006760530000065046,
"count": 1,
"is_parallel": true,
"self": 0.0002329449999933786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000443108000013126,
"count": 2,
"is_parallel": true,
"self": 0.000443108000013126
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1152.4492829419953,
"count": 232663,
"is_parallel": true,
"self": 33.55941999104948,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.78539059803296,
"count": 232663,
"is_parallel": true,
"self": 72.78539059803296
},
"communicator.exchange": {
"total": 956.5800518360026,
"count": 232663,
"is_parallel": true,
"self": 956.5800518360026
},
"steps_from_proto": {
"total": 89.52442051691025,
"count": 232663,
"is_parallel": true,
"self": 36.826965353867365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.69745516304289,
"count": 465326,
"is_parallel": true,
"self": 52.69745516304289
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 455.35317864199106,
"count": 232664,
"self": 5.773772380844036,
"children": {
"process_trajectory": {
"total": 142.75822234014663,
"count": 232664,
"self": 142.29103196114704,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4671903789995895,
"count": 4,
"self": 0.4671903789995895
}
}
},
"_update_policy": {
"total": 306.8211839210004,
"count": 97,
"self": 254.09697563800046,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.72420828299994,
"count": 2910,
"self": 52.72420828299994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.659997886046767e-07,
"count": 1,
"self": 9.659997886046767e-07
},
"TrainerController._save_models": {
"total": 0.11475945699976364,
"count": 1,
"self": 0.0024415899997620727,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11231786700000157,
"count": 1,
"self": 0.11231786700000157
}
}
}
}
}
}
}