{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3951727151870728,
"min": 1.3951727151870728,
"max": 1.426260232925415,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 66662.75,
"min": 66438.53125,
"max": 79939.109375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 141.69142857142856,
"min": 105.86111111111111,
"max": 414.327868852459,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49592.0,
"min": 48930.0,
"max": 50548.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999952.0,
"min": 49921.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999952.0,
"min": 49921.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.272899866104126,
"min": 0.078694187104702,
"max": 2.354133129119873,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 795.5149536132812,
"min": 9.52199649810791,
"max": 1094.671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.55477978519031,
"min": 1.7753412133652318,
"max": 3.770259633952496,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1244.1729248166084,
"min": 214.81628681719303,
"max": 1725.1897854208946,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.55477978519031,
"min": 1.7753412133652318,
"max": 3.770259633952496,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1244.1729248166084,
"min": 214.81628681719303,
"max": 1725.1897854208946,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015863945602905005,
"min": 0.01361940624313623,
"max": 0.021652236940766064,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03172789120581001,
"min": 0.02723881248627246,
"max": 0.05915843484751046,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0361716722138226,
"min": 0.02122506427889069,
"max": 0.05768171346022024,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.0723433444276452,
"min": 0.04245012855778138,
"max": 0.1730451403806607,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.579448473550004e-06,
"min": 4.579448473550004e-06,
"max": 0.0002953296015568,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.158896947100007e-06,
"min": 9.158896947100007e-06,
"max": 0.0008438806687064498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10152644999999999,
"min": 0.10152644999999999,
"max": 0.19844319999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20305289999999998,
"min": 0.20305289999999998,
"max": 0.5812935500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.616985500000006e-05,
"min": 8.616985500000006e-05,
"max": 0.004922315679999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017233971000000012,
"min": 0.00017233971000000012,
"max": 0.014066548145,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672035150",
"python_version": "3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) \n[GCC 9.4.0]",
"command_line_arguments": "/opt/conda/bin/mlagents-learn ./ml-agents/config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672037631"
},
"total": 2481.1924346630003,
"count": 1,
"self": 0.4409137610000471,
"children": {
"run_training.setup": {
"total": 0.09390113100016606,
"count": 1,
"self": 0.09390113100016606
},
"TrainerController.start_learning": {
"total": 2480.657619771,
"count": 1,
"self": 5.088881359068182,
"children": {
"TrainerController._reset_env": {
"total": 8.137379927999973,
"count": 1,
"self": 8.137379927999973
},
"TrainerController.advance": {
"total": 2467.2973753559318,
"count": 230501,
"self": 5.5699594199522835,
"children": {
"env_step": {
"total": 1905.7130750190001,
"count": 230501,
"self": 1616.9811458919507,
"children": {
"SubprocessEnvManager._take_step": {
"total": 285.6656326150335,
"count": 230501,
"self": 16.406401878908355,
"children": {
"TorchPolicy.evaluate": {
"total": 269.2592307361251,
"count": 222978,
"self": 68.21173597905977,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.04749475706535,
"count": 222978,
"self": 201.04749475706535
}
}
}
}
},
"workers": {
"total": 3.0662965120159242,
"count": 230501,
"self": 0.0,
"children": {
"worker_root": {
"total": 2471.9176000320253,
"count": 230501,
"is_parallel": true,
"self": 1174.2287453389738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011159470000166039,
"count": 1,
"is_parallel": true,
"self": 0.00043942199977209384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00067652500024451,
"count": 2,
"is_parallel": true,
"self": 0.00067652500024451
}
}
},
"UnityEnvironment.step": {
"total": 0.03373993300010625,
"count": 1,
"is_parallel": true,
"self": 0.00043127499998263374,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00025594200019440905,
"count": 1,
"is_parallel": true,
"self": 0.00025594200019440905
},
"communicator.exchange": {
"total": 0.032240294000075664,
"count": 1,
"is_parallel": true,
"self": 0.032240294000075664
},
"steps_from_proto": {
"total": 0.0008124219998535409,
"count": 1,
"is_parallel": true,
"self": 0.0002985829996760003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005138390001775406,
"count": 2,
"is_parallel": true,
"self": 0.0005138390001775406
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.6888546930516,
"count": 230500,
"is_parallel": true,
"self": 44.06035277787464,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 90.20966921906779,
"count": 230500,
"is_parallel": true,
"self": 90.20966921906779
},
"communicator.exchange": {
"total": 1057.7262971160535,
"count": 230500,
"is_parallel": true,
"self": 1057.7262971160535
},
"steps_from_proto": {
"total": 105.69253558005562,
"count": 230500,
"is_parallel": true,
"self": 45.96875445801152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.7237811220441,
"count": 461000,
"is_parallel": true,
"self": 59.7237811220441
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 556.0143409169793,
"count": 230501,
"self": 7.650402702971178,
"children": {
"process_trajectory": {
"total": 177.51732956500837,
"count": 230501,
"self": 176.1897557330085,
"children": {
"RLTrainer._checkpoint": {
"total": 1.327573831999871,
"count": 10,
"self": 1.327573831999871
}
}
},
"_update_policy": {
"total": 370.8466086489998,
"count": 96,
"self": 327.0183789070006,
"children": {
"TorchPPOOptimizer.update": {
"total": 43.828229741999166,
"count": 2880,
"self": 43.828229741999166
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2790001164830755e-06,
"count": 1,
"self": 1.2790001164830755e-06
},
"TrainerController._save_models": {
"total": 0.13398184900006527,
"count": 1,
"self": 0.0027860289997079235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13119582000035734,
"count": 1,
"self": 0.13119582000035734
}
}
}
}
}
}
}