{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402313232421875,
"min": 1.402313232421875,
"max": 1.4272197484970093,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71365.125,
"min": 67864.046875,
"max": 79353.6640625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.26039783001808,
"min": 85.52280701754385,
"max": 414.53719008264466,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48808.0,
"min": 48748.0,
"max": 50204.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999965.0,
"min": 49874.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999965.0,
"min": 49874.0,
"max": 1999965.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.465590238571167,
"min": 0.034273937344551086,
"max": 2.465590238571167,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1363.471435546875,
"min": 4.11287260055542,
"max": 1363.471435546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.954118733595767,
"min": 1.7908733599508802,
"max": 3.954118733595767,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2186.627659678459,
"min": 214.90480319410563,
"max": 2186.627659678459,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.954118733595767,
"min": 1.7908733599508802,
"max": 3.954118733595767,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2186.627659678459,
"min": 214.90480319410563,
"max": 2186.627659678459,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019092374979663873,
"min": 0.01363469920664405,
"max": 0.021784534698720867,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.038184749959327746,
"min": 0.027715529599421036,
"max": 0.06250104561137657,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.052409492743512,
"min": 0.021062700916081666,
"max": 0.056840427344044056,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.104818985487024,
"min": 0.04212540183216333,
"max": 0.1685129478573799,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.586948471050004e-06,
"min": 4.586948471050004e-06,
"max": 0.00029533552655482495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.173896942100008e-06,
"min": 9.173896942100008e-06,
"max": 0.00084428026857325,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10152894999999998,
"min": 0.10152894999999998,
"max": 0.19844517500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20305789999999996,
"min": 0.20305789999999996,
"max": 0.5814267500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.629460500000005e-05,
"min": 8.629460500000005e-05,
"max": 0.0049224142325,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001725892100000001,
"min": 0.0001725892100000001,
"max": 0.014073194825000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673408859",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673411255"
},
"total": 2396.3210851780004,
"count": 1,
"self": 0.4437173980004445,
"children": {
"run_training.setup": {
"total": 0.20989016299995455,
"count": 1,
"self": 0.20989016299995455
},
"TrainerController.start_learning": {
"total": 2395.6674776170003,
"count": 1,
"self": 4.32432694596946,
"children": {
"TrainerController._reset_env": {
"total": 8.869316272000106,
"count": 1,
"self": 8.869316272000106
},
"TrainerController.advance": {
"total": 2382.356524592031,
"count": 231902,
"self": 4.435899513119693,
"children": {
"env_step": {
"total": 1897.1252911458978,
"count": 231902,
"self": 1595.136818156832,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.16929371305287,
"count": 231902,
"self": 15.320951551996359,
"children": {
"TorchPolicy.evaluate": {
"total": 283.8483421610565,
"count": 222979,
"self": 70.99149266721611,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.8568494938404,
"count": 222979,
"self": 212.8568494938404
}
}
}
}
},
"workers": {
"total": 2.8191792760128465,
"count": 231902,
"self": 0.0,
"children": {
"worker_root": {
"total": 2386.9819294419567,
"count": 231902,
"is_parallel": true,
"self": 1074.2498608249502,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002574419999973543,
"count": 1,
"is_parallel": true,
"self": 0.0005823760000112088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001992043999962334,
"count": 2,
"is_parallel": true,
"self": 0.001992043999962334
}
}
},
"UnityEnvironment.step": {
"total": 0.028754176000120424,
"count": 1,
"is_parallel": true,
"self": 0.0003791330000240123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020278200008760905,
"count": 1,
"is_parallel": true,
"self": 0.00020278200008760905
},
"communicator.exchange": {
"total": 0.027353999000069962,
"count": 1,
"is_parallel": true,
"self": 0.027353999000069962
},
"steps_from_proto": {
"total": 0.0008182619999388407,
"count": 1,
"is_parallel": true,
"self": 0.0002908499998284242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005274120001104166,
"count": 2,
"is_parallel": true,
"self": 0.0005274120001104166
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1312.7320686170065,
"count": 231901,
"is_parallel": true,
"self": 36.42156147011042,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.20781934200409,
"count": 231901,
"is_parallel": true,
"self": 84.20781934200409
},
"communicator.exchange": {
"total": 1086.7483779739757,
"count": 231901,
"is_parallel": true,
"self": 1086.7483779739757
},
"steps_from_proto": {
"total": 105.35430983091624,
"count": 231901,
"is_parallel": true,
"self": 43.83183312909955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.522476701816686,
"count": 463802,
"is_parallel": true,
"self": 61.522476701816686
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.7953339330131,
"count": 231902,
"self": 6.310575435012652,
"children": {
"process_trajectory": {
"total": 157.54528546300253,
"count": 231902,
"self": 156.33254267200232,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2127427910002098,
"count": 10,
"self": 1.2127427910002098
}
}
},
"_update_policy": {
"total": 316.93947303499795,
"count": 96,
"self": 263.51227966200395,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.427193372994,
"count": 2880,
"self": 53.427193372994
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2329996934568044e-06,
"count": 1,
"self": 1.2329996934568044e-06
},
"TrainerController._save_models": {
"total": 0.11730857400016248,
"count": 1,
"self": 0.001962147000085679,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1153464270000768,
"count": 1,
"self": 0.1153464270000768
}
}
}
}
}
}
}