{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3934836387634277,
"min": 1.3934836387634277,
"max": 1.4255551099777222,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69428.9296875,
"min": 67707.0546875,
"max": 78260.078125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.3061224489796,
"min": 75.68759571209802,
"max": 413.45454545454544,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49150.0,
"min": 49150.0,
"max": 50132.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999908.0,
"min": 49400.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999908.0,
"min": 49400.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4502170085906982,
"min": 0.11912068724632263,
"max": 2.4876692295074463,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1200.6063232421875,
"min": 14.294482231140137,
"max": 1543.469482421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7711036994749185,
"min": 1.7707814283668994,
"max": 4.005305307338366,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1847.84081274271,
"min": 212.49377140402794,
"max": 2472.053161263466,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7711036994749185,
"min": 1.7707814283668994,
"max": 4.005305307338366,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1847.84081274271,
"min": 212.49377140402794,
"max": 2472.053161263466,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018623704260163423,
"min": 0.01237989542229722,
"max": 0.02064170341068853,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05587111278049027,
"min": 0.02475979084459444,
"max": 0.06192511023206559,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05112952056030432,
"min": 0.02144543599958221,
"max": 0.06512793482591708,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15338856168091297,
"min": 0.04289087199916442,
"max": 0.18493746345241865,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4189988603666584e-06,
"min": 3.4189988603666584e-06,
"max": 0.0002953224765591749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0256996581099975e-05,
"min": 1.0256996581099975e-05,
"max": 0.0008439654186782001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113963333333331,
"min": 0.10113963333333331,
"max": 0.19844082500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30341889999999994,
"min": 0.20744354999999998,
"max": 0.5813217999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.686770333333319e-05,
"min": 6.686770333333319e-05,
"max": 0.004922197167500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020060310999999958,
"min": 0.00020060310999999958,
"max": 0.014067957820000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711979425",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711981803"
},
"total": 2378.119371284,
"count": 1,
"self": 0.43922361700015244,
"children": {
"run_training.setup": {
"total": 0.053504868000004535,
"count": 1,
"self": 0.053504868000004535
},
"TrainerController.start_learning": {
"total": 2377.626642799,
"count": 1,
"self": 4.34026772201787,
"children": {
"TrainerController._reset_env": {
"total": 2.7698743529999774,
"count": 1,
"self": 2.7698743529999774
},
"TrainerController.advance": {
"total": 2370.4024754209822,
"count": 232350,
"self": 4.573682083906533,
"children": {
"env_step": {
"total": 1890.6098843680513,
"count": 232350,
"self": 1564.3344264881075,
"children": {
"SubprocessEnvManager._take_step": {
"total": 323.4952073959495,
"count": 232350,
"self": 16.683998202921657,
"children": {
"TorchPolicy.evaluate": {
"total": 306.8112091930278,
"count": 223010,
"self": 306.8112091930278
}
}
},
"workers": {
"total": 2.780250483994223,
"count": 232350,
"self": 0.0,
"children": {
"worker_root": {
"total": 2370.638622473185,
"count": 232350,
"is_parallel": true,
"self": 1102.8743162980818,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009213459999273255,
"count": 1,
"is_parallel": true,
"self": 0.0002493799999001567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006719660000271688,
"count": 2,
"is_parallel": true,
"self": 0.0006719660000271688
}
}
},
"UnityEnvironment.step": {
"total": 0.03645147100007762,
"count": 1,
"is_parallel": true,
"self": 0.0004006939999499082,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024833899999521236,
"count": 1,
"is_parallel": true,
"self": 0.00024833899999521236
},
"communicator.exchange": {
"total": 0.03502465100007157,
"count": 1,
"is_parallel": true,
"self": 0.03502465100007157
},
"steps_from_proto": {
"total": 0.0007777870000609255,
"count": 1,
"is_parallel": true,
"self": 0.00021436100007576897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005634259999851565,
"count": 2,
"is_parallel": true,
"self": 0.0005634259999851565
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1267.7643061751032,
"count": 232349,
"is_parallel": true,
"self": 38.64115070720072,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.10083709998082,
"count": 232349,
"is_parallel": true,
"self": 84.10083709998082
},
"communicator.exchange": {
"total": 1053.1517341169724,
"count": 232349,
"is_parallel": true,
"self": 1053.1517341169724
},
"steps_from_proto": {
"total": 91.87058425094949,
"count": 232349,
"is_parallel": true,
"self": 34.06566306293598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.80492118801351,
"count": 464698,
"is_parallel": true,
"self": 57.80492118801351
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 475.21890896902437,
"count": 232350,
"self": 6.412980722054385,
"children": {
"process_trajectory": {
"total": 150.87543743897083,
"count": 232350,
"self": 149.5299186489707,
"children": {
"RLTrainer._checkpoint": {
"total": 1.345518790000142,
"count": 10,
"self": 1.345518790000142
}
}
},
"_update_policy": {
"total": 317.93049080799915,
"count": 97,
"self": 256.0417011670078,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.88878964099138,
"count": 2910,
"self": 61.88878964099138
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.560000424040481e-07,
"count": 1,
"self": 8.560000424040481e-07
},
"TrainerController._save_models": {
"total": 0.11402444699979242,
"count": 1,
"self": 0.001941388999512128,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11208305800028029,
"count": 1,
"self": 0.11208305800028029
}
}
}
}
}
}
}