{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045771360397339,
"min": 1.4045771360397339,
"max": 1.4294127225875854,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70747.1484375,
"min": 68743.4921875,
"max": 75666.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.36380255941499,
"min": 81.49339933993399,
"max": 372.7164179104478,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49429.0,
"min": 48802.0,
"max": 50109.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999908.0,
"min": 49835.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999908.0,
"min": 49835.0,
"max": 1999908.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3836021423339844,
"min": 0.11148186773061752,
"max": 2.4445838928222656,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1303.830322265625,
"min": 14.827088356018066,
"max": 1441.69970703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.678606213010861,
"min": 1.7443667968412988,
"max": 3.924984330414898,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2012.197598516941,
"min": 232.00078397989273,
"max": 2290.9923630952835,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.678606213010861,
"min": 1.7443667968412988,
"max": 3.924984330414898,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2012.197598516941,
"min": 232.00078397989273,
"max": 2290.9923630952835,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019377428689686994,
"min": 0.013423438489472675,
"max": 0.020323900274039866,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.058132286069060984,
"min": 0.027638355128389473,
"max": 0.058132286069060984,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05084464963939456,
"min": 0.021477227720121544,
"max": 0.058445265578726924,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15253394891818367,
"min": 0.04318541704366605,
"max": 0.1728473832209905,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.476298841266674e-06,
"min": 3.476298841266674e-06,
"max": 0.000295307176564275,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0428896523800021e-05,
"min": 1.0428896523800021e-05,
"max": 0.0008442004685998499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115873333333335,
"min": 0.10115873333333335,
"max": 0.198435725,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30347620000000003,
"min": 0.20744260000000003,
"max": 0.5814001500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.782079333333343e-05,
"min": 6.782079333333343e-05,
"max": 0.004921942677500003,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002034623800000003,
"min": 0.0002034623800000003,
"max": 0.014071867485,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689889100",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689891828"
},
"total": 2727.2126073150002,
"count": 1,
"self": 0.9053654360004657,
"children": {
"run_training.setup": {
"total": 0.034366738000016994,
"count": 1,
"self": 0.034366738000016994
},
"TrainerController.start_learning": {
"total": 2726.272875141,
"count": 1,
"self": 5.083163282883561,
"children": {
"TrainerController._reset_env": {
"total": 5.138307347000023,
"count": 1,
"self": 5.138307347000023
},
"TrainerController.advance": {
"total": 2715.8433604201164,
"count": 232102,
"self": 5.1024655630926645,
"children": {
"env_step": {
"total": 2091.330662257058,
"count": 232102,
"self": 1770.0140327449303,
"children": {
"SubprocessEnvManager._take_step": {
"total": 318.1032452339482,
"count": 232102,
"self": 18.609973445880087,
"children": {
"TorchPolicy.evaluate": {
"total": 299.4932717880681,
"count": 223018,
"self": 299.4932717880681
}
}
},
"workers": {
"total": 3.2133842781795465,
"count": 232102,
"self": 0.0,
"children": {
"worker_root": {
"total": 2717.4172125500354,
"count": 232102,
"is_parallel": true,
"self": 1276.0192447519762,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004223481000053653,
"count": 1,
"is_parallel": true,
"self": 0.00036050900007467135,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0038629719999789813,
"count": 2,
"is_parallel": true,
"self": 0.0038629719999789813
}
}
},
"UnityEnvironment.step": {
"total": 0.030605691999994633,
"count": 1,
"is_parallel": true,
"self": 0.0003319559999681587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002359029999752238,
"count": 1,
"is_parallel": true,
"self": 0.0002359029999752238
},
"communicator.exchange": {
"total": 0.029282529000056456,
"count": 1,
"is_parallel": true,
"self": 0.029282529000056456
},
"steps_from_proto": {
"total": 0.0007553039999947941,
"count": 1,
"is_parallel": true,
"self": 0.00022500600005059823,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005302979999441959,
"count": 2,
"is_parallel": true,
"self": 0.0005302979999441959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1441.3979677980592,
"count": 232101,
"is_parallel": true,
"self": 42.67147021631945,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.69312974890659,
"count": 232101,
"is_parallel": true,
"self": 91.69312974890659
},
"communicator.exchange": {
"total": 1200.8365388939733,
"count": 232101,
"is_parallel": true,
"self": 1200.8365388939733
},
"steps_from_proto": {
"total": 106.19682893885977,
"count": 232101,
"is_parallel": true,
"self": 40.68917949089246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.50764944796731,
"count": 464202,
"is_parallel": true,
"self": 65.50764944796731
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 619.4102325999655,
"count": 232102,
"self": 7.297083858921496,
"children": {
"process_trajectory": {
"total": 155.14097598504588,
"count": 232102,
"self": 153.6673991790459,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4735768059999828,
"count": 10,
"self": 1.4735768059999828
}
}
},
"_update_policy": {
"total": 456.97217275599814,
"count": 97,
"self": 395.46417507800277,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.50799767799538,
"count": 2910,
"self": 61.50799767799538
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4110000847722404e-06,
"count": 1,
"self": 1.4110000847722404e-06
},
"TrainerController._save_models": {
"total": 0.20804267999983495,
"count": 1,
"self": 0.004922613999497116,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20312006600033783,
"count": 1,
"self": 0.20312006600033783
}
}
}
}
}
}
}