{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404628038406372,
"min": 1.404628038406372,
"max": 1.4290534257888794,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70213.140625,
"min": 68192.109375,
"max": 78720.46875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 92.89373814041745,
"min": 82.91778523489933,
"max": 391.48837209302326,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48955.0,
"min": 48892.0,
"max": 50502.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49881.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49881.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3928351402282715,
"min": 0.07904969155788422,
"max": 2.4797286987304688,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1261.024169921875,
"min": 10.11836051940918,
"max": 1439.21923828125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5740078420747614,
"min": 1.7176994781475514,
"max": 3.978109139124019,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1883.5021327733994,
"min": 219.86553320288658,
"max": 2370.9530469179153,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5740078420747614,
"min": 1.7176994781475514,
"max": 3.978109139124019,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1883.5021327733994,
"min": 219.86553320288658,
"max": 2370.9530469179153,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01849761906455064,
"min": 0.013874814264191728,
"max": 0.020879803626060796,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05549285719365192,
"min": 0.028899700914068186,
"max": 0.05950612607703079,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05514527969062328,
"min": 0.02072690560792883,
"max": 0.06129494129369656,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16543583907186984,
"min": 0.04145381121585766,
"max": 0.1792365826666355,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.27879890709999e-06,
"min": 3.27879890709999e-06,
"max": 0.0002953326765557751,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.83639672129997e-06,
"min": 9.83639672129997e-06,
"max": 0.0008442874685708499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10109290000000003,
"min": 0.10109290000000003,
"max": 0.19844422500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30327870000000007,
"min": 0.20733280000000004,
"max": 0.5814291500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.453570999999982e-05,
"min": 6.453570999999982e-05,
"max": 0.004922366827500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019360712999999947,
"min": 0.00019360712999999947,
"max": 0.014073314585000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676712135",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676714939"
},
"total": 2804.3454504329998,
"count": 1,
"self": 0.4978015719993891,
"children": {
"run_training.setup": {
"total": 0.12960042300005625,
"count": 1,
"self": 0.12960042300005625
},
"TrainerController.start_learning": {
"total": 2803.718048438,
"count": 1,
"self": 5.014893849881901,
"children": {
"TrainerController._reset_env": {
"total": 10.871847654000021,
"count": 1,
"self": 10.871847654000021
},
"TrainerController.advance": {
"total": 2787.707443517119,
"count": 231826,
"self": 5.569039614287703,
"children": {
"env_step": {
"total": 2169.6445371810278,
"count": 231826,
"self": 1810.5932277390025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 355.74617715999955,
"count": 231826,
"self": 18.656398678958226,
"children": {
"TorchPolicy.evaluate": {
"total": 337.0897784810413,
"count": 222986,
"self": 82.63458337790985,
"children": {
"TorchPolicy.sample_actions": {
"total": 254.45519510313147,
"count": 222986,
"self": 254.45519510313147
}
}
}
}
},
"workers": {
"total": 3.305132282025852,
"count": 231826,
"self": 0.0,
"children": {
"worker_root": {
"total": 2793.9208350480126,
"count": 231826,
"is_parallel": true,
"self": 1321.2917222009628,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002088005000018711,
"count": 1,
"is_parallel": true,
"self": 0.0004441260000476177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016438789999710934,
"count": 2,
"is_parallel": true,
"self": 0.0016438789999710934
}
}
},
"UnityEnvironment.step": {
"total": 0.03297302800001489,
"count": 1,
"is_parallel": true,
"self": 0.0003481600000441176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023472700002002966,
"count": 1,
"is_parallel": true,
"self": 0.00023472700002002966
},
"communicator.exchange": {
"total": 0.03137284699994325,
"count": 1,
"is_parallel": true,
"self": 0.03137284699994325
},
"steps_from_proto": {
"total": 0.001017294000007496,
"count": 1,
"is_parallel": true,
"self": 0.0004703959999687868,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005468980000387091,
"count": 2,
"is_parallel": true,
"self": 0.0005468980000387091
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1472.6291128470498,
"count": 231825,
"is_parallel": true,
"self": 43.70479145107424,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 96.23386574093331,
"count": 231825,
"is_parallel": true,
"self": 96.23386574093331
},
"communicator.exchange": {
"total": 1224.3426412739766,
"count": 231825,
"is_parallel": true,
"self": 1224.3426412739766
},
"steps_from_proto": {
"total": 108.34781438106552,
"count": 231825,
"is_parallel": true,
"self": 46.61674069704031,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.731073684025205,
"count": 463650,
"is_parallel": true,
"self": 61.731073684025205
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 612.4938667218036,
"count": 231826,
"self": 7.729779438812216,
"children": {
"process_trajectory": {
"total": 188.63567201099534,
"count": 231826,
"self": 187.26430465399574,
"children": {
"RLTrainer._checkpoint": {
"total": 1.371367356999599,
"count": 10,
"self": 1.371367356999599
}
}
},
"_update_policy": {
"total": 416.128415271996,
"count": 97,
"self": 355.0913606509898,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.03705462100618,
"count": 2910,
"self": 61.03705462100618
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1909996828762814e-06,
"count": 1,
"self": 1.1909996828762814e-06
},
"TrainerController._save_models": {
"total": 0.12386222599980101,
"count": 1,
"self": 0.0024444880000373814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12141773799976363,
"count": 1,
"self": 0.12141773799976363
}
}
}
}
}
}
}