{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402661681175232,
"min": 1.402661681175232,
"max": 1.4292364120483398,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70305.609375,
"min": 68925.109375,
"max": 78533.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.48979591836735,
"min": 70.48428571428572,
"max": 429.5213675213675,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49361.0,
"min": 48864.0,
"max": 50254.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999987.0,
"min": 49764.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999987.0,
"min": 49764.0,
"max": 1999987.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4564549922943115,
"min": 0.06931770592927933,
"max": 2.514643430709839,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1564.7618408203125,
"min": 8.040853500366211,
"max": 1707.687744140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.771496821329395,
"min": 2.061624997006408,
"max": 4.082947337391353,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2402.443475186825,
"min": 239.14849965274334,
"max": 2728.522800922394,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.771496821329395,
"min": 2.061624997006408,
"max": 4.082947337391353,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2402.443475186825,
"min": 239.14849965274334,
"max": 2728.522800922394,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01747844691239556,
"min": 0.012552615662571043,
"max": 0.022239802518864357,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052435340737186684,
"min": 0.025105231325142086,
"max": 0.05700802230591459,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05662983088857598,
"min": 0.021134150897463164,
"max": 0.06371308478216331,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16988949266572795,
"min": 0.04226830179492633,
"max": 0.19113925434648993,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.926598691166677e-06,
"min": 3.926598691166677e-06,
"max": 0.00029535450154849995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1779796073500031e-05,
"min": 1.1779796073500031e-05,
"max": 0.0008441869686043497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10130883333333333,
"min": 0.10130883333333333,
"max": 0.1984515,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039265,
"min": 0.20775234999999995,
"max": 0.5813956500000003,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.531078333333351e-05,
"min": 7.531078333333351e-05,
"max": 0.00492272985,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022593235000000053,
"min": 0.00022593235000000053,
"max": 0.014071642935000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698743615",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698745936"
},
"total": 2320.535800524,
"count": 1,
"self": 0.43937868300054106,
"children": {
"run_training.setup": {
"total": 0.04385186500002192,
"count": 1,
"self": 0.04385186500002192
},
"TrainerController.start_learning": {
"total": 2320.0525699759996,
"count": 1,
"self": 4.252976090936045,
"children": {
"TrainerController._reset_env": {
"total": 8.375194132000047,
"count": 1,
"self": 8.375194132000047
},
"TrainerController.advance": {
"total": 2307.321596800063,
"count": 233435,
"self": 4.325535475036304,
"children": {
"env_step": {
"total": 1822.8124591490628,
"count": 233435,
"self": 1503.7387733210192,
"children": {
"SubprocessEnvManager._take_step": {
"total": 316.3991255849737,
"count": 233435,
"self": 15.96268953502323,
"children": {
"TorchPolicy.evaluate": {
"total": 300.4364360499505,
"count": 223018,
"self": 300.4364360499505
}
}
},
"workers": {
"total": 2.674560243069891,
"count": 233435,
"self": 0.0,
"children": {
"worker_root": {
"total": 2312.7416888709845,
"count": 233435,
"is_parallel": true,
"self": 1086.7951729130143,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008123120000504969,
"count": 1,
"is_parallel": true,
"self": 0.0002391010000337701,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005732110000167268,
"count": 2,
"is_parallel": true,
"self": 0.0005732110000167268
}
}
},
"UnityEnvironment.step": {
"total": 0.031094309000025078,
"count": 1,
"is_parallel": true,
"self": 0.0003144590000374592,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020246700000825513,
"count": 1,
"is_parallel": true,
"self": 0.00020246700000825513
},
"communicator.exchange": {
"total": 0.02992987400000402,
"count": 1,
"is_parallel": true,
"self": 0.02992987400000402
},
"steps_from_proto": {
"total": 0.0006475089999753436,
"count": 1,
"is_parallel": true,
"self": 0.00018375700000206052,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004637519999732831,
"count": 2,
"is_parallel": true,
"self": 0.0004637519999732831
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1225.9465159579702,
"count": 233434,
"is_parallel": true,
"self": 38.90583824698365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.71246576492484,
"count": 233434,
"is_parallel": true,
"self": 80.71246576492484
},
"communicator.exchange": {
"total": 1019.8075849031068,
"count": 233434,
"is_parallel": true,
"self": 1019.8075849031068
},
"steps_from_proto": {
"total": 86.52062704295486,
"count": 233434,
"is_parallel": true,
"self": 30.645644424910984,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.87498261804387,
"count": 466868,
"is_parallel": true,
"self": 55.87498261804387
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.18360217596427,
"count": 233435,
"self": 6.066866822886823,
"children": {
"process_trajectory": {
"total": 148.55254159407758,
"count": 233435,
"self": 147.44955559507702,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1029859990005662,
"count": 10,
"self": 1.1029859990005662
}
}
},
"_update_policy": {
"total": 325.56419375899986,
"count": 97,
"self": 265.3704177020076,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.19377605699225,
"count": 2910,
"self": 60.19377605699225
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0490002750884742e-06,
"count": 1,
"self": 1.0490002750884742e-06
},
"TrainerController._save_models": {
"total": 0.10280190399998901,
"count": 1,
"self": 0.0018670839999685995,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10093482000002041,
"count": 1,
"self": 0.10093482000002041
}
}
}
}
}
}
}