{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4202393293380737,
"min": 1.4199544191360474,
"max": 1.430855631828308,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71280.390625,
"min": 68839.8984375,
"max": 78377.2265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 125.90609137055837,
"min": 83.20847457627119,
"max": 400.25396825396825,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49607.0,
"min": 48877.0,
"max": 50432.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999967.0,
"min": 49826.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999967.0,
"min": 49826.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.8679447174072266,
"min": 0.1131385788321495,
"max": 1.932198166847229,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 735.97021484375,
"min": 14.142322540283203,
"max": 1089.452392578125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.28954286396806,
"min": 1.8712130148410797,
"max": 3.8298789459221374,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1296.0798884034157,
"min": 233.90162685513496,
"max": 2253.0934875011444,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.28954286396806,
"min": 1.8712130148410797,
"max": 3.8298789459221374,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1296.0798884034157,
"min": 233.90162685513496,
"max": 2253.0934875011444,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.008128832462413508,
"min": 0.006835903138759628,
"max": 0.012940714596576678,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.016257664924827015,
"min": 0.013671806277519256,
"max": 0.038822143789730035,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.031639409406731524,
"min": 0.019437585181246202,
"max": 0.061142733631034694,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.06327881881346305,
"min": 0.038875170362492405,
"max": 0.17838694155216217,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.13394862205e-06,
"min": 4.13394862205e-06,
"max": 0.000295358626547125,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.2678972441e-06,
"min": 8.2678972441e-06,
"max": 0.0008440960686346501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10137795000000001,
"min": 0.10137795000000001,
"max": 0.19845287500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20275590000000002,
"min": 0.20275590000000002,
"max": 0.5813653500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.875970500000001e-05,
"min": 7.875970500000001e-05,
"max": 0.004922798462499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00015751941000000002,
"min": 0.00015751941000000002,
"max": 0.014070130965000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673313813",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673316004"
},
"total": 2190.981481545,
"count": 1,
"self": 0.3952584909993675,
"children": {
"run_training.setup": {
"total": 0.10478759900001933,
"count": 1,
"self": 0.10478759900001933
},
"TrainerController.start_learning": {
"total": 2190.4814354550003,
"count": 1,
"self": 3.7286998170538936,
"children": {
"TrainerController._reset_env": {
"total": 7.267118643000003,
"count": 1,
"self": 7.267118643000003
},
"TrainerController.advance": {
"total": 2179.3737634249464,
"count": 232015,
"self": 4.083810952938165,
"children": {
"env_step": {
"total": 1727.9775389950328,
"count": 232015,
"self": 1452.0918784359778,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.280217240056,
"count": 232015,
"self": 14.31501376613528,
"children": {
"TorchPolicy.evaluate": {
"total": 258.9652034739207,
"count": 223076,
"self": 64.79368098500288,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.17152248891784,
"count": 223076,
"self": 194.17152248891784
}
}
}
}
},
"workers": {
"total": 2.6054433189989368,
"count": 232015,
"self": 0.0,
"children": {
"worker_root": {
"total": 2182.2972115518933,
"count": 232015,
"is_parallel": true,
"self": 989.488213160827,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021199700000806843,
"count": 1,
"is_parallel": true,
"self": 0.00036371700002746365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017562530000532206,
"count": 2,
"is_parallel": true,
"self": 0.0017562530000532206
}
}
},
"UnityEnvironment.step": {
"total": 0.0273802289999594,
"count": 1,
"is_parallel": true,
"self": 0.00028469500000483094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018978799994329165,
"count": 1,
"is_parallel": true,
"self": 0.00018978799994329165
},
"communicator.exchange": {
"total": 0.026170899000021564,
"count": 1,
"is_parallel": true,
"self": 0.026170899000021564
},
"steps_from_proto": {
"total": 0.0007348469999897134,
"count": 1,
"is_parallel": true,
"self": 0.0002491789998657623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00048566800012395106,
"count": 2,
"is_parallel": true,
"self": 0.00048566800012395106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1192.8089983910663,
"count": 232014,
"is_parallel": true,
"self": 34.21911316808246,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.27328880099469,
"count": 232014,
"is_parallel": true,
"self": 77.27328880099469
},
"communicator.exchange": {
"total": 987.6139651259822,
"count": 232014,
"is_parallel": true,
"self": 987.6139651259822
},
"steps_from_proto": {
"total": 93.7026312960071,
"count": 232014,
"is_parallel": true,
"self": 40.48613383997076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.216497456036336,
"count": 464028,
"is_parallel": true,
"self": 53.216497456036336
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 447.3124134769755,
"count": 232015,
"self": 6.011663879869616,
"children": {
"process_trajectory": {
"total": 142.86400319510483,
"count": 232015,
"self": 141.65192016610501,
"children": {
"RLTrainer._checkpoint": {
"total": 1.212083028999814,
"count": 10,
"self": 1.212083028999814
}
}
},
"_update_policy": {
"total": 298.43674640200106,
"count": 96,
"self": 254.14449291300366,
"children": {
"TorchPPOOptimizer.update": {
"total": 44.2922534889974,
"count": 1152,
"self": 44.2922534889974
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.930002539069392e-07,
"count": 1,
"self": 7.930002539069392e-07
},
"TrainerController._save_models": {
"total": 0.11185277699996732,
"count": 1,
"self": 0.001912286999868229,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10994049000009909,
"count": 1,
"self": 0.10994049000009909
}
}
}
}
}
}
}