{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.411523699760437,
"min": 1.411523699760437,
"max": 1.4288020133972168,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 67516.0,
"min": 67516.0,
"max": 77591.375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 111.09480812641084,
"min": 85.71678321678321,
"max": 431.22222222222223,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49215.0,
"min": 49030.0,
"max": 50453.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999556.0,
"min": 49820.0,
"max": 1999556.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999556.0,
"min": 49820.0,
"max": 1999556.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.312788248062134,
"min": 0.08611175417900085,
"max": 2.4459445476531982,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1024.565185546875,
"min": 9.98896312713623,
"max": 1364.57763671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.397511273420691,
"min": 1.5674347831257458,
"max": 3.9098744114803647,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1505.0974941253662,
"min": 181.82243484258652,
"max": 2141.13948315382,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.397511273420691,
"min": 1.5674347831257458,
"max": 3.9098744114803647,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1505.0974941253662,
"min": 181.82243484258652,
"max": 2141.13948315382,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.020083212667668705,
"min": 0.013277355375127323,
"max": 0.020083212667668705,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04016642533533741,
"min": 0.026610359206097202,
"max": 0.05823663418705109,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05041125969340404,
"min": 0.02186571592465043,
"max": 0.0612865100718207,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10082251938680808,
"min": 0.04373143184930086,
"max": 0.18385953021546209,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.384898538399988e-06,
"min": 4.384898538399988e-06,
"max": 0.00029532772655742497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.769797076799976e-06,
"min": 8.769797076799976e-06,
"max": 0.0008439529686823499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10146160000000004,
"min": 0.10146160000000004,
"max": 0.198442575,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20292320000000008,
"min": 0.20292320000000008,
"max": 0.58131765,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.29338399999998e-05,
"min": 8.29338399999998e-05,
"max": 0.0049222844925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001658676799999996,
"min": 0.0001658676799999996,
"max": 0.014067750734999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679721341",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679723693"
},
"total": 2351.9276392300003,
"count": 1,
"self": 0.44364301500036163,
"children": {
"run_training.setup": {
"total": 0.11626558699998668,
"count": 1,
"self": 0.11626558699998668
},
"TrainerController.start_learning": {
"total": 2351.367730628,
"count": 1,
"self": 4.393055163945064,
"children": {
"TrainerController._reset_env": {
"total": 9.57335374600001,
"count": 1,
"self": 9.57335374600001
},
"TrainerController.advance": {
"total": 2337.2836114220554,
"count": 231495,
"self": 4.52980679616212,
"children": {
"env_step": {
"total": 1824.5919266819353,
"count": 231495,
"self": 1531.9040242869382,
"children": {
"SubprocessEnvManager._take_step": {
"total": 289.88079666200775,
"count": 231495,
"self": 17.371278083017444,
"children": {
"TorchPolicy.evaluate": {
"total": 272.5095185789903,
"count": 222886,
"self": 272.5095185789903
}
}
},
"workers": {
"total": 2.8071057329893847,
"count": 231495,
"self": 0.0,
"children": {
"worker_root": {
"total": 2343.3921394409213,
"count": 231495,
"is_parallel": true,
"self": 1099.8515540458816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010796920000188948,
"count": 1,
"is_parallel": true,
"self": 0.0002955760000133978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000784116000005497,
"count": 2,
"is_parallel": true,
"self": 0.000784116000005497
}
}
},
"UnityEnvironment.step": {
"total": 0.050895120000006955,
"count": 1,
"is_parallel": true,
"self": 0.00030026999999677173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018890399999804686,
"count": 1,
"is_parallel": true,
"self": 0.00018890399999804686
},
"communicator.exchange": {
"total": 0.049733500000002095,
"count": 1,
"is_parallel": true,
"self": 0.049733500000002095
},
"steps_from_proto": {
"total": 0.0006724460000100407,
"count": 1,
"is_parallel": true,
"self": 0.00021668700000532226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004557590000047185,
"count": 2,
"is_parallel": true,
"self": 0.0004557590000047185
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1243.5405853950397,
"count": 231494,
"is_parallel": true,
"self": 38.408656578056934,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.72805431005375,
"count": 231494,
"is_parallel": true,
"self": 76.72805431005375
},
"communicator.exchange": {
"total": 1039.9783612630401,
"count": 231494,
"is_parallel": true,
"self": 1039.9783612630401
},
"steps_from_proto": {
"total": 88.42551324388893,
"count": 231494,
"is_parallel": true,
"self": 33.163509382912736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.26200386097619,
"count": 462988,
"is_parallel": true,
"self": 55.26200386097619
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 508.16187794395773,
"count": 231495,
"self": 6.616629385879605,
"children": {
"process_trajectory": {
"total": 140.59194958308012,
"count": 231495,
"self": 139.27275584508027,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3191937379998535,
"count": 10,
"self": 1.3191937379998535
}
}
},
"_update_policy": {
"total": 360.953298974998,
"count": 96,
"self": 301.843991509998,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.109307465,
"count": 2880,
"self": 59.109307465
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1519996405695565e-06,
"count": 1,
"self": 1.1519996405695565e-06
},
"TrainerController._save_models": {
"total": 0.11770914399994581,
"count": 1,
"self": 0.002041991999703896,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11566715200024191,
"count": 1,
"self": 0.11566715200024191
}
}
}
}
}
}
}