{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3992011547088623,
"min": 1.3992011547088623,
"max": 1.4276094436645508,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68135.5,
"min": 68135.5,
"max": 77830.96875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 91.93656716417911,
"min": 81.74668874172185,
"max": 393.8031496062992,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49278.0,
"min": 48974.0,
"max": 50127.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999956.0,
"min": 49949.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999956.0,
"min": 49949.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.439425230026245,
"min": 0.09052973240613937,
"max": 2.468040704727173,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1307.531982421875,
"min": 11.406745910644531,
"max": 1451.2078857421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7214976327855194,
"min": 1.6443146073392458,
"max": 4.018706100807344,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1994.7227311730385,
"min": 207.183640524745,
"max": 2297.3730575442314,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7214976327855194,
"min": 1.6443146073392458,
"max": 4.018706100807344,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1994.7227311730385,
"min": 207.183640524745,
"max": 2297.3730575442314,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018420797783700336,
"min": 0.013263079660221895,
"max": 0.022560999731649645,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05526239335110101,
"min": 0.02652615932044379,
"max": 0.05919463998870925,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05528429862525728,
"min": 0.020832985856880745,
"max": 0.05901518737276395,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16585289587577184,
"min": 0.04166597171376149,
"max": 0.1735415242612362,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3765988744999937e-06,
"min": 3.3765988744999937e-06,
"max": 0.00029529450156849993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0129796623499982e-05,
"min": 1.0129796623499982e-05,
"max": 0.0008441851686049499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112550000000002,
"min": 0.10112550000000002,
"max": 0.19843149999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30337650000000005,
"min": 0.20740335000000007,
"max": 0.58139505,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.616244999999988e-05,
"min": 6.616244999999988e-05,
"max": 0.00492173185,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019848734999999964,
"min": 0.00019848734999999964,
"max": 0.014071612995,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678189931",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678192294"
},
"total": 2363.542605162,
"count": 1,
"self": 0.4452345019999484,
"children": {
"run_training.setup": {
"total": 0.10935215200015591,
"count": 1,
"self": 0.10935215200015591
},
"TrainerController.start_learning": {
"total": 2362.988018508,
"count": 1,
"self": 4.022142968986827,
"children": {
"TrainerController._reset_env": {
"total": 9.758217640999874,
"count": 1,
"self": 9.758217640999874
},
"TrainerController.advance": {
"total": 2349.091387626014,
"count": 232369,
"self": 4.4166460292603915,
"children": {
"env_step": {
"total": 1823.3167903168644,
"count": 232369,
"self": 1524.785353948945,
"children": {
"SubprocessEnvManager._take_step": {
"total": 295.78967590401817,
"count": 232369,
"self": 15.835195723039305,
"children": {
"TorchPolicy.evaluate": {
"total": 279.95448018097886,
"count": 222966,
"self": 70.31783077307159,
"children": {
"TorchPolicy.sample_actions": {
"total": 209.63664940790727,
"count": 222966,
"self": 209.63664940790727
}
}
}
}
},
"workers": {
"total": 2.7417604639013007,
"count": 232369,
"self": 0.0,
"children": {
"worker_root": {
"total": 2354.6762658180446,
"count": 232369,
"is_parallel": true,
"self": 1120.3713618631425,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009750920000897167,
"count": 1,
"is_parallel": true,
"self": 0.0003310390000024199,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006440530000872968,
"count": 2,
"is_parallel": true,
"self": 0.0006440530000872968
}
}
},
"UnityEnvironment.step": {
"total": 0.02952349899987894,
"count": 1,
"is_parallel": true,
"self": 0.00030430800006797654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020087299981241813,
"count": 1,
"is_parallel": true,
"self": 0.00020087299981241813
},
"communicator.exchange": {
"total": 0.028270542999962345,
"count": 1,
"is_parallel": true,
"self": 0.028270542999962345
},
"steps_from_proto": {
"total": 0.0007477750000361993,
"count": 1,
"is_parallel": true,
"self": 0.00023578600030305097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005119889997331484,
"count": 2,
"is_parallel": true,
"self": 0.0005119889997331484
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1234.304903954902,
"count": 232368,
"is_parallel": true,
"self": 38.28143562705168,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.91504769179755,
"count": 232368,
"is_parallel": true,
"self": 77.91504769179755
},
"communicator.exchange": {
"total": 1027.5895153820038,
"count": 232368,
"is_parallel": true,
"self": 1027.5895153820038
},
"steps_from_proto": {
"total": 90.51890525404906,
"count": 232368,
"is_parallel": true,
"self": 36.3276982071784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.19120704687066,
"count": 464736,
"is_parallel": true,
"self": 54.19120704687066
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 521.357951279889,
"count": 232369,
"self": 6.549236328046163,
"children": {
"process_trajectory": {
"total": 162.26757749984563,
"count": 232369,
"self": 161.02200388784627,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2455736119993617,
"count": 10,
"self": 1.2455736119993617
}
}
},
"_update_policy": {
"total": 352.5411374519972,
"count": 97,
"self": 295.62344375899784,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.917693692999364,
"count": 2910,
"self": 56.917693692999364
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0339999789721332e-06,
"count": 1,
"self": 1.0339999789721332e-06
},
"TrainerController._save_models": {
"total": 0.11626923799985889,
"count": 1,
"self": 0.0021482110000761168,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11412102699978277,
"count": 1,
"self": 0.11412102699978277
}
}
}
}
}
}
}