{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3916268348693848,
"min": 1.3916268348693848,
"max": 1.4216508865356445,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68937.015625,
"min": 68751.015625,
"max": 77920.21875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.88927943760984,
"min": 70.046875,
"max": 364.35766423357666,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49440.0,
"min": 48886.0,
"max": 50000.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49322.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49322.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 2.4505908489227295,
"min": 0.03313106670975685,
"max": 2.5996341705322266,
"count": 40
},
"Huggy.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1394.38623046875,
"min": 4.505825042724609,
"max": 1792.2926025390625,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4505908489227295,
"min": 0.03313106670975685,
"max": 2.5996341705322266,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1394.38623046875,
"min": 4.505825042724609,
"max": 1792.2926025390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6050782773532433,
"min": 1.8192918423344107,
"max": 4.034134925556183,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2051.2895398139954,
"min": 247.42369055747986,
"max": 2786.326250076294,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6050782773532433,
"min": 1.8192918423344107,
"max": 4.034134925556183,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2051.2895398139954,
"min": 247.42369055747986,
"max": 2786.326250076294,
"count": 40
},
"Huggy.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 40
},
"Huggy.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016031151500606532,
"min": 0.013342749202881046,
"max": 0.019620426353261188,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0480934545018196,
"min": 0.028070842195302245,
"max": 0.05739687615083919,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.047141387603349155,
"min": 0.020581617020070553,
"max": 0.058764980422953764,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14142416281004747,
"min": 0.041163234040141106,
"max": 0.175532033542792,
"count": 40
},
"Huggy.Losses.BaselineLoss.mean": {
"value": 0.04718402433726523,
"min": 0.020848911628127097,
"max": 0.06081106991817554,
"count": 40
},
"Huggy.Losses.BaselineLoss.sum": {
"value": 0.14155207301179568,
"min": 0.04169782325625419,
"max": 0.18215012724200885,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7518987494000046e-06,
"min": 3.7518987494000046e-06,
"max": 0.00029537055154315,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1255696248200014e-05,
"min": 1.1255696248200014e-05,
"max": 0.0008440876686374499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125060000000001,
"min": 0.10125060000000001,
"max": 0.19845685000000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037518,
"min": 0.20768405000000004,
"max": 0.5813625499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.240494000000011e-05,
"min": 7.240494000000011e-05,
"max": 0.0049229968150000004,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021721482000000033,
"min": 0.00021721482000000033,
"max": 0.014069991245,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709979911",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy_poca.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy_poca --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709982495"
},
"total": 2583.7533572599996,
"count": 1,
"self": 0.3224318919997131,
"children": {
"run_training.setup": {
"total": 0.05483431799984828,
"count": 1,
"self": 0.05483431799984828
},
"TrainerController.start_learning": {
"total": 2583.37609105,
"count": 1,
"self": 6.100902652137847,
"children": {
"TrainerController._reset_env": {
"total": 1.9457217649996892,
"count": 1,
"self": 1.9457217649996892
},
"TrainerController.advance": {
"total": 2575.1470488138616,
"count": 233775,
"self": 5.61938162556271,
"children": {
"env_step": {
"total": 1923.0842280320198,
"count": 233775,
"self": 1539.3110959309188,
"children": {
"SubprocessEnvManager._take_step": {
"total": 380.022996679113,
"count": 233775,
"self": 20.99852131811167,
"children": {
"TorchPolicy.evaluate": {
"total": 359.02447536100135,
"count": 223010,
"self": 359.02447536100135
}
}
},
"workers": {
"total": 3.750135421988034,
"count": 233775,
"self": 0.0,
"children": {
"worker_root": {
"total": 2574.037859120124,
"count": 233775,
"is_parallel": true,
"self": 1362.014970671074,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00084753000010096,
"count": 1,
"is_parallel": true,
"self": 0.0002206170001954888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006269129999054712,
"count": 2,
"is_parallel": true,
"self": 0.0006269129999054712
}
}
},
"UnityEnvironment.step": {
"total": 0.022431255000356032,
"count": 1,
"is_parallel": true,
"self": 0.00032610700009172433,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001605950001248857,
"count": 1,
"is_parallel": true,
"self": 0.0001605950001248857
},
"communicator.exchange": {
"total": 0.021362108000175795,
"count": 1,
"is_parallel": true,
"self": 0.021362108000175795
},
"steps_from_proto": {
"total": 0.0005824449999636272,
"count": 1,
"is_parallel": true,
"self": 0.00017393399957654765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00040851100038707955,
"count": 2,
"is_parallel": true,
"self": 0.00040851100038707955
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1212.0228884490498,
"count": 233774,
"is_parallel": true,
"self": 34.561523104620846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 55.97653014279422,
"count": 233774,
"is_parallel": true,
"self": 55.97653014279422
},
"communicator.exchange": {
"total": 1047.5751160147033,
"count": 233774,
"is_parallel": true,
"self": 1047.5751160147033
},
"steps_from_proto": {
"total": 73.9097191869314,
"count": 233774,
"is_parallel": true,
"self": 26.491541960969244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 47.418177225962154,
"count": 467548,
"is_parallel": true,
"self": 47.418177225962154
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.443439156279,
"count": 233775,
"self": 9.787509250286348,
"children": {
"process_trajectory": {
"total": 303.3345677179941,
"count": 233775,
"self": 301.5080169599946,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8265507579994846,
"count": 10,
"self": 1.8265507579994846
}
}
},
"_update_policy": {
"total": 333.3213621879986,
"count": 97,
"self": 247.8646457310333,
"children": {
"TorchPOCAOptimizer.update": {
"total": 85.4567164569653,
"count": 2910,
"self": 85.4567164569653
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0060002750833519e-06,
"count": 1,
"self": 1.0060002750833519e-06
},
"TrainerController._save_models": {
"total": 0.1824168130006001,
"count": 1,
"self": 0.002319498000360909,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18009731500023918,
"count": 1,
"self": 0.18009731500023918
}
}
}
}
}
}
}