{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.40426766872406,
"min": 1.404264211654663,
"max": 1.4295552968978882,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70518.109375,
"min": 68419.453125,
"max": 76005.4375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.20631067961165,
"min": 104.77167019027485,
"max": 390.4921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49113.0,
"min": 48923.0,
"max": 50167.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999941.0,
"min": 49750.0,
"max": 1999941.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999941.0,
"min": 49750.0,
"max": 1999941.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.248845338821411,
"min": 0.13949358463287354,
"max": 2.399559259414673,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 926.5242919921875,
"min": 17.71568489074707,
"max": 1087.365234375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.3386213362795636,
"min": 1.912444867487029,
"max": 3.7721763971375255,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1375.5119905471802,
"min": 242.88049817085266,
"max": 1692.0471985340118,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.3386213362795636,
"min": 1.912444867487029,
"max": 3.7721763971375255,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1375.5119905471802,
"min": 242.88049817085266,
"max": 1692.0471985340118,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015218639011921671,
"min": 0.014059866468111675,
"max": 0.020733682197169402,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.030437278023843342,
"min": 0.02811973293622335,
"max": 0.05421736154045599,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.043416057868550226,
"min": 0.020960330000768107,
"max": 0.05247954198469718,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08683211573710045,
"min": 0.041920660001536214,
"max": 0.15681617272396883,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.4981485006499965e-06,
"min": 4.4981485006499965e-06,
"max": 0.0002953462515512499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.996297001299993e-06,
"min": 8.996297001299993e-06,
"max": 0.0008440686186437997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10149935000000002,
"min": 0.10149935000000002,
"max": 0.19844874999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20299870000000003,
"min": 0.20299870000000003,
"max": 0.5813562,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.481756499999996e-05,
"min": 8.481756499999996e-05,
"max": 0.004922592625,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016963512999999993,
"min": 0.00016963512999999993,
"max": 0.014069674379999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702189591",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702192191"
},
"total": 2599.5735341110003,
"count": 1,
"self": 0.6654527460000281,
"children": {
"run_training.setup": {
"total": 0.05625403700003062,
"count": 1,
"self": 0.05625403700003062
},
"TrainerController.start_learning": {
"total": 2598.851827328,
"count": 1,
"self": 4.908222920005301,
"children": {
"TrainerController._reset_env": {
"total": 3.429323758999999,
"count": 1,
"self": 3.429323758999999
},
"TrainerController.advance": {
"total": 2590.3555548469944,
"count": 230727,
"self": 5.182716217044799,
"children": {
"env_step": {
"total": 2067.0577727499663,
"count": 230727,
"self": 1707.529116964086,
"children": {
"SubprocessEnvManager._take_step": {
"total": 356.3920962160011,
"count": 230727,
"self": 17.775623409092304,
"children": {
"TorchPolicy.evaluate": {
"total": 338.6164728069088,
"count": 223088,
"self": 338.6164728069088
}
}
},
"workers": {
"total": 3.1365595698791253,
"count": 230727,
"self": 0.0,
"children": {
"worker_root": {
"total": 2590.924243266857,
"count": 230727,
"is_parallel": true,
"self": 1213.4123471098872,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008394140000405059,
"count": 1,
"is_parallel": true,
"self": 0.00024111000004722882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005983039999932771,
"count": 2,
"is_parallel": true,
"self": 0.0005983039999932771
}
}
},
"UnityEnvironment.step": {
"total": 0.03100887599998714,
"count": 1,
"is_parallel": true,
"self": 0.00031571099998473073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022295499996971557,
"count": 1,
"is_parallel": true,
"self": 0.00022295499996971557
},
"communicator.exchange": {
"total": 0.029750298999999814,
"count": 1,
"is_parallel": true,
"self": 0.029750298999999814
},
"steps_from_proto": {
"total": 0.0007199110000328801,
"count": 1,
"is_parallel": true,
"self": 0.0002115240000648555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005083869999680246,
"count": 2,
"is_parallel": true,
"self": 0.0005083869999680246
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1377.51189615697,
"count": 230726,
"is_parallel": true,
"self": 42.12311324513075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 91.9558201120002,
"count": 230726,
"is_parallel": true,
"self": 91.9558201120002
},
"communicator.exchange": {
"total": 1147.6951323329138,
"count": 230726,
"is_parallel": true,
"self": 1147.6951323329138
},
"steps_from_proto": {
"total": 95.73783046692523,
"count": 230726,
"is_parallel": true,
"self": 35.45599138203795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.28183908488728,
"count": 461452,
"is_parallel": true,
"self": 60.28183908488728
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 518.1150658799834,
"count": 230727,
"self": 7.381976548887053,
"children": {
"process_trajectory": {
"total": 160.10117209709693,
"count": 230727,
"self": 158.7958695390975,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3053025579994255,
"count": 10,
"self": 1.3053025579994255
}
}
},
"_update_policy": {
"total": 350.6319172339994,
"count": 96,
"self": 285.52976012200514,
"children": {
"TorchPPOOptimizer.update": {
"total": 65.10215711199425,
"count": 2880,
"self": 65.10215711199425
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4020001799508464e-06,
"count": 1,
"self": 1.4020001799508464e-06
},
"TrainerController._save_models": {
"total": 0.15872440000021015,
"count": 1,
"self": 0.0022565190001841984,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15646788100002595,
"count": 1,
"self": 0.15646788100002595
}
}
}
}
}
}
}