{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4081043004989624,
"min": 1.4081043004989624,
"max": 1.4299368858337402,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68937.96875,
"min": 68937.96875,
"max": 78407.15625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.53065134099617,
"min": 86.71929824561404,
"max": 413.28688524590166,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48823.0,
"min": 48823.0,
"max": 50421.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999996.0,
"min": 49793.0,
"max": 1999996.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999996.0,
"min": 49793.0,
"max": 1999996.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4100403785705566,
"min": 0.056868936866521835,
"max": 2.463724374771118,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1258.0411376953125,
"min": 6.881141185760498,
"max": 1349.051025390625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.787550271014144,
"min": 1.8468181963794488,
"max": 3.947138870920615,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1977.1012414693832,
"min": 223.4650017619133,
"max": 2128.012749671936,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.787550271014144,
"min": 1.8468181963794488,
"max": 3.947138870920615,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1977.1012414693832,
"min": 223.4650017619133,
"max": 2128.012749671936,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.019380505296673314,
"min": 0.014436548696378141,
"max": 0.020523818488557784,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05814151589001994,
"min": 0.029675465304171665,
"max": 0.05814151589001994,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05195908865167035,
"min": 0.021570795495063064,
"max": 0.05749733416984479,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15587726595501106,
"min": 0.04314159099012613,
"max": 0.16194593608379365,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.220198926633337e-06,
"min": 3.220198926633337e-06,
"max": 0.0002952849765716749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.66059677990001e-06,
"min": 9.66059677990001e-06,
"max": 0.0008437818187393999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10107336666666666,
"min": 0.10107336666666666,
"max": 0.19842832500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032201,
"min": 0.20731700000000003,
"max": 0.5812605999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.356099666666673e-05,
"min": 6.356099666666673e-05,
"max": 0.004921573417500002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019068299000000018,
"min": 0.00019068299000000018,
"max": 0.014064903940000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685488945",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685491519"
},
"total": 2574.1889940439996,
"count": 1,
"self": 0.44063019799978065,
"children": {
"run_training.setup": {
"total": 0.042032846000012114,
"count": 1,
"self": 0.042032846000012114
},
"TrainerController.start_learning": {
"total": 2573.706331,
"count": 1,
"self": 4.461716477005666,
"children": {
"TrainerController._reset_env": {
"total": 5.289967245999975,
"count": 1,
"self": 5.289967245999975
},
"TrainerController.advance": {
"total": 2563.835057076995,
"count": 231808,
"self": 4.743173519992524,
"children": {
"env_step": {
"total": 2006.7604871410017,
"count": 231808,
"self": 1697.9446445230387,
"children": {
"SubprocessEnvManager._take_step": {
"total": 305.8004412539191,
"count": 231808,
"self": 17.916920883944897,
"children": {
"TorchPolicy.evaluate": {
"total": 287.8835203699742,
"count": 222919,
"self": 287.8835203699742
}
}
},
"workers": {
"total": 3.015401364043811,
"count": 231808,
"self": 0.0,
"children": {
"worker_root": {
"total": 2565.5190619659525,
"count": 231808,
"is_parallel": true,
"self": 1180.1711292040732,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012450920000048882,
"count": 1,
"is_parallel": true,
"self": 0.0003528340000116259,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008922579999932623,
"count": 2,
"is_parallel": true,
"self": 0.0008922579999932623
}
}
},
"UnityEnvironment.step": {
"total": 0.03277751600001011,
"count": 1,
"is_parallel": true,
"self": 0.0003530060000116464,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00025413399998797104,
"count": 1,
"is_parallel": true,
"self": 0.00025413399998797104
},
"communicator.exchange": {
"total": 0.03133440100000939,
"count": 1,
"is_parallel": true,
"self": 0.03133440100000939
},
"steps_from_proto": {
"total": 0.000835975000001099,
"count": 1,
"is_parallel": true,
"self": 0.00024052999998502855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005954450000160705,
"count": 2,
"is_parallel": true,
"self": 0.0005954450000160705
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1385.3479327618793,
"count": 231807,
"is_parallel": true,
"self": 40.385122595992016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.10937280101254,
"count": 231807,
"is_parallel": true,
"self": 87.10937280101254
},
"communicator.exchange": {
"total": 1158.5596728768605,
"count": 231807,
"is_parallel": true,
"self": 1158.5596728768605
},
"steps_from_proto": {
"total": 99.29376448801429,
"count": 231807,
"is_parallel": true,
"self": 38.44508557702366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.84867891099063,
"count": 463614,
"is_parallel": true,
"self": 60.84867891099063
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 552.3313964160008,
"count": 231808,
"self": 6.860412835907937,
"children": {
"process_trajectory": {
"total": 143.75461972609173,
"count": 231808,
"self": 142.303573807092,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4510459189997391,
"count": 10,
"self": 1.4510459189997391
}
}
},
"_update_policy": {
"total": 401.7163638540011,
"count": 97,
"self": 342.6513112070003,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.06505264700081,
"count": 2910,
"self": 59.06505264700081
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1399997674743645e-06,
"count": 1,
"self": 1.1399997674743645e-06
},
"TrainerController._save_models": {
"total": 0.11958905999972558,
"count": 1,
"self": 0.0020867589996669267,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11750230100005865,
"count": 1,
"self": 0.11750230100005865
}
}
}
}
}
}
}