{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.403080701828003,
"min": 1.403080701828003,
"max": 1.4272665977478027,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71182.4921875,
"min": 68873.2109375,
"max": 77171.2421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.23255813953489,
"min": 77.47252747252747,
"max": 407.3252032520325,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49504.0,
"min": 49023.0,
"max": 50180.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999931.0,
"min": 49749.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999931.0,
"min": 49749.0,
"max": 1999931.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4510624408721924,
"min": 0.1578012853860855,
"max": 2.4847910404205322,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1475.53955078125,
"min": 19.25175666809082,
"max": 1539.736572265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.744232323676645,
"min": 1.8969553000614292,
"max": 3.9319497768727825,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2254.02785885334,
"min": 231.42854660749435,
"max": 2382.811706006527,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.744232323676645,
"min": 1.8969553000614292,
"max": 3.9319497768727825,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2254.02785885334,
"min": 231.42854660749435,
"max": 2382.811706006527,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01687960376633176,
"min": 0.01355514249735279,
"max": 0.02135097290156409,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05063881129899528,
"min": 0.02711028499470558,
"max": 0.05697938898520079,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05652881119814184,
"min": 0.021482816090186437,
"max": 0.059696849642528436,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16958643359442552,
"min": 0.04296563218037287,
"max": 0.1790905489275853,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.246298917933334e-06,
"min": 3.246298917933334e-06,
"max": 0.00029531535156154993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.738896753800002e-06,
"min": 9.738896753800002e-06,
"max": 0.0008437578187474,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108206666666669,
"min": 0.10108206666666669,
"max": 0.19843845,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3032462000000001,
"min": 0.20731334999999995,
"max": 0.5812526000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.399512666666666e-05,
"min": 6.399512666666666e-05,
"max": 0.0049220786549999986,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019198537999999999,
"min": 0.00019198537999999999,
"max": 0.01406450474,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677600831",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677603249"
},
"total": 2417.7806993840004,
"count": 1,
"self": 0.43776025600027424,
"children": {
"run_training.setup": {
"total": 0.11368782500005636,
"count": 1,
"self": 0.11368782500005636
},
"TrainerController.start_learning": {
"total": 2417.229251303,
"count": 1,
"self": 4.422722911033816,
"children": {
"TrainerController._reset_env": {
"total": 11.79196738799999,
"count": 1,
"self": 11.79196738799999
},
"TrainerController.advance": {
"total": 2400.9010010929665,
"count": 232521,
"self": 4.680268666048505,
"children": {
"env_step": {
"total": 1862.438380672955,
"count": 232521,
"self": 1559.4846295099155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 300.1641278789732,
"count": 232521,
"self": 16.17107906701972,
"children": {
"TorchPolicy.evaluate": {
"total": 283.9930488119535,
"count": 223044,
"self": 71.03541133000431,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.95763748194918,
"count": 223044,
"self": 212.95763748194918
}
}
}
}
},
"workers": {
"total": 2.789623284066238,
"count": 232521,
"self": 0.0,
"children": {
"worker_root": {
"total": 2408.8464433280437,
"count": 232521,
"is_parallel": true,
"self": 1144.8046082830956,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001024701999995159,
"count": 1,
"is_parallel": true,
"self": 0.0004250349999210812,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005996670000740778,
"count": 2,
"is_parallel": true,
"self": 0.0005996670000740778
}
}
},
"UnityEnvironment.step": {
"total": 0.05008840900006817,
"count": 1,
"is_parallel": true,
"self": 0.00033261900011893886,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008509069999718122,
"count": 1,
"is_parallel": true,
"self": 0.0008509069999718122
},
"communicator.exchange": {
"total": 0.04822811400003957,
"count": 1,
"is_parallel": true,
"self": 0.04822811400003957
},
"steps_from_proto": {
"total": 0.000676768999937849,
"count": 1,
"is_parallel": true,
"self": 0.00022378799997113674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045298099996671226,
"count": 2,
"is_parallel": true,
"self": 0.00045298099996671226
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1264.0418350449481,
"count": 232520,
"is_parallel": true,
"self": 39.15571842807117,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.38459842393104,
"count": 232520,
"is_parallel": true,
"self": 80.38459842393104
},
"communicator.exchange": {
"total": 1051.9524396449665,
"count": 232520,
"is_parallel": true,
"self": 1051.9524396449665
},
"steps_from_proto": {
"total": 92.54907854797943,
"count": 232520,
"is_parallel": true,
"self": 37.41158595104571,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.137492596933726,
"count": 465040,
"is_parallel": true,
"self": 55.137492596933726
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 533.7823517539629,
"count": 232521,
"self": 6.591662118984004,
"children": {
"process_trajectory": {
"total": 165.26641318697796,
"count": 232521,
"self": 163.9084625179779,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3579506690000471,
"count": 10,
"self": 1.3579506690000471
}
}
},
"_update_policy": {
"total": 361.9242764480009,
"count": 97,
"self": 303.87216813399857,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.05210831400234,
"count": 2910,
"self": 58.05210831400234
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3429998944047838e-06,
"count": 1,
"self": 1.3429998944047838e-06
},
"TrainerController._save_models": {
"total": 0.1135585679999167,
"count": 1,
"self": 0.0021482749998540385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11141029300006267,
"count": 1,
"self": 0.11141029300006267
}
}
}
}
}
}
}