{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4192925691604614,
"min": 1.4189573526382446,
"max": 1.4192925691604614,
"count": 2
},
"Huggy.Policy.Entropy.sum": {
"value": 70548.7734375,
"min": 70548.7734375,
"max": 77645.34375,
"count": 2
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 377.4961832061069,
"min": 377.4961832061069,
"max": 419.5083333333333,
"count": 2
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49452.0,
"min": 49452.0,
"max": 50341.0,
"count": 2
},
"Huggy.Step.mean": {
"value": 99823.0,
"min": 49800.0,
"max": 99823.0,
"count": 2
},
"Huggy.Step.sum": {
"value": 99823.0,
"min": 49800.0,
"max": 99823.0,
"count": 2
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2653907537460327,
"min": 0.035852763801813126,
"max": 0.2653907537460327,
"count": 2
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 34.76618957519531,
"min": 4.266479015350342,
"max": 34.76618957519531,
"count": 2
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 2.277547647479836,
"min": 1.8376906579282104,
"max": 2.277547647479836,
"count": 2
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 298.35874181985855,
"min": 218.68518829345703,
"max": 298.35874181985855,
"count": 2
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 2.277547647479836,
"min": 1.8376906579282104,
"max": 2.277547647479836,
"count": 2
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 298.35874181985855,
"min": 218.68518829345703,
"max": 298.35874181985855,
"count": 2
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01623708935561202,
"min": 0.015788526377097392,
"max": 0.01623708935561202,
"count": 2
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03247417871122404,
"min": 0.031577052754194784,
"max": 0.03247417871122404,
"count": 2
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.02369483799363176,
"min": 0.02369483799363176,
"max": 0.029367857612669468,
"count": 2
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.04738967598726352,
"min": 0.04738967598726352,
"max": 0.058735715225338936,
"count": 2
},
"Huggy.Policy.LearningRate.mean": {
"value": 8.047507317500001e-05,
"min": 8.047507317500001e-05,
"max": 0.00020589903136699998,
"count": 2
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00016095014635000003,
"min": 0.00016095014635000003,
"max": 0.00041179806273399996,
"count": 2
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.126825,
"min": 0.126825,
"max": 0.16863300000000003,
"count": 2
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.25365,
"min": 0.25365,
"max": 0.33726600000000007,
"count": 2
},
"Huggy.Policy.Beta.mean": {
"value": 0.0013485675000000003,
"min": 0.0013485675000000003,
"max": 0.0034347866999999994,
"count": 2
},
"Huggy.Policy.Beta.sum": {
"value": 0.0026971350000000007,
"min": 0.0026971350000000007,
"max": 0.006869573399999999,
"count": 2
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676911634",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676911763"
},
"total": 128.36526629600007,
"count": 1,
"self": 0.7437073400000145,
"children": {
"run_training.setup": {
"total": 0.11072621599998911,
"count": 1,
"self": 0.11072621599998911
},
"TrainerController.start_learning": {
"total": 127.51083274000007,
"count": 1,
"self": 0.21089707400585667,
"children": {
"TrainerController._reset_env": {
"total": 10.794324095999968,
"count": 1,
"self": 10.794324095999968
},
"TrainerController.advance": {
"total": 116.33492363999426,
"count": 11732,
"self": 0.22509698098724584,
"children": {
"env_step": {
"total": 95.00165784000171,
"count": 11732,
"self": 79.32100260701054,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15.543974981001497,
"count": 11732,
"self": 0.8086707240139503,
"children": {
"TorchPolicy.evaluate": {
"total": 14.735304256987547,
"count": 11608,
"self": 3.6987239859988676,
"children": {
"TorchPolicy.sample_actions": {
"total": 11.03658027098868,
"count": 11608,
"self": 11.03658027098868
}
}
}
}
},
"workers": {
"total": 0.1366802519896737,
"count": 11732,
"self": 0.0,
"children": {
"worker_root": {
"total": 126.81751701299845,
"count": 11732,
"is_parallel": true,
"self": 62.65507045099503,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004255271999909382,
"count": 1,
"is_parallel": true,
"self": 0.00040766599977359874,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0038476060001357837,
"count": 2,
"is_parallel": true,
"self": 0.0038476060001357837
}
}
},
"UnityEnvironment.step": {
"total": 0.02893217400003323,
"count": 1,
"is_parallel": true,
"self": 0.00032001199997466756,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020093299997370195,
"count": 1,
"is_parallel": true,
"self": 0.00020093299997370195
},
"communicator.exchange": {
"total": 0.027491258000054586,
"count": 1,
"is_parallel": true,
"self": 0.027491258000054586
},
"steps_from_proto": {
"total": 0.000919971000030273,
"count": 1,
"is_parallel": true,
"self": 0.00045022000006156304,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046975099996870995,
"count": 2,
"is_parallel": true,
"self": 0.00046975099996870995
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 64.16244656200342,
"count": 11731,
"is_parallel": true,
"self": 1.9508964089982328,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.129928329012387,
"count": 11731,
"is_parallel": true,
"self": 4.129928329012387
},
"communicator.exchange": {
"total": 53.38087729799679,
"count": 11731,
"is_parallel": true,
"self": 53.38087729799679
},
"steps_from_proto": {
"total": 4.700744525996015,
"count": 11731,
"is_parallel": true,
"self": 1.9167157389957765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.7840287870002385,
"count": 23462,
"is_parallel": true,
"self": 2.7840287870002385
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 21.1081688190053,
"count": 11732,
"self": 0.3386048189976236,
"children": {
"process_trajectory": {
"total": 6.545452120007667,
"count": 11732,
"self": 6.204706182007612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3407459380000546,
"count": 2,
"self": 0.3407459380000546
}
}
},
"_update_policy": {
"total": 14.22411188000001,
"count": 4,
"self": 11.869653206999146,
"children": {
"TorchPPOOptimizer.update": {
"total": 2.354458673000863,
"count": 120,
"self": 2.354458673000863
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4039999314263696e-06,
"count": 1,
"self": 1.4039999314263696e-06
},
"TrainerController._save_models": {
"total": 0.17068652600005407,
"count": 1,
"self": 0.002852254000117682,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1678342719999364,
"count": 1,
"self": 0.1678342719999364
}
}
}
}
}
}
}