{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3997420072555542,
"min": 1.3997420072555542,
"max": 1.4277914762496948,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69032.4765625,
"min": 69032.4765625,
"max": 75518.53125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.86115992970123,
"min": 74.00892857142857,
"max": 446.4107142857143,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49424.0,
"min": 48849.0,
"max": 49998.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49540.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49540.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.512362241744995,
"min": 0.088888019323349,
"max": 2.512362241744995,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1429.5341796875,
"min": 9.866570472717285,
"max": 1631.3372802734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.930707035563323,
"min": 1.8628865346178278,
"max": 4.008824000460874,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2236.572303235531,
"min": 206.7804053425789,
"max": 2560.7785569429398,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.930707035563323,
"min": 1.8628865346178278,
"max": 4.008824000460874,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2236.572303235531,
"min": 206.7804053425789,
"max": 2560.7785569429398,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.02034651902633616,
"min": 0.013837491839270417,
"max": 0.02034651902633616,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.061039557079008475,
"min": 0.027674983678540835,
"max": 0.061039557079008475,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05179400046666463,
"min": 0.022187011657903592,
"max": 0.06294712691257397,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1553820013999939,
"min": 0.044374023315807185,
"max": 0.179897952824831,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5261488246499924e-06,
"min": 3.5261488246499924e-06,
"max": 0.00029527822657392493,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0578446473949977e-05,
"min": 1.0578446473949977e-05,
"max": 0.0008438826187058,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117535,
"min": 0.10117535,
"max": 0.198426075,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352605,
"min": 0.20748390000000005,
"max": 0.5812942000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.864996499999987e-05,
"min": 6.864996499999987e-05,
"max": 0.0049214611425,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002059498949999996,
"min": 0.0002059498949999996,
"max": 0.014066580580000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672308345",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672310481"
},
"total": 2136.017017103,
"count": 1,
"self": 0.39293413999985205,
"children": {
"run_training.setup": {
"total": 0.11196534399999791,
"count": 1,
"self": 0.11196534399999791
},
"TrainerController.start_learning": {
"total": 2135.512117619,
"count": 1,
"self": 3.7653862390188806,
"children": {
"TrainerController._reset_env": {
"total": 8.736138181000001,
"count": 1,
"self": 8.736138181000001
},
"TrainerController.advance": {
"total": 2122.8951522739817,
"count": 232829,
"self": 4.02795279897191,
"children": {
"env_step": {
"total": 1659.7709575440015,
"count": 232829,
"self": 1390.802914531001,
"children": {
"SubprocessEnvManager._take_step": {
"total": 266.52465127103665,
"count": 232829,
"self": 13.967850893090372,
"children": {
"TorchPolicy.evaluate": {
"total": 252.55680037794627,
"count": 222944,
"self": 63.22417059796027,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.332629779986,
"count": 222944,
"self": 189.332629779986
}
}
}
}
},
"workers": {
"total": 2.443391741963751,
"count": 232829,
"self": 0.0,
"children": {
"worker_root": {
"total": 2128.115646490002,
"count": 232829,
"is_parallel": true,
"self": 984.6375043678909,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017907459999833009,
"count": 1,
"is_parallel": true,
"self": 0.0003276520000099481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014630939999733528,
"count": 2,
"is_parallel": true,
"self": 0.0014630939999733528
}
}
},
"UnityEnvironment.step": {
"total": 0.026715476999982002,
"count": 1,
"is_parallel": true,
"self": 0.0002653349999945931,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021266999999625114,
"count": 1,
"is_parallel": true,
"self": 0.00021266999999625114
},
"communicator.exchange": {
"total": 0.02553240200001028,
"count": 1,
"is_parallel": true,
"self": 0.02553240200001028
},
"steps_from_proto": {
"total": 0.0007050699999808785,
"count": 1,
"is_parallel": true,
"self": 0.00023332699998945827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004717429999914202,
"count": 2,
"is_parallel": true,
"self": 0.0004717429999914202
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1143.478142122111,
"count": 232828,
"is_parallel": true,
"self": 33.65428250519585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.78489737099005,
"count": 232828,
"is_parallel": true,
"self": 73.78489737099005
},
"communicator.exchange": {
"total": 945.83783718793,
"count": 232828,
"is_parallel": true,
"self": 945.83783718793
},
"steps_from_proto": {
"total": 90.20112505799517,
"count": 232828,
"is_parallel": true,
"self": 37.25827590892396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.94284914907121,
"count": 465656,
"is_parallel": true,
"self": 52.94284914907121
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 459.09624193100836,
"count": 232829,
"self": 5.661710064001284,
"children": {
"process_trajectory": {
"total": 147.15598347500622,
"count": 232829,
"self": 145.8487761520064,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3072073229998296,
"count": 10,
"self": 1.3072073229998296
}
}
},
"_update_policy": {
"total": 306.27854839200086,
"count": 97,
"self": 254.01059359199627,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.26795480000459,
"count": 2910,
"self": 52.26795480000459
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.479996722599026e-07,
"count": 1,
"self": 8.479996722599026e-07
},
"TrainerController._save_models": {
"total": 0.11544007699967551,
"count": 1,
"self": 0.002011953999499383,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11342812300017613,
"count": 1,
"self": 0.11342812300017613
}
}
}
}
}
}
}