{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404396414756775,
"min": 1.404396414756775,
"max": 1.4258482456207275,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69486.7265625,
"min": 67715.8203125,
"max": 77362.7421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 122.55061728395061,
"min": 94.47227533460803,
"max": 409.38524590163934,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49633.0,
"min": 48868.0,
"max": 50250.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49641.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49641.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3241357803344727,
"min": 0.08291309326887131,
"max": 2.402446985244751,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 941.2750244140625,
"min": 10.03248405456543,
"max": 1227.0843505859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5403750512335037,
"min": 1.9657032477461602,
"max": 3.922117276649999,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1433.851895749569,
"min": 237.85009297728539,
"max": 1917.4316394925117,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5403750512335037,
"min": 1.9657032477461602,
"max": 3.922117276649999,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1433.851895749569,
"min": 237.85009297728539,
"max": 1917.4316394925117,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017841476676039747,
"min": 0.013698274325967456,
"max": 0.020117781956731858,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.035682953352079494,
"min": 0.027396548651934912,
"max": 0.06035334587019557,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04422835173706213,
"min": 0.023145649085442227,
"max": 0.057467390907307465,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08845670347412427,
"min": 0.046291298170884454,
"max": 0.1674029848227898,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.477898507399998e-06,
"min": 4.477898507399998e-06,
"max": 0.0002953740765419749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.955797014799997e-06,
"min": 8.955797014799997e-06,
"max": 0.0008443794185401999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1014926,
"min": 0.1014926,
"max": 0.19845802499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2029852,
"min": 0.2029852,
"max": 0.5814598000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.448073999999997e-05,
"min": 8.448073999999997e-05,
"max": 0.004923055447500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016896147999999995,
"min": 0.00016896147999999995,
"max": 0.01407484402,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675189350",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675191584"
},
"total": 2234.365482131,
"count": 1,
"self": 0.39587949600036154,
"children": {
"run_training.setup": {
"total": 0.10249701999998706,
"count": 1,
"self": 0.10249701999998706
},
"TrainerController.start_learning": {
"total": 2233.8671056149997,
"count": 1,
"self": 4.068433431102221,
"children": {
"TrainerController._reset_env": {
"total": 10.343042406999984,
"count": 1,
"self": 10.343042406999984
},
"TrainerController.advance": {
"total": 2219.350670593897,
"count": 231213,
"self": 4.283122819774235,
"children": {
"env_step": {
"total": 1767.0473897910324,
"count": 231213,
"self": 1482.2508546340612,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.23454217298234,
"count": 231213,
"self": 14.782793667925944,
"children": {
"TorchPolicy.evaluate": {
"total": 267.4517485050564,
"count": 222960,
"self": 66.31291305304,
"children": {
"TorchPolicy.sample_actions": {
"total": 201.1388354520164,
"count": 222960,
"self": 201.1388354520164
}
}
}
}
},
"workers": {
"total": 2.5619929839889437,
"count": 231213,
"self": 0.0,
"children": {
"worker_root": {
"total": 2225.9079090720484,
"count": 231213,
"is_parallel": true,
"self": 1008.9452911860994,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016925959999980478,
"count": 1,
"is_parallel": true,
"self": 0.0003191680000327324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013734279999653154,
"count": 2,
"is_parallel": true,
"self": 0.0013734279999653154
}
}
},
"UnityEnvironment.step": {
"total": 0.029897363999964455,
"count": 1,
"is_parallel": true,
"self": 0.00027049999994233076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020496199999797682,
"count": 1,
"is_parallel": true,
"self": 0.00020496199999797682
},
"communicator.exchange": {
"total": 0.028550626000026114,
"count": 1,
"is_parallel": true,
"self": 0.028550626000026114
},
"steps_from_proto": {
"total": 0.0008712759999980335,
"count": 1,
"is_parallel": true,
"self": 0.0003975390000050538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004737369999929797,
"count": 2,
"is_parallel": true,
"self": 0.0004737369999929797
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.962617885949,
"count": 231212,
"is_parallel": true,
"self": 34.09105130879925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.85015904206352,
"count": 231212,
"is_parallel": true,
"self": 79.85015904206352
},
"communicator.exchange": {
"total": 1009.7330422309941,
"count": 231212,
"is_parallel": true,
"self": 1009.7330422309941
},
"steps_from_proto": {
"total": 93.28836530409222,
"count": 231212,
"is_parallel": true,
"self": 40.649436895204474,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.63892840888775,
"count": 462424,
"is_parallel": true,
"self": 52.63892840888775
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 448.02015798309054,
"count": 231213,
"self": 6.501406786093185,
"children": {
"process_trajectory": {
"total": 140.71479882599652,
"count": 231213,
"self": 139.62860828399624,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0861905420002813,
"count": 10,
"self": 1.0861905420002813
}
}
},
"_update_policy": {
"total": 300.80395237100083,
"count": 96,
"self": 248.61343861000222,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.19051376099861,
"count": 2880,
"self": 52.19051376099861
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.870000212686136e-07,
"count": 1,
"self": 9.870000212686136e-07
},
"TrainerController._save_models": {
"total": 0.10495819600009781,
"count": 1,
"self": 0.002614564999930735,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10234363100016708,
"count": 1,
"self": 0.10234363100016708
}
}
}
}
}
}
}