{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401292324066162,
"min": 1.401292324066162,
"max": 1.4286161661148071,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68818.8671875,
"min": 67655.5,
"max": 79868.59375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.42633228840126,
"min": 72.91728212703102,
"max": 389.7578125,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49398.0,
"min": 48974.0,
"max": 50152.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49707.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49707.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.547128915786743,
"min": 0.0636599138379097,
"max": 2.547128915786743,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1625.0682373046875,
"min": 8.084809303283691,
"max": 1668.3896484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9358012830388955,
"min": 1.888439474847373,
"max": 4.0205978841256735,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2511.0412185788155,
"min": 239.83181330561638,
"max": 2649.4757629036903,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9358012830388955,
"min": 1.888439474847373,
"max": 4.0205978841256735,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2511.0412185788155,
"min": 239.83181330561638,
"max": 2649.4757629036903,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01593277262096914,
"min": 0.013474351874255162,
"max": 0.02042516729658625,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04779831786290742,
"min": 0.030074256869071787,
"max": 0.06020444165333174,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05671879715389675,
"min": 0.02365317111834884,
"max": 0.06101987318446239,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17015639146169026,
"min": 0.04730634223669768,
"max": 0.17655087125798066,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.1628986123999995e-06,
"min": 4.1628986123999995e-06,
"max": 0.00029538532653822507,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.2488695837199998e-05,
"min": 1.2488695837199998e-05,
"max": 0.0008445156184948,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10138759999999998,
"min": 0.10138759999999998,
"max": 0.19846177500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30416279999999996,
"min": 0.20790474999999997,
"max": 0.5815051999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.924124000000001e-05,
"min": 7.924124000000001e-05,
"max": 0.0049232425725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00023772372000000004,
"min": 0.00023772372000000004,
"max": 0.014077109479999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679002405",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679004857"
},
"total": 2451.970111557,
"count": 1,
"self": 0.4401358430000073,
"children": {
"run_training.setup": {
"total": 0.10836237699999174,
"count": 1,
"self": 0.10836237699999174
},
"TrainerController.start_learning": {
"total": 2451.421613337,
"count": 1,
"self": 4.469856870987314,
"children": {
"TrainerController._reset_env": {
"total": 10.460980944,
"count": 1,
"self": 10.460980944
},
"TrainerController.advance": {
"total": 2436.371791384013,
"count": 233478,
"self": 4.778043876103766,
"children": {
"env_step": {
"total": 1886.02031315691,
"count": 233478,
"self": 1589.6784232379125,
"children": {
"SubprocessEnvManager._take_step": {
"total": 293.3011981479747,
"count": 233478,
"self": 17.086344834976217,
"children": {
"TorchPolicy.evaluate": {
"total": 276.21485331299846,
"count": 222890,
"self": 276.21485331299846
}
}
},
"workers": {
"total": 3.0406917710229493,
"count": 233478,
"self": 0.0,
"children": {
"worker_root": {
"total": 2443.3489275580287,
"count": 233478,
"is_parallel": true,
"self": 1148.4269296899956,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010726410000074793,
"count": 1,
"is_parallel": true,
"self": 0.0002749459999904502,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007976950000170291,
"count": 2,
"is_parallel": true,
"self": 0.0007976950000170291
}
}
},
"UnityEnvironment.step": {
"total": 0.030605282000010448,
"count": 1,
"is_parallel": true,
"self": 0.00033625599999709266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021591899999862108,
"count": 1,
"is_parallel": true,
"self": 0.00021591899999862108
},
"communicator.exchange": {
"total": 0.02933842100000561,
"count": 1,
"is_parallel": true,
"self": 0.02933842100000561
},
"steps_from_proto": {
"total": 0.0007146860000091237,
"count": 1,
"is_parallel": true,
"self": 0.00020985000003292953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005048359999761942,
"count": 2,
"is_parallel": true,
"self": 0.0005048359999761942
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1294.921997868033,
"count": 233477,
"is_parallel": true,
"self": 39.32465492503047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.4878871959865,
"count": 233477,
"is_parallel": true,
"self": 78.4878871959865
},
"communicator.exchange": {
"total": 1086.1086599360085,
"count": 233477,
"is_parallel": true,
"self": 1086.1086599360085
},
"steps_from_proto": {
"total": 91.00079581100752,
"count": 233477,
"is_parallel": true,
"self": 34.23681178499504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.76398402601248,
"count": 466954,
"is_parallel": true,
"self": 56.76398402601248
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 545.5734343509992,
"count": 233478,
"self": 6.856486341968662,
"children": {
"process_trajectory": {
"total": 153.57821845202986,
"count": 233478,
"self": 152.1822971080301,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3959213439997598,
"count": 10,
"self": 1.3959213439997598
}
}
},
"_update_policy": {
"total": 385.1387295570006,
"count": 97,
"self": 325.24759753101125,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.89113202598935,
"count": 2910,
"self": 59.89113202598935
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.940000952861737e-07,
"count": 1,
"self": 8.940000952861737e-07
},
"TrainerController._save_models": {
"total": 0.11898324399999183,
"count": 1,
"self": 0.0026609050000843126,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11632233899990752,
"count": 1,
"self": 0.11632233899990752
}
}
}
}
}
}
}