{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.398516058921814,
"min": 1.3985105752944946,
"max": 1.4268105030059814,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70232.078125,
"min": 66913.890625,
"max": 79244.0546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.18536585365854,
"min": 74.18345864661654,
"max": 390.390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48699.0,
"min": 48699.0,
"max": 50064.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49778.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49778.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5225372314453125,
"min": 0.050549525767564774,
"max": 2.528977632522583,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1551.3603515625,
"min": 6.419789791107178,
"max": 1633.719482421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9421597402270248,
"min": 1.7225280267985787,
"max": 4.042849699363989,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2424.42824023962,
"min": 218.7610594034195,
"max": 2563.208613038063,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9421597402270248,
"min": 1.7225280267985787,
"max": 4.042849699363989,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2424.42824023962,
"min": 218.7610594034195,
"max": 2563.208613038063,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017662648834996412,
"min": 0.01389119212011186,
"max": 0.019213291721987644,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05298794650498924,
"min": 0.029432639766794937,
"max": 0.05763987516596293,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0571221864885754,
"min": 0.0234082301457723,
"max": 0.06248702257871628,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1713665594657262,
"min": 0.0468164602915446,
"max": 0.18181175080438455,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.529098823666672e-06,
"min": 3.529098823666672e-06,
"max": 0.0002952983265672249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0587296471000016e-05,
"min": 1.0587296471000016e-05,
"max": 0.0008435739188086998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10117633333333335,
"min": 0.10117633333333335,
"max": 0.19843277499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30352900000000005,
"min": 0.20748200000000003,
"max": 0.5811913000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.86990333333334e-05,
"min": 6.86990333333334e-05,
"max": 0.004921795472500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002060971000000002,
"min": 0.0002060971000000002,
"max": 0.014061445869999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673736474",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673738960"
},
"total": 2486.046484846,
"count": 1,
"self": 0.3911573070004124,
"children": {
"run_training.setup": {
"total": 0.11616724600003181,
"count": 1,
"self": 0.11616724600003181
},
"TrainerController.start_learning": {
"total": 2485.5391602929994,
"count": 1,
"self": 4.415968041984343,
"children": {
"TrainerController._reset_env": {
"total": 10.555026836000025,
"count": 1,
"self": 10.555026836000025
},
"TrainerController.advance": {
"total": 2470.441513237015,
"count": 233503,
"self": 4.701316466991557,
"children": {
"env_step": {
"total": 1968.0350050619722,
"count": 233503,
"self": 1653.7066011051327,
"children": {
"SubprocessEnvManager._take_step": {
"total": 311.3281819028996,
"count": 233503,
"self": 16.0967619759665,
"children": {
"TorchPolicy.evaluate": {
"total": 295.2314199269331,
"count": 222992,
"self": 72.80641022088003,
"children": {
"TorchPolicy.sample_actions": {
"total": 222.42500970605306,
"count": 222992,
"self": 222.42500970605306
}
}
}
}
},
"workers": {
"total": 3.0002220539399787,
"count": 233503,
"self": 0.0,
"children": {
"worker_root": {
"total": 2475.1164289979174,
"count": 233503,
"is_parallel": true,
"self": 1116.134599872893,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021692870000151743,
"count": 1,
"is_parallel": true,
"self": 0.00036626499991143646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018030220001037378,
"count": 2,
"is_parallel": true,
"self": 0.0018030220001037378
}
}
},
"UnityEnvironment.step": {
"total": 0.053948962000049505,
"count": 1,
"is_parallel": true,
"self": 0.00031438099995284574,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019343799999660405,
"count": 1,
"is_parallel": true,
"self": 0.00019343799999660405
},
"communicator.exchange": {
"total": 0.052266565000081755,
"count": 1,
"is_parallel": true,
"self": 0.052266565000081755
},
"steps_from_proto": {
"total": 0.0011745780000183004,
"count": 1,
"is_parallel": true,
"self": 0.0003153300001486059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008592479998696945,
"count": 2,
"is_parallel": true,
"self": 0.0008592479998696945
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1358.9818291250244,
"count": 233502,
"is_parallel": true,
"self": 38.286310060962705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.7128618830759,
"count": 233502,
"is_parallel": true,
"self": 89.7128618830759
},
"communicator.exchange": {
"total": 1121.5153698410215,
"count": 233502,
"is_parallel": true,
"self": 1121.5153698410215
},
"steps_from_proto": {
"total": 109.46728733996429,
"count": 233502,
"is_parallel": true,
"self": 46.173763668835136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.29352367112915,
"count": 467004,
"is_parallel": true,
"self": 63.29352367112915
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 497.70519170805096,
"count": 233503,
"self": 6.752518145115573,
"children": {
"process_trajectory": {
"total": 169.68676881993383,
"count": 233503,
"self": 168.2812220279352,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4055467919986313,
"count": 10,
"self": 1.4055467919986313
}
}
},
"_update_policy": {
"total": 321.26590474300156,
"count": 97,
"self": 266.46990102699533,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.796003716006226,
"count": 2910,
"self": 54.796003716006226
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.439992598141544e-07,
"count": 1,
"self": 8.439992598141544e-07
},
"TrainerController._save_models": {
"total": 0.12665133400059858,
"count": 1,
"self": 0.0019500540001899935,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12470128000040859,
"count": 1,
"self": 0.12470128000040859
}
}
}
}
}
}
}