{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4069138765335083,
"min": 1.4069138765335083,
"max": 1.4301542043685913,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70698.828125,
"min": 69397.421875,
"max": 78425.9609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 81.0622950819672,
"min": 76.9190031152648,
"max": 416.6666666666667,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49448.0,
"min": 49284.0,
"max": 50000.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999995.0,
"min": 49602.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999995.0,
"min": 49602.0,
"max": 1999995.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.471451997756958,
"min": -0.0006918192957527936,
"max": 2.493807792663574,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1507.585693359375,
"min": -0.0823264941573143,
"max": 1570.454833984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8221764665158067,
"min": 1.8437940884037178,
"max": 3.9931232571601867,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2331.527644574642,
"min": 219.41149652004242,
"max": 2441.9464238882065,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8221764665158067,
"min": 1.8437940884037178,
"max": 3.9931232571601867,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2331.527644574642,
"min": 219.41149652004242,
"max": 2441.9464238882065,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017462344790692443,
"min": 0.013314770137528992,
"max": 0.019159827921248505,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052387034372077326,
"min": 0.026896218375622997,
"max": 0.05657769696117612,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05878366861078474,
"min": 0.02369714795301358,
"max": 0.06317140944302083,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17635100583235422,
"min": 0.04739429590602716,
"max": 0.1888033871849378,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.396898867733334e-06,
"min": 3.396898867733334e-06,
"max": 0.00029529397656867496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0190696603200002e-05,
"min": 1.0190696603200002e-05,
"max": 0.0008436732187755999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113226666666668,
"min": 0.10113226666666668,
"max": 0.19843132500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033968,
"min": 0.20739400000000008,
"max": 0.5812244,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.650010666666666e-05,
"min": 6.650010666666666e-05,
"max": 0.0049217231174999996,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019950032,
"min": 0.00019950032,
"max": 0.014063097559999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674049091",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674051256"
},
"total": 2164.093119366,
"count": 1,
"self": 0.38804791699976704,
"children": {
"run_training.setup": {
"total": 0.1028983999999582,
"count": 1,
"self": 0.1028983999999582
},
"TrainerController.start_learning": {
"total": 2163.602173049,
"count": 1,
"self": 3.842059038854586,
"children": {
"TrainerController._reset_env": {
"total": 10.271913277000294,
"count": 1,
"self": 10.271913277000294
},
"TrainerController.advance": {
"total": 2149.377157611146,
"count": 232441,
"self": 4.058375131281537,
"children": {
"env_step": {
"total": 1691.1074169370377,
"count": 232441,
"self": 1421.5734430058574,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.07780354716306,
"count": 232441,
"self": 13.960088701244786,
"children": {
"TorchPolicy.evaluate": {
"total": 253.11771484591827,
"count": 222956,
"self": 64.02158151201502,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.09613333390325,
"count": 222956,
"self": 189.09613333390325
}
}
}
}
},
"workers": {
"total": 2.4561703840172413,
"count": 232441,
"self": 0.0,
"children": {
"worker_root": {
"total": 2156.0808455190295,
"count": 232441,
"is_parallel": true,
"self": 985.9513000360562,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020148230000813783,
"count": 1,
"is_parallel": true,
"self": 0.0003769780000766332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001637845000004745,
"count": 2,
"is_parallel": true,
"self": 0.001637845000004745
}
}
},
"UnityEnvironment.step": {
"total": 0.02778324300015811,
"count": 1,
"is_parallel": true,
"self": 0.0002767289997791522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001783660000000964,
"count": 1,
"is_parallel": true,
"self": 0.0001783660000000964
},
"communicator.exchange": {
"total": 0.026601582000239432,
"count": 1,
"is_parallel": true,
"self": 0.026601582000239432
},
"steps_from_proto": {
"total": 0.0007265660001394281,
"count": 1,
"is_parallel": true,
"self": 0.00025003400014611543,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047653199999331264,
"count": 2,
"is_parallel": true,
"self": 0.00047653199999331264
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1170.1295454829733,
"count": 232440,
"is_parallel": true,
"self": 33.62624215798269,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.4170699449769,
"count": 232440,
"is_parallel": true,
"self": 73.4170699449769
},
"communicator.exchange": {
"total": 959.795428941914,
"count": 232440,
"is_parallel": true,
"self": 959.795428941914
},
"steps_from_proto": {
"total": 103.29080443809971,
"count": 232440,
"is_parallel": true,
"self": 37.30003960919021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.9907648289095,
"count": 464880,
"is_parallel": true,
"self": 65.9907648289095
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 454.21136554282657,
"count": 232441,
"self": 5.565885114936009,
"children": {
"process_trajectory": {
"total": 140.09636751188827,
"count": 232441,
"self": 138.9862188628872,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1101486490010757,
"count": 10,
"self": 1.1101486490010757
}
}
},
"_update_policy": {
"total": 308.5491129160023,
"count": 97,
"self": 256.0552555839963,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.49385733200597,
"count": 2910,
"self": 52.49385733200597
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.180999788630288e-06,
"count": 1,
"self": 1.180999788630288e-06
},
"TrainerController._save_models": {
"total": 0.1110419409997121,
"count": 1,
"self": 0.002105544000187365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10893639699952473,
"count": 1,
"self": 0.10893639699952473
}
}
}
}
}
}
}