{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9645881056785583,
"min": 0.9645881056785583,
"max": 2.8631346225738525,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9231.1083984375,
"min": 9231.1083984375,
"max": 29321.361328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.710789680480957,
"min": 0.41503843665122986,
"max": 12.751395225524902,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2478.60400390625,
"min": 80.5174560546875,
"max": 2601.28466796875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06762708881095639,
"min": 0.06264751766624006,
"max": 0.0732921762825629,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27050835524382555,
"min": 0.25059007066496025,
"max": 0.358468279628284,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18999936343992457,
"min": 0.13599461400721624,
"max": 0.29469047808179666,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7599974537596983,
"min": 0.543978456028865,
"max": 1.4558063795169196,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.863636363636363,
"min": 4.136363636363637,
"max": 25.236363636363638,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1094.0,
"min": 182.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.863636363636363,
"min": 4.136363636363637,
"max": 25.236363636363638,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1094.0,
"min": 182.0,
"max": 1388.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695907805",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695908292"
},
"total": 487.81525632399996,
"count": 1,
"self": 0.4241840219999631,
"children": {
"run_training.setup": {
"total": 0.05448181899998872,
"count": 1,
"self": 0.05448181899998872
},
"TrainerController.start_learning": {
"total": 487.336590483,
"count": 1,
"self": 0.5503704469909962,
"children": {
"TrainerController._reset_env": {
"total": 4.933057276999989,
"count": 1,
"self": 4.933057276999989
},
"TrainerController.advance": {
"total": 481.707295400009,
"count": 18202,
"self": 0.26153175500553516,
"children": {
"env_step": {
"total": 481.44576364500347,
"count": 18202,
"self": 348.5702667549957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.59862181099595,
"count": 18202,
"self": 1.7356725590004771,
"children": {
"TorchPolicy.evaluate": {
"total": 130.86294925199547,
"count": 18202,
"self": 130.86294925199547
}
}
},
"workers": {
"total": 0.27687507901180197,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 485.74802399100497,
"count": 18202,
"is_parallel": true,
"self": 233.40045177801642,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005842251999979453,
"count": 1,
"is_parallel": true,
"self": 0.004330151999852205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015121000001272478,
"count": 10,
"is_parallel": true,
"self": 0.0015121000001272478
}
}
},
"UnityEnvironment.step": {
"total": 0.11493170599999303,
"count": 1,
"is_parallel": true,
"self": 0.0006491649999134097,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005090390000077605,
"count": 1,
"is_parallel": true,
"self": 0.0005090390000077605
},
"communicator.exchange": {
"total": 0.10680287800005317,
"count": 1,
"is_parallel": true,
"self": 0.10680287800005317
},
"steps_from_proto": {
"total": 0.006970624000018688,
"count": 1,
"is_parallel": true,
"self": 0.0027790719999529756,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0041915520000657125,
"count": 10,
"is_parallel": true,
"self": 0.0041915520000657125
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 252.34757221298855,
"count": 18201,
"is_parallel": true,
"self": 10.267069578972041,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.6640785680028785,
"count": 18201,
"is_parallel": true,
"self": 5.6640785680028785
},
"communicator.exchange": {
"total": 200.1329038630014,
"count": 18201,
"is_parallel": true,
"self": 200.1329038630014
},
"steps_from_proto": {
"total": 36.283520203012245,
"count": 18201,
"is_parallel": true,
"self": 6.713441203049911,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.570078999962334,
"count": 182010,
"is_parallel": true,
"self": 29.570078999962334
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010535100000197417,
"count": 1,
"self": 0.00010535100000197417,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 477.88723492595454,
"count": 457910,
"is_parallel": true,
"self": 10.239880598954528,
"children": {
"process_trajectory": {
"total": 260.480925028,
"count": 457910,
"is_parallel": true,
"self": 259.69715035900003,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7837746689999676,
"count": 4,
"is_parallel": true,
"self": 0.7837746689999676
}
}
},
"_update_policy": {
"total": 207.16642929900001,
"count": 90,
"is_parallel": true,
"self": 80.54774998400171,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.6186793149983,
"count": 4587,
"is_parallel": true,
"self": 126.6186793149983
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1457620080000197,
"count": 1,
"self": 0.0008865940000077899,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14487541400001192,
"count": 1,
"self": 0.14487541400001192
}
}
}
}
}
}
}