{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6146581768989563,
"min": 0.4877191185951233,
"max": 2.8522908687591553,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6301.4755859375,
"min": 4384.46875,
"max": 29653.296875,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.084091186523438,
"min": 0.37808966636657715,
"max": 14.18260383605957,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1380.240966796875,
"min": 36.67469787597656,
"max": 1453.4964599609375,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.711111111111112,
"min": 3.522727272727273,
"max": 27.884615384615383,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1247.0,
"min": 155.0,
"max": 1521.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.711111111111112,
"min": 3.522727272727273,
"max": 27.884615384615383,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1247.0,
"min": 155.0,
"max": 1521.0,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07095538789187283,
"min": 0.06114683207261431,
"max": 0.07731391892622352,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35477693945936417,
"min": 0.2449065320953891,
"max": 0.3865695946311176,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15892484836718615,
"min": 0.13363706204565423,
"max": 0.28440946625436053,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7946242418359307,
"min": 0.5345482481826169,
"max": 1.3627852429361904,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6800994400000014e-06,
"min": 1.6800994400000014e-06,
"max": 0.00029835000055,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.400497200000007e-06,
"min": 8.400497200000007e-06,
"max": 0.0014769000076999999,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10056000000000001,
"min": 0.10056000000000001,
"max": 0.19945000000000002,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5028,
"min": 0.4062,
"max": 0.9923000000000001,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.794400000000003e-05,
"min": 3.794400000000003e-05,
"max": 0.004972555,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00018972000000000016,
"min": 0.00018972000000000016,
"max": 0.02461577,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710687100",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710689300"
},
"total": 2199.3389862429995,
"count": 1,
"self": 0.4343201570000019,
"children": {
"run_training.setup": {
"total": 0.09340444699955697,
"count": 1,
"self": 0.09340444699955697
},
"TrainerController.start_learning": {
"total": 2198.811261639,
"count": 1,
"self": 2.792321032158725,
"children": {
"TrainerController._reset_env": {
"total": 2.0872996520001834,
"count": 1,
"self": 2.0872996520001834
},
"TrainerController.advance": {
"total": 2193.8337521108406,
"count": 90940,
"self": 1.4391148969034475,
"children": {
"env_step": {
"total": 2192.394637213937,
"count": 90940,
"self": 1371.5250379679637,
"children": {
"SubprocessEnvManager._take_step": {
"total": 819.2216098889267,
"count": 90940,
"self": 7.535240109797087,
"children": {
"TorchPolicy.evaluate": {
"total": 811.6863697791296,
"count": 90940,
"self": 811.6863697791296
}
}
},
"workers": {
"total": 1.6479893570467539,
"count": 90940,
"self": 0.0,
"children": {
"worker_root": {
"total": 2193.03557291,
"count": 90940,
"is_parallel": true,
"self": 1051.0183055737534,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002191622000282223,
"count": 1,
"is_parallel": true,
"self": 0.0006575389988938696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015340830013883533,
"count": 10,
"is_parallel": true,
"self": 0.0015340830013883533
}
}
},
"UnityEnvironment.step": {
"total": 0.03989202299999306,
"count": 1,
"is_parallel": true,
"self": 0.0008064760004344862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004623770000762306,
"count": 1,
"is_parallel": true,
"self": 0.0004623770000762306
},
"communicator.exchange": {
"total": 0.03618441399976291,
"count": 1,
"is_parallel": true,
"self": 0.03618441399976291
},
"steps_from_proto": {
"total": 0.002438755999719433,
"count": 1,
"is_parallel": true,
"self": 0.00046011699851078447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019786390012086486,
"count": 10,
"is_parallel": true,
"self": 0.0019786390012086486
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1142.0172673362467,
"count": 90939,
"is_parallel": true,
"self": 51.632565194197014,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.765041421186652,
"count": 90939,
"is_parallel": true,
"self": 27.765041421186652
},
"communicator.exchange": {
"total": 888.4825358559583,
"count": 90939,
"is_parallel": true,
"self": 888.4825358559583
},
"steps_from_proto": {
"total": 174.13712486490476,
"count": 90939,
"is_parallel": true,
"self": 33.27397351507261,
"children": {
"_process_rank_one_or_two_observation": {
"total": 140.86315134983215,
"count": 909390,
"is_parallel": true,
"self": 140.86315134983215
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002993830003106268,
"count": 1,
"self": 0.0002993830003106268,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 2171.239855954347,
"count": 2684027,
"is_parallel": true,
"self": 61.29481332234809,
"children": {
"process_trajectory": {
"total": 992.4873533459922,
"count": 2684027,
"is_parallel": true,
"self": 989.7876090849923,
"children": {
"RLTrainer._checkpoint": {
"total": 2.699744260999978,
"count": 20,
"is_parallel": true,
"self": 2.699744260999978
}
}
},
"_update_policy": {
"total": 1117.4576892860068,
"count": 454,
"is_parallel": true,
"self": 277.859193023015,
"children": {
"TorchPPOOptimizer.update": {
"total": 839.5984962629918,
"count": 23139,
"is_parallel": true,
"self": 839.5984962629918
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0975894610000978,
"count": 1,
"self": 0.0013217030000305385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09626775800006726,
"count": 1,
"self": 0.09626775800006726
}
}
}
}
}
}
}