{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6968308091163635,
"min": 0.6968308091163635,
"max": 2.8665218353271484,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6615.0146484375,
"min": 6615.0146484375,
"max": 29608.3046875,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.335993766784668,
"min": 0.41713541746139526,
"max": 13.335993766784668,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2587.182861328125,
"min": 80.92427062988281,
"max": 2725.48974609375,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06497402107119092,
"min": 0.06218711691155263,
"max": 0.07531185024637643,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2598960842847637,
"min": 0.24874846764621053,
"max": 0.35830930280773077,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.200930181160277,
"min": 0.11266189387214243,
"max": 0.2774748248504657,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.803720724641108,
"min": 0.4506475754885697,
"max": 1.3458630308216692,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.511778084800001e-06,
"min": 6.511778084800001e-06,
"max": 0.0003326396821648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6047112339200005e-05,
"min": 2.6047112339200005e-05,
"max": 0.001595878430624,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.022727272727273,
"min": 3.090909090909091,
"max": 26.477272727272727,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1145.0,
"min": 136.0,
"max": 1443.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.022727272727273,
"min": 3.090909090909091,
"max": 26.477272727272727,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1145.0,
"min": 136.0,
"max": 1443.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702910364",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702910972"
},
"total": 608.0418634260001,
"count": 1,
"self": 0.4394031660001474,
"children": {
"run_training.setup": {
"total": 0.07340925400001197,
"count": 1,
"self": 0.07340925400001197
},
"TrainerController.start_learning": {
"total": 607.5290510059999,
"count": 1,
"self": 0.8238504159988906,
"children": {
"TrainerController._reset_env": {
"total": 3.553978454000003,
"count": 1,
"self": 3.553978454000003
},
"TrainerController.advance": {
"total": 603.045336903001,
"count": 22735,
"self": 0.4086771860213503,
"children": {
"env_step": {
"total": 602.6366597169797,
"count": 22735,
"self": 398.7339570599146,
"children": {
"SubprocessEnvManager._take_step": {
"total": 203.49776054303175,
"count": 22735,
"self": 2.0326982810503296,
"children": {
"TorchPolicy.evaluate": {
"total": 201.46506226198142,
"count": 22735,
"self": 201.46506226198142
}
}
},
"workers": {
"total": 0.40494211403336067,
"count": 22735,
"self": 0.0,
"children": {
"worker_root": {
"total": 605.7994984830333,
"count": 22735,
"is_parallel": true,
"self": 297.90900954105393,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006002784000088468,
"count": 1,
"is_parallel": true,
"self": 0.0038887319999503234,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021140520001381446,
"count": 10,
"is_parallel": true,
"self": 0.0021140520001381446
}
}
},
"UnityEnvironment.step": {
"total": 0.039690434000021924,
"count": 1,
"is_parallel": true,
"self": 0.0006423990000712365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042490500004532805,
"count": 1,
"is_parallel": true,
"self": 0.00042490500004532805
},
"communicator.exchange": {
"total": 0.036603379999974095,
"count": 1,
"is_parallel": true,
"self": 0.036603379999974095
},
"steps_from_proto": {
"total": 0.002019749999931264,
"count": 1,
"is_parallel": true,
"self": 0.00040582600013294723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016139239997983168,
"count": 10,
"is_parallel": true,
"self": 0.0016139239997983168
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 307.89048894197936,
"count": 22734,
"is_parallel": true,
"self": 14.345737054994174,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.3258013949957785,
"count": 22734,
"is_parallel": true,
"self": 7.3258013949957785
},
"communicator.exchange": {
"total": 239.2748470600012,
"count": 22734,
"is_parallel": true,
"self": 239.2748470600012
},
"steps_from_proto": {
"total": 46.9441034319882,
"count": 22734,
"is_parallel": true,
"self": 8.850858159946142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.093245272042054,
"count": 227340,
"is_parallel": true,
"self": 38.093245272042054
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.683099993802898e-05,
"count": 1,
"self": 5.683099993802898e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 595.3291868080229,
"count": 901638,
"is_parallel": true,
"self": 20.627983046114537,
"children": {
"process_trajectory": {
"total": 327.7954353249081,
"count": 901638,
"is_parallel": true,
"self": 326.84917588690803,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9462594380000837,
"count": 5,
"is_parallel": true,
"self": 0.9462594380000837
}
}
},
"_update_policy": {
"total": 246.90576843700023,
"count": 113,
"is_parallel": true,
"self": 77.42877844200586,
"children": {
"TorchPPOOptimizer.update": {
"total": 169.47698999499437,
"count": 5760,
"is_parallel": true,
"self": 169.47698999499437
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10582840200004284,
"count": 1,
"self": 0.001000459000124465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10482794299991838,
"count": 1,
"self": 0.10482794299991838
}
}
}
}
}
}
}