{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3717280626296997,
"min": 1.3717280626296997,
"max": 2.8676395416259766,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13036.9033203125,
"min": 13036.9033203125,
"max": 29272.86328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.649423599243164,
"min": 0.1495904177427292,
"max": 9.649423599243164,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1881.6375732421875,
"min": 29.02054214477539,
"max": 1946.6973876953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.295454545454547,
"min": 3.227272727272727,
"max": 20.09090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 849.0,
"min": 142.0,
"max": 1078.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.295454545454547,
"min": 3.227272727272727,
"max": 20.09090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 849.0,
"min": 142.0,
"max": 1078.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.03317702602824226,
"min": 0.02634447612541003,
"max": 0.03677972554162677,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09953107808472678,
"min": 0.06184022129690718,
"max": 0.11033917662488031,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2731594973140293,
"min": 0.1119703957367511,
"max": 0.32940030523708885,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8194784919420879,
"min": 0.2239407914735022,
"max": 0.9882009157112666,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.588097137333328e-06,
"min": 8.588097137333328e-06,
"max": 0.000291816002728,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.5764291411999985e-05,
"min": 2.5764291411999985e-05,
"max": 0.0008342640219119999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10286266666666667,
"min": 0.10286266666666667,
"max": 0.197272,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.30858800000000003,
"min": 0.21489199999999997,
"max": 0.578088,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00015284706666666664,
"min": 0.00015284706666666664,
"max": 0.0048638728,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004585411999999999,
"min": 0.0004585411999999999,
"max": 0.013906591199999999,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740128412",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740128897"
},
"total": 485.182162349,
"count": 1,
"self": 0.5459912420000137,
"children": {
"run_training.setup": {
"total": 0.027866448999986915,
"count": 1,
"self": 0.027866448999986915
},
"TrainerController.start_learning": {
"total": 484.608304658,
"count": 1,
"self": 0.5628418420023991,
"children": {
"TrainerController._reset_env": {
"total": 3.080970834000027,
"count": 1,
"self": 3.080970834000027
},
"TrainerController.advance": {
"total": 480.87996623199746,
"count": 18192,
"self": 0.5769784490045708,
"children": {
"env_step": {
"total": 355.8910466469992,
"count": 18192,
"self": 301.75084521699233,
"children": {
"SubprocessEnvManager._take_step": {
"total": 53.82256167201183,
"count": 18192,
"self": 1.6835121980154781,
"children": {
"TorchPolicy.evaluate": {
"total": 52.13904947399635,
"count": 18192,
"self": 52.13904947399635
}
}
},
"workers": {
"total": 0.31763975799503896,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 482.8035949769921,
"count": 18192,
"is_parallel": true,
"self": 220.22091446498877,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0066298360000018874,
"count": 1,
"is_parallel": true,
"self": 0.004819523000037407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018103129999644807,
"count": 10,
"is_parallel": true,
"self": 0.0018103129999644807
}
}
},
"UnityEnvironment.step": {
"total": 0.04327036000000817,
"count": 1,
"is_parallel": true,
"self": 0.0006834269999558273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000498587000038242,
"count": 1,
"is_parallel": true,
"self": 0.000498587000038242
},
"communicator.exchange": {
"total": 0.039826762000018334,
"count": 1,
"is_parallel": true,
"self": 0.039826762000018334
},
"steps_from_proto": {
"total": 0.0022615839999957643,
"count": 1,
"is_parallel": true,
"self": 0.000508861000014349,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017527229999814153,
"count": 10,
"is_parallel": true,
"self": 0.0017527229999814153
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 262.5826805120033,
"count": 18191,
"is_parallel": true,
"self": 12.478404542017188,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.828294142994878,
"count": 18191,
"is_parallel": true,
"self": 6.828294142994878
},
"communicator.exchange": {
"total": 204.27684411198555,
"count": 18191,
"is_parallel": true,
"self": 204.27684411198555
},
"steps_from_proto": {
"total": 38.9991377150057,
"count": 18191,
"is_parallel": true,
"self": 7.363743783029179,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.635393931976523,
"count": 181910,
"is_parallel": true,
"self": 31.635393931976523
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 124.41194113599369,
"count": 18192,
"self": 0.6736082579947151,
"children": {
"process_trajectory": {
"total": 28.569544553998867,
"count": 18192,
"self": 28.13844324599893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4311013079999384,
"count": 4,
"self": 0.4311013079999384
}
}
},
"_update_policy": {
"total": 95.1687883240001,
"count": 54,
"self": 49.82224088099895,
"children": {
"TorchPPOOptimizer.update": {
"total": 45.34654744300116,
"count": 1134,
"self": 45.34654744300116
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6630000345685403e-06,
"count": 1,
"self": 1.6630000345685403e-06
},
"TrainerController._save_models": {
"total": 0.08452408700009073,
"count": 1,
"self": 0.001197368000134702,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08332671899995603,
"count": 1,
"self": 0.08332671899995603
}
}
}
}
}
}
}