{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.5238704681396484,
"min": 1.5238704681396484,
"max": 2.8698039054870605,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 15689.7705078125,
"min": 15451.7890625,
"max": 29294.958984375,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.992545127868652,
"min": 0.5229910016059875,
"max": 8.992545127868652,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1834.479248046875,
"min": 101.46025085449219,
"max": 1834.479248046875,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06614022647982518,
"min": 0.0658550445940123,
"max": 0.08366208475847232,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3307011323991259,
"min": 0.2634201783760492,
"max": 0.3635009432345053,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2550792004547867,
"min": 0.12248408181168248,
"max": 0.29679345989636347,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.2753960022739335,
"min": 0.4899363272467299,
"max": 1.4119945451909421,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6464094511999996e-05,
"min": 1.6464094511999996e-05,
"max": 0.000283764005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.232047255999999e-05,
"min": 8.232047255999999e-05,
"max": 0.00127032007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.0,
"min": 3.6136363636363638,
"max": 19.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1045.0,
"min": 159.0,
"max": 1045.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.0,
"min": 3.6136363636363638,
"max": 19.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1045.0,
"min": 159.0,
"max": 1045.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752650434",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752650724"
},
"total": 289.184162538,
"count": 1,
"self": 0.9498027049999109,
"children": {
"run_training.setup": {
"total": 0.0751516900002116,
"count": 1,
"self": 0.0751516900002116
},
"TrainerController.start_learning": {
"total": 288.1592081429999,
"count": 1,
"self": 0.32853635700962514,
"children": {
"TrainerController._reset_env": {
"total": 5.338979336000193,
"count": 1,
"self": 5.338979336000193
},
"TrainerController.advance": {
"total": 282.34762078899007,
"count": 9128,
"self": 0.35850337496935936,
"children": {
"env_step": {
"total": 202.2247837190182,
"count": 9128,
"self": 173.75358190100496,
"children": {
"SubprocessEnvManager._take_step": {
"total": 28.287506841008735,
"count": 9128,
"self": 1.0302637770157617,
"children": {
"TorchPolicy.evaluate": {
"total": 27.257243063992973,
"count": 9128,
"self": 27.257243063992973
}
}
},
"workers": {
"total": 0.18369497700450665,
"count": 9128,
"self": 0.0,
"children": {
"worker_root": {
"total": 286.7664638479946,
"count": 9128,
"is_parallel": true,
"self": 136.673706343998,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.015726620000123148,
"count": 1,
"is_parallel": true,
"self": 0.011540426999999909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0041861930001232395,
"count": 10,
"is_parallel": true,
"self": 0.0041861930001232395
}
}
},
"UnityEnvironment.step": {
"total": 0.04644631799988019,
"count": 1,
"is_parallel": true,
"self": 0.000746753999919747,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004977300000064133,
"count": 1,
"is_parallel": true,
"self": 0.0004977300000064133
},
"communicator.exchange": {
"total": 0.04276855800003432,
"count": 1,
"is_parallel": true,
"self": 0.04276855800003432
},
"steps_from_proto": {
"total": 0.002433275999919715,
"count": 1,
"is_parallel": true,
"self": 0.0004319009999562695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020013749999634456,
"count": 10,
"is_parallel": true,
"self": 0.0020013749999634456
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 150.0927575039966,
"count": 9127,
"is_parallel": true,
"self": 7.07343146001881,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.905107965997331,
"count": 9127,
"is_parallel": true,
"self": 3.905107965997331
},
"communicator.exchange": {
"total": 117.5635501499853,
"count": 9127,
"is_parallel": true,
"self": 117.5635501499853
},
"steps_from_proto": {
"total": 21.550667927995164,
"count": 9127,
"is_parallel": true,
"self": 4.13429436299316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 17.416373565002004,
"count": 91270,
"is_parallel": true,
"self": 17.416373565002004
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 79.7643336950025,
"count": 9128,
"self": 0.41692469699091816,
"children": {
"process_trajectory": {
"total": 15.468997128011779,
"count": 9128,
"self": 15.177261979011746,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29173514900003283,
"count": 2,
"self": 0.29173514900003283
}
}
},
"_update_policy": {
"total": 63.87841186999981,
"count": 45,
"self": 25.25852895799403,
"children": {
"TorchPPOOptimizer.update": {
"total": 38.61988291200578,
"count": 2292,
"self": 38.61988291200578
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4720001217938261e-06,
"count": 1,
"self": 1.4720001217938261e-06
},
"TrainerController._save_models": {
"total": 0.14407018899987634,
"count": 1,
"self": 0.0014333789997635904,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14263681000011275,
"count": 1,
"self": 0.14263681000011275
}
}
}
}
}
}
}