{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8770585656166077,
"min": 0.8770585656166077,
"max": 2.830709457397461,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8335.564453125,
"min": 8335.564453125,
"max": 28895.8828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.95383071899414,
"min": 0.34430280327796936,
"max": 12.95383071899414,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2525.9970703125,
"min": 66.79474639892578,
"max": 2630.0830078125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07062610058353896,
"min": 0.06501332515080338,
"max": 0.07546514188837974,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28250440233415586,
"min": 0.2684959759365073,
"max": 0.36520765768021674,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20712938473797313,
"min": 0.13173237533249219,
"max": 0.2980467761264128,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8285175389518925,
"min": 0.5269295013299687,
"max": 1.4902338806320639,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.84090909090909,
"min": 3.3863636363636362,
"max": 25.84090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1137.0,
"min": 149.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.84090909090909,
"min": 3.3863636363636362,
"max": 25.84090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1137.0,
"min": 149.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744777803",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744778265"
},
"total": 461.84542099,
"count": 1,
"self": 0.4900471590001416,
"children": {
"run_training.setup": {
"total": 0.02539500299997144,
"count": 1,
"self": 0.02539500299997144
},
"TrainerController.start_learning": {
"total": 461.32997882799987,
"count": 1,
"self": 0.4606372300016801,
"children": {
"TrainerController._reset_env": {
"total": 3.2159931610000285,
"count": 1,
"self": 3.2159931610000285
},
"TrainerController.advance": {
"total": 457.5603602439983,
"count": 18192,
"self": 0.46082447901380874,
"children": {
"env_step": {
"total": 326.7502727519951,
"count": 18192,
"self": 249.41198962897602,
"children": {
"SubprocessEnvManager._take_step": {
"total": 77.07033822500682,
"count": 18192,
"self": 1.4261586150104222,
"children": {
"TorchPolicy.evaluate": {
"total": 75.6441796099964,
"count": 18192,
"self": 75.6441796099964
}
}
},
"workers": {
"total": 0.2679448980122743,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 459.7027603709988,
"count": 18192,
"is_parallel": true,
"self": 242.07494250699995,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006673794999983329,
"count": 1,
"is_parallel": true,
"self": 0.0050861480000321535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015876469999511755,
"count": 10,
"is_parallel": true,
"self": 0.0015876469999511755
}
}
},
"UnityEnvironment.step": {
"total": 0.03506181199998082,
"count": 1,
"is_parallel": true,
"self": 0.0006003480000345007,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039167800002815056,
"count": 1,
"is_parallel": true,
"self": 0.00039167800002815056
},
"communicator.exchange": {
"total": 0.03229417799991552,
"count": 1,
"is_parallel": true,
"self": 0.03229417799991552
},
"steps_from_proto": {
"total": 0.0017756080000026486,
"count": 1,
"is_parallel": true,
"self": 0.00034375500024452776,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014318529997581209,
"count": 10,
"is_parallel": true,
"self": 0.0014318529997581209
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 217.62781786399887,
"count": 18191,
"is_parallel": true,
"self": 10.330195812999818,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.559041670000624,
"count": 18191,
"is_parallel": true,
"self": 5.559041670000624
},
"communicator.exchange": {
"total": 169.13764154099272,
"count": 18191,
"is_parallel": true,
"self": 169.13764154099272
},
"steps_from_proto": {
"total": 32.60093884000571,
"count": 18191,
"is_parallel": true,
"self": 6.20572353202283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.39521530798288,
"count": 181910,
"is_parallel": true,
"self": 26.39521530798288
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 130.34926301298935,
"count": 18192,
"self": 0.5920994999871709,
"children": {
"process_trajectory": {
"total": 27.916634336002176,
"count": 18192,
"self": 27.470637540002258,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4459967959999176,
"count": 4,
"self": 0.4459967959999176
}
}
},
"_update_policy": {
"total": 101.84052917700001,
"count": 90,
"self": 40.87573787599774,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.964791301002265,
"count": 4587,
"self": 60.964791301002265
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.469999102089787e-07,
"count": 1,
"self": 8.469999102089787e-07
},
"TrainerController._save_models": {
"total": 0.0929873459999726,
"count": 1,
"self": 0.0010027269997863186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09198461900018629,
"count": 1,
"self": 0.09198461900018629
}
}
}
}
}
}
}