{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9660996794700623,
"min": 0.9660996794700623,
"max": 2.868175983428955,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9181.8115234375,
"min": 9181.8115234375,
"max": 29278.33984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.956878662109375,
"min": 0.3559059500694275,
"max": 12.973469734191895,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2526.59130859375,
"min": 68.68984985351562,
"max": 2646.587890625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06546640424366,
"min": 0.06301046795281354,
"max": 0.07627590767365558,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26186561697464,
"min": 0.25204187181125415,
"max": 0.34871732126054006,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.196397227764714,
"min": 0.11450982734551834,
"max": 0.3037931157093422,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.785588911058856,
"min": 0.4580393093820734,
"max": 1.5189655785467109,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.98609733800001e-06,
"min": 7.98609733800001e-06,
"max": 0.00029178600273799997,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.194438935200004e-05,
"min": 3.194438935200004e-05,
"max": 0.00138468003844,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102662,
"min": 0.102662,
"max": 0.19726200000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.410648,
"min": 0.410648,
"max": 0.96156,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014283380000000017,
"min": 0.00014283380000000017,
"max": 0.0048633738,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005713352000000007,
"min": 0.0005713352000000007,
"max": 0.023081844,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.0,
"min": 3.272727272727273,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1100.0,
"min": 144.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.0,
"min": 3.272727272727273,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1100.0,
"min": 144.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1734878454",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1734878959"
},
"total": 505.2832227470001,
"count": 1,
"self": 0.47543023899993386,
"children": {
"run_training.setup": {
"total": 0.059654108000017914,
"count": 1,
"self": 0.059654108000017914
},
"TrainerController.start_learning": {
"total": 504.74813840000013,
"count": 1,
"self": 0.5147279780031795,
"children": {
"TrainerController._reset_env": {
"total": 2.1167134160000387,
"count": 1,
"self": 2.1167134160000387
},
"TrainerController.advance": {
"total": 502.02440908499693,
"count": 18192,
"self": 0.5068890149987055,
"children": {
"env_step": {
"total": 363.4297008039978,
"count": 18192,
"self": 277.1465201009959,
"children": {
"SubprocessEnvManager._take_step": {
"total": 85.97856617700677,
"count": 18192,
"self": 1.5728378080216316,
"children": {
"TorchPolicy.evaluate": {
"total": 84.40572836898514,
"count": 18192,
"self": 84.40572836898514
}
}
},
"workers": {
"total": 0.3046145259951345,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 503.0693739120046,
"count": 18192,
"is_parallel": true,
"self": 262.68007847499655,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028302189999749316,
"count": 1,
"is_parallel": true,
"self": 0.0008542450001414181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019759739998335135,
"count": 10,
"is_parallel": true,
"self": 0.0019759739998335135
}
}
},
"UnityEnvironment.step": {
"total": 0.04442015299991908,
"count": 1,
"is_parallel": true,
"self": 0.0006880729999920732,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044443599995247496,
"count": 1,
"is_parallel": true,
"self": 0.00044443599995247496
},
"communicator.exchange": {
"total": 0.041146540999989156,
"count": 1,
"is_parallel": true,
"self": 0.041146540999989156
},
"steps_from_proto": {
"total": 0.002141102999985378,
"count": 1,
"is_parallel": true,
"self": 0.0004748409999137948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016662620000715833,
"count": 10,
"is_parallel": true,
"self": 0.0016662620000715833
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 240.38929543700806,
"count": 18191,
"is_parallel": true,
"self": 11.531342373989673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.3726602550045754,
"count": 18191,
"is_parallel": true,
"self": 6.3726602550045754
},
"communicator.exchange": {
"total": 184.61147759501682,
"count": 18191,
"is_parallel": true,
"self": 184.61147759501682
},
"steps_from_proto": {
"total": 37.87381521299699,
"count": 18191,
"is_parallel": true,
"self": 7.233340924001254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.640474288995733,
"count": 181910,
"is_parallel": true,
"self": 30.640474288995733
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 138.08781926600045,
"count": 18192,
"self": 0.665841511991971,
"children": {
"process_trajectory": {
"total": 31.0816143740077,
"count": 18192,
"self": 30.653156954007727,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42845741999997244,
"count": 4,
"self": 0.42845741999997244
}
}
},
"_update_policy": {
"total": 106.34036338000078,
"count": 90,
"self": 44.366315493005686,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.974047886995095,
"count": 4587,
"self": 61.974047886995095
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.035999957821332e-06,
"count": 1,
"self": 1.035999957821332e-06
},
"TrainerController._save_models": {
"total": 0.09228688500002136,
"count": 1,
"self": 0.001217716999917684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09106916800010367,
"count": 1,
"self": 0.09106916800010367
}
}
}
}
}
}
}