{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8966401815414429,
"min": 0.8888797163963318,
"max": 2.841992139816284,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7890.43359375,
"min": 7890.43359375,
"max": 31261.9140625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.203579902648926,
"min": 0.4047658145427704,
"max": 13.203579902648926,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 660.1790161132812,
"min": 19.833524703979492,
"max": 660.1790161132812,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.72,
"min": 3.877551020408163,
"max": 26.72,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1336.0,
"min": 190.0,
"max": 1336.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.72,
"min": 3.877551020408163,
"max": 26.72,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1336.0,
"min": 190.0,
"max": 1336.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0668199760884996,
"min": 0.06472876065709701,
"max": 0.07468039455330548,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2672799043539984,
"min": 0.2621592565710136,
"max": 0.3734019727665274,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20090203768774575,
"min": 0.15325214721116365,
"max": 0.3183578928896025,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.803608150750983,
"min": 0.6130085888446546,
"max": 1.5917894644480124,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.00029175000274999995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10265,
"min": 0.10265,
"max": 0.19725000000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4106,
"min": 0.4106,
"max": 0.9615,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014223500000000013,
"min": 0.00014223500000000013,
"max": 0.004862775,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005689400000000005,
"min": 0.0005689400000000005,
"max": 0.023078849999999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740678207",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740678633"
},
"total": 425.98339389600005,
"count": 1,
"self": 0.4354671529999905,
"children": {
"run_training.setup": {
"total": 0.02286312500018539,
"count": 1,
"self": 0.02286312500018539
},
"TrainerController.start_learning": {
"total": 425.5250636179999,
"count": 1,
"self": 0.384086085061881,
"children": {
"TrainerController._reset_env": {
"total": 3.012973783999769,
"count": 1,
"self": 3.012973783999769
},
"TrainerController.advance": {
"total": 422.0313512419384,
"count": 18200,
"self": 0.3864267369035588,
"children": {
"env_step": {
"total": 305.20333797504645,
"count": 18200,
"self": 232.9490485540482,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.0382241179982,
"count": 18200,
"self": 1.2916827630006082,
"children": {
"TorchPolicy.evaluate": {
"total": 70.74654135499759,
"count": 18200,
"self": 70.74654135499759
}
}
},
"workers": {
"total": 0.21606530300005033,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 423.94704371401986,
"count": 18200,
"is_parallel": true,
"self": 219.89755216305048,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005835263999870222,
"count": 1,
"is_parallel": true,
"self": 0.003961351000270952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018739129995992698,
"count": 10,
"is_parallel": true,
"self": 0.0018739129995992698
}
}
},
"UnityEnvironment.step": {
"total": 0.07191652900019108,
"count": 1,
"is_parallel": true,
"self": 0.0006173230008243991,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003613169997151999,
"count": 1,
"is_parallel": true,
"self": 0.0003613169997151999
},
"communicator.exchange": {
"total": 0.0686821899998904,
"count": 1,
"is_parallel": true,
"self": 0.0686821899998904
},
"steps_from_proto": {
"total": 0.00225569899976108,
"count": 1,
"is_parallel": true,
"self": 0.0003968050004914403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018588939992696396,
"count": 10,
"is_parallel": true,
"self": 0.0018588939992696396
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 204.04949155096938,
"count": 18199,
"is_parallel": true,
"self": 9.742650001011498,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.438002863973907,
"count": 18199,
"is_parallel": true,
"self": 5.438002863973907
},
"communicator.exchange": {
"total": 157.0522302999948,
"count": 18199,
"is_parallel": true,
"self": 157.0522302999948
},
"steps_from_proto": {
"total": 31.816608385989184,
"count": 18199,
"is_parallel": true,
"self": 5.8167412770635565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.999867108925628,
"count": 181990,
"is_parallel": true,
"self": 25.999867108925628
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 116.4415865299884,
"count": 18200,
"self": 0.5181709059443165,
"children": {
"process_trajectory": {
"total": 20.210247575044377,
"count": 18200,
"self": 19.713246305043867,
"children": {
"RLTrainer._checkpoint": {
"total": 0.49700127000051,
"count": 4,
"self": 0.49700127000051
}
}
},
"_update_policy": {
"total": 95.7131680489997,
"count": 90,
"self": 38.291143993002606,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.4220240559971,
"count": 4590,
"self": 57.4220240559971
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.879998520365916e-07,
"count": 1,
"self": 7.879998520365916e-07
},
"TrainerController._save_models": {
"total": 0.09665171899996494,
"count": 1,
"self": 0.0010082109997711086,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09564350800019383,
"count": 1,
"self": 0.09564350800019383
}
}
}
}
}
}
}