{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6046640872955322,
"min": 0.5809189081192017,
"max": 2.8379483222961426,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6172.4111328125,
"min": 5553.84130859375,
"max": 28969.77734375,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.935003280639648,
"min": 0.4331207275390625,
"max": 14.972412109375,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2912.32568359375,
"min": 84.02542114257812,
"max": 3055.17724609375,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.032520873723115074,
"min": 0.02139378272962252,
"max": 0.039632793032311986,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1300834948924603,
"min": 0.08557513091849007,
"max": 0.19816396516155993,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.10008202726021409,
"min": 0.10008202726021409,
"max": 0.35671817511320114,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.40032810904085636,
"min": 0.40032810904085636,
"max": 1.5131584915022054,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3764995412000023e-06,
"min": 1.3764995412000023e-06,
"max": 0.00029837640054119997,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.505998164800009e-06,
"min": 5.505998164800009e-06,
"max": 0.001477032007656,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10045880000000001,
"min": 0.10045880000000001,
"max": 0.19945880000000002,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40183520000000006,
"min": 0.40183520000000006,
"max": 0.992344,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.289412000000005e-05,
"min": 3.289412000000005e-05,
"max": 0.00497299412,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001315764800000002,
"min": 0.0001315764800000002,
"max": 0.0246179656,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 29.733333333333334,
"min": 3.6818181818181817,
"max": 29.733333333333334,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1338.0,
"min": 162.0,
"max": 1624.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 29.733333333333334,
"min": 3.6818181818181817,
"max": 29.733333333333334,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1338.0,
"min": 162.0,
"max": 1624.0,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741124112",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget5 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741126297"
},
"total": 2184.4819171159997,
"count": 1,
"self": 0.4751641730008487,
"children": {
"run_training.setup": {
"total": 0.021494432999134006,
"count": 1,
"self": 0.021494432999134006
},
"TrainerController.start_learning": {
"total": 2183.9852585099998,
"count": 1,
"self": 1.7617365812266144,
"children": {
"TrainerController._reset_env": {
"total": 2.0118533029999526,
"count": 1,
"self": 2.0118533029999526
},
"TrainerController.advance": {
"total": 2179.579274817773,
"count": 90928,
"self": 1.844405148676742,
"children": {
"env_step": {
"total": 1590.0935259330408,
"count": 90928,
"self": 1156.8052782602626,
"children": {
"SubprocessEnvManager._take_step": {
"total": 432.1743347717793,
"count": 90928,
"self": 6.4066391177093465,
"children": {
"TorchPolicy.evaluate": {
"total": 425.76769565406994,
"count": 90928,
"self": 425.76769565406994
}
}
},
"workers": {
"total": 1.113912900998912,
"count": 90928,
"self": 0.0,
"children": {
"worker_root": {
"total": 2176.7002558878876,
"count": 90928,
"is_parallel": true,
"self": 1161.2251435198823,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021309090006980114,
"count": 1,
"is_parallel": true,
"self": 0.0006625349988098606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014683740018881508,
"count": 10,
"is_parallel": true,
"self": 0.0014683740018881508
}
}
},
"UnityEnvironment.step": {
"total": 0.03779112400025042,
"count": 1,
"is_parallel": true,
"self": 0.0006039820009391406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043680799990397645,
"count": 1,
"is_parallel": true,
"self": 0.00043680799990397645
},
"communicator.exchange": {
"total": 0.03451568899981794,
"count": 1,
"is_parallel": true,
"self": 0.03451568899981794
},
"steps_from_proto": {
"total": 0.0022346449995893636,
"count": 1,
"is_parallel": true,
"self": 0.00040954399810289033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018251010014864733,
"count": 10,
"is_parallel": true,
"self": 0.0018251010014864733
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1015.4751123680053,
"count": 90927,
"is_parallel": true,
"self": 48.28619834217261,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.520327178905063,
"count": 90927,
"is_parallel": true,
"self": 27.520327178905063
},
"communicator.exchange": {
"total": 779.0861997340371,
"count": 90927,
"is_parallel": true,
"self": 779.0861997340371
},
"steps_from_proto": {
"total": 160.5823871128905,
"count": 90927,
"is_parallel": true,
"self": 28.509026324361912,
"children": {
"_process_rank_one_or_two_observation": {
"total": 132.0733607885286,
"count": 909270,
"is_parallel": true,
"self": 132.0733607885286
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 587.6413437360552,
"count": 90928,
"self": 2.248193327007357,
"children": {
"process_trajectory": {
"total": 259.7026438380535,
"count": 90928,
"self": 247.48320745705496,
"children": {
"RLTrainer._checkpoint": {
"total": 12.219436380998559,
"count": 20,
"self": 12.219436380998559
}
}
},
"_update_policy": {
"total": 325.69050657099433,
"count": 454,
"self": 190.89637654200396,
"children": {
"TorchPPOOptimizer.update": {
"total": 134.79413002899037,
"count": 5448,
"self": 134.79413002899037
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.180002962239087e-07,
"count": 1,
"self": 9.180002962239087e-07
},
"TrainerController._save_models": {
"total": 0.6323928900001192,
"count": 1,
"self": 0.02648488399972848,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6059080060003907,
"count": 1,
"self": 0.6059080060003907
}
}
}
}
}
}
}