{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5212552547454834,
"min": 0.5212552547454834,
"max": 2.823472499847412,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5366.84423828125,
"min": 5211.24267578125,
"max": 28822.0078125,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 26.778047561645508,
"min": 0.4260154664516449,
"max": 26.778047561645508,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5462.7216796875,
"min": 82.64700317382812,
"max": 5477.35302734375,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07018861216772004,
"min": 0.06425004584764846,
"max": 0.07332311038478699,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3509430608386002,
"min": 0.25845869857927456,
"max": 0.35843144043567865,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17590927585082894,
"min": 0.16702564289409885,
"max": 0.3398141638671651,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8795463792541447,
"min": 0.6681025715763954,
"max": 1.6990708193358255,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333332e-06,
"min": 5.288098237333332e-06,
"max": 0.0002945880018039999,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.644049118666666e-05,
"min": 2.644049118666666e-05,
"max": 0.00142344002552,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666664,
"min": 0.10176266666666664,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333332,
"min": 0.42025066666666666,
"max": 0.9744800000000001,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 4.507706666666664e-05,
"min": 4.507706666666664e-05,
"max": 0.0019641003999999996,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00022538533333333322,
"min": 0.00022538533333333322,
"max": 0.009492152,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.30909090909091,
"min": 4.431818181818182,
"max": 26.745454545454546,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1447.0,
"min": 195.0,
"max": 1471.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.30909090909091,
"min": 4.431818181818182,
"max": 26.745454545454546,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1447.0,
"min": 195.0,
"max": 1471.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764586598",
"python_version": "3.10.6 | packaged by conda-forge | (main, Aug 22 2022, 20:35:26) [GCC 10.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764587373"
},
"total": 775.564756849,
"count": 1,
"self": 0.43530339799997364,
"children": {
"run_training.setup": {
"total": 0.029861223000011705,
"count": 1,
"self": 0.029861223000011705
},
"TrainerController.start_learning": {
"total": 775.099592228,
"count": 1,
"self": 0.6251697349671304,
"children": {
"TrainerController._reset_env": {
"total": 3.1322960189999094,
"count": 1,
"self": 3.1322960189999094
},
"TrainerController.advance": {
"total": 771.2244012730332,
"count": 27328,
"self": 0.6310758190234083,
"children": {
"env_step": {
"total": 488.79915576399105,
"count": 27328,
"self": 383.303813302002,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.12632256299366,
"count": 27328,
"self": 1.9497518859868705,
"children": {
"TorchPolicy.evaluate": {
"total": 103.1765706770068,
"count": 27328,
"self": 103.1765706770068
}
}
},
"workers": {
"total": 0.3690198989953615,
"count": 27328,
"self": 0.0,
"children": {
"worker_root": {
"total": 772.4082089249906,
"count": 27328,
"is_parallel": true,
"self": 436.9386260549818,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005678278999994291,
"count": 1,
"is_parallel": true,
"self": 0.004171400999894104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001506878000100187,
"count": 10,
"is_parallel": true,
"self": 0.001506878000100187
}
}
},
"UnityEnvironment.step": {
"total": 0.0349108629999364,
"count": 1,
"is_parallel": true,
"self": 0.0006066439999585782,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004011209999816856,
"count": 1,
"is_parallel": true,
"self": 0.0004011209999816856
},
"communicator.exchange": {
"total": 0.03206100399995648,
"count": 1,
"is_parallel": true,
"self": 0.03206100399995648
},
"steps_from_proto": {
"total": 0.0018420940000396513,
"count": 1,
"is_parallel": true,
"self": 0.0003755929999442742,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001466501000095377,
"count": 10,
"is_parallel": true,
"self": 0.001466501000095377
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 335.4695828700088,
"count": 27327,
"is_parallel": true,
"self": 15.483895376006103,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.163920658994584,
"count": 27327,
"is_parallel": true,
"self": 8.163920658994584
},
"communicator.exchange": {
"total": 257.42597281699784,
"count": 27327,
"is_parallel": true,
"self": 257.42597281699784
},
"steps_from_proto": {
"total": 54.39579401801029,
"count": 27327,
"is_parallel": true,
"self": 10.227048283090198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 44.168745734920094,
"count": 273270,
"is_parallel": true,
"self": 44.168745734920094
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 281.79416969001875,
"count": 27328,
"self": 0.7822801750253348,
"children": {
"process_trajectory": {
"total": 40.24612741099281,
"count": 27328,
"self": 39.47031623099258,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7758111800002325,
"count": 6,
"self": 0.7758111800002325
}
}
},
"_update_policy": {
"total": 240.7657621040006,
"count": 136,
"self": 98.20917119098988,
"children": {
"TorchPPOOptimizer.update": {
"total": 142.55659091301072,
"count": 11555,
"self": 142.55659091301072
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.629998203308787e-07,
"count": 1,
"self": 9.629998203308787e-07
},
"TrainerController._save_models": {
"total": 0.11772423799993703,
"count": 1,
"self": 0.0012363579999146168,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11648788000002241,
"count": 1,
"self": 0.11648788000002241
}
}
}
}
}
}
}