{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7093857526779175,
"min": 0.68915194272995,
"max": 2.86297607421875,
"count": 100
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6757.60888671875,
"min": 6595.18408203125,
"max": 29382.72265625,
"count": 100
},
"SnowballTarget.Step.mean": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Step.sum": {
"value": 999952.0,
"min": 9952.0,
"max": 999952.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.376633644104004,
"min": 0.451972633600235,
"max": 14.548315048217773,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2803.443603515625,
"min": 87.68269348144531,
"max": 2972.89453125,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 100
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06875585216694162,
"min": 0.062443952937182454,
"max": 0.07684726449763676,
"count": 100
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2750234086677665,
"min": 0.24977581174872981,
"max": 0.3653218817642483,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15356399132048382,
"min": 0.12475253410007367,
"max": 0.27994405535214084,
"count": 100
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6142559652819353,
"min": 0.4990101364002947,
"max": 1.3220909725217258,
"count": 100
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.8352995412000037e-06,
"min": 1.8352995412000037e-06,
"max": 0.00039783520054120005,
"count": 100
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 7.341198164800015e-06,
"min": 7.341198164800015e-06,
"max": 0.001969376007656,
"count": 100
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10045880000000001,
"min": 0.10045880000000001,
"max": 0.19945880000000002,
"count": 100
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40183520000000006,
"min": 0.40183520000000006,
"max": 0.9923440000000001,
"count": 100
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.289412000000005e-05,
"min": 3.289412000000005e-05,
"max": 0.004972994119999999,
"count": 100
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001315764800000002,
"min": 0.0001315764800000002,
"max": 0.024617965600000007,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.57777777777778,
"min": 3.8181818181818183,
"max": 28.685185185185187,
"count": 100
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1286.0,
"min": 168.0,
"max": 1577.0,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.57777777777778,
"min": 3.8181818181818183,
"max": 28.685185185185187,
"count": 100
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1286.0,
"min": 168.0,
"max": 1577.0,
"count": 100
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705752664",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705756998"
},
"total": 4333.933604594999,
"count": 1,
"self": 1.072528097999566,
"children": {
"run_training.setup": {
"total": 0.09069979600008082,
"count": 1,
"self": 0.09069979600008082
},
"TrainerController.start_learning": {
"total": 4332.770376701,
"count": 1,
"self": 5.283740808882612,
"children": {
"TrainerController._reset_env": {
"total": 5.3399123490000875,
"count": 1,
"self": 5.3399123490000875
},
"TrainerController.advance": {
"total": 4321.922220586117,
"count": 90947,
"self": 2.5106592760012063,
"children": {
"env_step": {
"total": 4319.411561310116,
"count": 90947,
"self": 3541.79149806122,
"children": {
"SubprocessEnvManager._take_step": {
"total": 775.1228558939604,
"count": 90947,
"self": 15.90126287695557,
"children": {
"TorchPolicy.evaluate": {
"total": 759.2215930170048,
"count": 90947,
"self": 759.2215930170048
}
}
},
"workers": {
"total": 2.4972073549354263,
"count": 90947,
"self": 0.0,
"children": {
"worker_root": {
"total": 4320.533252514877,
"count": 90947,
"is_parallel": true,
"self": 2488.0631194779216,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008673203999933321,
"count": 1,
"is_parallel": true,
"self": 0.005435309999938909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032378939999944123,
"count": 10,
"is_parallel": true,
"self": 0.0032378939999944123
}
}
},
"UnityEnvironment.step": {
"total": 0.049348313000109556,
"count": 1,
"is_parallel": true,
"self": 0.0007979040001373505,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000487903000021106,
"count": 1,
"is_parallel": true,
"self": 0.000487903000021106
},
"communicator.exchange": {
"total": 0.04545025700008409,
"count": 1,
"is_parallel": true,
"self": 0.04545025700008409
},
"steps_from_proto": {
"total": 0.002612248999867006,
"count": 1,
"is_parallel": true,
"self": 0.0004871229998570925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021251260000099137,
"count": 10,
"is_parallel": true,
"self": 0.0021251260000099137
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1832.4701330369555,
"count": 90946,
"is_parallel": true,
"self": 83.47777124174877,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 43.56783141201163,
"count": 90946,
"is_parallel": true,
"self": 43.56783141201163
},
"communicator.exchange": {
"total": 1448.1417905950896,
"count": 90946,
"is_parallel": true,
"self": 1448.1417905950896
},
"steps_from_proto": {
"total": 257.2827397881056,
"count": 90946,
"is_parallel": true,
"self": 52.70407596667155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 204.57866382143402,
"count": 909460,
"is_parallel": true,
"self": 204.57866382143402
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00039574000038555823,
"count": 1,
"self": 0.00039574000038555823,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4280.65498954288,
"count": 4550358,
"is_parallel": true,
"self": 111.46386916851407,
"children": {
"process_trajectory": {
"total": 1839.0748376783724,
"count": 4550358,
"is_parallel": true,
"self": 1832.8501009593717,
"children": {
"RLTrainer._checkpoint": {
"total": 6.224736719000703,
"count": 20,
"is_parallel": true,
"self": 6.224736719000703
}
}
},
"_update_policy": {
"total": 2330.1162826959935,
"count": 454,
"is_parallel": true,
"self": 539.609026397992,
"children": {
"TorchPPOOptimizer.update": {
"total": 1790.5072562980015,
"count": 38580,
"is_parallel": true,
"self": 1790.5072562980015
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22410721699998248,
"count": 1,
"self": 0.0031709009999758564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22093631600000663,
"count": 1,
"self": 0.22093631600000663
}
}
}
}
}
}
}