{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9091355800628662,
"min": 0.9001529216766357,
"max": 1.4321595430374146,
"count": 11
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8700.427734375,
"min": 1071.25537109375,
"max": 13953.9892578125,
"count": 11
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 99936.0,
"max": 199960.0,
"count": 11
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 99936.0,
"max": 199960.0,
"count": 11
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.951735496520996,
"min": 9.393911361694336,
"max": 12.951735496520996,
"count": 11
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2525.58837890625,
"min": 46.96955490112305,
"max": 2628.22265625,
"count": 11
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07301025085882215,
"min": 0.06562597083461964,
"max": 0.07566091545661181,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2920410034352886,
"min": 0.263968915295075,
"max": 0.37830457728305905,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18530471491463044,
"min": 0.18530471491463044,
"max": 0.24312238249124266,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7412188596585217,
"min": 0.7412188596585217,
"max": 1.2156119124562133,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.158097614000008e-06,
"min": 7.158097614000008e-06,
"max": 0.00014245805251400002,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.863239045600003e-05,
"min": 2.863239045600003e-05,
"max": 0.00063804028732,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102386,
"min": 0.102386,
"max": 0.14748600000000003,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.409544,
"min": 0.409544,
"max": 0.71268,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001290614000000001,
"min": 0.0001290614000000001,
"max": 0.0023795514,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005162456000000004,
"min": 0.0005162456000000004,
"max": 0.010662732000000001,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.90909090909091,
"min": 21.0,
"max": 25.90909090909091,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1140.0,
"min": 924.0,
"max": 1405.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.90909090909091,
"min": 21.0,
"max": 25.90909090909091,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1140.0,
"min": 924.0,
"max": 1405.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1762573223",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/mahir_05/.local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --resume --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1762573482"
},
"total": 259.20728806400007,
"count": 1,
"self": 0.4243668650001382,
"children": {
"run_training.setup": {
"total": 0.04421666099983668,
"count": 1,
"self": 0.04421666099983668
},
"TrainerController.start_learning": {
"total": 258.7387045380001,
"count": 1,
"self": 0.3205412700342549,
"children": {
"TrainerController._reset_env": {
"total": 2.2018862420000005,
"count": 1,
"self": 2.2018862420000005
},
"TrainerController.advance": {
"total": 256.08054451296607,
"count": 9141,
"self": 0.16192801097190568,
"children": {
"env_step": {
"total": 255.91861650199417,
"count": 9141,
"self": 195.83209225296287,
"children": {
"SubprocessEnvManager._take_step": {
"total": 59.90510187802397,
"count": 9141,
"self": 0.9518246300394821,
"children": {
"TorchPolicy.evaluate": {
"total": 58.95327724798449,
"count": 9141,
"self": 58.95327724798449
}
}
},
"workers": {
"total": 0.18142237100732928,
"count": 9141,
"self": 0.0,
"children": {
"worker_root": {
"total": 258.0529505519944,
"count": 9141,
"is_parallel": true,
"self": 113.0952482069813,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003308736000008139,
"count": 1,
"is_parallel": true,
"self": 0.0009933759997693414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023153600002387975,
"count": 10,
"is_parallel": true,
"self": 0.0023153600002387975
}
}
},
"UnityEnvironment.step": {
"total": 0.03805204400009643,
"count": 1,
"is_parallel": true,
"self": 0.0009476550003455486,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037461800002347445,
"count": 1,
"is_parallel": true,
"self": 0.00037461800002347445
},
"communicator.exchange": {
"total": 0.03550346899987744,
"count": 1,
"is_parallel": true,
"self": 0.03550346899987744
},
"steps_from_proto": {
"total": 0.0012263019998499658,
"count": 1,
"is_parallel": true,
"self": 0.0002586880000308156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009676139998191502,
"count": 10,
"is_parallel": true,
"self": 0.0009676139998191502
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 144.9577023450131,
"count": 9140,
"is_parallel": true,
"self": 6.478769567988138,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.5494719750165586,
"count": 9140,
"is_parallel": true,
"self": 3.5494719750165586
},
"communicator.exchange": {
"total": 114.81642529100168,
"count": 9140,
"is_parallel": true,
"self": 114.81642529100168
},
"steps_from_proto": {
"total": 20.11303551100673,
"count": 9140,
"is_parallel": true,
"self": 3.6957827760056716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 16.417252735001057,
"count": 91400,
"is_parallel": true,
"self": 16.417252735001057
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.4903999878442846e-05,
"count": 1,
"self": 4.4903999878442846e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 254.92911742509273,
"count": 133852,
"is_parallel": true,
"self": 3.370166481117849,
"children": {
"process_trajectory": {
"total": 142.23732054697552,
"count": 133852,
"is_parallel": true,
"self": 141.48433644697502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7529841000005035,
"count": 3,
"is_parallel": true,
"self": 0.7529841000005035
}
}
},
"_update_policy": {
"total": 109.32163039699935,
"count": 45,
"is_parallel": true,
"self": 22.453262275003908,
"children": {
"TorchPPOOptimizer.update": {
"total": 86.86836812199545,
"count": 2292,
"is_parallel": true,
"self": 86.86836812199545
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13568760899988774,
"count": 1,
"self": 0.002437639999698149,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1332499690001896,
"count": 1,
"self": 0.1332499690001896
}
}
}
}
}
}
}