{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9133936762809753,
"min": 0.9133936762809753,
"max": 2.879502534866333,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8680.8935546875,
"min": 8680.8935546875,
"max": 29393.962890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.443099021911621,
"min": 0.46676257252693176,
"max": 12.443099021911621,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2426.404296875,
"min": 90.55194091796875,
"max": 2507.48779296875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.066661940322471,
"min": 0.06392240664561041,
"max": 0.07602945301286858,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.266647761289884,
"min": 0.2617452530690107,
"max": 0.3801472650643429,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2071432456666348,
"min": 0.10288163358984771,
"max": 0.29552779178701194,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8285729826665392,
"min": 0.41152653435939085,
"max": 1.4634633426572763,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.3999999999999999,
"max": 0.4999999999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.886363636363637,
"min": 2.8863636363636362,
"max": 24.886363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1095.0,
"min": 127.0,
"max": 1366.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.886363636363637,
"min": 2.8863636363636362,
"max": 24.886363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1095.0,
"min": 127.0,
"max": 1366.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745491733",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745492160"
},
"total": 426.77446818199985,
"count": 1,
"self": 0.4382948539998779,
"children": {
"run_training.setup": {
"total": 0.030650683999965622,
"count": 1,
"self": 0.030650683999965622
},
"TrainerController.start_learning": {
"total": 426.305522644,
"count": 1,
"self": 0.34828151900001103,
"children": {
"TrainerController._reset_env": {
"total": 3.1168185569999878,
"count": 1,
"self": 3.1168185569999878
},
"TrainerController.advance": {
"total": 422.7575020080002,
"count": 18192,
"self": 0.37912960800531437,
"children": {
"env_step": {
"total": 299.7707756819931,
"count": 18192,
"self": 228.09861133396964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.4646508600124,
"count": 18192,
"self": 1.2644228280170182,
"children": {
"TorchPolicy.evaluate": {
"total": 70.20022803199538,
"count": 18192,
"self": 70.20022803199538
}
}
},
"workers": {
"total": 0.2075134880110454,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 424.7603650919973,
"count": 18192,
"is_parallel": true,
"self": 225.08678720399882,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007184316000007129,
"count": 1,
"is_parallel": true,
"self": 0.004889019999836819,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022952960001703104,
"count": 10,
"is_parallel": true,
"self": 0.0022952960001703104
}
}
},
"UnityEnvironment.step": {
"total": 0.036951810000005025,
"count": 1,
"is_parallel": true,
"self": 0.0005844190000061644,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042028599989407667,
"count": 1,
"is_parallel": true,
"self": 0.00042028599989407667
},
"communicator.exchange": {
"total": 0.034160087000032036,
"count": 1,
"is_parallel": true,
"self": 0.034160087000032036
},
"steps_from_proto": {
"total": 0.0017870180000727487,
"count": 1,
"is_parallel": true,
"self": 0.00037043200006792176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001416586000004827,
"count": 10,
"is_parallel": true,
"self": 0.001416586000004827
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 199.67357788799848,
"count": 18191,
"is_parallel": true,
"self": 9.694810009022149,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.428675821998127,
"count": 18191,
"is_parallel": true,
"self": 5.428675821998127
},
"communicator.exchange": {
"total": 152.7670333039962,
"count": 18191,
"is_parallel": true,
"self": 152.7670333039962
},
"steps_from_proto": {
"total": 31.783058752982015,
"count": 18191,
"is_parallel": true,
"self": 5.723447407998947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.059611344983068,
"count": 181910,
"is_parallel": true,
"self": 26.059611344983068
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 122.60759671800179,
"count": 18192,
"self": 0.4462921970025491,
"children": {
"process_trajectory": {
"total": 27.898740676999296,
"count": 18192,
"self": 27.49108206499932,
"children": {
"RLTrainer._checkpoint": {
"total": 0.40765861199997744,
"count": 4,
"self": 0.40765861199997744
}
}
},
"_update_policy": {
"total": 94.26256384399994,
"count": 90,
"self": 37.92400818599731,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.33855565800263,
"count": 4587,
"self": 56.33855565800263
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.009997938846936e-07,
"count": 1,
"self": 9.009997938846936e-07
},
"TrainerController._save_models": {
"total": 0.08291965900002651,
"count": 1,
"self": 0.0008249750001141365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08209468399991238,
"count": 1,
"self": 0.08209468399991238
}
}
}
}
}
}
}