{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7740551233291626,
"min": 0.7730932235717773,
"max": 2.8372554779052734,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7356.61962890625,
"min": 7356.61962890625,
"max": 28962.703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.02734088897705,
"min": 0.41042274236679077,
"max": 13.02734088897705,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2540.33154296875,
"min": 79.62200927734375,
"max": 2650.6767578125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.101150900792181,
"min": 0.09373601831783376,
"max": 0.10277270829644347,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.404603603168724,
"min": 0.3802865994004213,
"max": 0.5138635414822174,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2043496016595586,
"min": 0.11527159011497234,
"max": 0.27423359815250425,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8173984066382344,
"min": 0.4610863604598894,
"max": 1.25942898483253,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269399999999997,
"min": 0.10269399999999997,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077599999999986,
"min": 0.41077599999999986,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.004864970599999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828000000003,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 3.340909090909091,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 147.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 3.340909090909091,
"max": 25.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 147.0,
"max": 1420.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739878091",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739878573"
},
"total": 482.154772243,
"count": 1,
"self": 0.7517600840001251,
"children": {
"run_training.setup": {
"total": 0.023518303999935597,
"count": 1,
"self": 0.023518303999935597
},
"TrainerController.start_learning": {
"total": 481.37949385499996,
"count": 1,
"self": 0.3728264659898741,
"children": {
"TrainerController._reset_env": {
"total": 3.2119858529999874,
"count": 1,
"self": 3.2119858529999874
},
"TrainerController.advance": {
"total": 477.66402210101023,
"count": 18192,
"self": 0.3624904570106082,
"children": {
"env_step": {
"total": 298.6962199630145,
"count": 18192,
"self": 226.2206120640435,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.25962844198807,
"count": 18192,
"self": 1.246361858992941,
"children": {
"TorchPolicy.evaluate": {
"total": 71.01326658299513,
"count": 18192,
"self": 71.01326658299513
}
}
},
"workers": {
"total": 0.21597945698295007,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 479.8521826609924,
"count": 18192,
"is_parallel": true,
"self": 281.75526081197677,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005997724000053495,
"count": 1,
"is_parallel": true,
"self": 0.0043738780000239785,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016238460000295163,
"count": 10,
"is_parallel": true,
"self": 0.0016238460000295163
}
}
},
"UnityEnvironment.step": {
"total": 0.04395548100001179,
"count": 1,
"is_parallel": true,
"self": 0.0006084840001676639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039963599988368514,
"count": 1,
"is_parallel": true,
"self": 0.00039963599988368514
},
"communicator.exchange": {
"total": 0.04111294399990584,
"count": 1,
"is_parallel": true,
"self": 0.04111294399990584
},
"steps_from_proto": {
"total": 0.0018344170000546,
"count": 1,
"is_parallel": true,
"self": 0.00032962999944174953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015047870006128505,
"count": 10,
"is_parallel": true,
"self": 0.0015047870006128505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 198.09692184901564,
"count": 18191,
"is_parallel": true,
"self": 9.597537127012629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.184328476025485,
"count": 18191,
"is_parallel": true,
"self": 5.184328476025485
},
"communicator.exchange": {
"total": 152.76287805698075,
"count": 18191,
"is_parallel": true,
"self": 152.76287805698075
},
"steps_from_proto": {
"total": 30.552178188996777,
"count": 18191,
"is_parallel": true,
"self": 5.453911055014032,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.098267133982745,
"count": 181910,
"is_parallel": true,
"self": 25.098267133982745
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 178.6053116809851,
"count": 18192,
"self": 0.4554956390031748,
"children": {
"process_trajectory": {
"total": 26.797175010982755,
"count": 18192,
"self": 26.33430542598262,
"children": {
"RLTrainer._checkpoint": {
"total": 0.462869585000135,
"count": 4,
"self": 0.462869585000135
}
}
},
"_update_policy": {
"total": 151.35264103099917,
"count": 90,
"self": 39.94573282600368,
"children": {
"TorchPPOOptimizer.update": {
"total": 111.40690820499549,
"count": 9177,
"self": 111.40690820499549
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1839999842777615e-06,
"count": 1,
"self": 1.1839999842777615e-06
},
"TrainerController._save_models": {
"total": 0.13065825099988615,
"count": 1,
"self": 0.002564974999813785,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12809327600007236,
"count": 1,
"self": 0.12809327600007236
}
}
}
}
}
}
}