{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8437018990516663,
"min": 0.8437018990516663,
"max": 2.822126865386963,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8018.54296875,
"min": 8018.54296875,
"max": 28808.271484375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.028243064880371,
"min": 0.412729948759079,
"max": 13.115806579589844,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2540.50732421875,
"min": 80.06961059570312,
"max": 2675.62451171875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0715928416773072,
"min": 0.06157174554913689,
"max": 0.07717571544665534,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2863713667092288,
"min": 0.2557778270644371,
"max": 0.36200885188893656,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23606729865366335,
"min": 0.1346975036762089,
"max": 0.2894128752397556,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9442691946146534,
"min": 0.5387900147048356,
"max": 1.3765486510945302,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.90909090909091,
"min": 3.772727272727273,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1096.0,
"min": 166.0,
"max": 1414.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.90909090909091,
"min": 3.772727272727273,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1096.0,
"min": 166.0,
"max": 1414.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751339862",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget6 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751340305"
},
"total": 442.85179600599986,
"count": 1,
"self": 0.4405316729998958,
"children": {
"run_training.setup": {
"total": 0.021176423999804683,
"count": 1,
"self": 0.021176423999804683
},
"TrainerController.start_learning": {
"total": 442.39008790900016,
"count": 1,
"self": 0.44945951103818516,
"children": {
"TrainerController._reset_env": {
"total": 2.6912118239997653,
"count": 1,
"self": 2.6912118239997653
},
"TrainerController.advance": {
"total": 439.1644211799621,
"count": 18192,
"self": 0.42259671100191554,
"children": {
"env_step": {
"total": 319.49093538896886,
"count": 18192,
"self": 246.83324435895156,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.40853234503174,
"count": 18192,
"self": 1.3330535180339211,
"children": {
"TorchPolicy.evaluate": {
"total": 71.07547882699782,
"count": 18192,
"self": 71.07547882699782
}
}
},
"workers": {
"total": 0.24915868498555938,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 440.8837808390199,
"count": 18192,
"is_parallel": true,
"self": 225.00431562901167,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029889270003877755,
"count": 1,
"is_parallel": true,
"self": 0.0008300410004267178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021588859999610577,
"count": 10,
"is_parallel": true,
"self": 0.0021588859999610577
}
}
},
"UnityEnvironment.step": {
"total": 0.08957917799989445,
"count": 1,
"is_parallel": true,
"self": 0.000613992000126018,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038933199994062306,
"count": 1,
"is_parallel": true,
"self": 0.00038933199994062306
},
"communicator.exchange": {
"total": 0.086694523999995,
"count": 1,
"is_parallel": true,
"self": 0.086694523999995
},
"steps_from_proto": {
"total": 0.0018813299998328148,
"count": 1,
"is_parallel": true,
"self": 0.00040066900010060635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014806609997322084,
"count": 10,
"is_parallel": true,
"self": 0.0014806609997322084
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 215.87946521000822,
"count": 18191,
"is_parallel": true,
"self": 10.035845870977937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.7807686709884365,
"count": 18191,
"is_parallel": true,
"self": 5.7807686709884365
},
"communicator.exchange": {
"total": 166.87303858603036,
"count": 18191,
"is_parallel": true,
"self": 166.87303858603036
},
"steps_from_proto": {
"total": 33.189812082011485,
"count": 18191,
"is_parallel": true,
"self": 6.032252333039651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.157559748971835,
"count": 181910,
"is_parallel": true,
"self": 27.157559748971835
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 119.25088907999134,
"count": 18192,
"self": 0.5602205559657705,
"children": {
"process_trajectory": {
"total": 27.32768614702718,
"count": 18192,
"self": 26.906036857026265,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42164929000091433,
"count": 4,
"self": 0.42164929000091433
}
}
},
"_update_policy": {
"total": 91.36298237699839,
"count": 90,
"self": 37.64945081200494,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.713531564993445,
"count": 4587,
"self": 53.713531564993445
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3149997357686516e-06,
"count": 1,
"self": 1.3149997357686516e-06
},
"TrainerController._save_models": {
"total": 0.08499407900035294,
"count": 1,
"self": 0.0008617650000815047,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08413231400027144,
"count": 1,
"self": 0.08413231400027144
}
}
}
}
}
}
}