{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1203638315200806,
"min": 1.1172832250595093,
"max": 2.862736701965332,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10709.5576171875,
"min": 10709.5576171875,
"max": 29443.24609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.683660507202148,
"min": 0.2819778621196747,
"max": 12.683660507202148,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2473.313720703125,
"min": 54.703704833984375,
"max": 2573.107421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06769613626381091,
"min": 0.06352055709496342,
"max": 0.07645459437957398,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27078454505524363,
"min": 0.254195165719983,
"max": 0.3822729718978699,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19513976369418348,
"min": 0.13769672290814639,
"max": 0.2845994009837216,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7805590547767339,
"min": 0.5507868916325855,
"max": 1.2571641180445166,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.772727272727273,
"min": 3.1136363636363638,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1090.0,
"min": 137.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.772727272727273,
"min": 3.1136363636363638,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1090.0,
"min": 137.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728243820",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728244215"
},
"total": 395.18712970599995,
"count": 1,
"self": 0.42443193099984455,
"children": {
"run_training.setup": {
"total": 0.05345356600000173,
"count": 1,
"self": 0.05345356600000173
},
"TrainerController.start_learning": {
"total": 394.7092442090001,
"count": 1,
"self": 0.5190950670015582,
"children": {
"TrainerController._reset_env": {
"total": 2.182747935000009,
"count": 1,
"self": 2.182747935000009
},
"TrainerController.advance": {
"total": 391.94188533199855,
"count": 18202,
"self": 0.24544539199951032,
"children": {
"env_step": {
"total": 391.69643993999904,
"count": 18202,
"self": 300.9332474899993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 90.51305349800239,
"count": 18202,
"self": 1.3143109979975236,
"children": {
"TorchPolicy.evaluate": {
"total": 89.19874250000487,
"count": 18202,
"self": 89.19874250000487
}
}
},
"workers": {
"total": 0.2501389519973145,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 393.72741894900116,
"count": 18202,
"is_parallel": true,
"self": 185.43653064100312,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026542850000055296,
"count": 1,
"is_parallel": true,
"self": 0.0006286740000120972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020256109999934324,
"count": 10,
"is_parallel": true,
"self": 0.0020256109999934324
}
}
},
"UnityEnvironment.step": {
"total": 0.035369020000018736,
"count": 1,
"is_parallel": true,
"self": 0.0007001430000457276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003985509999893111,
"count": 1,
"is_parallel": true,
"self": 0.0003985509999893111
},
"communicator.exchange": {
"total": 0.03231751399999894,
"count": 1,
"is_parallel": true,
"self": 0.03231751399999894
},
"steps_from_proto": {
"total": 0.0019528119999847604,
"count": 1,
"is_parallel": true,
"self": 0.00039034799999626557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015624639999884948,
"count": 10,
"is_parallel": true,
"self": 0.0015624639999884948
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 208.29088830799805,
"count": 18201,
"is_parallel": true,
"self": 9.889641127983197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.1315510000004565,
"count": 18201,
"is_parallel": true,
"self": 5.1315510000004565
},
"communicator.exchange": {
"total": 162.1858414810095,
"count": 18201,
"is_parallel": true,
"self": 162.1858414810095
},
"steps_from_proto": {
"total": 31.08385469900489,
"count": 18201,
"is_parallel": true,
"self": 5.708354824997826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.375499874007062,
"count": 182010,
"is_parallel": true,
"self": 25.375499874007062
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013495500002136396,
"count": 1,
"self": 0.00013495500002136396,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 387.98790113798935,
"count": 529734,
"is_parallel": true,
"self": 10.260942013995702,
"children": {
"process_trajectory": {
"total": 206.15342469899352,
"count": 529734,
"is_parallel": true,
"self": 205.5945608579935,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5588638410000044,
"count": 4,
"is_parallel": true,
"self": 0.5588638410000044
}
}
},
"_update_policy": {
"total": 171.5735344250001,
"count": 90,
"is_parallel": true,
"self": 53.046851470001,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.5266829549991,
"count": 4587,
"is_parallel": true,
"self": 118.5266829549991
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.06538091999993867,
"count": 1,
"self": 0.0008990300000277784,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06448188999991089,
"count": 1,
"self": 0.06448188999991089
}
}
}
}
}
}
}