{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8668443560600281,
"min": 0.8668443560600281,
"max": 2.863755702972412,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8286.1650390625,
"min": 8286.1650390625,
"max": 29327.72265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.863457679748535,
"min": 0.46224677562713623,
"max": 12.863457679748535,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2508.374267578125,
"min": 89.67587280273438,
"max": 2614.728759765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06866685939380926,
"min": 0.05915796499971561,
"max": 0.0760819138952202,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27466743757523704,
"min": 0.255905486122584,
"max": 0.38040956947610105,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18427617409650016,
"min": 0.1363371276620812,
"max": 0.2557823061943054,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7371046963860006,
"min": 0.5453485106483248,
"max": 1.231887838419746,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.347009730600001e-05,
"min": 1.347009730600001e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400004e-05,
"min": 5.388038922400004e-05,
"max": 0.00230860003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.931818181818183,
"min": 3.772727272727273,
"max": 25.931818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1141.0,
"min": 166.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.931818181818183,
"min": 3.772727272727273,
"max": 25.931818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1141.0,
"min": 166.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728301472",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ../config/ppo/SnowballTarget2.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728302039"
},
"total": 567.1566313940002,
"count": 1,
"self": 0.5272754200004783,
"children": {
"run_training.setup": {
"total": 0.07268235199990158,
"count": 1,
"self": 0.07268235199990158
},
"TrainerController.start_learning": {
"total": 566.5566736219998,
"count": 1,
"self": 0.8787547879746853,
"children": {
"TrainerController._reset_env": {
"total": 2.4031638490005207,
"count": 1,
"self": 2.4031638490005207
},
"TrainerController.advance": {
"total": 563.1931225570243,
"count": 18203,
"self": 0.4416712740821822,
"children": {
"env_step": {
"total": 562.7514512829421,
"count": 18203,
"self": 425.4850428569134,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.85608094999498,
"count": 18203,
"self": 2.2701863251058967,
"children": {
"TorchPolicy.evaluate": {
"total": 134.58589462488908,
"count": 18203,
"self": 134.58589462488908
}
}
},
"workers": {
"total": 0.4103274760336717,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 564.7037608690362,
"count": 18203,
"is_parallel": true,
"self": 259.6087391550409,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0034653940001589945,
"count": 1,
"is_parallel": true,
"self": 0.001138624001214339,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023267699989446555,
"count": 10,
"is_parallel": true,
"self": 0.0023267699989446555
}
}
},
"UnityEnvironment.step": {
"total": 0.048433474999910686,
"count": 1,
"is_parallel": true,
"self": 0.0011167940001541865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004689799998232047,
"count": 1,
"is_parallel": true,
"self": 0.0004689799998232047
},
"communicator.exchange": {
"total": 0.04414669200014032,
"count": 1,
"is_parallel": true,
"self": 0.04414669200014032
},
"steps_from_proto": {
"total": 0.0027010089997929754,
"count": 1,
"is_parallel": true,
"self": 0.0004508740012170165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002250134998575959,
"count": 10,
"is_parallel": true,
"self": 0.002250134998575959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 305.09502171399527,
"count": 18202,
"is_parallel": true,
"self": 14.682019873879653,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.409966176977832,
"count": 18202,
"is_parallel": true,
"self": 7.409966176977832
},
"communicator.exchange": {
"total": 239.61788703007096,
"count": 18202,
"is_parallel": true,
"self": 239.61788703007096
},
"steps_from_proto": {
"total": 43.38514863306682,
"count": 18202,
"is_parallel": true,
"self": 8.557538232142178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.82761040092464,
"count": 182020,
"is_parallel": true,
"self": 34.82761040092464
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014916700001776917,
"count": 1,
"self": 0.00014916700001776917,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 556.5169647229022,
"count": 745308,
"is_parallel": true,
"self": 17.246539132979706,
"children": {
"process_trajectory": {
"total": 312.6194226019261,
"count": 745308,
"is_parallel": true,
"self": 301.6695909449236,
"children": {
"RLTrainer._checkpoint": {
"total": 10.949831657002505,
"count": 40,
"is_parallel": true,
"self": 10.949831657002505
}
}
},
"_update_policy": {
"total": 226.65100298799643,
"count": 90,
"is_parallel": true,
"self": 68.8151287230121,
"children": {
"TorchPPOOptimizer.update": {
"total": 157.83587426498434,
"count": 4584,
"is_parallel": true,
"self": 157.83587426498434
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08148326100035774,
"count": 1,
"self": 0.0008534940006938996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08062976699966384,
"count": 1,
"self": 0.08062976699966384
}
}
}
}
}
}
}