{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7194647192955017,
"min": 0.7194647192955017,
"max": 2.8622305393218994,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6837.79296875,
"min": 6837.79296875,
"max": 29217.650390625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.119627952575684,
"min": 0.5556190013885498,
"max": 13.255434036254883,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2558.327392578125,
"min": 107.79008483886719,
"max": 2717.364013671875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07521530454287845,
"min": 0.06051951421606788,
"max": 0.07805147594555426,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3008612181715138,
"min": 0.24356117049704634,
"max": 0.3902573797277713,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18542270114024478,
"min": 0.12297080015631245,
"max": 0.3004791034641219,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7416908045609791,
"min": 0.4918832006252498,
"max": 1.4654213815343147,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 3.590909090909091,
"max": 26.10909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 158.0,
"max": 1436.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 3.590909090909091,
"max": 26.10909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 158.0,
"max": 1436.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743758317",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743758772"
},
"total": 454.73605424599987,
"count": 1,
"self": 0.4247488999999405,
"children": {
"run_training.setup": {
"total": 0.020548404000010123,
"count": 1,
"self": 0.020548404000010123
},
"TrainerController.start_learning": {
"total": 454.2907569419999,
"count": 1,
"self": 0.4770669470133271,
"children": {
"TrainerController._reset_env": {
"total": 2.5925442190000467,
"count": 1,
"self": 2.5925442190000467
},
"TrainerController.advance": {
"total": 451.13060916598647,
"count": 18192,
"self": 0.4578248550117223,
"children": {
"env_step": {
"total": 322.180561111991,
"count": 18192,
"self": 245.12905699197495,
"children": {
"SubprocessEnvManager._take_step": {
"total": 76.79175942900815,
"count": 18192,
"self": 1.3732451650025723,
"children": {
"TorchPolicy.evaluate": {
"total": 75.41851426400558,
"count": 18192,
"self": 75.41851426400558
}
}
},
"workers": {
"total": 0.25974469100788156,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 452.707638532989,
"count": 18192,
"is_parallel": true,
"self": 238.77242364098788,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002085001999830638,
"count": 1,
"is_parallel": true,
"self": 0.000658315000237053,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001426686999593585,
"count": 10,
"is_parallel": true,
"self": 0.001426686999593585
}
}
},
"UnityEnvironment.step": {
"total": 0.04687822400001096,
"count": 1,
"is_parallel": true,
"self": 0.0006168800000523333,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003647009998530848,
"count": 1,
"is_parallel": true,
"self": 0.0003647009998530848
},
"communicator.exchange": {
"total": 0.04406567400019412,
"count": 1,
"is_parallel": true,
"self": 0.04406567400019412
},
"steps_from_proto": {
"total": 0.0018309689999114198,
"count": 1,
"is_parallel": true,
"self": 0.0003984370000580384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014325319998533814,
"count": 10,
"is_parallel": true,
"self": 0.0014325319998533814
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 213.9352148920011,
"count": 18191,
"is_parallel": true,
"self": 9.862520310967284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.45416728000464,
"count": 18191,
"is_parallel": true,
"self": 5.45416728000464
},
"communicator.exchange": {
"total": 166.96984781003903,
"count": 18191,
"is_parallel": true,
"self": 166.96984781003903
},
"steps_from_proto": {
"total": 31.648679490990162,
"count": 18191,
"is_parallel": true,
"self": 5.87286216798384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.775817323006322,
"count": 181910,
"is_parallel": true,
"self": 25.775817323006322
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 128.49222319898377,
"count": 18192,
"self": 0.6051092199450068,
"children": {
"process_trajectory": {
"total": 27.503953281039458,
"count": 18192,
"self": 27.03931207803953,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4646412029999283,
"count": 4,
"self": 0.4646412029999283
}
}
},
"_update_policy": {
"total": 100.3831606979993,
"count": 90,
"self": 40.081343229001504,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.3018174689978,
"count": 4587,
"self": 60.3018174689978
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0090000159834744e-06,
"count": 1,
"self": 1.0090000159834744e-06
},
"TrainerController._save_models": {
"total": 0.09053560100005598,
"count": 1,
"self": 0.0012056269999902725,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0893299740000657,
"count": 1,
"self": 0.0893299740000657
}
}
}
}
}
}
}