{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9118564128875732,
"min": 0.9118564128875732,
"max": 2.832993268966675,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8666.283203125,
"min": 8666.283203125,
"max": 28919.1953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.837214469909668,
"min": 0.4206913709640503,
"max": 12.837214469909668,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2503.2568359375,
"min": 81.61412811279297,
"max": 2582.41943359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06743148619787855,
"min": 0.06373934725507199,
"max": 0.0774532049970231,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2697259447915142,
"min": 0.25495738902028797,
"max": 0.36770317113077194,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22849008893849804,
"min": 0.14344039291837343,
"max": 0.31220642597067594,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9139603557539921,
"min": 0.5737615716734937,
"max": 1.5610321298533796,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.431818181818183,
"min": 4.045454545454546,
"max": 25.431818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1119.0,
"min": 178.0,
"max": 1382.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.431818181818183,
"min": 4.045454545454546,
"max": 25.431818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1119.0,
"min": 178.0,
"max": 1382.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1759650763",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1759651218"
},
"total": 455.38095185200007,
"count": 1,
"self": 0.4816686600000821,
"children": {
"run_training.setup": {
"total": 0.039217625999981465,
"count": 1,
"self": 0.039217625999981465
},
"TrainerController.start_learning": {
"total": 454.860065566,
"count": 1,
"self": 0.3753794420177883,
"children": {
"TrainerController._reset_env": {
"total": 2.9946864200001073,
"count": 1,
"self": 2.9946864200001073
},
"TrainerController.advance": {
"total": 451.40460073598217,
"count": 18192,
"self": 0.3872321769838436,
"children": {
"env_step": {
"total": 330.45870954800057,
"count": 18192,
"self": 259.2618714820112,
"children": {
"SubprocessEnvManager._take_step": {
"total": 70.96882669299055,
"count": 18192,
"self": 1.3060785800000758,
"children": {
"TorchPolicy.evaluate": {
"total": 69.66274811299047,
"count": 18192,
"self": 69.66274811299047
}
}
},
"workers": {
"total": 0.22801137299882157,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 453.0523103810017,
"count": 18192,
"is_parallel": true,
"self": 225.3879754229929,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004458638999949471,
"count": 1,
"is_parallel": true,
"self": 0.002993617000015547,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014650219999339242,
"count": 10,
"is_parallel": true,
"self": 0.0014650219999339242
}
}
},
"UnityEnvironment.step": {
"total": 0.03814949100001286,
"count": 1,
"is_parallel": true,
"self": 0.00065284199990856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040918700005931896,
"count": 1,
"is_parallel": true,
"self": 0.00040918700005931896
},
"communicator.exchange": {
"total": 0.03497177600002033,
"count": 1,
"is_parallel": true,
"self": 0.03497177600002033
},
"steps_from_proto": {
"total": 0.0021156860000246525,
"count": 1,
"is_parallel": true,
"self": 0.000421807999828161,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016938780001964915,
"count": 10,
"is_parallel": true,
"self": 0.0016938780001964915
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 227.6643349580088,
"count": 18191,
"is_parallel": true,
"self": 10.71451547098286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.732321902014519,
"count": 18191,
"is_parallel": true,
"self": 5.732321902014519
},
"communicator.exchange": {
"total": 172.985250876004,
"count": 18191,
"is_parallel": true,
"self": 172.985250876004
},
"steps_from_proto": {
"total": 38.23224670900743,
"count": 18191,
"is_parallel": true,
"self": 6.813124824983333,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.419121884024094,
"count": 181910,
"is_parallel": true,
"self": 31.419121884024094
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 120.55865901099776,
"count": 18192,
"self": 0.4549206459961397,
"children": {
"process_trajectory": {
"total": 27.195670438002594,
"count": 18192,
"self": 26.803068156002382,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39260228200021174,
"count": 4,
"self": 0.39260228200021174
}
}
},
"_update_policy": {
"total": 92.90806792699902,
"count": 90,
"self": 38.66591609599777,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.24215183100125,
"count": 4587,
"self": 54.24215183100125
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0179999208048685e-06,
"count": 1,
"self": 1.0179999208048685e-06
},
"TrainerController._save_models": {
"total": 0.08539795000001504,
"count": 1,
"self": 0.0007496550001633295,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08464829499985171,
"count": 1,
"self": 0.08464829499985171
}
}
}
}
}
}
}