{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8749735951423645,
"min": 0.8749735951423645,
"max": 2.8635120391845703,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8315.7490234375,
"min": 8315.7490234375,
"max": 29230.73046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.047510147094727,
"min": 0.26942193508148193,
"max": 13.047510147094727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2544.264404296875,
"min": 52.267852783203125,
"max": 2643.146240234375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.068218174416674,
"min": 0.06318738936976183,
"max": 0.07303378279417909,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.272872697666696,
"min": 0.2593386250065997,
"max": 0.35502652981085703,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18483528513096126,
"min": 0.11620438947141462,
"max": 0.30221911731596085,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.739341140523845,
"min": 0.4648175578856585,
"max": 1.399535254812708,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.0,
"min": 3.25,
"max": 26.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1144.0,
"min": 143.0,
"max": 1428.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.0,
"min": 3.25,
"max": 26.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1144.0,
"min": 143.0,
"max": 1428.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742752738",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742753207"
},
"total": 469.20625012100004,
"count": 1,
"self": 0.4420170080000503,
"children": {
"run_training.setup": {
"total": 0.023814107000021068,
"count": 1,
"self": 0.023814107000021068
},
"TrainerController.start_learning": {
"total": 468.74041900599997,
"count": 1,
"self": 0.41065170000098306,
"children": {
"TrainerController._reset_env": {
"total": 3.713666559000046,
"count": 1,
"self": 3.713666559000046
},
"TrainerController.advance": {
"total": 464.520920063999,
"count": 18192,
"self": 0.4409110289948899,
"children": {
"env_step": {
"total": 330.79409583200106,
"count": 18192,
"self": 252.5366393090054,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.01549027399403,
"count": 18192,
"self": 1.4602733730007458,
"children": {
"TorchPolicy.evaluate": {
"total": 76.55521690099329,
"count": 18192,
"self": 76.55521690099329
}
}
},
"workers": {
"total": 0.24196624900162078,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 467.1518117280013,
"count": 18192,
"is_parallel": true,
"self": 246.18013512200588,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006575312000052236,
"count": 1,
"is_parallel": true,
"self": 0.004735902000106762,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018394099999454738,
"count": 10,
"is_parallel": true,
"self": 0.0018394099999454738
}
}
},
"UnityEnvironment.step": {
"total": 0.06220068699997228,
"count": 1,
"is_parallel": true,
"self": 0.0007215869999868119,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003872240000077909,
"count": 1,
"is_parallel": true,
"self": 0.0003872240000077909
},
"communicator.exchange": {
"total": 0.0592106429999717,
"count": 1,
"is_parallel": true,
"self": 0.0592106429999717
},
"steps_from_proto": {
"total": 0.0018812330000059774,
"count": 1,
"is_parallel": true,
"self": 0.00043897199998355063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014422610000224267,
"count": 10,
"is_parallel": true,
"self": 0.0014422610000224267
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 220.9716766059954,
"count": 18191,
"is_parallel": true,
"self": 10.76474504600219,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.683419834997494,
"count": 18191,
"is_parallel": true,
"self": 5.683419834997494
},
"communicator.exchange": {
"total": 171.1026856860003,
"count": 18191,
"is_parallel": true,
"self": 171.1026856860003
},
"steps_from_proto": {
"total": 33.420826038995415,
"count": 18191,
"is_parallel": true,
"self": 6.156460531989012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.264365507006403,
"count": 181910,
"is_parallel": true,
"self": 27.264365507006403
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 133.28591320300302,
"count": 18192,
"self": 0.5376427119995242,
"children": {
"process_trajectory": {
"total": 28.944250969002837,
"count": 18192,
"self": 28.40225508500282,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5419958840000163,
"count": 4,
"self": 0.5419958840000163
}
}
},
"_update_policy": {
"total": 103.80401952200066,
"count": 90,
"self": 40.98957715399274,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.81444236800792,
"count": 4587,
"self": 62.81444236800792
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.030000001075678e-07,
"count": 1,
"self": 9.030000001075678e-07
},
"TrainerController._save_models": {
"total": 0.09517977999996674,
"count": 1,
"self": 0.0010267879999901197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09415299199997662,
"count": 1,
"self": 0.09415299199997662
}
}
}
}
}
}
}