{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1377358436584473,
"min": 1.1377358436584473,
"max": 2.8773391246795654,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10813.0419921875,
"min": 10813.0419921875,
"max": 29371.87890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.927663803100586,
"min": 0.17838409543037415,
"max": 11.927663803100586,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2325.89453125,
"min": 34.60651397705078,
"max": 2379.1865234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.15909090909091,
"min": 2.7954545454545454,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1107.0,
"min": 123.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.15909090909091,
"min": 2.7954545454545454,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1107.0,
"min": 123.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07255346874230514,
"min": 0.0648029163366671,
"max": 0.07440038961930459,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1451069374846103,
"min": 0.1296058326733342,
"max": 0.22013614902714512,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24449044122707608,
"min": 0.10299610608078615,
"max": 0.2911515620847543,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.48898088245415217,
"min": 0.2059922121615723,
"max": 0.8207812068216942,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432097856000005e-06,
"min": 6.432097856000005e-06,
"max": 0.000290232003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.286419571200001e-05,
"min": 1.286419571200001e-05,
"max": 0.0007419960526679999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10214399999999998,
"min": 0.10214399999999998,
"max": 0.19674400000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20428799999999997,
"min": 0.20428799999999997,
"max": 0.5473319999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000007,
"min": 0.00011698560000000007,
"max": 0.004837525599999998,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000015,
"min": 0.00023397120000000015,
"max": 0.0123718668,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1769049087",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1769049651"
},
"total": 564.237660576,
"count": 1,
"self": 0.584483868999996,
"children": {
"run_training.setup": {
"total": 0.043507446000148775,
"count": 1,
"self": 0.043507446000148775
},
"TrainerController.start_learning": {
"total": 563.6096692609999,
"count": 1,
"self": 0.6285329659929175,
"children": {
"TrainerController._reset_env": {
"total": 4.001125599000034,
"count": 1,
"self": 4.001125599000034
},
"TrainerController.advance": {
"total": 558.9086515640072,
"count": 18192,
"self": 0.6480796380135416,
"children": {
"env_step": {
"total": 398.82924888800744,
"count": 18192,
"self": 344.2249761300054,
"children": {
"SubprocessEnvManager._take_step": {
"total": 54.22442971200667,
"count": 18192,
"self": 1.8558513240163848,
"children": {
"TorchPolicy.evaluate": {
"total": 52.36857838799028,
"count": 18192,
"self": 52.36857838799028
}
}
},
"workers": {
"total": 0.3798430459953579,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 561.1159445880032,
"count": 18192,
"is_parallel": true,
"self": 263.07580192001456,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008256684000116365,
"count": 1,
"is_parallel": true,
"self": 0.005639510000264636,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026171739998517296,
"count": 10,
"is_parallel": true,
"self": 0.0026171739998517296
}
}
},
"UnityEnvironment.step": {
"total": 0.04458825100005015,
"count": 1,
"is_parallel": true,
"self": 0.0007081480002852913,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045755699989058485,
"count": 1,
"is_parallel": true,
"self": 0.00045755699989058485
},
"communicator.exchange": {
"total": 0.04109670899993034,
"count": 1,
"is_parallel": true,
"self": 0.04109670899993034
},
"steps_from_proto": {
"total": 0.002325836999943931,
"count": 1,
"is_parallel": true,
"self": 0.000426769000114291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00189906799982964,
"count": 10,
"is_parallel": true,
"self": 0.00189906799982964
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 298.0401426679887,
"count": 18191,
"is_parallel": true,
"self": 13.82201082198344,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.213656643982176,
"count": 18191,
"is_parallel": true,
"self": 7.213656643982176
},
"communicator.exchange": {
"total": 229.92215247299555,
"count": 18191,
"is_parallel": true,
"self": 229.92215247299555
},
"steps_from_proto": {
"total": 47.08232272902751,
"count": 18191,
"is_parallel": true,
"self": 8.365411665030251,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.71691106399726,
"count": 181910,
"is_parallel": true,
"self": 38.71691106399726
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 159.4313230379862,
"count": 18192,
"self": 0.8177544479951848,
"children": {
"process_trajectory": {
"total": 29.138542309991408,
"count": 18192,
"self": 28.72065214799113,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4178901620002762,
"count": 4,
"self": 0.4178901620002762
}
}
},
"_update_policy": {
"total": 129.4750262799996,
"count": 45,
"self": 48.881330600004276,
"children": {
"TorchPPOOptimizer.update": {
"total": 80.59369567999534,
"count": 4587,
"self": 80.59369567999534
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1249999261053745e-06,
"count": 1,
"self": 1.1249999261053745e-06
},
"TrainerController._save_models": {
"total": 0.07135800699984429,
"count": 1,
"self": 0.0009340699998574564,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07042393699998684,
"count": 1,
"self": 0.07042393699998684
}
}
}
}
}
}
}