{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9122512936592102,
"min": 0.8648973703384399,
"max": 2.8469982147216797,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8750.314453125,
"min": 8257.0712890625,
"max": 28072.05078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9992.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9992.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.908886909484863,
"min": 0.29201480746269226,
"max": 12.992841720581055,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2646.32177734375,
"min": 34.74976348876953,
"max": 2646.32177734375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07000340311864348,
"min": 0.06074473659842339,
"max": 0.07354217232033775,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3500170155932174,
"min": 0.14551974174979726,
"max": 0.3625160762948879,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2071649559572631,
"min": 0.11846387059446059,
"max": 0.3248487457486929,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0358247797863156,
"min": 0.23692774118892118,
"max": 1.56613420242188,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.620097459999994e-06,
"min": 7.620097459999994e-06,
"max": 0.00028977000341,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.810048729999997e-05,
"min": 3.810048729999997e-05,
"max": 0.0013911000362999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10254,
"min": 0.10254,
"max": 0.19659000000000001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5127,
"min": 0.39318000000000003,
"max": 0.9637000000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001367459999999999,
"min": 0.0001367459999999999,
"max": 0.0048298410000000005,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006837299999999995,
"min": 0.0006837299999999995,
"max": 0.023188630000000002,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 4378.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 3.3181818181818183,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1385.0,
"min": 73.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 3.3181818181818183,
"max": 25.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1385.0,
"min": 73.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745268644",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1745269069"
},
"total": 425.52263586599986,
"count": 1,
"self": 0.4250566009998238,
"children": {
"run_training.setup": {
"total": 0.01978913499999635,
"count": 1,
"self": 0.01978913499999635
},
"TrainerController.start_learning": {
"total": 425.07779013000004,
"count": 1,
"self": 0.3736697190106497,
"children": {
"TrainerController._reset_env": {
"total": 1.9940492760000552,
"count": 1,
"self": 1.9940492760000552
},
"TrainerController.advance": {
"total": 422.6217971019893,
"count": 17864,
"self": 0.374020807000079,
"children": {
"env_step": {
"total": 298.4061487159962,
"count": 17864,
"self": 227.48556105400974,
"children": {
"SubprocessEnvManager._take_step": {
"total": 70.70688770899073,
"count": 17864,
"self": 1.2646937879876532,
"children": {
"TorchPolicy.evaluate": {
"total": 69.44219392100308,
"count": 17864,
"self": 69.44219392100308
}
}
},
"workers": {
"total": 0.2136999529957393,
"count": 17864,
"self": 0.0,
"children": {
"worker_root": {
"total": 423.6309970400026,
"count": 17864,
"is_parallel": true,
"self": 223.9017756430095,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002023345999987214,
"count": 1,
"is_parallel": true,
"self": 0.0006687999999712702,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013545460000159437,
"count": 10,
"is_parallel": true,
"self": 0.0013545460000159437
}
}
},
"UnityEnvironment.step": {
"total": 0.034845172000018465,
"count": 1,
"is_parallel": true,
"self": 0.0005958750000445434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041200199996183073,
"count": 1,
"is_parallel": true,
"self": 0.00041200199996183073
},
"communicator.exchange": {
"total": 0.032075327000029574,
"count": 1,
"is_parallel": true,
"self": 0.032075327000029574
},
"steps_from_proto": {
"total": 0.0017619679999825166,
"count": 1,
"is_parallel": true,
"self": 0.00033606599981794716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014259020001645695,
"count": 10,
"is_parallel": true,
"self": 0.0014259020001645695
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 199.72922139699313,
"count": 17863,
"is_parallel": true,
"self": 9.523106293985506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.240348837008128,
"count": 17863,
"is_parallel": true,
"self": 5.240348837008128
},
"communicator.exchange": {
"total": 154.63243771900454,
"count": 17863,
"is_parallel": true,
"self": 154.63243771900454
},
"steps_from_proto": {
"total": 30.33332854699495,
"count": 17863,
"is_parallel": true,
"self": 5.4884487269885085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.84487982000644,
"count": 178630,
"is_parallel": true,
"self": 24.84487982000644
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 123.84162757899298,
"count": 17864,
"self": 0.46947275199852356,
"children": {
"process_trajectory": {
"total": 26.44300769099584,
"count": 17864,
"self": 26.03020795499583,
"children": {
"RLTrainer._checkpoint": {
"total": 0.41279973600001085,
"count": 4,
"self": 0.41279973600001085
}
}
},
"_update_policy": {
"total": 96.92914713599862,
"count": 89,
"self": 39.34455579400537,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.584591341993246,
"count": 4536,
"self": 57.584591341993246
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.290001798945013e-07,
"count": 1,
"self": 9.290001798945013e-07
},
"TrainerController._save_models": {
"total": 0.08827310399988164,
"count": 1,
"self": 0.0010468959999343497,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08722620799994729,
"count": 1,
"self": 0.08722620799994729
}
}
}
}
}
}
}