{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0618977546691895,
"min": 1.0618977546691895,
"max": 2.8673129081726074,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10933.298828125,
"min": 10373.2099609375,
"max": 29269.53125,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 149984.0,
"min": 9952.0,
"max": 149984.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.787035942077637,
"min": 0.45938384532928467,
"max": 11.787035942077637,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2416.34228515625,
"min": 89.12046813964844,
"max": 2416.34228515625,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07720615142572891,
"min": 0.06575973280614204,
"max": 0.07720615142572891,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.38603075712864454,
"min": 0.2633494075073709,
"max": 0.38603075712864454,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23064172887334636,
"min": 0.13838361714071795,
"max": 0.27176668585515495,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.1532086443667318,
"min": 0.5535344685628718,
"max": 1.3588334292757747,
"count": 15
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.776096741333334e-06,
"min": 9.776096741333334e-06,
"max": 0.000289176003608,
"count": 15
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.888048370666667e-05,
"min": 4.888048370666667e-05,
"max": 0.0013468800510399999,
"count": 15
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10325866666666668,
"min": 0.10325866666666668,
"max": 0.19639199999999998,
"count": 15
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5162933333333334,
"min": 0.43943466666666664,
"max": 0.94896,
"count": 15
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00017260746666666673,
"min": 0.00017260746666666673,
"max": 0.004819960799999999,
"count": 15
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0008630373333333337,
"min": 0.0008630373333333337,
"max": 0.022453104,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.0,
"min": 3.8863636363636362,
"max": 24.0,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1320.0,
"min": 171.0,
"max": 1320.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.0,
"min": 3.8863636363636362,
"max": 24.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1320.0,
"min": 171.0,
"max": 1320.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1760281046",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/content/py310/lib/python3.10/site-packages/mlagents/trainers/learn.py ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1760281393"
},
"total": 346.603127953,
"count": 1,
"self": 0.48183957299966096,
"children": {
"run_training.setup": {
"total": 0.028237930000159395,
"count": 1,
"self": 0.028237930000159395
},
"TrainerController.start_learning": {
"total": 346.0930504500002,
"count": 1,
"self": 0.2894833700095205,
"children": {
"TrainerController._reset_env": {
"total": 3.348158010000134,
"count": 1,
"self": 3.348158010000134
},
"TrainerController.advance": {
"total": 342.36852956699045,
"count": 13664,
"self": 0.30946491894906103,
"children": {
"env_step": {
"total": 248.74095293403138,
"count": 13664,
"self": 193.11242298604634,
"children": {
"SubprocessEnvManager._take_step": {
"total": 55.449152949999416,
"count": 13664,
"self": 0.9700904329915829,
"children": {
"TorchPolicy.evaluate": {
"total": 54.47906251700783,
"count": 13664,
"self": 54.47906251700783
}
}
},
"workers": {
"total": 0.1793769979856279,
"count": 13664,
"self": 0.0,
"children": {
"worker_root": {
"total": 344.93564018199936,
"count": 13664,
"is_parallel": true,
"self": 174.61963952400652,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004929758000116635,
"count": 1,
"is_parallel": true,
"self": 0.003565983000498818,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013637749996178172,
"count": 10,
"is_parallel": true,
"self": 0.0013637749996178172
}
}
},
"UnityEnvironment.step": {
"total": 0.037936142000035034,
"count": 1,
"is_parallel": true,
"self": 0.0006660270000793389,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045550899994850624,
"count": 1,
"is_parallel": true,
"self": 0.00045550899994850624
},
"communicator.exchange": {
"total": 0.03463684700000158,
"count": 1,
"is_parallel": true,
"self": 0.03463684700000158
},
"steps_from_proto": {
"total": 0.002177759000005608,
"count": 1,
"is_parallel": true,
"self": 0.0005691999992905039,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001608559000715104,
"count": 10,
"is_parallel": true,
"self": 0.001608559000715104
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 170.31600065799284,
"count": 13663,
"is_parallel": true,
"self": 7.7721765459873495,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.347323674000563,
"count": 13663,
"is_parallel": true,
"self": 4.347323674000563
},
"communicator.exchange": {
"total": 129.05459598699713,
"count": 13663,
"is_parallel": true,
"self": 129.05459598699713
},
"steps_from_proto": {
"total": 29.141904451007804,
"count": 13663,
"is_parallel": true,
"self": 5.37606366399018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.765840787017623,
"count": 136630,
"is_parallel": true,
"self": 23.765840787017623
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 93.31811171401,
"count": 13664,
"self": 0.3651575950327697,
"children": {
"process_trajectory": {
"total": 20.372906001977753,
"count": 13664,
"self": 20.048028285978035,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32487771599971893,
"count": 3,
"self": 0.32487771599971893
}
}
},
"_update_policy": {
"total": 72.58004811699948,
"count": 68,
"self": 29.484737364995908,
"children": {
"TorchPPOOptimizer.update": {
"total": 43.09531075200357,
"count": 3465,
"self": 43.09531075200357
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.770001270226203e-07,
"count": 1,
"self": 9.770001270226203e-07
},
"TrainerController._save_models": {
"total": 0.08687852599996404,
"count": 1,
"self": 0.0008516220000274188,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08602690399993662,
"count": 1,
"self": 0.08602690399993662
}
}
}
}
}
}
}