{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9811392426490784,
"min": 0.9519923329353333,
"max": 0.9945018291473389,
"count": 3
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9411.087890625,
"min": 700.1292724609375,
"max": 9717.9375,
"count": 3
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 479960.0,
"max": 499952.0,
"count": 3
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 479960.0,
"max": 499952.0,
"count": 3
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 118.75331115722656,
"min": 117.22108459472656,
"max": 118.75331115722656,
"count": 3
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 24225.67578125,
"min": 820.547607421875,
"max": 24225.67578125,
"count": 3
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07010364909062856,
"min": 0.07010364909062856,
"max": 0.0752406652269094,
"count": 2
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3505182454531428,
"min": 0.3009626609076376,
"max": 0.3505182454531428,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 1.0401250626526628,
"min": 1.0401250626526628,
"max": 1.1791614168049658,
"count": 2
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 5.200625313263314,
"min": 4.716645667219863,
"max": 5.200625313263314,
"count": 2
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0352989647999984e-06,
"min": 1.0352989647999984e-06,
"max": 3.0152969848000013e-06,
"count": 2
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.176494823999992e-06,
"min": 5.176494823999992e-06,
"max": 1.2061187939200005e-05,
"count": 2
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10103520000000002,
"min": 0.10103520000000002,
"max": 0.1030152,
"count": 2
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5051760000000001,
"min": 0.4120608,
"max": 0.5051760000000001,
"count": 2
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.165647999999992e-05,
"min": 6.165647999999992e-05,
"max": 0.00016045848000000008,
"count": 2
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003082823999999996,
"min": 0.0003082823999999996,
"max": 0.0006418339200000003,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 2
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.818181818181817,
"min": 25.636363636363637,
"max": 25.818181818181817,
"count": 2
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1420.0,
"min": 1128.0,
"max": 1420.0,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.818181818181817,
"min": 25.636363636363637,
"max": 25.818181818181817,
"count": 2
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1420.0,
"min": 1128.0,
"max": 1420.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754451080",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1754451134"
},
"total": 54.574341497000205,
"count": 1,
"self": 0.47651181600031123,
"children": {
"run_training.setup": {
"total": 0.022783994999826973,
"count": 1,
"self": 0.022783994999826973
},
"TrainerController.start_learning": {
"total": 54.07504568600007,
"count": 1,
"self": 0.04649125301011736,
"children": {
"TrainerController._reset_env": {
"total": 2.255951511999683,
"count": 1,
"self": 2.255951511999683
},
"TrainerController.advance": {
"total": 51.66140153299011,
"count": 1864,
"self": 0.04556924597454781,
"children": {
"env_step": {
"total": 35.19539097300412,
"count": 1864,
"self": 26.57316563000677,
"children": {
"SubprocessEnvManager._take_step": {
"total": 8.594578837008157,
"count": 1864,
"self": 0.14635792001445225,
"children": {
"TorchPolicy.evaluate": {
"total": 8.448220916993705,
"count": 1864,
"self": 8.448220916993705
}
}
},
"workers": {
"total": 0.027646505989196157,
"count": 1864,
"self": 0.0,
"children": {
"worker_root": {
"total": 53.47082080098744,
"count": 1864,
"is_parallel": true,
"self": 30.205245973983892,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002046512000106304,
"count": 1,
"is_parallel": true,
"self": 0.000671051000153966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013754609999523382,
"count": 10,
"is_parallel": true,
"self": 0.0013754609999523382
}
}
},
"UnityEnvironment.step": {
"total": 0.0355443829998876,
"count": 1,
"is_parallel": true,
"self": 0.0005598339994321577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003971500000261585,
"count": 1,
"is_parallel": true,
"self": 0.0003971500000261585
},
"communicator.exchange": {
"total": 0.03277499900013936,
"count": 1,
"is_parallel": true,
"self": 0.03277499900013936
},
"steps_from_proto": {
"total": 0.0018124000002899265,
"count": 1,
"is_parallel": true,
"self": 0.00034252600016770884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014698740001222177,
"count": 10,
"is_parallel": true,
"self": 0.0014698740001222177
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 23.265574827003547,
"count": 1863,
"is_parallel": true,
"self": 1.086245167006382,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5792968029982148,
"count": 1863,
"is_parallel": true,
"self": 0.5792968029982148
},
"communicator.exchange": {
"total": 18.181196178997652,
"count": 1863,
"is_parallel": true,
"self": 18.181196178997652
},
"steps_from_proto": {
"total": 3.4188366780012984,
"count": 1863,
"is_parallel": true,
"self": 0.6422251270219022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.7766115509793963,
"count": 18630,
"is_parallel": true,
"self": 2.7766115509793963
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 16.42044131401144,
"count": 1864,
"self": 0.0626499330014667,
"children": {
"process_trajectory": {
"total": 5.32722642500994,
"count": 1864,
"self": 5.188741588009634,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13848483700030556,
"count": 1,
"self": 0.13848483700030556
}
}
},
"_update_policy": {
"total": 11.030564956000035,
"count": 9,
"self": 4.442429172993798,
"children": {
"TorchPPOOptimizer.update": {
"total": 6.588135783006237,
"count": 456,
"self": 6.588135783006237
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0120002116309479e-06,
"count": 1,
"self": 1.0120002116309479e-06
},
"TrainerController._save_models": {
"total": 0.11120037599994248,
"count": 1,
"self": 0.0010221789998468012,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11017819700009568,
"count": 1,
"self": 0.11017819700009568
}
}
}
}
}
}
}