{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.4985753893852234,
"min": 0.46304672956466675,
"max": 2.8018839359283447,
"count": 75
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9871.79296875,
"min": 9168.3251953125,
"max": 57449.828125,
"count": 75
},
"SnowballTarget.Step.mean": {
"value": 1499992.0,
"min": 19992.0,
"max": 1499992.0,
"count": 75
},
"SnowballTarget.Step.sum": {
"value": 1499992.0,
"min": 19992.0,
"max": 1499992.0,
"count": 75
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.304326057434082,
"min": 0.7332926392555237,
"max": 14.308523178100586,
"count": 75
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5721.73046875,
"min": 292.5837707519531,
"max": 5792.43896484375,
"count": 75
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06880019677030053,
"min": 0.062238117414595125,
"max": 0.07444671285630372,
"count": 75
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.6192017709327047,
"min": 0.5601430567313561,
"max": 0.7062532744802964,
"count": 75
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15539597731270302,
"min": 0.13809698708111942,
"max": 0.2905109069575931,
"count": 75
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.398563795814327,
"min": 1.2428728837300749,
"max": 2.6145981626183374,
"count": 75
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.137699287466669e-06,
"min": 2.137699287466669e-06,
"max": 0.00029781760072746666,
"count": 75
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.9239293587200022e-05,
"min": 1.9239293587200022e-05,
"max": 0.0026803584065472,
"count": 75
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10071253333333334,
"min": 0.10071253333333334,
"max": 0.19927253333333333,
"count": 75
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9064128,
"min": 0.9064128,
"max": 1.859992,
"count": 75
},
"SnowballTarget.Policy.Beta.mean": {
"value": 4.555541333333338e-05,
"min": 4.555541333333338e-05,
"max": 0.004963699413333333,
"count": 75
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00040999872000000044,
"min": 0.00040999872000000044,
"max": 0.04467329472,
"count": 75
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 75
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 21890.0,
"count": 75
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.949494949494948,
"min": 4.424242424242424,
"max": 28.050505050505052,
"count": 75
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2767.0,
"min": 438.0,
"max": 3038.0,
"count": 75
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.949494949494948,
"min": 4.424242424242424,
"max": 28.050505050505052,
"count": 75
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2767.0,
"min": 438.0,
"max": 3038.0,
"count": 75
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 75
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744225242",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744228428"
},
"total": 3185.677139519,
"count": 1,
"self": 0.4327775710003152,
"children": {
"run_training.setup": {
"total": 0.029538844999933644,
"count": 1,
"self": 0.029538844999933644
},
"TrainerController.start_learning": {
"total": 3185.2148231029996,
"count": 1,
"self": 2.561118526984046,
"children": {
"TrainerController._reset_env": {
"total": 3.373048165,
"count": 1,
"self": 3.373048165
},
"TrainerController.advance": {
"total": 3179.192648567016,
"count": 136392,
"self": 2.7793952292154245,
"children": {
"env_step": {
"total": 2257.266501999906,
"count": 136392,
"self": 1719.6759252059464,
"children": {
"SubprocessEnvManager._take_step": {
"total": 536.0487083010185,
"count": 136392,
"self": 9.287196705972292,
"children": {
"TorchPolicy.evaluate": {
"total": 526.7615115950462,
"count": 136392,
"self": 526.7615115950462
}
}
},
"workers": {
"total": 1.541868492940921,
"count": 136392,
"self": 0.0,
"children": {
"worker_root": {
"total": 3176.084010152001,
"count": 136392,
"is_parallel": true,
"self": 1665.9977284289366,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006903885000156151,
"count": 1,
"is_parallel": true,
"self": 0.0044072950004192535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002496589999736898,
"count": 10,
"is_parallel": true,
"self": 0.002496589999736898
}
}
},
"UnityEnvironment.step": {
"total": 0.03599983499998416,
"count": 1,
"is_parallel": true,
"self": 0.0005505949998223514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004153480001605203,
"count": 1,
"is_parallel": true,
"self": 0.0004153480001605203
},
"communicator.exchange": {
"total": 0.033193868999887854,
"count": 1,
"is_parallel": true,
"self": 0.033193868999887854
},
"steps_from_proto": {
"total": 0.0018400230001134332,
"count": 1,
"is_parallel": true,
"self": 0.000343156000099043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014968670000143902,
"count": 10,
"is_parallel": true,
"self": 0.0014968670000143902
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1510.0862817230645,
"count": 136391,
"is_parallel": true,
"self": 73.33208243930653,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 40.66284399184519,
"count": 136391,
"is_parallel": true,
"self": 40.66284399184519
},
"communicator.exchange": {
"total": 1157.8840263219997,
"count": 136391,
"is_parallel": true,
"self": 1157.8840263219997
},
"steps_from_proto": {
"total": 238.20732896991308,
"count": 136391,
"is_parallel": true,
"self": 42.76381879592623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 195.44351017398685,
"count": 1363910,
"is_parallel": true,
"self": 195.44351017398685
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 919.1467513378948,
"count": 136392,
"self": 3.221576735858889,
"children": {
"process_trajectory": {
"total": 199.52636471104097,
"count": 136392,
"self": 197.9970469720413,
"children": {
"RLTrainer._checkpoint": {
"total": 1.52931773899968,
"count": 15,
"self": 1.52931773899968
}
}
},
"_update_policy": {
"total": 716.3988098909949,
"count": 681,
"self": 287.86309445096254,
"children": {
"TorchPPOOptimizer.update": {
"total": 428.5357154400324,
"count": 34728,
"self": 428.5357154400324
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.719998731976375e-07,
"count": 1,
"self": 8.719998731976375e-07
},
"TrainerController._save_models": {
"total": 0.08800697199967544,
"count": 1,
"self": 0.0009391049989062594,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08706786700076918,
"count": 1,
"self": 0.08706786700076918
}
}
}
}
}
}
}