{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7798912525177002,
"min": 0.7317385673522949,
"max": 2.868882417678833,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8012.6025390625,
"min": 7143.69970703125,
"max": 29348.66796875,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.326889991760254,
"min": 0.2520301938056946,
"max": 13.365326881408691,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2718.685546875,
"min": 48.893856048583984,
"max": 2730.86279296875,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0711754250907403,
"min": 0.06030058827189565,
"max": 0.07471460248925724,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35587712545370154,
"min": 0.2495502715219296,
"max": 0.3735730124462862,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19849113301903593,
"min": 0.08895471693112461,
"max": 0.29364358137051266,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9924556650951797,
"min": 0.35581886772449844,
"max": 1.3565708895524344,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333331e-06,
"min": 5.288098237333331e-06,
"max": 0.00029458800180399996,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6440491186666655e-05,
"min": 2.6440491186666655e-05,
"max": 0.0014234400255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.654545454545456,
"min": 2.8181818181818183,
"max": 26.90909090909091,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1411.0,
"min": 124.0,
"max": 1452.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.654545454545456,
"min": 2.8181818181818183,
"max": 26.90909090909091,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1411.0,
"min": 124.0,
"max": 1452.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722499307",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722500105"
},
"total": 797.6435648949999,
"count": 1,
"self": 0.4879158629998983,
"children": {
"run_training.setup": {
"total": 0.0639951299999666,
"count": 1,
"self": 0.0639951299999666
},
"TrainerController.start_learning": {
"total": 797.0916539020001,
"count": 1,
"self": 1.1205131260157941,
"children": {
"TrainerController._reset_env": {
"total": 2.9180555550000236,
"count": 1,
"self": 2.9180555550000236
},
"TrainerController.advance": {
"total": 792.9540435119843,
"count": 27334,
"self": 0.5225153579847301,
"children": {
"env_step": {
"total": 792.4315281539996,
"count": 27334,
"self": 521.3855595940139,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.49822138300374,
"count": 27334,
"self": 2.7050272200043537,
"children": {
"TorchPolicy.evaluate": {
"total": 267.7931941629994,
"count": 27334,
"self": 267.7931941629994
}
}
},
"workers": {
"total": 0.5477471769819431,
"count": 27334,
"self": 0.0,
"children": {
"worker_root": {
"total": 794.7759077039967,
"count": 27334,
"is_parallel": true,
"self": 397.08989565800994,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005545482000002266,
"count": 1,
"is_parallel": true,
"self": 0.0037723800001003838,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017731019999018827,
"count": 10,
"is_parallel": true,
"self": 0.0017731019999018827
}
}
},
"UnityEnvironment.step": {
"total": 0.04171406299997216,
"count": 1,
"is_parallel": true,
"self": 0.0007029589999092423,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047164000000066153,
"count": 1,
"is_parallel": true,
"self": 0.00047164000000066153
},
"communicator.exchange": {
"total": 0.03824911800006703,
"count": 1,
"is_parallel": true,
"self": 0.03824911800006703
},
"steps_from_proto": {
"total": 0.0022903459999952247,
"count": 1,
"is_parallel": true,
"self": 0.0005079219999970519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017824239999981728,
"count": 10,
"is_parallel": true,
"self": 0.0017824239999981728
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 397.6860120459868,
"count": 27333,
"is_parallel": true,
"self": 17.60987992996536,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.460063297000033,
"count": 27333,
"is_parallel": true,
"self": 9.460063297000033
},
"communicator.exchange": {
"total": 310.23227515699784,
"count": 27333,
"is_parallel": true,
"self": 310.23227515699784
},
"steps_from_proto": {
"total": 60.38379366202355,
"count": 27333,
"is_parallel": true,
"self": 11.729539792002583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 48.654253870020966,
"count": 273330,
"is_parallel": true,
"self": 48.654253870020966
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0004901190000055067,
"count": 1,
"self": 0.0004901190000055067,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 782.8882450709428,
"count": 1124036,
"is_parallel": true,
"self": 25.697712267837346,
"children": {
"process_trajectory": {
"total": 432.0619166981045,
"count": 1124036,
"is_parallel": true,
"self": 430.64759788910453,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4143188089999512,
"count": 6,
"is_parallel": true,
"self": 1.4143188089999512
}
}
},
"_update_policy": {
"total": 325.128616105001,
"count": 136,
"is_parallel": true,
"self": 102.99310199800561,
"children": {
"TorchPPOOptimizer.update": {
"total": 222.1355141069954,
"count": 6933,
"is_parallel": true,
"self": 222.1355141069954
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09855158999994273,
"count": 1,
"self": 0.0010628069999256695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09748878300001707,
"count": 1,
"self": 0.09748878300001707
}
}
}
}
}
}
}