{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.154209852218628,
"min": 2.154209852218628,
"max": 2.8870773315429688,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 20592.091796875,
"min": 20592.091796875,
"max": 29632.75,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.432118892669678,
"min": 0.24084840714931488,
"max": 6.432118892669678,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1254.26318359375,
"min": 46.72459030151367,
"max": 1275.7470703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 13.204545454545455,
"min": 2.659090909090909,
"max": 13.204545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 581.0,
"min": 117.0,
"max": 725.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 13.204545454545455,
"min": 2.659090909090909,
"max": 13.204545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 581.0,
"min": 117.0,
"max": 725.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06748109559474888,
"min": 0.06398367981355656,
"max": 0.07662719841755461,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.13496219118949776,
"min": 0.12796735962711311,
"max": 0.2197673792166977,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.41078278298179305,
"min": 0.19014298588599715,
"max": 0.46419012488103384,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8215655659635861,
"min": 0.3802859717719943,
"max": 1.3925703746431015,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.144097856e-06,
"min": 2.144097856e-06,
"max": 9.674400325600002e-05,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.288195712e-06,
"min": 4.288195712e-06,
"max": 0.000247332052668,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.19999999999999996,
"min": 0.19999999999999996,
"max": 0.29999999999999993,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00011698560000000007,
"min": 0.00011698560000000007,
"max": 0.004837525599999998,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00023397120000000015,
"min": 0.00023397120000000015,
"max": 0.0123718668,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704687681",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704688156"
},
"total": 475.5943794650002,
"count": 1,
"self": 0.43731098500029475,
"children": {
"run_training.setup": {
"total": 0.05113849500003198,
"count": 1,
"self": 0.05113849500003198
},
"TrainerController.start_learning": {
"total": 475.1059299849999,
"count": 1,
"self": 0.6422891300198899,
"children": {
"TrainerController._reset_env": {
"total": 3.2390860040000007,
"count": 1,
"self": 3.2390860040000007
},
"TrainerController.advance": {
"total": 471.1307779849799,
"count": 18200,
"self": 0.28176186797372793,
"children": {
"env_step": {
"total": 470.84901611700616,
"count": 18200,
"self": 328.7966816080225,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.74169097099957,
"count": 18200,
"self": 1.4870724530201187,
"children": {
"TorchPolicy.evaluate": {
"total": 140.25461851797945,
"count": 18200,
"self": 140.25461851797945
}
}
},
"workers": {
"total": 0.31064353798410593,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 473.84528638002257,
"count": 18200,
"is_parallel": true,
"self": 241.2247903190646,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005206666000049154,
"count": 1,
"is_parallel": true,
"self": 0.003906567999820254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013000980002288998,
"count": 10,
"is_parallel": true,
"self": 0.0013000980002288998
}
}
},
"UnityEnvironment.step": {
"total": 0.038740966000204935,
"count": 1,
"is_parallel": true,
"self": 0.0006339520000437915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042153399999733665,
"count": 1,
"is_parallel": true,
"self": 0.00042153399999733665
},
"communicator.exchange": {
"total": 0.03561914200008687,
"count": 1,
"is_parallel": true,
"self": 0.03561914200008687
},
"steps_from_proto": {
"total": 0.0020663380000769394,
"count": 1,
"is_parallel": true,
"self": 0.0004119240002182778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016544139998586616,
"count": 10,
"is_parallel": true,
"self": 0.0016544139998586616
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 232.62049606095798,
"count": 18199,
"is_parallel": true,
"self": 11.05575847992759,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.596000220981296,
"count": 18199,
"is_parallel": true,
"self": 5.596000220981296
},
"communicator.exchange": {
"total": 180.9075685510079,
"count": 18199,
"is_parallel": true,
"self": 180.9075685510079
},
"steps_from_proto": {
"total": 35.06116880904119,
"count": 18199,
"is_parallel": true,
"self": 6.588972757031115,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.472196052010077,
"count": 181990,
"is_parallel": true,
"self": 28.472196052010077
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013113900013195234,
"count": 1,
"self": 0.00013113900013195234,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 463.706864495814,
"count": 902195,
"is_parallel": true,
"self": 18.41172192278509,
"children": {
"process_trajectory": {
"total": 306.0700432450294,
"count": 902195,
"is_parallel": true,
"self": 305.2533147630297,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8167284819996894,
"count": 4,
"is_parallel": true,
"self": 0.8167284819996894
}
}
},
"_update_policy": {
"total": 139.2250993279995,
"count": 45,
"is_parallel": true,
"self": 55.66188322001108,
"children": {
"TorchPPOOptimizer.update": {
"total": 83.56321610798841,
"count": 4584,
"is_parallel": true,
"self": 83.56321610798841
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09364572699996643,
"count": 1,
"self": 0.0011021159998563235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0925436110001101,
"count": 1,
"self": 0.0925436110001101
}
}
}
}
}
}
}