My First PPO
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.686301589012146,
"min": 0.686301589012146,
"max": 0.872236430644989,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6507.51171875,
"min": 6507.51171875,
"max": 8659.0400390625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 399968.0,
"min": 209936.0,
"max": 399968.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.411480903625488,
"min": 12.930449485778809,
"max": 13.41840648651123,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2615.23876953125,
"min": 2368.681396484375,
"max": 2737.35498046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0752752537721558,
"min": 0.061901776149569945,
"max": 0.07561943785791905,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3011010150886232,
"min": 0.255060635023755,
"max": 0.35945824067725796,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1808815458241631,
"min": 0.16905227080717974,
"max": 0.21255744526199266,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7235261832966524,
"min": 0.676209083228719,
"max": 1.0611284643995997,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.525098824999995e-06,
"min": 3.525098824999995e-06,
"max": 0.000145425051525,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.410039529999998e-05,
"min": 1.410039529999998e-05,
"max": 0.0006570002810000001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10117499999999999,
"min": 0.10117499999999999,
"max": 0.14847500000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40469999999999995,
"min": 0.40469999999999995,
"max": 0.7190000000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.863249999999992e-05,
"min": 6.863249999999992e-05,
"max": 0.0024289025000000007,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00027452999999999966,
"min": 0.00027452999999999966,
"max": 0.0109781,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 24.818181818181817,
"max": 26.568181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 1092.0,
"max": 1454.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 24.818181818181817,
"max": 26.568181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 1092.0,
"max": 1454.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699653392",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/bjqrn/.local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1699654003"
},
"total": 611.4405966690001,
"count": 1,
"self": 0.2722963520000121,
"children": {
"run_training.setup": {
"total": 0.02393323500018596,
"count": 1,
"self": 0.02393323500018596
},
"TrainerController.start_learning": {
"total": 611.1443670819999,
"count": 1,
"self": 0.4650195620379236,
"children": {
"TrainerController._reset_env": {
"total": 1.3800296809999963,
"count": 1,
"self": 1.3800296809999963
},
"TrainerController.advance": {
"total": 609.220174194962,
"count": 18135,
"self": 0.2277508879637935,
"children": {
"env_step": {
"total": 608.9924233069983,
"count": 18135,
"self": 392.31379029001073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 216.4327723810111,
"count": 18135,
"self": 1.5582408440191102,
"children": {
"TorchPolicy.evaluate": {
"total": 214.87453153699198,
"count": 18135,
"self": 214.87453153699198
}
}
},
"workers": {
"total": 0.2458606359764417,
"count": 18135,
"self": 0.0,
"children": {
"worker_root": {
"total": 610.2111079279898,
"count": 18135,
"is_parallel": true,
"self": 305.28091542896163,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016190739997909986,
"count": 1,
"is_parallel": true,
"self": 0.00043863599967153277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011804380001194659,
"count": 10,
"is_parallel": true,
"self": 0.0011804380001194659
}
}
},
"UnityEnvironment.step": {
"total": 0.03704551999999239,
"count": 1,
"is_parallel": true,
"self": 0.00034856899992519175,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021579799977189396,
"count": 1,
"is_parallel": true,
"self": 0.00021579799977189396
},
"communicator.exchange": {
"total": 0.035477051000270876,
"count": 1,
"is_parallel": true,
"self": 0.035477051000270876
},
"steps_from_proto": {
"total": 0.0010041020000244316,
"count": 1,
"is_parallel": true,
"self": 0.00022033600089343963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000783765999130992,
"count": 10,
"is_parallel": true,
"self": 0.000783765999130992
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 304.9301924990282,
"count": 18134,
"is_parallel": true,
"self": 5.967438889092136,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.6019252910032264,
"count": 18134,
"is_parallel": true,
"self": 3.6019252910032264
},
"communicator.exchange": {
"total": 276.9330691019718,
"count": 18134,
"is_parallel": true,
"self": 276.9330691019718
},
"steps_from_proto": {
"total": 18.42775921696102,
"count": 18134,
"is_parallel": true,
"self": 3.824020415968789,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.603738800992232,
"count": 181340,
"is_parallel": true,
"self": 14.603738800992232
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.075599998846883e-05,
"count": 1,
"self": 8.075599998846883e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 605.8552068730751,
"count": 503727,
"is_parallel": true,
"self": 9.598303305113404,
"children": {
"process_trajectory": {
"total": 339.71181379896234,
"count": 503727,
"is_parallel": true,
"self": 339.1843451029622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5274686960001418,
"count": 4,
"is_parallel": true,
"self": 0.5274686960001418
}
}
},
"_update_policy": {
"total": 256.54508976899933,
"count": 90,
"is_parallel": true,
"self": 27.442524785029036,
"children": {
"TorchPPOOptimizer.update": {
"total": 229.1025649839703,
"count": 4587,
"is_parallel": true,
"self": 229.1025649839703
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07906288799995309,
"count": 1,
"self": 0.0008434490000581718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07821943899989492,
"count": 1,
"self": 0.07821943899989492
}
}
}
}
}
}
}
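
The dump above is the metric and timer summary that mlagents-learn writes at the end of a run (typically results/<run-id>/run_logs/timers.json). As a minimal sketch of how to inspect it, the Python below loads the file, prints each gauge's latest/min/max values, and walks the nested "children" timer tree to show where the ~611 s of wall-clock time went. The local filename and the walk helper are assumptions for illustration, not something produced by the run itself.

import json

# Minimal sketch: load the timer/gauge dump shown above. The filename is an
# assumption; ML-Agents typically writes it to results/<run-id>/run_logs/timers.json.
with open("timers.json") as f:
    root = json.load(f)

# Each gauge records the latest value plus the min/max over the summaries written so far.
for name, gauge in root["gauges"].items():
    print(f"{name:55s} value={gauge['value']:<14.6g} "
          f"min={gauge['min']:<14.6g} max={gauge['max']:<14.6g} n={gauge['count']}")

# The wall-clock breakdown is a nested tree under "children"; each node carries
# total seconds and an invocation count (e.g. communicator.exchange accounts for
# roughly 277 s of the ~611 s run above).
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.2f}s (count={node.get('count', 0)})")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(root)

Run against this file, the gauge summary would show the learning rate annealing from about 1.45e-4 down to 3.5e-6 and the mean cumulative reward hovering around 25-26, which matches the --resume flag and the roughly 210k-to-400k step range recorded in the log.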