{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.792105495929718,
"min": 0.7758181095123291,
"max": 2.8579037189483643,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8138.091796875,
"min": 7492.8515625,
"max": 29267.79296875,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.694283485412598,
"min": 0.3181127607822418,
"max": 13.694283485412598,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2793.6337890625,
"min": 61.71387481689453,
"max": 2793.6337890625,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06915106905730414,
"min": 0.06381127500452588,
"max": 0.07541321034317233,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3457553452865207,
"min": 0.2552451000181035,
"max": 0.3663400788614287,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1667632478300263,
"min": 0.10653472682837781,
"max": 0.2881792398410685,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8338162391501315,
"min": 0.42613890731351123,
"max": 1.4408961992053426,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333332e-06,
"min": 5.288098237333332e-06,
"max": 0.0002945880018039999,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.644049118666666e-05,
"min": 2.644049118666666e-05,
"max": 0.00142344002552,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666664,
"min": 0.10176266666666664,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333332,
"min": 0.42025066666666666,
"max": 0.9744800000000001,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999995,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.01818181818182,
"min": 3.1136363636363638,
"max": 27.01818181818182,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1486.0,
"min": 137.0,
"max": 1486.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.01818181818182,
"min": 3.1136363636363638,
"max": 27.01818181818182,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1486.0,
"min": 137.0,
"max": 1486.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674210888",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674211625"
},
"total": 737.034203302,
"count": 1,
"self": 0.5238513240000202,
"children": {
"run_training.setup": {
"total": 0.09872923099999298,
"count": 1,
"self": 0.09872923099999298
},
"TrainerController.start_learning": {
"total": 736.411622747,
"count": 1,
"self": 0.9634377360063127,
"children": {
"TrainerController._reset_env": {
"total": 5.824934661000043,
"count": 1,
"self": 5.824934661000043
},
"TrainerController.advance": {
"total": 729.4945674249936,
"count": 27334,
"self": 0.4327708349803743,
"children": {
"env_step": {
"total": 729.0617965900133,
"count": 27334,
"self": 508.67243450201784,
"children": {
"SubprocessEnvManager._take_step": {
"total": 219.96661330699817,
"count": 27334,
"self": 2.321933783980171,
"children": {
"TorchPolicy.evaluate": {
"total": 217.644679523018,
"count": 27334,
"self": 50.29809734602543,
"children": {
"TorchPolicy.sample_actions": {
"total": 167.34658217699257,
"count": 27334,
"self": 167.34658217699257
}
}
}
}
},
"workers": {
"total": 0.4227487809972672,
"count": 27334,
"self": 0.0,
"children": {
"worker_root": {
"total": 734.3503475309932,
"count": 27334,
"is_parallel": true,
"self": 391.29188311899657,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019014020000440723,
"count": 1,
"is_parallel": true,
"self": 0.0007369899999503104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011644120000937619,
"count": 10,
"is_parallel": true,
"self": 0.0011644120000937619
}
}
},
"UnityEnvironment.step": {
"total": 0.034149022999940826,
"count": 1,
"is_parallel": true,
"self": 0.00033122399997864704,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031290300000819116,
"count": 1,
"is_parallel": true,
"self": 0.00031290300000819116
},
"communicator.exchange": {
"total": 0.031619260999946164,
"count": 1,
"is_parallel": true,
"self": 0.031619260999946164
},
"steps_from_proto": {
"total": 0.0018856350000078237,
"count": 1,
"is_parallel": true,
"self": 0.0004427769998756048,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014428580001322189,
"count": 10,
"is_parallel": true,
"self": 0.0014428580001322189
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 343.0584644119966,
"count": 27333,
"is_parallel": true,
"self": 12.848609621004698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.577825656993696,
"count": 27333,
"is_parallel": true,
"self": 7.577825656993696
},
"communicator.exchange": {
"total": 272.4847254920031,
"count": 27333,
"is_parallel": true,
"self": 272.4847254920031
},
"steps_from_proto": {
"total": 50.14730364199511,
"count": 27333,
"is_parallel": true,
"self": 10.389026664043627,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.75827697795148,
"count": 273330,
"is_parallel": true,
"self": 39.75827697795148
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.6827000005578157e-05,
"count": 1,
"self": 4.6827000005578157e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 725.2856880560327,
"count": 530987,
"is_parallel": true,
"self": 13.922179442963625,
"children": {
"process_trajectory": {
"total": 370.3275730830685,
"count": 530987,
"is_parallel": true,
"self": 369.0382123200685,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2893607629999906,
"count": 6,
"is_parallel": true,
"self": 1.2893607629999906
}
}
},
"_update_policy": {
"total": 341.03593553000053,
"count": 136,
"is_parallel": true,
"self": 102.05563047500095,
"children": {
"TorchPPOOptimizer.update": {
"total": 238.98030505499958,
"count": 11555,
"is_parallel": true,
"self": 238.98030505499958
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1286360980000154,
"count": 1,
"self": 0.0012780260001363786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12735807199987903,
"count": 1,
"self": 0.12735807199987903
}
}
}
}
}
}
}