{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.9350531101226807,
"min": 1.9350531101226807,
"max": 2.863603353500366,
"count": 7
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 19710.451171875,
"min": 19635.755859375,
"max": 29389.162109375,
"count": 7
},
"SnowballTarget.Step.mean": {
"value": 69992.0,
"min": 9952.0,
"max": 69992.0,
"count": 7
},
"SnowballTarget.Step.sum": {
"value": 69992.0,
"min": 9952.0,
"max": 69992.0,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.660382270812988,
"min": 0.3661629557609558,
"max": 6.660382270812988,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1298.7745361328125,
"min": 71.03561401367188,
"max": 1298.7745361328125,
"count": 7
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 7
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 7
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0718393026726713,
"min": 0.06422478883548904,
"max": 0.07440936712708777,
"count": 7
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2873572106906852,
"min": 0.2601585548163355,
"max": 0.3459746781865012,
"count": 7
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2652840692476899,
"min": 0.11865237397461326,
"max": 0.2905561280455075,
"count": 7
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0611362769907595,
"min": 0.474609495898453,
"max": 1.3430366852119857,
"count": 7
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00020278203240600001,
"min": 0.00020278203240600001,
"max": 0.000291882002706,
"count": 7
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0008111281296240001,
"min": 0.0008111281296240001,
"max": 0.00138516003828,
"count": 7
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.16759400000000002,
"min": 0.16759400000000002,
"max": 0.19729400000000002,
"count": 7
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6703760000000001,
"min": 0.6703760000000001,
"max": 0.96172,
"count": 7
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.003382940600000001,
"min": 0.003382940600000001,
"max": 0.0048649706,
"count": 7
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.013531762400000005,
"min": 0.013531762400000005,
"max": 0.023089828,
"count": 7
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 15.454545454545455,
"min": 3.4545454545454546,
"max": 15.454545454545455,
"count": 7
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 680.0,
"min": 152.0,
"max": 803.0,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 15.454545454545455,
"min": 3.4545454545454546,
"max": 15.454545454545455,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 680.0,
"min": 152.0,
"max": 803.0,
"count": 7
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692550778",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692551004"
},
"total": 225.50208193399988,
"count": 1,
"self": 0.25961433099973874,
"children": {
"run_training.setup": {
"total": 0.055227037999998174,
"count": 1,
"self": 0.055227037999998174
},
"TrainerController.start_learning": {
"total": 225.18724056500014,
"count": 1,
"self": 0.32448834698936935,
"children": {
"TrainerController._reset_env": {
"total": 2.1109650349999356,
"count": 1,
"self": 2.1109650349999356
},
"TrainerController.advance": {
"total": 222.48186456201086,
"count": 6716,
"self": 0.16419165800448354,
"children": {
"env_step": {
"total": 222.31767290400637,
"count": 6716,
"self": 178.8557574930055,
"children": {
"SubprocessEnvManager._take_step": {
"total": 43.30977939699824,
"count": 6716,
"self": 0.9578943569923695,
"children": {
"TorchPolicy.evaluate": {
"total": 42.35188504000587,
"count": 6716,
"self": 42.35188504000587
}
}
},
"workers": {
"total": 0.15213601400262178,
"count": 6715,
"self": 0.0,
"children": {
"worker_root": {
"total": 224.21016970999824,
"count": 6715,
"is_parallel": true,
"self": 98.50669140600826,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00968438499990043,
"count": 1,
"is_parallel": true,
"self": 0.0065752859998156055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003109099000084825,
"count": 10,
"is_parallel": true,
"self": 0.003109099000084825
}
}
},
"UnityEnvironment.step": {
"total": 0.10456211200005328,
"count": 1,
"is_parallel": true,
"self": 0.0007724590001316756,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004944820000218897,
"count": 1,
"is_parallel": true,
"self": 0.0004944820000218897
},
"communicator.exchange": {
"total": 0.10015086699991116,
"count": 1,
"is_parallel": true,
"self": 0.10015086699991116
},
"steps_from_proto": {
"total": 0.0031443039999885514,
"count": 1,
"is_parallel": true,
"self": 0.000636820000067928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025074839999206233,
"count": 10,
"is_parallel": true,
"self": 0.0025074839999206233
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 125.70347830398998,
"count": 6714,
"is_parallel": true,
"self": 5.3179153459909685,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.86269563799749,
"count": 6714,
"is_parallel": true,
"self": 2.86269563799749
},
"communicator.exchange": {
"total": 99.43970948699871,
"count": 6714,
"is_parallel": true,
"self": 99.43970948699871
},
"steps_from_proto": {
"total": 18.083157833002815,
"count": 6714,
"is_parallel": true,
"self": 3.4510481430103255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.63210968999249,
"count": 67140,
"is_parallel": true,
"self": 14.63210968999249
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.9810000038851285e-05,
"count": 1,
"self": 5.9810000038851285e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 220.48880456099812,
"count": 223210,
"is_parallel": true,
"self": 5.324298198981069,
"children": {
"process_trajectory": {
"total": 120.5664836220169,
"count": 223210,
"is_parallel": true,
"self": 120.29075641001691,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2757272119999925,
"count": 1,
"is_parallel": true,
"self": 0.2757272119999925
}
}
},
"_update_policy": {
"total": 94.59802274000015,
"count": 33,
"is_parallel": true,
"self": 34.303516076001756,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.29450666399839,
"count": 1680,
"is_parallel": true,
"self": 60.29450666399839
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.26986281099993903,
"count": 1,
"self": 0.0014951919999930396,
"children": {
"RLTrainer._checkpoint": {
"total": 0.268367618999946,
"count": 1,
"self": 0.268367618999946
}
}
}
}
}
}
}