{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42781710624694824,
"min": 0.3987566828727722,
"max": 1.4374667406082153,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12766.0625,
"min": 11975.4609375,
"max": 43606.9921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3795369863510132,
"min": -0.2196071892976761,
"max": 0.3795369863510132,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 100.95684051513672,
"min": -52.046905517578125,
"max": 100.95684051513672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.005423555616289377,
"min": -0.01937665417790413,
"max": 0.6555060744285583,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.4426658153533936,
"min": -4.979800224304199,
"max": 157.3214569091797,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04890970276097679,
"min": 0.04420265212621418,
"max": 0.052743349841646976,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.684735838653675,
"min": 0.36920344889152884,
"max": 0.7323900866322218,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014641974369407675,
"min": 0.0011712158364539376,
"max": 0.014641974369407675,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20498764117170745,
"min": 0.01405459003744725,
"max": 0.20498764117170745,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.517776065535711e-06,
"min": 7.517776065535711e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010524886491749996,
"min": 0.00010524886491749996,
"max": 0.0033798308733897996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250589285714286,
"min": 0.10250589285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350825,
"min": 1.3886848,
"max": 2.5266102,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002603386964285714,
"min": 0.0002603386964285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036447417499999996,
"min": 0.0036447417499999996,
"max": 0.11268835898,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01846681721508503,
"min": 0.018234508112072945,
"max": 0.8788381814956665,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2585354447364807,
"min": 0.2552831172943115,
"max": 6.151867389678955,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 444.6818181818182,
"min": 444.6818181818182,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29349.0,
"min": 15984.0,
"max": 32650.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4037545240963951,
"min": -1.0000000521540642,
"max": 1.4037545240963951,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.64779859036207,
"min": -29.983001671731472,
"max": 92.64779859036207,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4037545240963951,
"min": -1.0000000521540642,
"max": 1.4037545240963951,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.64779859036207,
"min": -29.983001671731472,
"max": 92.64779859036207,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08413686282019074,
"min": 0.08413686282019074,
"max": 12.842686912044883,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.553032946132589,
"min": 5.553032946132589,
"max": 205.48299059271812,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675006070",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675007891"
},
"total": 1820.5453580289998,
"count": 1,
"self": 0.4437269400000332,
"children": {
"run_training.setup": {
"total": 0.10418590099999392,
"count": 1,
"self": 0.10418590099999392
},
"TrainerController.start_learning": {
"total": 1819.9974451879998,
"count": 1,
"self": 1.2821515949569857,
"children": {
"TrainerController._reset_env": {
"total": 9.623354914000004,
"count": 1,
"self": 9.623354914000004
},
"TrainerController.advance": {
"total": 1808.998927162043,
"count": 63518,
"self": 1.3982047540171152,
"children": {
"env_step": {
"total": 1224.953974950998,
"count": 63518,
"self": 1121.66778938999,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.52533720898083,
"count": 63518,
"self": 4.243416661964659,
"children": {
"TorchPolicy.evaluate": {
"total": 98.28192054701617,
"count": 62573,
"self": 32.991862805068365,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.2900577419478,
"count": 62573,
"self": 65.2900577419478
}
}
}
}
},
"workers": {
"total": 0.760848352026926,
"count": 63518,
"self": 0.0,
"children": {
"worker_root": {
"total": 1816.6398605229929,
"count": 63518,
"is_parallel": true,
"self": 791.4069730459905,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005635764999965431,
"count": 1,
"is_parallel": true,
"self": 0.0033116189999873313,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023241459999781,
"count": 8,
"is_parallel": true,
"self": 0.0023241459999781
}
}
},
"UnityEnvironment.step": {
"total": 0.0436283489999596,
"count": 1,
"is_parallel": true,
"self": 0.00047688799992329223,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004015449999883458,
"count": 1,
"is_parallel": true,
"self": 0.0004015449999883458
},
"communicator.exchange": {
"total": 0.04121040800004039,
"count": 1,
"is_parallel": true,
"self": 0.04121040800004039
},
"steps_from_proto": {
"total": 0.0015395080000075723,
"count": 1,
"is_parallel": true,
"self": 0.0004055639999478444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011339440000597278,
"count": 8,
"is_parallel": true,
"self": 0.0011339440000597278
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1025.2328874770024,
"count": 63517,
"is_parallel": true,
"self": 27.00487553695939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.51991991302333,
"count": 63517,
"is_parallel": true,
"self": 21.51991991302333
},
"communicator.exchange": {
"total": 888.5866067600116,
"count": 63517,
"is_parallel": true,
"self": 888.5866067600116
},
"steps_from_proto": {
"total": 88.12148526700798,
"count": 63517,
"is_parallel": true,
"self": 20.732534112045016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.38895115496297,
"count": 508136,
"is_parallel": true,
"self": 67.38895115496297
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 582.6467474570279,
"count": 63518,
"self": 2.432134936010584,
"children": {
"process_trajectory": {
"total": 150.80490508602003,
"count": 63518,
"self": 150.61827570202018,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1866293839998434,
"count": 2,
"self": 0.1866293839998434
}
}
},
"_update_policy": {
"total": 429.4097074349973,
"count": 451,
"self": 194.2230596089879,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.1866478260094,
"count": 11298,
"self": 235.1866478260094
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.511999926151475e-06,
"count": 1,
"self": 1.511999926151475e-06
},
"TrainerController._save_models": {
"total": 0.09301000499999645,
"count": 1,
"self": 0.0014668209996671067,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09154318400032935,
"count": 1,
"self": 0.09154318400032935
}
}
}
}
}
}
}