{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3695603311061859,
"min": 0.3656662404537201,
"max": 1.4379774332046509,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11228.720703125,
"min": 11013.6708984375,
"max": 43622.484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6549415588378906,
"min": -0.07943497598171234,
"max": 0.6753299236297607,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 187.96823120117188,
"min": -19.143829345703125,
"max": 194.49501037597656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009754575788974762,
"min": 0.0017544170841574669,
"max": 0.3510623574256897,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.79956316947937,
"min": 0.4859735369682312,
"max": 83.2017822265625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0696561697266242,
"min": 0.06568736907017107,
"max": 0.07377090357369319,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9751863761727387,
"min": 0.4917831659135034,
"max": 1.0610465479257982,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016841941632308243,
"min": 0.0014188096310544932,
"max": 0.01710731952473344,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2357871828523154,
"min": 0.009931667417381452,
"max": 0.2501647881364596,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.574861760792856e-06,
"min": 7.574861760792856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010604806465109998,
"min": 0.00010604806465109998,
"max": 0.0035080610306464,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252492142857143,
"min": 0.10252492142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353489000000001,
"min": 1.3691136000000002,
"max": 2.5693536,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026223965071428567,
"min": 0.00026223965071428567,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036713551099999994,
"min": 0.0036713551099999994,
"max": 0.11695842464,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011613845825195312,
"min": 0.011613845825195312,
"max": 0.5306715369224548,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16259384155273438,
"min": 0.16259384155273438,
"max": 3.714700698852539,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 282.22857142857146,
"min": 280.04587155963304,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29634.0,
"min": 15984.0,
"max": 33027.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6796590277836436,
"min": -1.0000000521540642,
"max": 1.7040097882934646,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 176.36419791728258,
"min": -32.000001668930054,
"max": 181.47259814292192,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6796590277836436,
"min": -1.0000000521540642,
"max": 1.7040097882934646,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 176.36419791728258,
"min": -32.000001668930054,
"max": 181.47259814292192,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.034018340713685445,
"min": 0.034018340713685445,
"max": 10.693337187170982,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5719257749369717,
"min": 3.571084239287302,
"max": 171.09339499473572,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687908682",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687910927"
},
"total": 2245.2982545349996,
"count": 1,
"self": 0.4756851539996205,
"children": {
"run_training.setup": {
"total": 0.039400620000037634,
"count": 1,
"self": 0.039400620000037634
},
"TrainerController.start_learning": {
"total": 2244.783168761,
"count": 1,
"self": 1.3491435260493745,
"children": {
"TrainerController._reset_env": {
"total": 4.786498280000046,
"count": 1,
"self": 4.786498280000046
},
"TrainerController.advance": {
"total": 2238.55264446595,
"count": 64096,
"self": 1.3849333799471424,
"children": {
"env_step": {
"total": 1609.0986441410196,
"count": 64096,
"self": 1500.2147607408965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.08499467007869,
"count": 64096,
"self": 4.681671674092058,
"children": {
"TorchPolicy.evaluate": {
"total": 103.40332299598663,
"count": 62559,
"self": 103.40332299598663
}
}
},
"workers": {
"total": 0.7988887300444958,
"count": 64096,
"self": 0.0,
"children": {
"worker_root": {
"total": 2239.5954521960925,
"count": 64096,
"is_parallel": true,
"self": 850.7204507801139,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018939029999955892,
"count": 1,
"is_parallel": true,
"self": 0.0005801280002515341,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001313774999744055,
"count": 8,
"is_parallel": true,
"self": 0.001313774999744055
}
}
},
"UnityEnvironment.step": {
"total": 0.07602796600008332,
"count": 1,
"is_parallel": true,
"self": 0.0005676370001310715,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004747929999666667,
"count": 1,
"is_parallel": true,
"self": 0.0004747929999666667
},
"communicator.exchange": {
"total": 0.07311825299996144,
"count": 1,
"is_parallel": true,
"self": 0.07311825299996144
},
"steps_from_proto": {
"total": 0.0018672830000241447,
"count": 1,
"is_parallel": true,
"self": 0.0003802480001695585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014870349998545862,
"count": 8,
"is_parallel": true,
"self": 0.0014870349998545862
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1388.8750014159787,
"count": 64095,
"is_parallel": true,
"self": 33.88781133391899,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.91642906100401,
"count": 64095,
"is_parallel": true,
"self": 21.91642906100401
},
"communicator.exchange": {
"total": 1233.365246109019,
"count": 64095,
"is_parallel": true,
"self": 1233.365246109019
},
"steps_from_proto": {
"total": 99.70551491203673,
"count": 64095,
"is_parallel": true,
"self": 19.427608965021363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.27790594701537,
"count": 512760,
"is_parallel": true,
"self": 80.27790594701537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 628.0690669449834,
"count": 64096,
"self": 2.556651933937701,
"children": {
"process_trajectory": {
"total": 107.13645258504675,
"count": 64096,
"self": 106.9357295020468,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20072308299995711,
"count": 2,
"self": 0.20072308299995711
}
}
},
"_update_policy": {
"total": 518.375962425999,
"count": 451,
"self": 331.6901736310201,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.68578879497886,
"count": 22788,
"self": 186.68578879497886
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0030003068095539e-06,
"count": 1,
"self": 1.0030003068095539e-06
},
"TrainerController._save_models": {
"total": 0.094881486000304,
"count": 1,
"self": 0.0013733480000155396,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09350813800028845,
"count": 1,
"self": 0.09350813800028845
}
}
}
}
}
}
}