{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43258121609687805,
"min": 0.43258121609687805,
"max": 1.4753376245498657,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12915.14453125,
"min": 12915.14453125,
"max": 44755.84375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29952.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29952.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5676494240760803,
"min": -0.10332801192998886,
"max": 0.5676494240760803,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 158.3741912841797,
"min": -25.00537872314453,
"max": 158.3741912841797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.033701274544000626,
"min": -0.033701274544000626,
"max": 0.3505095839500427,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -9.402655601501465,
"min": -9.402655601501465,
"max": 83.07077026367188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06861793426571791,
"min": 0.06416170422328703,
"max": 0.07307524685021863,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9606510797200507,
"min": 0.48958709732051964,
"max": 1.062914101125595,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015473870816163643,
"min": 0.000319722598690465,
"max": 0.016339953650539325,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.216634191426291,
"min": 0.0038366711842855797,
"max": 0.24509930475808986,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.570618905064284e-06,
"min": 7.570618905064284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010598866467089997,
"min": 0.00010598866467089997,
"max": 0.0036329683890105996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252350714285716,
"min": 0.10252350714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353291000000001,
"min": 1.3886848,
"max": 2.6109894000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026209836357142855,
"min": 0.00026209836357142855,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036693770899999998,
"min": 0.0036693770899999998,
"max": 0.12111784106000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009612570516765118,
"min": 0.009612570516765118,
"max": 0.43380114436149597,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1345759928226471,
"min": 0.1345759928226471,
"max": 3.0366079807281494,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 338.5376344086022,
"min": 337.5243902439024,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31484.0,
"min": 15984.0,
"max": 32389.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6184472898321767,
"min": -1.0000000521540642,
"max": 1.662475589026765,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 150.51559795439243,
"min": -31.99760167300701,
"max": 150.51559795439243,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6184472898321767,
"min": -1.0000000521540642,
"max": 1.662475589026765,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 150.51559795439243,
"min": -31.99760167300701,
"max": 150.51559795439243,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.033490525001862774,
"min": 0.033490525001862774,
"max": 9.157164509408176,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1146188251732383,
"min": 2.8527765020844527,
"max": 146.51463215053082,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673535432",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673537650"
},
"total": 2217.76918483,
"count": 1,
"self": 0.47526730800018413,
"children": {
"run_training.setup": {
"total": 0.1211880260000271,
"count": 1,
"self": 0.1211880260000271
},
"TrainerController.start_learning": {
"total": 2217.172729496,
"count": 1,
"self": 1.607003030974738,
"children": {
"TrainerController._reset_env": {
"total": 6.5794888249999985,
"count": 1,
"self": 6.5794888249999985
},
"TrainerController.advance": {
"total": 2208.869894805025,
"count": 63749,
"self": 1.704469885040453,
"children": {
"env_step": {
"total": 1509.2917041580297,
"count": 63749,
"self": 1386.2767269530545,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.9872410970554,
"count": 63749,
"self": 4.7647184360751,
"children": {
"TorchPolicy.evaluate": {
"total": 117.2225226609803,
"count": 62587,
"self": 39.27744317390534,
"children": {
"TorchPolicy.sample_actions": {
"total": 77.94507948707496,
"count": 62587,
"self": 77.94507948707496
}
}
}
}
},
"workers": {
"total": 1.0277361079197362,
"count": 63749,
"self": 0.0,
"children": {
"worker_root": {
"total": 2211.685368711995,
"count": 63749,
"is_parallel": true,
"self": 940.8029477389377,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001760944999887215,
"count": 1,
"is_parallel": true,
"self": 0.000617103999502433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001143841000384782,
"count": 8,
"is_parallel": true,
"self": 0.001143841000384782
}
}
},
"UnityEnvironment.step": {
"total": 0.07558176699990327,
"count": 1,
"is_parallel": true,
"self": 0.0005746119998093491,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004695170000559301,
"count": 1,
"is_parallel": true,
"self": 0.0004695170000559301
},
"communicator.exchange": {
"total": 0.0727412579999509,
"count": 1,
"is_parallel": true,
"self": 0.0727412579999509
},
"steps_from_proto": {
"total": 0.0017963800000870833,
"count": 1,
"is_parallel": true,
"self": 0.00042513400012467173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013712459999624116,
"count": 8,
"is_parallel": true,
"self": 0.0013712459999624116
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1270.8824209730574,
"count": 63748,
"is_parallel": true,
"self": 31.25788780788116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.288579189108077,
"count": 63748,
"is_parallel": true,
"self": 24.288579189108077
},
"communicator.exchange": {
"total": 1113.6616401200722,
"count": 63748,
"is_parallel": true,
"self": 1113.6616401200722
},
"steps_from_proto": {
"total": 101.67431385599593,
"count": 63748,
"is_parallel": true,
"self": 24.63689708615857,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.03741676983736,
"count": 509984,
"is_parallel": true,
"self": 77.03741676983736
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 697.8737207619547,
"count": 63749,
"self": 3.1363523580178025,
"children": {
"process_trajectory": {
"total": 152.84846244393316,
"count": 63749,
"self": 152.63742640193345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21103604199970505,
"count": 2,
"self": 0.21103604199970505
}
}
},
"_update_policy": {
"total": 541.8889059600037,
"count": 452,
"self": 208.63664599000526,
"children": {
"TorchPPOOptimizer.update": {
"total": 333.2522599699985,
"count": 22764,
"self": 333.2522599699985
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.479999789618887e-07,
"count": 1,
"self": 9.479999789618887e-07
},
"TrainerController._save_models": {
"total": 0.11634188700008963,
"count": 1,
"self": 0.001485841999965487,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11485604500012414,
"count": 1,
"self": 0.11485604500012414
}
}
}
}
}
}
}