{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5250925421714783,
"min": 0.5250925421714783,
"max": 1.4701387882232666,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15685.564453125,
"min": 15685.564453125,
"max": 44598.12890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29979.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29979.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.27301257848739624,
"min": -0.12793001532554626,
"max": 0.31010231375694275,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 69.61820983886719,
"min": -30.447341918945312,
"max": 80.31649780273438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010765371844172478,
"min": -0.00022499257465824485,
"max": 0.5418302416801453,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7451698780059814,
"min": -0.054673194885253906,
"max": 128.95559692382812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06993935342133957,
"min": 0.06566988492184983,
"max": 0.07219877807905764,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0490903013200936,
"min": 0.4920834284658803,
"max": 1.0490903013200936,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01420253910555343,
"min": 0.0004574477229942237,
"max": 0.01486172111981432,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21303808658330145,
"min": 0.004117029506948013,
"max": 0.21303808658330145,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.501977499373333e-06,
"min": 7.501977499373333e-06,
"max": 0.00029523724444472857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001125296624906,
"min": 0.0001125296624906,
"max": 0.0031394468535178,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250062666666668,
"min": 0.10250062666666668,
"max": 0.19841241428571427,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375094000000002,
"min": 1.3888869,
"max": 2.4007173,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000259812604,
"min": 0.000259812604,
"max": 0.009841400187142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038971890600000007,
"min": 0.0038971890600000007,
"max": 0.10467357178,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011450548656284809,
"min": 0.011450548656284809,
"max": 0.3489531874656677,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.171758234500885,
"min": 0.16367559134960175,
"max": 2.4426722526550293,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 498.56140350877195,
"min": 498.56140350877195,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28418.0,
"min": 16650.0,
"max": 32473.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.255754357890079,
"min": -0.9999936006722911,
"max": 1.255754357890079,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 71.5779983997345,
"min": -31.999601677060127,
"max": 71.5779983997345,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.255754357890079,
"min": -0.9999936006722911,
"max": 1.255754357890079,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 71.5779983997345,
"min": -31.999601677060127,
"max": 71.5779983997345,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05968442037211437,
"min": 0.05968442037211437,
"max": 6.843078768428634,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.402011961210519,
"min": 3.402011961210519,
"max": 116.33233906328678,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735420172",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735422305"
},
"total": 2133.6128095259996,
"count": 1,
"self": 0.5271703579996938,
"children": {
"run_training.setup": {
"total": 0.09472916000004261,
"count": 1,
"self": 0.09472916000004261
},
"TrainerController.start_learning": {
"total": 2132.990910008,
"count": 1,
"self": 1.3555142679479104,
"children": {
"TrainerController._reset_env": {
"total": 2.1510065620000205,
"count": 1,
"self": 2.1510065620000205
},
"TrainerController.advance": {
"total": 2129.3930461920518,
"count": 63373,
"self": 1.4357720680495731,
"children": {
"env_step": {
"total": 1438.7150318410395,
"count": 63373,
"self": 1287.1159522270405,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.79945558089184,
"count": 63373,
"self": 4.636686686887515,
"children": {
"TorchPolicy.evaluate": {
"total": 146.16276889400433,
"count": 62562,
"self": 146.16276889400433
}
}
},
"workers": {
"total": 0.7996240331071931,
"count": 63373,
"self": 0.0,
"children": {
"worker_root": {
"total": 2128.0341907050047,
"count": 63373,
"is_parallel": true,
"self": 957.6517383869805,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022348649999912595,
"count": 1,
"is_parallel": true,
"self": 0.0007350920009230322,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014997729990682274,
"count": 8,
"is_parallel": true,
"self": 0.0014997729990682274
}
}
},
"UnityEnvironment.step": {
"total": 0.04873469899985139,
"count": 1,
"is_parallel": true,
"self": 0.0006289409998316842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004775560000780388,
"count": 1,
"is_parallel": true,
"self": 0.0004775560000780388
},
"communicator.exchange": {
"total": 0.045872278999922855,
"count": 1,
"is_parallel": true,
"self": 0.045872278999922855
},
"steps_from_proto": {
"total": 0.0017559230000188109,
"count": 1,
"is_parallel": true,
"self": 0.0003960759995607077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013598470004581031,
"count": 8,
"is_parallel": true,
"self": 0.0013598470004581031
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1170.3824523180242,
"count": 63372,
"is_parallel": true,
"self": 33.07074013713509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.788007705957625,
"count": 63372,
"is_parallel": true,
"self": 23.788007705957625
},
"communicator.exchange": {
"total": 1013.8126401930031,
"count": 63372,
"is_parallel": true,
"self": 1013.8126401930031
},
"steps_from_proto": {
"total": 99.71106428192843,
"count": 63372,
"is_parallel": true,
"self": 20.369639748121244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.34142453380719,
"count": 506976,
"is_parallel": true,
"self": 79.34142453380719
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 689.2422422829627,
"count": 63373,
"self": 2.531996254036585,
"children": {
"process_trajectory": {
"total": 132.97342978992265,
"count": 63373,
"self": 132.77515346292284,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19827632699980313,
"count": 2,
"self": 0.19827632699980313
}
}
},
"_update_policy": {
"total": 553.7368162390035,
"count": 439,
"self": 309.94946597504395,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.78735026395952,
"count": 22815,
"self": 243.78735026395952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0140001904801466e-06,
"count": 1,
"self": 1.0140001904801466e-06
},
"TrainerController._save_models": {
"total": 0.09134197200000926,
"count": 1,
"self": 0.0016589810002187733,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08968299099979049,
"count": 1,
"self": 0.08968299099979049
}
}
}
}
}
}
}