{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3684692680835724,
"min": 0.3578437566757202,
"max": 1.4210155010223389,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11006.9140625,
"min": 10729.5869140625,
"max": 43107.92578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29995.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29995.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.38654443621635437,
"min": -0.096100352704525,
"max": 0.5439761281013489,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 103.20736694335938,
"min": -23.064085006713867,
"max": 151.7693328857422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01998446322977543,
"min": 0.006764187011867762,
"max": 0.3527693450450897,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.335851669311523,
"min": 1.6842825412750244,
"max": 83.9591064453125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06792522436345001,
"min": 0.06334115784210595,
"max": 0.07233228066972656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9509531410883002,
"min": 0.49443029696570073,
"max": 1.0286479569040239,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015161994724740651,
"min": 0.0004526815782604029,
"max": 0.015495821125213518,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2122679261463691,
"min": 0.0063375420956456405,
"max": 0.21694149575298927,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2550190102642846e-06,
"min": 7.2550190102642846e-06,
"max": 0.0002952365587306714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010157026614369998,
"min": 0.00010157026614369998,
"max": 0.003633586988804399,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241830714285716,
"min": 0.10241830714285716,
"max": 0.1984121857142857,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338563000000002,
"min": 1.3888852999999999,
"max": 2.6111956,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002515888835714286,
"min": 0.0002515888835714286,
"max": 0.009841377352857143,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00352224437,
"min": 0.00352224437,
"max": 0.12113844044,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010798096656799316,
"min": 0.010798096656799316,
"max": 0.5842085480690002,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15117335319519043,
"min": 0.15117335319519043,
"max": 4.0894598960876465,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 440.59154929577466,
"min": 348.39285714285717,
"max": 997.0967741935484,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31282.0,
"min": 16538.0,
"max": 33982.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3015999758823051,
"min": -0.9332516654845207,
"max": 1.6112433566027378,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 93.71519826352596,
"min": -28.93080163002014,
"max": 133.73319859802723,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3015999758823051,
"min": -0.9332516654845207,
"max": 1.6112433566027378,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 93.71519826352596,
"min": -28.93080163002014,
"max": 133.73319859802723,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.049021377686383656,
"min": 0.03999345808578178,
"max": 10.794142111259347,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.529539193419623,
"min": 3.2991481129356544,
"max": 183.50041589140892,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706501155",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706503463"
},
"total": 2308.129902916,
"count": 1,
"self": 0.49170943399985845,
"children": {
"run_training.setup": {
"total": 0.05699374099992838,
"count": 1,
"self": 0.05699374099992838
},
"TrainerController.start_learning": {
"total": 2307.581199741,
"count": 1,
"self": 1.4317969400326547,
"children": {
"TrainerController._reset_env": {
"total": 3.0447921439999845,
"count": 1,
"self": 3.0447921439999845
},
"TrainerController.advance": {
"total": 2303.016596910967,
"count": 63770,
"self": 1.601564037933258,
"children": {
"env_step": {
"total": 1664.610752839992,
"count": 63770,
"self": 1526.5127461119637,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.18364373304803,
"count": 63770,
"self": 4.924271198028919,
"children": {
"TorchPolicy.evaluate": {
"total": 132.2593725350191,
"count": 62563,
"self": 132.2593725350191
}
}
},
"workers": {
"total": 0.9143629949802516,
"count": 63770,
"self": 0.0,
"children": {
"worker_root": {
"total": 2302.1687234540086,
"count": 63770,
"is_parallel": true,
"self": 902.4272290980675,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004328435999923386,
"count": 1,
"is_parallel": true,
"self": 0.00300995699979012,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013184790001332658,
"count": 8,
"is_parallel": true,
"self": 0.0013184790001332658
}
}
},
"UnityEnvironment.step": {
"total": 0.04856184699997357,
"count": 1,
"is_parallel": true,
"self": 0.0005905759999222937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000501502000020082,
"count": 1,
"is_parallel": true,
"self": 0.000501502000020082
},
"communicator.exchange": {
"total": 0.04582509499994103,
"count": 1,
"is_parallel": true,
"self": 0.04582509499994103
},
"steps_from_proto": {
"total": 0.0016446740000901627,
"count": 1,
"is_parallel": true,
"self": 0.00034038699982374965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001304287000266413,
"count": 8,
"is_parallel": true,
"self": 0.001304287000266413
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1399.741494355941,
"count": 63769,
"is_parallel": true,
"self": 36.19557494505602,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.36831988095605,
"count": 63769,
"is_parallel": true,
"self": 25.36831988095605
},
"communicator.exchange": {
"total": 1234.0900331099735,
"count": 63769,
"is_parallel": true,
"self": 1234.0900331099735
},
"steps_from_proto": {
"total": 104.08756641995546,
"count": 63769,
"is_parallel": true,
"self": 21.466564085883192,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.62100233407227,
"count": 510152,
"is_parallel": true,
"self": 82.62100233407227
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 636.804280033042,
"count": 63770,
"self": 2.9100215810192367,
"children": {
"process_trajectory": {
"total": 133.43294661502375,
"count": 63770,
"self": 133.2436610020235,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18928561300026558,
"count": 2,
"self": 0.18928561300026558
}
}
},
"_update_policy": {
"total": 500.461311836999,
"count": 457,
"self": 297.77852626596996,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.68278557102906,
"count": 22821,
"self": 202.68278557102906
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.71000338520389e-07,
"count": 1,
"self": 8.71000338520389e-07
},
"TrainerController._save_models": {
"total": 0.08801287500000399,
"count": 1,
"self": 0.00144073199999184,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08657214300001215,
"count": 1,
"self": 0.08657214300001215
}
}
}
}
}
}
}