{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3939434885978699,
"min": 0.3939434885978699,
"max": 1.394322156906128,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11881.3359375,
"min": 11851.8017578125,
"max": 42298.15625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29907.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29907.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5625903010368347,
"min": -0.11684034019708633,
"max": 0.6459452509880066,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.52528381347656,
"min": -28.15852165222168,
"max": 182.15655517578125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014059197157621384,
"min": 0.00679186824709177,
"max": 0.33688104152679443,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.936575174331665,
"min": 1.7115508317947388,
"max": 80.85144805908203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0668055855200974,
"min": 0.0651183268935729,
"max": 0.07349430345987004,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9352781972813636,
"min": 0.5431228465686242,
"max": 1.0616784234617964,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01519523043274659,
"min": 0.0006633689741362352,
"max": 0.01651806503133331,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21273322605845227,
"min": 0.008623796663771058,
"max": 0.24777097546999963,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.345297551599997e-06,
"min": 7.345297551599997e-06,
"max": 0.0002952096390967875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010283416572239997,
"min": 0.00010283416572239997,
"max": 0.0036335167888278,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024484,
"min": 0.1024484,
"max": 0.19840321249999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342776,
"min": 1.4342776,
"max": 2.6111722000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025459515999999994,
"min": 0.00025459515999999994,
"max": 0.00984048092875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003564332239999999,
"min": 0.003564332239999999,
"max": 0.12113610278,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011390263214707375,
"min": 0.011094793677330017,
"max": 0.5234177112579346,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15946368873119354,
"min": 0.15532711148262024,
"max": 4.187341690063477,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 327.7608695652174,
"min": 306.319587628866,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30154.0,
"min": 16834.0,
"max": 32898.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5640637144282623,
"min": -0.9999250518158078,
"max": 1.6387850840358025,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.32979801297188,
"min": -31.99760165810585,
"max": 159.95899794250727,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5640637144282623,
"min": -0.9999250518158078,
"max": 1.6387850840358025,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.32979801297188,
"min": -31.99760165810585,
"max": 159.95899794250727,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0383908855097156,
"min": 0.0383908855097156,
"max": 9.864942222833633,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4935705813841196,
"min": 3.4472129786154255,
"max": 167.70401778817177,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699655308",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/bjqrn/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1699657768"
},
"total": 2460.3603948770005,
"count": 1,
"self": 0.5287520240008234,
"children": {
"run_training.setup": {
"total": 0.023333496999839554,
"count": 1,
"self": 0.023333496999839554
},
"TrainerController.start_learning": {
"total": 2459.808309356,
"count": 1,
"self": 1.2394986199196865,
"children": {
"TrainerController._reset_env": {
"total": 1.9446183729996847,
"count": 1,
"self": 1.9446183729996847
},
"TrainerController.advance": {
"total": 2456.512537464081,
"count": 63958,
"self": 1.19454152734761,
"children": {
"env_step": {
"total": 1601.6272744249554,
"count": 63958,
"self": 1373.3329923829197,
"children": {
"SubprocessEnvManager._take_step": {
"total": 227.34783217101267,
"count": 63958,
"self": 4.329091906124631,
"children": {
"TorchPolicy.evaluate": {
"total": 223.01874026488804,
"count": 62558,
"self": 223.01874026488804
}
}
},
"workers": {
"total": 0.9464498710231055,
"count": 63958,
"self": 0.0,
"children": {
"worker_root": {
"total": 2456.6035100961344,
"count": 63958,
"is_parallel": true,
"self": 1175.338604782195,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001395990000673919,
"count": 1,
"is_parallel": true,
"self": 0.0004016329994556145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009943570012183045,
"count": 8,
"is_parallel": true,
"self": 0.0009943570012183045
}
}
},
"UnityEnvironment.step": {
"total": 0.052877744999932474,
"count": 1,
"is_parallel": true,
"self": 0.0003290390013717115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003801019993261434,
"count": 1,
"is_parallel": true,
"self": 0.0003801019993261434
},
"communicator.exchange": {
"total": 0.05069922999973642,
"count": 1,
"is_parallel": true,
"self": 0.05069922999973642
},
"steps_from_proto": {
"total": 0.0014693739994982025,
"count": 1,
"is_parallel": true,
"self": 0.0004485039999053697,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010208699995928328,
"count": 8,
"is_parallel": true,
"self": 0.0010208699995928328
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1281.2649053139394,
"count": 63957,
"is_parallel": true,
"self": 19.218331353144094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 14.453738863869148,
"count": 63957,
"is_parallel": true,
"self": 14.453738863869148
},
"communicator.exchange": {
"total": 1192.6881953389639,
"count": 63957,
"is_parallel": true,
"self": 1192.6881953389639
},
"steps_from_proto": {
"total": 54.904639757962286,
"count": 63957,
"is_parallel": true,
"self": 12.232616655577658,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.67202310238463,
"count": 511656,
"is_parallel": true,
"self": 42.67202310238463
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 853.6907215117781,
"count": 63958,
"self": 2.7290541088086684,
"children": {
"process_trajectory": {
"total": 122.26911447996918,
"count": 63958,
"self": 122.00264219296969,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26647228699948755,
"count": 2,
"self": 0.26647228699948755
}
}
},
"_update_policy": {
"total": 728.6925529230002,
"count": 457,
"self": 292.66773103409105,
"children": {
"TorchPPOOptimizer.update": {
"total": 436.0248218889092,
"count": 22848,
"self": 436.0248218889092
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000000424450263e-06,
"count": 1,
"self": 1.2000000424450263e-06
},
"TrainerController._save_models": {
"total": 0.11165369899936195,
"count": 1,
"self": 0.0010513589995753136,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11060233999978664,
"count": 1,
"self": 0.11060233999978664
}
}
}
}
}
}
}