{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7218261957168579,
"min": 0.7218261957168579,
"max": 1.4441133737564087,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21597.0390625,
"min": 21597.0390625,
"max": 43808.625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989889.0,
"min": 29952.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989889.0,
"min": 29952.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3889804482460022,
"min": -0.09802351146936417,
"max": 0.396660178899765,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 103.07981872558594,
"min": -23.52564239501953,
"max": 106.70159149169922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016674673184752464,
"min": 0.009995872154831886,
"max": 0.2865060269832611,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.418788433074951,
"min": 2.428997039794922,
"max": 68.76144409179688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07481592784946164,
"min": 0.06519285933990474,
"max": 0.07481592784946164,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.047422989892463,
"min": 0.47856608152271773,
"max": 1.0637602270797681,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012172644650144364,
"min": 0.00012304434243024328,
"max": 0.014614346451748876,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17041702510202109,
"min": 0.001353487766732676,
"max": 0.2074666752549306,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.462404655421423e-06,
"min": 7.462404655421423e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010447366517589992,
"min": 0.00010447366517589992,
"max": 0.0031407428530858,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248743571428573,
"min": 0.10248743571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348241000000002,
"min": 1.3691136000000002,
"max": 2.4008654000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025849482785714276,
"min": 0.00025849482785714276,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003618927589999999,
"min": 0.003618927589999999,
"max": 0.10471672858,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008232370018959045,
"min": 0.008232370018959045,
"max": 0.44423121213912964,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11525318026542664,
"min": 0.11525318026542664,
"max": 3.1096184253692627,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 485.75,
"min": 438.3692307692308,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31088.0,
"min": 15984.0,
"max": 33397.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3579593476606533,
"min": -1.0000000521540642,
"max": 1.469301529114063,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 86.90939825028181,
"min": -32.000001668930054,
"max": 95.5045993924141,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3579593476606533,
"min": -1.0000000521540642,
"max": 1.469301529114063,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 86.90939825028181,
"min": -32.000001668930054,
"max": 95.5045993924141,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04200664673862775,
"min": 0.03850839187641843,
"max": 9.124957736581564,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.688425391272176,
"min": 2.4974802503638784,
"max": 145.99932378530502,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693395655",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693397782"
},
"total": 2127.0012970630005,
"count": 1,
"self": 0.5313192870007697,
"children": {
"run_training.setup": {
"total": 0.047773855000059484,
"count": 1,
"self": 0.047773855000059484
},
"TrainerController.start_learning": {
"total": 2126.422203921,
"count": 1,
"self": 1.3480650299834451,
"children": {
"TrainerController._reset_env": {
"total": 4.7518856459998915,
"count": 1,
"self": 4.7518856459998915
},
"TrainerController.advance": {
"total": 2120.2239509120163,
"count": 63351,
"self": 1.3842721880764657,
"children": {
"env_step": {
"total": 1471.1773926179455,
"count": 63351,
"self": 1363.1775062759452,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.1790042800119,
"count": 63351,
"self": 4.712589226032605,
"children": {
"TorchPolicy.evaluate": {
"total": 102.46641505397929,
"count": 62558,
"self": 102.46641505397929
}
}
},
"workers": {
"total": 0.8208820619884136,
"count": 63351,
"self": 0.0,
"children": {
"worker_root": {
"total": 2121.9165200150132,
"count": 63351,
"is_parallel": true,
"self": 868.7291140800173,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020041120001224044,
"count": 1,
"is_parallel": true,
"self": 0.0007128890001695254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001291222999952879,
"count": 8,
"is_parallel": true,
"self": 0.001291222999952879
}
}
},
"UnityEnvironment.step": {
"total": 0.13497258899997178,
"count": 1,
"is_parallel": true,
"self": 0.0006195979997301038,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047441000015169266,
"count": 1,
"is_parallel": true,
"self": 0.00047441000015169266
},
"communicator.exchange": {
"total": 0.13194154100006017,
"count": 1,
"is_parallel": true,
"self": 0.13194154100006017
},
"steps_from_proto": {
"total": 0.0019370400000298105,
"count": 1,
"is_parallel": true,
"self": 0.0003688730002977536,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015681669997320569,
"count": 8,
"is_parallel": true,
"self": 0.0015681669997320569
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1253.187405934996,
"count": 63350,
"is_parallel": true,
"self": 34.02596217103587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.02366981399814,
"count": 63350,
"is_parallel": true,
"self": 23.02366981399814
},
"communicator.exchange": {
"total": 1091.1641074889872,
"count": 63350,
"is_parallel": true,
"self": 1091.1641074889872
},
"steps_from_proto": {
"total": 104.97366646097475,
"count": 63350,
"is_parallel": true,
"self": 20.52328970110807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.45037675986669,
"count": 506800,
"is_parallel": true,
"self": 84.45037675986669
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.6622861059946,
"count": 63351,
"self": 2.5141167879326076,
"children": {
"process_trajectory": {
"total": 109.22838273205775,
"count": 63351,
"self": 109.00756570805811,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22081702399964342,
"count": 2,
"self": 0.22081702399964342
}
}
},
"_update_policy": {
"total": 535.9197865860042,
"count": 438,
"self": 348.7457687740664,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.1740178119378,
"count": 22815,
"self": 187.1740178119378
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.320001481682993e-07,
"count": 1,
"self": 9.320001481682993e-07
},
"TrainerController._save_models": {
"total": 0.09830140099984419,
"count": 1,
"self": 0.0014311659997474635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09687023500009673,
"count": 1,
"self": 0.09687023500009673
}
}
}
}
}
}
}