{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 0.404713898897171,
      "min": 0.388362318277359,
      "max": 1.447376012802124,
      "count": 33
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 12167.318359375,
      "min": 11737.8623046875,
      "max": 43907.59765625,
      "count": 33
    },
    "Pyramids.Step.mean": {
      "value": 989946.0,
      "min": 29952.0,
      "max": 989946.0,
      "count": 33
    },
    "Pyramids.Step.sum": {
      "value": 989946.0,
      "min": 29952.0,
      "max": 989946.0,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": 0.5149554014205933,
      "min": -0.09209084510803223,
      "max": 0.5149554014205933,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": 141.0977783203125,
      "min": -22.193893432617188,
      "max": 141.0977783203125,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": -0.04016876593232155,
      "min": -0.04016876593232155,
      "max": 0.4002934992313385,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": -11.006241798400879,
      "min": -11.006241798400879,
      "max": 94.86956024169922,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.07029258864418982,
      "min": 0.06701452440361451,
      "max": 0.07344866531649316,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.9840962410186574,
      "min": 0.5017477453264124,
      "max": 1.0413641529400288,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.014035726248472375,
      "min": 0.0005578439872526611,
      "max": 0.015477183326386363,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.19650016747861324,
      "min": 0.007251971834284593,
      "max": 0.21668056656940907,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.34004755335e-06,
      "min": 7.34004755335e-06,
      "max": 0.00029515063018788575,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.0001027606657469,
      "min": 0.0001027606657469,
      "max": 0.0033745624751458994,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.10244665,
      "min": 0.10244665,
      "max": 0.19838354285714285,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.4342531,
      "min": 1.3886848,
      "max": 2.4248541,
      "count": 33
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.00025442033500000006,
      "min": 0.00025442033500000006,
      "max": 0.00983851593142857,
      "count": 33
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.003561884690000001,
      "min": 0.003561884690000001,
      "max": 0.11250292459000001,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.01115137618035078,
      "min": 0.01115137618035078,
      "max": 0.3420438766479492,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.15611927211284637,
      "min": 0.15611927211284637,
      "max": 2.3943071365356445,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 366.29761904761904,
      "min": 366.29761904761904,
      "max": 999.0,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 30769.0,
      "min": 15984.0,
      "max": 33016.0,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": 1.5108047313988209,
      "min": -1.0000000521540642,
      "max": 1.5108047313988209,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": 126.90759743750095,
      "min": -30.352201730012894,
      "max": 126.90759743750095,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": 1.5108047313988209,
      "min": -1.0000000521540642,
      "max": 1.5108047313988209,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": 126.90759743750095,
      "min": -30.352201730012894,
      "max": 126.90759743750095,
      "count": 33
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 0.04211168914385087,
      "min": 0.04211168914385087,
      "max": 6.5513052036985755,
      "count": 33
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 3.5373818880834733,
      "min": 3.5084612832870334,
      "max": 104.82088325917721,
      "count": 33
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1682591934",
    "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
    "mlagents_version": "0.31.0.dev0",
    "mlagents_envs_version": "0.31.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "1.11.0+cu102",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1682593994"
  },
  "total": 2060.1114051409995,
  "count": 1,
  "self": 0.4768918109994047,
  "children": {
    "run_training.setup": {
      "total": 0.1095395570000619,
      "count": 1,
      "self": 0.1095395570000619
    },
    "TrainerController.start_learning": {
      "total": 2059.524973773,
      "count": 1,
      "self": 1.3185361310061126,
      "children": {
        "TrainerController._reset_env": {
          "total": 3.7919083669999054,
          "count": 1,
          "self": 3.7919083669999054
        },
        "TrainerController.advance": {
          "total": 2054.3235150619935,
          "count": 63600,
          "self": 1.3575455678897015,
          "children": {
            "env_step": {
              "total": 1430.0237639810039,
              "count": 63600,
              "self": 1324.904932217009,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 104.28504615801194,
                  "count": 63600,
                  "self": 4.656458474026294,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 99.62858768398564,
                      "count": 62552,
                      "self": 99.62858768398564
                    }
                  }
                },
                "workers": {
                  "total": 0.8337856059829392,
                  "count": 63600,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 2054.867181988064,
                      "count": 63600,
                      "is_parallel": true,
                      "self": 836.3669420321385,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.001812748000247666,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0005708360008611635,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.0012419119993865024,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.0012419119993865024
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.046316713000123855,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0005523410000023432,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0004899619998468552,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0004899619998468552
                                },
                                "communicator.exchange": {
                                  "total": 0.043639080000048125,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.043639080000048125
                                },
                                "steps_from_proto": {
                                  "total": 0.0016353300002265314,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00038040100025682477,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0012549289999697066,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0012549289999697066
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1218.5002399559257,
                          "count": 63599,
                          "is_parallel": true,
                          "self": 32.03386976389129,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 22.499093553989496,
                              "count": 63599,
                              "is_parallel": true,
                              "self": 22.499093553989496
                            },
                            "communicator.exchange": {
                              "total": 1072.855127122058,
                              "count": 63599,
                              "is_parallel": true,
                              "self": 1072.855127122058
                            },
                            "steps_from_proto": {
                              "total": 91.11214951598686,
                              "count": 63599,
                              "is_parallel": true,
                              "self": 19.206868793090052,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 71.9052807228968,
                                  "count": 508792,
                                  "is_parallel": true,
                                  "self": 71.9052807228968
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 622.9422055130999,
              "count": 63600,
              "self": 2.521415385218006,
              "children": {
                "process_trajectory": {
                  "total": 103.45003195489062,
                  "count": 63600,
                  "self": 103.24269225589069,
                  "children": {
                    "RLTrainer._checkpoint": {
                      "total": 0.207339698999931,
                      "count": 2,
                      "self": 0.207339698999931
                    }
                  }
                },
                "_update_policy": {
                  "total": 516.9707581729913,
                  "count": 447,
                  "self": 332.0455394859555,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 184.9252186870358,
                      "count": 22770,
                      "self": 184.9252186870358
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 8.540000635548495e-07,
          "count": 1,
          "self": 8.540000635548495e-07
        },
        "TrainerController._save_models": {
          "total": 0.0910133590004989,
          "count": 1,
          "self": 0.0015288690010493156,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.08948448999944958,
              "count": 1,
              "self": 0.08948448999944958
            }
          }
        }
      }
    }
  }
}