{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6913639307022095,
"min": 0.6807979345321655,
"max": 1.5183219909667969,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20740.91796875,
"min": 20423.9375,
"max": 46059.81640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989924.0,
"min": 29952.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.20129531621932983,
"min": -0.13373512029647827,
"max": 0.20129531621932983,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 51.33030700683594,
"min": -31.69522476196289,
"max": 51.33030700683594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013980908319354057,
"min": -0.05154495686292648,
"max": 0.2703520655632019,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.565131664276123,
"min": -12.937784194946289,
"max": 64.07344055175781,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06798828124609667,
"min": 0.06421265105921264,
"max": 0.07283667660642766,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9518359374453534,
"min": 0.49039784042166695,
"max": 1.0432544064613323,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007759642990563776,
"min": 0.00011295626906233294,
"max": 0.008942335689685674,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10863500186789286,
"min": 0.0014684314978103282,
"max": 0.12519269965559943,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.65084744975e-06,
"min": 7.65084744975e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001071118642965,
"min": 0.0001071118642965,
"max": 0.0030216965927679002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255025000000002,
"min": 0.10255025000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357035000000002,
"min": 1.3691136000000002,
"max": 2.3072321000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026476997500000005,
"min": 0.00026476997500000005,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037067796500000003,
"min": 0.0037067796500000003,
"max": 0.10075248678999997,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011857847683131695,
"min": 0.011429036036133766,
"max": 0.3653551936149597,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16600987315177917,
"min": 0.16000650823116302,
"max": 2.5574862957000732,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 661.0816326530612,
"min": 639.2826086956521,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32393.0,
"min": 15984.0,
"max": 32413.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6791124644999703,
"min": -1.0000000521540642,
"max": 0.7083130028584729,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 32.59739829599857,
"min": -32.000001668930054,
"max": 32.59739829599857,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6791124644999703,
"min": -1.0000000521540642,
"max": 0.7083130028584729,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 32.59739829599857,
"min": -32.000001668930054,
"max": 32.59739829599857,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08081953146150529,
"min": 0.07828857509593945,
"max": 7.081630400381982,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8793375101522543,
"min": 3.508715468691662,
"max": 113.30608640611172,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739589091",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739592062"
},
"total": 2970.544901956,
"count": 1,
"self": 0.6347414580000077,
"children": {
"run_training.setup": {
"total": 0.0363617970000405,
"count": 1,
"self": 0.0363617970000405
},
"TrainerController.start_learning": {
"total": 2969.873798701,
"count": 1,
"self": 2.3931772859664306,
"children": {
"TrainerController._reset_env": {
"total": 3.40054928699999,
"count": 1,
"self": 3.40054928699999
},
"TrainerController.advance": {
"total": 2963.9980465770336,
"count": 63221,
"self": 2.3385225339638964,
"children": {
"env_step": {
"total": 1899.1079108229694,
"count": 63221,
"self": 1735.822659021967,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.9474087539901,
"count": 63221,
"self": 6.517275140983429,
"children": {
"TorchPolicy.evaluate": {
"total": 155.43013361300666,
"count": 62568,
"self": 155.43013361300666
}
}
},
"workers": {
"total": 1.3378430470122566,
"count": 63221,
"self": 0.0,
"children": {
"worker_root": {
"total": 2962.8447243310206,
"count": 63221,
"is_parallel": true,
"self": 1392.0379128000177,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006175393999910739,
"count": 1,
"is_parallel": true,
"self": 0.004150149999986752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002025243999923987,
"count": 8,
"is_parallel": true,
"self": 0.002025243999923987
}
}
},
"UnityEnvironment.step": {
"total": 0.06153402000006736,
"count": 1,
"is_parallel": true,
"self": 0.0006652440001744253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005367569999634725,
"count": 1,
"is_parallel": true,
"self": 0.0005367569999634725
},
"communicator.exchange": {
"total": 0.05834507199995187,
"count": 1,
"is_parallel": true,
"self": 0.05834507199995187
},
"steps_from_proto": {
"total": 0.00198694699997759,
"count": 1,
"is_parallel": true,
"self": 0.0005589420001115286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014280049998660616,
"count": 8,
"is_parallel": true,
"self": 0.0014280049998660616
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1570.806811531003,
"count": 63220,
"is_parallel": true,
"self": 44.23472342996229,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.16921319200253,
"count": 63220,
"is_parallel": true,
"self": 29.16921319200253
},
"communicator.exchange": {
"total": 1375.7555031360425,
"count": 63220,
"is_parallel": true,
"self": 1375.7555031360425
},
"steps_from_proto": {
"total": 121.64737177299537,
"count": 63220,
"is_parallel": true,
"self": 26.428034123023735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 95.21933764997164,
"count": 505760,
"is_parallel": true,
"self": 95.21933764997164
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1062.5516132201005,
"count": 63221,
"self": 4.061315938058215,
"children": {
"process_trajectory": {
"total": 155.63963185304488,
"count": 63221,
"self": 155.30712347704525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.33250837599962324,
"count": 2,
"self": 0.33250837599962324
}
}
},
"_update_policy": {
"total": 902.8506654289974,
"count": 427,
"self": 357.3207624990075,
"children": {
"TorchPPOOptimizer.update": {
"total": 545.5299029299899,
"count": 22848,
"self": 545.5299029299899
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3409999155555852e-06,
"count": 1,
"self": 1.3409999155555852e-06
},
"TrainerController._save_models": {
"total": 0.08202420999987226,
"count": 1,
"self": 0.0019679239999277343,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08005628599994452,
"count": 1,
"self": 0.08005628599994452
}
}
}
}
}
}
}