{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 0.5952093601226807,
      "min": 0.5936236381530762,
      "max": 1.4108970165252686,
      "count": 33
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 17732.4765625,
      "min": 17732.4765625,
      "max": 42800.97265625,
      "count": 33
    },
    "Pyramids.Step.mean": {
      "value": 989959.0,
      "min": 29952.0,
      "max": 989959.0,
      "count": 33
    },
    "Pyramids.Step.sum": {
      "value": 989959.0,
      "min": 29952.0,
      "max": 989959.0,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": 0.4324115514755249,
      "min": -0.0915769711136818,
      "max": 0.45736128091812134,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": 116.31871032714844,
      "min": -22.070049285888672,
      "max": 123.487548828125,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": -0.019235147163271904,
      "min": -0.019235147163271904,
      "max": 0.49983710050582886,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": -5.174254417419434,
      "min": -5.174254417419434,
      "max": 118.46139526367188,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.06864761246925889,
      "min": 0.0662840637118539,
      "max": 0.07300515278798736,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.9610665745696245,
      "min": 0.4698178067840348,
      "max": 1.0950772918198104,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.012593749437479153,
      "min": 0.0008008037610477074,
      "max": 0.013472014001481944,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.17631249212470815,
      "min": 0.006406430088381659,
      "max": 0.18860819602074722,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.287054713871425e-06,
      "min": 7.287054713871425e-06,
      "max": 0.00029515063018788575,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.00010201876599419995,
      "min": 0.00010201876599419995,
      "max": 0.0035073728308757995,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.10242898571428571,
      "min": 0.10242898571428571,
      "max": 0.19838354285714285,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.4340058,
      "min": 1.3691136000000002,
      "max": 2.5691241999999996,
      "count": 33
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.0002526556728571428,
      "min": 0.0002526556728571428,
      "max": 0.00983851593142857,
      "count": 33
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.003537179419999999,
      "min": 0.003537179419999999,
      "max": 0.11693550758000001,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.007976380176842213,
      "min": 0.007976380176842213,
      "max": 0.4858739376068115,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.11166931688785553,
      "min": 0.11166931688785553,
      "max": 3.4011175632476807,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 457.0833333333333,
      "min": 377.4736842105263,
      "max": 999.0,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 27425.0,
      "min": 15984.0,
      "max": 34374.0,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": 1.4503796380960334,
      "min": -1.0000000521540642,
      "max": 1.4645868200612695,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": 85.57239864766598,
      "min": -32.000001668930054,
      "max": 111.30859832465649,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": 1.4503796380960334,
      "min": -1.0000000521540642,
      "max": 1.4645868200612695,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": 85.57239864766598,
      "min": -32.000001668930054,
      "max": 111.30859832465649,
      "count": 33
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 0.037360298733416376,
      "min": 0.03447665761862146,
      "max": 9.467917624861002,
      "count": 33
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 2.204257625271566,
      "min": 2.204257625271566,
      "max": 151.48668199777603,
      "count": 33
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1739102957",
    "python_version": "3.10.12 (main, Jan 20 2025, 19:07:01) [GCC 13.3.0]",
    "command_line_arguments": "/home/fabiofava98/Desktop_Ubuntu/AI/HuggingFace_RL_Course/venv_unit5/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
    "mlagents_version": "1.2.0.dev0",
    "mlagents_envs_version": "1.2.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.6.0+cu124",
    "numpy_version": "1.23.5",
    "end_time_seconds": "1739104021"
  },
  "total": 1028.2208814229998,
  "count": 1,
  "self": 0.31956409200029157,
  "children": {
    "run_training.setup": {
      "total": 0.013821429999552493,
      "count": 1,
      "self": 0.013821429999552493
    },
    "TrainerController.start_learning": {
      "total": 1027.887495901,
      "count": 1,
      "self": 0.8814325980292779,
      "children": {
        "TrainerController._reset_env": {
          "total": 2.4636900090008567,
          "count": 1,
          "self": 2.4636900090008567
        },
        "TrainerController.advance": {
          "total": 1024.4874424929694,
          "count": 63684,
          "self": 0.8643212408751424,
          "children": {
            "env_step": {
              "total": 589.5078557270144,
              "count": 63684,
              "self": 534.0079630019045,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 54.986202329122534,
                  "count": 63684,
                  "self": 2.561091439239135,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 52.4251108898834,
                      "count": 62569,
                      "self": 52.4251108898834
                    }
                  }
                },
                "workers": {
                  "total": 0.5136903959873962,
                  "count": 63684,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 1025.6970519850302,
                      "count": 63684,
                      "is_parallel": true,
                      "self": 554.6811108889551,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.0015305120004995842,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.000549878000128956,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.0009806340003706282,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.0009806340003706282
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.022198167000169633,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.00021658199966623215,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0003364809999766294,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0003364809999766294
                                },
                                "communicator.exchange": {
                                  "total": 0.020879351000075985,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.020879351000075985
                                },
                                "steps_from_proto": {
                                  "total": 0.0007657530004507862,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00020046800091222394,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0005652849995385623,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0005652849995385623
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 471.01594109607504,
                          "count": 63683,
                          "is_parallel": true,
                          "self": 11.167894433190668,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 9.606073139018008,
                              "count": 63683,
                              "is_parallel": true,
                              "self": 9.606073139018008
                            },
                            "communicator.exchange": {
                              "total": 413.9840255259578,
                              "count": 63683,
                              "is_parallel": true,
                              "self": 413.9840255259578
                            },
                            "steps_from_proto": {
                              "total": 36.25794799790856,
                              "count": 63683,
                              "is_parallel": true,
                              "self": 8.6795996435103,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 27.57834835439826,
                                  "count": 509464,
                                  "is_parallel": true,
                                  "self": 27.57834835439826
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 434.11526552507985,
              "count": 63684,
              "self": 1.5193886010838469,
              "children": {
                "process_trajectory": {
                  "total": 73.94152356099494,
                  "count": 63684,
                  "self": 73.81471225399491,
                  "children": {
                    "RLTrainer._checkpoint": {
                      "total": 0.1268113070000254,
                      "count": 2,
                      "self": 0.1268113070000254
                    }
                  }
                },
                "_update_policy": {
                  "total": 358.65435336300106,
                  "count": 439,
                  "self": 198.41289354699074,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 160.24145981601032,
                      "count": 22824,
                      "self": 160.24145981601032
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 7.329999789362773e-07,
          "count": 1,
          "self": 7.329999789362773e-07
        },
        "TrainerController._save_models": {
          "total": 0.054930068000430765,
          "count": 1,
          "self": 0.0025450580005781376,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.05238500999985263,
              "count": 1,
              "self": 0.05238500999985263
            }
          }
        }
      }
    }
  }
}