{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.860474705696106,
"min": 0.8507747054100037,
"max": 1.4599720239639282,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 25772.9375,
"min": 25604.916015625,
"max": 44289.7109375,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479921.0,
"min": 29936.0,
"max": 479921.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479921.0,
"min": 29936.0,
"max": 479921.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08633483946323395,
"min": -0.0933464765548706,
"max": 0.14759346842765808,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -20.720361709594727,
"min": -22.49650001525879,
"max": 34.979652404785156,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02134035900235176,
"min": 0.020866960287094116,
"max": 0.38976967334747314,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.121685981750488,
"min": 5.028937339782715,
"max": 92.37541198730469,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07045765900947591,
"min": 0.06456249554104121,
"max": 0.07374542261499072,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9864072261326629,
"min": 0.5038141946431755,
"max": 1.0002483224714387,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0014725677149772993,
"min": 0.0001381654980486046,
"max": 0.005452014365306383,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.02061594800968219,
"min": 0.0019343169726804643,
"max": 0.038164100557144684,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4203264326985714e-05,
"min": 1.4203264326985714e-05,
"max": 0.00019353417466148572,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001988457005778,
"min": 0.0001988457005778,
"max": 0.00221242019379,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10710158571428574,
"min": 0.10710158571428574,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4994222000000004,
"min": 1.3773696000000002,
"max": 2.4062099999999997,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007194484128571428,
"min": 0.0007194484128571428,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.01007227778,
"min": 0.01007227778,
"max": 0.11064037899999998,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.019012952223420143,
"min": 0.019012952223420143,
"max": 0.386423259973526,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2661813199520111,
"min": 0.2633576989173889,
"max": 2.704962730407715,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 959.8064516129032,
"min": 948.9142857142857,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29754.0,
"min": 16863.0,
"max": 33212.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6924467186133066,
"min": -0.999706718325615,
"max": -0.6220933807392915,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -20.7734015583992,
"min": -30.254201635718346,
"max": -14.879000931978226,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6924467186133066,
"min": -0.999706718325615,
"max": -0.6220933807392915,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -20.7734015583992,
"min": -30.254201635718346,
"max": -14.879000931978226,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1911732585169375,
"min": 0.1911732585169375,
"max": 8.418042814030366,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.735197755508125,
"min": 5.735197755508125,
"max": 143.10672783851624,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742671765",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742672750"
},
"total": 984.9606637719999,
"count": 1,
"self": 0.3724853539997639,
"children": {
"run_training.setup": {
"total": 0.020612013000118168,
"count": 1,
"self": 0.020612013000118168
},
"TrainerController.start_learning": {
"total": 984.567566405,
"count": 1,
"self": 0.5336774200359287,
"children": {
"TrainerController._reset_env": {
"total": 2.1383214720003707,
"count": 1,
"self": 2.1383214720003707
},
"TrainerController.advance": {
"total": 981.7997582709636,
"count": 31557,
"self": 0.5612103280886913,
"children": {
"env_step": {
"total": 529.4930222219691,
"count": 31557,
"self": 461.6148163758962,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.53283688108058,
"count": 31557,
"self": 2.0855110980969584,
"children": {
"TorchPolicy.evaluate": {
"total": 65.44732578298363,
"count": 31315,
"self": 65.44732578298363
}
}
},
"workers": {
"total": 0.3453689649923035,
"count": 31557,
"self": 0.0,
"children": {
"worker_root": {
"total": 983.3628534069717,
"count": 31557,
"is_parallel": true,
"self": 566.8870407599529,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002041222999650927,
"count": 1,
"is_parallel": true,
"self": 0.000656636999337934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013845860003129928,
"count": 8,
"is_parallel": true,
"self": 0.0013845860003129928
}
}
},
"UnityEnvironment.step": {
"total": 0.034134743999857164,
"count": 1,
"is_parallel": true,
"self": 0.0003203910005140642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030481699968731846,
"count": 1,
"is_parallel": true,
"self": 0.00030481699968731846
},
"communicator.exchange": {
"total": 0.03235170799962361,
"count": 1,
"is_parallel": true,
"self": 0.03235170799962361
},
"steps_from_proto": {
"total": 0.0011578280000321683,
"count": 1,
"is_parallel": true,
"self": 0.0002679290000742185,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008898989999579499,
"count": 8,
"is_parallel": true,
"self": 0.0008898989999579499
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 416.47581264701876,
"count": 31556,
"is_parallel": true,
"self": 11.627004572065289,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.100347243016586,
"count": 31556,
"is_parallel": true,
"self": 8.100347243016586
},
"communicator.exchange": {
"total": 360.65947089496194,
"count": 31556,
"is_parallel": true,
"self": 360.65947089496194
},
"steps_from_proto": {
"total": 36.08898993697494,
"count": 31556,
"is_parallel": true,
"self": 7.383609667007931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.70538026996701,
"count": 252448,
"is_parallel": true,
"self": 28.70538026996701
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 451.7455257209058,
"count": 31557,
"self": 0.9348974858730799,
"children": {
"process_trajectory": {
"total": 53.326847987029396,
"count": 31557,
"self": 53.202704742029255,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12414324500014118,
"count": 1,
"self": 0.12414324500014118
}
}
},
"_update_policy": {
"total": 397.4837802480033,
"count": 217,
"self": 217.0145857399798,
"children": {
"TorchPPOOptimizer.update": {
"total": 180.46919450802352,
"count": 19065,
"self": 180.46919450802352
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0939997991954442e-06,
"count": 1,
"self": 1.0939997991954442e-06
},
"TrainerController._save_models": {
"total": 0.09580814800028747,
"count": 1,
"self": 0.0014969140001994674,
"children": {
"RLTrainer._checkpoint": {
"total": 0.094311234000088,
"count": 1,
"self": 0.094311234000088
}
}
}
}
}
}
}