{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45914390683174133,
"min": 0.45914390683174133,
"max": 1.4588837623596191,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13855.126953125,
"min": 13855.126953125,
"max": 44256.69921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29878.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29878.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.17446760833263397,
"min": -0.10128428786993027,
"max": 0.27677589654922485,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 43.44243621826172,
"min": -24.004375457763672,
"max": 71.4081802368164,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.24335265159606934,
"min": -0.011399206705391407,
"max": 0.4013155698776245,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 60.594810485839844,
"min": -2.929596185684204,
"max": 95.1117935180664,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06882866969958525,
"min": 0.06580035236188916,
"max": 0.0737672676221014,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9636013757941934,
"min": 0.5901381409768112,
"max": 1.072544629959499,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01917452075100473,
"min": 0.0009726821677556754,
"max": 0.030523146569054003,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.26844329051406624,
"min": 0.013617550348579456,
"max": 0.42732405196675605,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.59406175439286e-06,
"min": 7.59406175439286e-06,
"max": 0.0002950659391446875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010631686456150003,
"min": 0.00010631686456150003,
"max": 0.0036336544887818996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253132142857144,
"min": 0.10253132142857144,
"max": 0.1983553125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354385,
"min": 1.4354385,
"max": 2.6112181000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002628790107142858,
"min": 0.0002628790107142858,
"max": 0.00983569571875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036803061500000006,
"min": 0.0036803061500000006,
"max": 0.12114068819,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.019837597385048866,
"min": 0.019457383081316948,
"max": 0.6277450323104858,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.27772635221481323,
"min": 0.272403359413147,
"max": 5.021960258483887,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 657.6170212765958,
"min": 583.8333333333334,
"max": 996.625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30908.0,
"min": 16932.0,
"max": 32519.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6833956139567106,
"min": -0.8725375514477491,
"max": 1.0456851512469627,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 31.436198242008686,
"min": -27.921201646327972,
"max": 56.46699816733599,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6833956139567106,
"min": -0.8725375514477491,
"max": 1.0456851512469627,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 31.436198242008686,
"min": -27.921201646327972,
"max": 56.46699816733599,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1352590031380279,
"min": 0.11835112364928203,
"max": 12.151342125402557,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.2219141443492845,
"min": 5.8914016170892864,
"max": 218.72415825724602,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740103325",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740105406"
},
"total": 2080.555144395,
"count": 1,
"self": 0.47649820300011925,
"children": {
"run_training.setup": {
"total": 0.019465487999923425,
"count": 1,
"self": 0.019465487999923425
},
"TrainerController.start_learning": {
"total": 2080.059180704,
"count": 1,
"self": 1.329447091014572,
"children": {
"TrainerController._reset_env": {
"total": 2.074724477000018,
"count": 1,
"self": 2.074724477000018
},
"TrainerController.advance": {
"total": 2076.5658582009855,
"count": 63387,
"self": 1.3520690978598395,
"children": {
"env_step": {
"total": 1412.3077175301275,
"count": 63387,
"self": 1260.557045361144,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.94413542602751,
"count": 63387,
"self": 4.6978988751022825,
"children": {
"TorchPolicy.evaluate": {
"total": 146.24623655092523,
"count": 62553,
"self": 146.24623655092523
}
}
},
"workers": {
"total": 0.806536742956041,
"count": 63387,
"self": 0.0,
"children": {
"worker_root": {
"total": 2075.3051750059312,
"count": 63387,
"is_parallel": true,
"self": 924.8460338049047,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00240218800013281,
"count": 1,
"is_parallel": true,
"self": 0.0008844020003380137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015177859997947962,
"count": 8,
"is_parallel": true,
"self": 0.0015177859997947962
}
}
},
"UnityEnvironment.step": {
"total": 0.05056049899985737,
"count": 1,
"is_parallel": true,
"self": 0.0005624489997444471,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004633999999441585,
"count": 1,
"is_parallel": true,
"self": 0.0004633999999441585
},
"communicator.exchange": {
"total": 0.0478260020001926,
"count": 1,
"is_parallel": true,
"self": 0.0478260020001926
},
"steps_from_proto": {
"total": 0.001708647999976165,
"count": 1,
"is_parallel": true,
"self": 0.00034728899981928407,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001361359000156881,
"count": 8,
"is_parallel": true,
"self": 0.001361359000156881
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1150.4591412010266,
"count": 63386,
"is_parallel": true,
"self": 30.99715660019683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.161846702971843,
"count": 63386,
"is_parallel": true,
"self": 23.161846702971843
},
"communicator.exchange": {
"total": 1001.2106033889361,
"count": 63386,
"is_parallel": true,
"self": 1001.2106033889361
},
"steps_from_proto": {
"total": 95.08953450892182,
"count": 63386,
"is_parallel": true,
"self": 18.964110740967044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.12542376795477,
"count": 507088,
"is_parallel": true,
"self": 76.12542376795477
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 662.9060715729981,
"count": 63387,
"self": 2.552738361067668,
"children": {
"process_trajectory": {
"total": 124.17760097793735,
"count": 63387,
"self": 123.96648169693754,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21111928099981014,
"count": 2,
"self": 0.21111928099981014
}
}
},
"_update_policy": {
"total": 536.1757322339931,
"count": 454,
"self": 295.6663376089641,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.50939462502902,
"count": 22818,
"self": 240.50939462502902
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.759998308960348e-07,
"count": 1,
"self": 8.759998308960348e-07
},
"TrainerController._save_models": {
"total": 0.089150059000076,
"count": 1,
"self": 0.001307761000134633,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08784229799994137,
"count": 1,
"self": 0.08784229799994137
}
}
}
}
}
}
}