{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4138519763946533,
"min": 0.3812945485115051,
"max": 1.4703525304794312,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12355.96484375,
"min": 11448.685546875,
"max": 44604.61328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29935.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29935.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4712086617946625,
"min": -0.10479633510112762,
"max": 0.48208585381507874,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.69754791259766,
"min": -24.83673095703125,
"max": 132.09152221679688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0022070547565817833,
"min": -0.028307657688856125,
"max": 0.5397907495498657,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.5981118679046631,
"min": -7.473221778869629,
"max": 127.93040466308594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06894058265256323,
"min": 0.06314746864306651,
"max": 0.07290178474833771,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9651681571358852,
"min": 0.5103124932383639,
"max": 1.0622252809698693,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01469826077146365,
"min": 0.00035959729742020367,
"max": 0.021611026998027517,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2057756508004911,
"min": 0.003955570271622241,
"max": 0.20701315502810758,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.377633255107145e-06,
"min": 7.377633255107145e-06,
"max": 0.0002952336444459286,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010328686557150003,
"min": 0.00010328686557150003,
"max": 0.0035071028309657994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245917857142858,
"min": 0.10245917857142858,
"max": 0.19841121428571426,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344285,
"min": 1.3888785,
"max": 2.5690342,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002556719392857143,
"min": 0.0002556719392857143,
"max": 0.009841280307142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003579407150000001,
"min": 0.003579407150000001,
"max": 0.11692651658,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012044703587889671,
"min": 0.012044703587889671,
"max": 0.5879420042037964,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1686258465051651,
"min": 0.1686258465051651,
"max": 4.115593910217285,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 404.0133333333333,
"min": 400.22972972972974,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30301.0,
"min": 16734.0,
"max": 33236.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4359519854187965,
"min": -0.9999677942645165,
"max": 1.5186594361187638,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 107.69639890640974,
"min": -30.999001622200012,
"max": 112.38079827278852,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4359519854187965,
"min": -0.9999677942645165,
"max": 1.5186594361187638,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 107.69639890640974,
"min": -30.999001622200012,
"max": 112.38079827278852,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0507132926544485,
"min": 0.0507132926544485,
"max": 11.913546909304227,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8034969490836374,
"min": 3.445794581217342,
"max": 202.53029745817184,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683160661",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683162873"
},
"total": 2211.989206421,
"count": 1,
"self": 0.54561238199949,
"children": {
"run_training.setup": {
"total": 0.05845111899998301,
"count": 1,
"self": 0.05845111899998301
},
"TrainerController.start_learning": {
"total": 2211.3851429200004,
"count": 1,
"self": 1.3628680290721604,
"children": {
"TrainerController._reset_env": {
"total": 3.9207027200000084,
"count": 1,
"self": 3.9207027200000084
},
"TrainerController.advance": {
"total": 2205.991675693928,
"count": 63652,
"self": 1.433937643882473,
"children": {
"env_step": {
"total": 1574.132112423082,
"count": 63652,
"self": 1463.6317080490628,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.68685011094976,
"count": 63652,
"self": 4.886190178885272,
"children": {
"TorchPolicy.evaluate": {
"total": 104.80065993206449,
"count": 62558,
"self": 104.80065993206449
}
}
},
"workers": {
"total": 0.8135542630693635,
"count": 63652,
"self": 0.0,
"children": {
"worker_root": {
"total": 2206.2379859579787,
"count": 63652,
"is_parallel": true,
"self": 858.0656854670376,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016912280000269675,
"count": 1,
"is_parallel": true,
"self": 0.00047362600025735446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001217601999769613,
"count": 8,
"is_parallel": true,
"self": 0.001217601999769613
}
}
},
"UnityEnvironment.step": {
"total": 0.05178520000004028,
"count": 1,
"is_parallel": true,
"self": 0.0005184849997021956,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048198800004684017,
"count": 1,
"is_parallel": true,
"self": 0.00048198800004684017
},
"communicator.exchange": {
"total": 0.04909257000008438,
"count": 1,
"is_parallel": true,
"self": 0.04909257000008438
},
"steps_from_proto": {
"total": 0.001692157000206862,
"count": 1,
"is_parallel": true,
"self": 0.00035349399968254147,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013386630005243205,
"count": 8,
"is_parallel": true,
"self": 0.0013386630005243205
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1348.172300490941,
"count": 63651,
"is_parallel": true,
"self": 31.35869364489463,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.820554875059315,
"count": 63651,
"is_parallel": true,
"self": 23.820554875059315
},
"communicator.exchange": {
"total": 1192.2163464650196,
"count": 63651,
"is_parallel": true,
"self": 1192.2163464650196
},
"steps_from_proto": {
"total": 100.77670550596758,
"count": 63651,
"is_parallel": true,
"self": 20.71778217890369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.0589233270639,
"count": 509208,
"is_parallel": true,
"self": 80.0589233270639
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 630.4256256269634,
"count": 63652,
"self": 2.520469835925951,
"children": {
"process_trajectory": {
"total": 109.40068988803478,
"count": 63652,
"self": 109.13656169603473,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2641281920000438,
"count": 2,
"self": 0.2641281920000438
}
}
},
"_update_policy": {
"total": 518.5044659030027,
"count": 453,
"self": 333.98100324501706,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.52346265798565,
"count": 22806,
"self": 184.52346265798565
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.440000232832972e-06,
"count": 1,
"self": 1.440000232832972e-06
},
"TrainerController._save_models": {
"total": 0.10989503700011483,
"count": 1,
"self": 0.0015170299998317205,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1083780070002831,
"count": 1,
"self": 0.1083780070002831
}
}
}
}
}
}
}