{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38289639353752136,
"min": 0.38289639353752136,
"max": 1.4671725034713745,
"count": 30
},
"Pyramids.Policy.Entropy.sum": {
"value": 11468.5126953125,
"min": 11468.5126953125,
"max": 44508.14453125,
"count": 30
},
"Pyramids.Step.mean": {
"value": 899937.0,
"min": 29952.0,
"max": 899937.0,
"count": 30
},
"Pyramids.Step.sum": {
"value": 899937.0,
"min": 29952.0,
"max": 899937.0,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6015340685844421,
"min": -0.09834024310112,
"max": 0.6015340685844421,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.8280029296875,
"min": -23.60165786743164,
"max": 167.8280029296875,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02776433154940605,
"min": -0.008535606786608696,
"max": 0.33708858489990234,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.746248245239258,
"min": -2.2192578315734863,
"max": 79.8899917602539,
"count": 30
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06813492316803052,
"min": 0.06533361311145297,
"max": 0.0739464787156543,
"count": 30
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9538889243524273,
"min": 0.49966837179123813,
"max": 1.0550514084575744,
"count": 30
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01646403969923903,
"min": 0.0010248820948676816,
"max": 0.01757756372500721,
"count": 30
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2304965557893464,
"min": 0.010696856924519419,
"max": 0.24608589215010096,
"count": 30
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00021155762233794525,
"min": 0.00021155762233794525,
"max": 0.00029838354339596195,
"count": 30
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0029618067127312334,
"min": 0.0020691136102954665,
"max": 0.0040106773631075655,
"count": 30
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1705191976190476,
"min": 0.1705191976190476,
"max": 0.19946118095238097,
"count": 30
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.3872687666666663,
"min": 1.3897045333333333,
"max": 2.7975096666666666,
"count": 30
},
"Pyramids.Policy.Beta.mean": {
"value": 0.007054867842142857,
"min": 0.007054867842142857,
"max": 0.009946171977142856,
"count": 30
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09876814979,
"min": 0.06897148288,
"max": 0.13369555409,
"count": 30
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010570526123046875,
"min": 0.010072222910821438,
"max": 0.3360268473625183,
"count": 30
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14798736572265625,
"min": 0.14101111888885498,
"max": 2.3521878719329834,
"count": 30
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 318.39325842696627,
"min": 316.39,
"max": 999.0,
"count": 30
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28337.0,
"min": 15984.0,
"max": 32429.0,
"count": 30
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6566704388877207,
"min": -1.0000000521540642,
"max": 1.6566704388877207,
"count": 30
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.78699862211943,
"min": -32.000001668930054,
"max": 160.46799820661545,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6566704388877207,
"min": -1.0000000521540642,
"max": 1.6566704388877207,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.78699862211943,
"min": -32.000001668930054,
"max": 160.46799820661545,
"count": 30
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03513560092406723,
"min": 0.03464109231351634,
"max": 6.631076209247112,
"count": 30
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0919328813179163,
"min": 3.0393250471533975,
"max": 106.0972193479538,
"count": 30
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682278843",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682280792"
},
"total": 1948.8466804410002,
"count": 1,
"self": 10.005622212000162,
"children": {
"run_training.setup": {
"total": 0.11235217500006911,
"count": 1,
"self": 0.11235217500006911
},
"TrainerController.start_learning": {
"total": 1938.728706054,
"count": 1,
"self": 1.2574157109652333,
"children": {
"TrainerController._reset_env": {
"total": 4.263221717999841,
"count": 1,
"self": 4.263221717999841
},
"TrainerController.advance": {
"total": 1933.0527441040351,
"count": 57661,
"self": 1.2906165560157206,
"children": {
"env_step": {
"total": 1380.127794013002,
"count": 57661,
"self": 1285.5665758609894,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.80935422198695,
"count": 57661,
"self": 4.192971117012803,
"children": {
"TorchPolicy.evaluate": {
"total": 89.61638310497415,
"count": 56496,
"self": 89.61638310497415
}
}
},
"workers": {
"total": 0.7518639300255927,
"count": 57660,
"self": 0.0,
"children": {
"worker_root": {
"total": 1934.5166208870041,
"count": 57660,
"is_parallel": true,
"self": 747.9451174280277,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016499139999268664,
"count": 1,
"is_parallel": true,
"self": 0.0005088329994578089,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011410810004690575,
"count": 8,
"is_parallel": true,
"self": 0.0011410810004690575
}
}
},
"UnityEnvironment.step": {
"total": 0.06977963200006343,
"count": 1,
"is_parallel": true,
"self": 0.000539827000011428,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000493629999937184,
"count": 1,
"is_parallel": true,
"self": 0.000493629999937184
},
"communicator.exchange": {
"total": 0.06716480500017497,
"count": 1,
"is_parallel": true,
"self": 0.06716480500017497
},
"steps_from_proto": {
"total": 0.0015813699999398523,
"count": 1,
"is_parallel": true,
"self": 0.000383953999971709,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011974159999681433,
"count": 8,
"is_parallel": true,
"self": 0.0011974159999681433
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1186.5715034589764,
"count": 57659,
"is_parallel": true,
"self": 29.070901100047422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.918828731956637,
"count": 57659,
"is_parallel": true,
"self": 20.918828731956637
},
"communicator.exchange": {
"total": 1052.4095796089778,
"count": 57659,
"is_parallel": true,
"self": 1052.4095796089778
},
"steps_from_proto": {
"total": 84.17219401799457,
"count": 57659,
"is_parallel": true,
"self": 17.768055549886867,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.4041384681077,
"count": 461272,
"is_parallel": true,
"self": 66.4041384681077
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 551.6343335350175,
"count": 57660,
"self": 2.3233068150352665,
"children": {
"process_trajectory": {
"total": 94.76383601298085,
"count": 57660,
"self": 94.65807533898078,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1057606740000665,
"count": 1,
"self": 0.1057606740000665
}
}
},
"_update_policy": {
"total": 454.5471907070014,
"count": 401,
"self": 292.10445572296453,
"children": {
"TorchPPOOptimizer.update": {
"total": 162.44273498403686,
"count": 20595,
"self": 162.44273498403686
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6549997781112324e-06,
"count": 1,
"self": 1.6549997781112324e-06
},
"TrainerController._save_models": {
"total": 0.1553228660000059,
"count": 1,
"self": 0.001977660000193282,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1533452059998126,
"count": 1,
"self": 0.1533452059998126
}
}
}
}
}
}
}