{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.48057231307029724,
"min": 0.48057231307029724,
"max": 1.4081648588180542,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14409.48046875,
"min": 14409.48046875,
"max": 42718.08984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3426908850669861,
"min": -0.09782693535089493,
"max": 0.3610565662384033,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 89.44232177734375,
"min": -23.77194595336914,
"max": 94.64352416992188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013578117825090885,
"min": -0.014480626210570335,
"max": 0.45009177923202515,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.543888807296753,
"min": -3.79392409324646,
"max": 106.6717529296875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06758718178661328,
"min": 0.06598928218028516,
"max": 0.07406140120649037,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.946220545012586,
"min": 0.5020416006318764,
"max": 1.0781023802119307,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012084584167524382,
"min": 0.0003187637627335864,
"max": 0.013872996639681742,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16918417834534136,
"min": 0.00446269267827021,
"max": 0.20809494959522612,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3980761054357125e-06,
"min": 7.3980761054357125e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010357306547609997,
"min": 0.00010357306547609997,
"max": 0.0036089292970235996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246599285714286,
"min": 0.10246599285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345239,
"min": 1.3886848,
"max": 2.5699463,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025635268642857143,
"min": 0.00025635268642857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035889376099999997,
"min": 0.0035889376099999997,
"max": 0.12030734235999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010087795555591583,
"min": 0.009743563830852509,
"max": 0.3417781889438629,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14122913777828217,
"min": 0.13640989363193512,
"max": 2.3924472332000732,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 502.33870967741933,
"min": 444.171875,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31145.0,
"min": 15984.0,
"max": 32946.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1427451320473225,
"min": -1.0000000521540642,
"max": 1.30573434534017,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 70.850198186934,
"min": -31.9904016405344,
"max": 83.56699810177088,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1427451320473225,
"min": -1.0000000521540642,
"max": 1.30573434534017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 70.850198186934,
"min": -31.9904016405344,
"max": 83.56699810177088,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.053023557227964875,
"min": 0.04822272442341331,
"max": 6.842718632891774,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.287460548133822,
"min": 2.9898089142516255,
"max": 109.48349812626839,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699072135",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699074473"
},
"total": 2338.375828478,
"count": 1,
"self": 0.47696013299992046,
"children": {
"run_training.setup": {
"total": 0.043896528000004764,
"count": 1,
"self": 0.043896528000004764
},
"TrainerController.start_learning": {
"total": 2337.854971817,
"count": 1,
"self": 1.442077876996791,
"children": {
"TrainerController._reset_env": {
"total": 4.335847167999873,
"count": 1,
"self": 4.335847167999873
},
"TrainerController.advance": {
"total": 2331.9950591100037,
"count": 63474,
"self": 1.5405124929034173,
"children": {
"env_step": {
"total": 1675.2842744320778,
"count": 63474,
"self": 1534.7119378370444,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.6901400270085,
"count": 63474,
"self": 4.886391546925552,
"children": {
"TorchPolicy.evaluate": {
"total": 134.80374848008296,
"count": 62575,
"self": 134.80374848008296
}
}
},
"workers": {
"total": 0.8821965680249377,
"count": 63474,
"self": 0.0,
"children": {
"worker_root": {
"total": 2332.7033661261153,
"count": 63474,
"is_parallel": true,
"self": 925.8737740890815,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00214249300006486,
"count": 1,
"is_parallel": true,
"self": 0.0006773010004508251,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001465191999614035,
"count": 8,
"is_parallel": true,
"self": 0.001465191999614035
}
}
},
"UnityEnvironment.step": {
"total": 0.06091771699993842,
"count": 1,
"is_parallel": true,
"self": 0.000680489999922429,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.004698485000062647,
"count": 1,
"is_parallel": true,
"self": 0.004698485000062647
},
"communicator.exchange": {
"total": 0.053454548999980034,
"count": 1,
"is_parallel": true,
"self": 0.053454548999980034
},
"steps_from_proto": {
"total": 0.002084192999973311,
"count": 1,
"is_parallel": true,
"self": 0.0004019480006718368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016822449993014743,
"count": 8,
"is_parallel": true,
"self": 0.0016822449993014743
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1406.8295920370338,
"count": 63473,
"is_parallel": true,
"self": 35.213328616073795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.370593887985706,
"count": 63473,
"is_parallel": true,
"self": 26.370593887985706
},
"communicator.exchange": {
"total": 1240.4803654880038,
"count": 63473,
"is_parallel": true,
"self": 1240.4803654880038
},
"steps_from_proto": {
"total": 104.76530404497043,
"count": 63473,
"is_parallel": true,
"self": 21.257314394055356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.50798965091508,
"count": 507784,
"is_parallel": true,
"self": 83.50798965091508
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.1702721850224,
"count": 63474,
"self": 2.665763442035768,
"children": {
"process_trajectory": {
"total": 129.7462589409788,
"count": 63474,
"self": 129.56458953597848,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1816694050003207,
"count": 2,
"self": 0.1816694050003207
}
}
},
"_update_policy": {
"total": 522.7582498020079,
"count": 453,
"self": 312.37700509702154,
"children": {
"TorchPPOOptimizer.update": {
"total": 210.38124470498633,
"count": 22839,
"self": 210.38124470498633
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6979993233690038e-06,
"count": 1,
"self": 1.6979993233690038e-06
},
"TrainerController._save_models": {
"total": 0.08198596400052338,
"count": 1,
"self": 0.0014855210001769592,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08050044300034642,
"count": 1,
"self": 0.08050044300034642
}
}
}
}
}
}
}