{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11772998422384262,
"min": 0.06954898685216904,
"max": 1.324138879776001,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 3526.24853515625,
"min": 2083.13134765625,
"max": 40169.078125,
"count": 7
},
"Pyramids.Step.mean": {
"value": 209896.0,
"min": 29952.0,
"max": 209896.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 209896.0,
"min": 29952.0,
"max": 209896.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10627956688404083,
"min": -0.40380460023880005,
"max": -0.10466883331537247,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -25.507095336914062,
"min": -97.31690979003906,
"max": -25.225189208984375,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.4675624370574951,
"min": 1.4675624370574951,
"max": 5.764090061187744,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 352.2149963378906,
"min": 352.2149963378906,
"max": 1389.145751953125,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.019694466279166885,
"min": 0.015178072399942689,
"max": 0.027342300717016713,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.2560280616291695,
"min": 0.18213686879931226,
"max": 0.2560280616291695,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004094986796267649,
"min": 0.004094986796267649,
"max": 0.41671686099162175,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05323482835147944,
"min": 0.05323482835147944,
"max": 2.9170180269413524,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002418489424606154,
"min": 0.0002418489424606154,
"max": 0.00029515063018788575,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.003144036251988,
"min": 0.0020660544113152,
"max": 0.0033771241742919997,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.18061630769230771,
"min": 0.18061630769230771,
"max": 0.19838354285714285,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.348012,
"min": 1.3886848,
"max": 2.425708,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.008063569138461539,
"min": 0.008063569138461539,
"max": 0.00983851593142857,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.10482639880000001,
"min": 0.06886961152,
"max": 0.11258822920000001,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 1.3876062631607056,
"min": 1.3876062631607056,
"max": 6.680552959442139,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 18.038881301879883,
"min": 17.73118782043457,
"max": 46.76387023925781,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 973.875,
"max": 999.0,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30969.0,
"min": 15984.0,
"max": 32734.0,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9998968262826243,
"min": -1.0000000521540642,
"max": -0.7968188058584929,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.996801614761353,
"min": -31.99640166759491,
"max": -16.000000834465027,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9998968262826243,
"min": -1.0000000521540642,
"max": -0.7968188058584929,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.996801614761353,
"min": -31.99640166759491,
"max": -16.000000834465027,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 13.922048792723686,
"min": 13.922048792723686,
"max": 77.73568969242501,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 431.5835125744343,
"min": 431.5835125744343,
"max": 2565.277759850025,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701001801",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701002328"
},
"total": 527.125634517,
"count": 1,
"self": 0.4865877850002107,
"children": {
"run_training.setup": {
"total": 0.05713905999982671,
"count": 1,
"self": 0.05713905999982671
},
"TrainerController.start_learning": {
"total": 526.5819076719999,
"count": 1,
"self": 0.46664083794030375,
"children": {
"TrainerController._reset_env": {
"total": 4.376451669999824,
"count": 1,
"self": 4.376451669999824
},
"TrainerController.advance": {
"total": 521.7375176260598,
"count": 14321,
"self": 0.48261267304269495,
"children": {
"env_step": {
"total": 409.8400084350201,
"count": 14321,
"self": 365.1203195511034,
"children": {
"SubprocessEnvManager._take_step": {
"total": 44.43252864797887,
"count": 14321,
"self": 1.4356362789935702,
"children": {
"TorchPolicy.evaluate": {
"total": 42.9968923689853,
"count": 14287,
"self": 42.9968923689853
}
}
},
"workers": {
"total": 0.2871602359377903,
"count": 14320,
"self": 0.0,
"children": {
"worker_root": {
"total": 525.276098807984,
"count": 14320,
"is_parallel": true,
"self": 196.39681395902744,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002671348999683687,
"count": 1,
"is_parallel": true,
"self": 0.0007470840000678436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019242649996158434,
"count": 8,
"is_parallel": true,
"self": 0.0019242649996158434
}
}
},
"UnityEnvironment.step": {
"total": 0.12041197200005627,
"count": 1,
"is_parallel": true,
"self": 0.0006894470006955089,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005085959996904421,
"count": 1,
"is_parallel": true,
"self": 0.0005085959996904421
},
"communicator.exchange": {
"total": 0.11724589100003868,
"count": 1,
"is_parallel": true,
"self": 0.11724589100003868
},
"steps_from_proto": {
"total": 0.0019680379996316333,
"count": 1,
"is_parallel": true,
"self": 0.00043672499987224,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015313129997593933,
"count": 8,
"is_parallel": true,
"self": 0.0015313129997593933
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 328.87928484895656,
"count": 14319,
"is_parallel": true,
"self": 9.117802288994426,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.496996475000742,
"count": 14319,
"is_parallel": true,
"self": 6.496996475000742
},
"communicator.exchange": {
"total": 285.87748170098484,
"count": 14319,
"is_parallel": true,
"self": 285.87748170098484
},
"steps_from_proto": {
"total": 27.387004383976546,
"count": 14319,
"is_parallel": true,
"self": 5.848195669905181,
"children": {
"_process_rank_one_or_two_observation": {
"total": 21.538808714071365,
"count": 114552,
"is_parallel": true,
"self": 21.538808714071365
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 111.41489651799702,
"count": 14320,
"self": 0.6273751060043651,
"children": {
"process_trajectory": {
"total": 33.43045565899001,
"count": 14320,
"self": 33.43045565899001
},
"_update_policy": {
"total": 77.35706575300264,
"count": 86,
"self": 60.008939288999954,
"children": {
"TorchPPOOptimizer.update": {
"total": 17.34812646400269,
"count": 621,
"self": 17.34812646400269
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4990000636316836e-06,
"count": 1,
"self": 1.4990000636316836e-06
},
"TrainerController._save_models": {
"total": 0.0012960389999534527,
"count": 1,
"self": 2.818100028889603e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0012678579996645567,
"count": 1,
"self": 0.0012678579996645567
}
}
}
}
}
}
}