{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31149953603744507,
"min": 0.31149953603744507,
"max": 1.5054049491882324,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9240.322265625,
"min": 9240.322265625,
"max": 45667.96484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5664092302322388,
"min": -0.08925566077232361,
"max": 0.621580958366394,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 156.32894897460938,
"min": -21.421358108520508,
"max": 177.73406982421875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.01701180450618267,
"min": -0.01701180450618267,
"max": 0.2739257514476776,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.695258140563965,
"min": -4.695258140563965,
"max": 64.92040252685547,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06974016503185856,
"min": 0.06475338852185196,
"max": 0.07266470423909333,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0461024754778785,
"min": 0.49969235278502766,
"max": 1.0488053988297263,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01631732677651374,
"min": 0.00043589524960384817,
"max": 0.016319147356192668,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24475990164770606,
"min": 0.0034871619968307854,
"max": 0.24475990164770606,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.448017517360004e-06,
"min": 7.448017517360004e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011172026276040006,
"min": 0.00011172026276040006,
"max": 0.0033798326733891996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248264000000001,
"min": 0.10248264000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372396000000002,
"min": 1.3691136000000002,
"max": 2.5266108,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025801573600000015,
"min": 0.00025801573600000015,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003870236040000002,
"min": 0.003870236040000002,
"max": 0.11268841891999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006922535132616758,
"min": 0.006922535132616758,
"max": 0.3583793044090271,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10383802652359009,
"min": 0.09947595000267029,
"max": 2.508655071258545,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 327.96590909090907,
"min": 311.78,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28861.0,
"min": 15984.0,
"max": 32607.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6492977044121786,
"min": -1.0000000521540642,
"max": 1.686135400707523,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.1381979882717,
"min": -32.000001668930054,
"max": 162.8201985359192,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6492977044121786,
"min": -1.0000000521540642,
"max": 1.686135400707523,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.1381979882717,
"min": -32.000001668930054,
"max": 162.8201985359192,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02363273614826705,
"min": 0.023066475686012684,
"max": 7.290255395695567,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.0796807810475,
"min": 2.0796807810475,
"max": 116.64408633112907,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745405681",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1745409184"
},
"total": 3502.399443611,
"count": 1,
"self": 0.8616084939999382,
"children": {
"run_training.setup": {
"total": 0.0251284130000613,
"count": 1,
"self": 0.0251284130000613
},
"TrainerController.start_learning": {
"total": 3501.5127067040003,
"count": 1,
"self": 2.448035442998844,
"children": {
"TrainerController._reset_env": {
"total": 3.475324412999953,
"count": 1,
"self": 3.475324412999953
},
"TrainerController.advance": {
"total": 3495.496224503002,
"count": 64152,
"self": 2.527061338976182,
"children": {
"env_step": {
"total": 2360.910764074025,
"count": 64152,
"self": 2188.4673556669877,
"children": {
"SubprocessEnvManager._take_step": {
"total": 171.0563418580241,
"count": 64152,
"self": 7.500303754998072,
"children": {
"TorchPolicy.evaluate": {
"total": 163.55603810302603,
"count": 62557,
"self": 163.55603810302603
}
}
},
"workers": {
"total": 1.3870665490129568,
"count": 64152,
"self": 0.0,
"children": {
"worker_root": {
"total": 3493.6942606029643,
"count": 64152,
"is_parallel": true,
"self": 1483.4353514479599,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0061989729999822885,
"count": 1,
"is_parallel": true,
"self": 0.00469746299995677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015015100000255188,
"count": 8,
"is_parallel": true,
"self": 0.0015015100000255188
}
}
},
"UnityEnvironment.step": {
"total": 0.06917131499994866,
"count": 1,
"is_parallel": true,
"self": 0.0006635390000155894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000505793000002086,
"count": 1,
"is_parallel": true,
"self": 0.000505793000002086
},
"communicator.exchange": {
"total": 0.06587512799990236,
"count": 1,
"is_parallel": true,
"self": 0.06587512799990236
},
"steps_from_proto": {
"total": 0.002126855000028627,
"count": 1,
"is_parallel": true,
"self": 0.0004565320001574946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016703229998711322,
"count": 8,
"is_parallel": true,
"self": 0.0016703229998711322
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2010.2589091550044,
"count": 64151,
"is_parallel": true,
"self": 45.659849706987416,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.555541570010405,
"count": 64151,
"is_parallel": true,
"self": 31.555541570010405
},
"communicator.exchange": {
"total": 1804.8428072129761,
"count": 64151,
"is_parallel": true,
"self": 1804.8428072129761
},
"steps_from_proto": {
"total": 128.20071066503056,
"count": 64151,
"is_parallel": true,
"self": 28.07377664898354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.12693401604702,
"count": 513208,
"is_parallel": true,
"self": 100.12693401604702
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1132.0583990900009,
"count": 64152,
"self": 4.662982029972454,
"children": {
"process_trajectory": {
"total": 168.1223440760216,
"count": 64152,
"self": 167.68798442102138,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43435965500020757,
"count": 2,
"self": 0.43435965500020757
}
}
},
"_update_policy": {
"total": 959.2730729840068,
"count": 445,
"self": 371.6692763780454,
"children": {
"TorchPPOOptimizer.update": {
"total": 587.6037966059614,
"count": 22830,
"self": 587.6037966059614
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1419997463235632e-06,
"count": 1,
"self": 1.1419997463235632e-06
},
"TrainerController._save_models": {
"total": 0.0931212030000097,
"count": 1,
"self": 0.0023389979996863985,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0907822050003233,
"count": 1,
"self": 0.0907822050003233
}
}
}
}
}
}
}