{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.13707095384597778,
"min": 0.11631232500076294,
"max": 1.4741979837417603,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4153.79833984375,
"min": 3454.0107421875,
"max": 44721.26953125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999996.0,
"min": 29952.0,
"max": 2999996.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999996.0,
"min": 29952.0,
"max": 2999996.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7104477882385254,
"min": -0.13911187648773193,
"max": 0.734244167804718,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 209.58209228515625,
"min": -33.525962829589844,
"max": 214.3992919921875,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.004477185662835836,
"min": -0.07881761342287064,
"max": 0.3197421431541443,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3207697868347168,
"min": -21.359573364257812,
"max": 75.77888488769531,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06738436084508508,
"min": 0.06221204199846143,
"max": 0.07443871274180881,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0107654126762762,
"min": 0.5201176695916039,
"max": 1.0771193681353264,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01687649020307326,
"min": 7.539750213428352e-05,
"max": 0.01729370055926709,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2531473530460989,
"min": 0.0009801675277456858,
"max": 0.2564206355697631,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4963728345755552e-06,
"min": 1.4963728345755552e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.244559251863333e-05,
"min": 2.244559251863333e-05,
"max": 0.0037577675474109,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1004987577777778,
"min": 0.1004987577777778,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5074813666666669,
"min": 1.3962282666666668,
"max": 2.7525891000000007,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9825902000000005e-05,
"min": 5.9825902000000005e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008973885300000001,
"min": 0.0008973885300000001,
"max": 0.12528365109,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.002623029053211212,
"min": 0.0022417742293328047,
"max": 0.4448506534099579,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.03934543579816818,
"min": 0.03138484060764313,
"max": 3.113954544067383,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 253.21008403361344,
"min": 250.49107142857142,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30132.0,
"min": 15984.0,
"max": 33002.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6973299836119016,
"min": -1.0000000521540642,
"max": 1.7133405248861056,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 203.6795980334282,
"min": -32.000001668930054,
"max": 203.6795980334282,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6973299836119016,
"min": -1.0000000521540642,
"max": 1.7133405248861056,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 203.6795980334282,
"min": -32.000001668930054,
"max": 203.6795980334282,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.006939750808578537,
"min": 0.00658107829778081,
"max": 10.403893576003611,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 0.8327700970294245,
"min": 0.7083868069839809,
"max": 166.46229721605778,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745310503",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/home/ippc-zq/miniconda3/envs/hf_drl/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1745312942"
},
"total": 2439.341082864441,
"count": 1,
"self": 0.21803830564022064,
"children": {
"run_training.setup": {
"total": 0.015170782804489136,
"count": 1,
"self": 0.015170782804489136
},
"TrainerController.start_learning": {
"total": 2439.1078737759963,
"count": 1,
"self": 1.702069528400898,
"children": {
"TrainerController._reset_env": {
"total": 1.4264001185074449,
"count": 1,
"self": 1.4264001185074449
},
"TrainerController.advance": {
"total": 2435.92876159586,
"count": 193379,
"self": 1.7260248772799969,
"children": {
"env_step": {
"total": 1469.2598752379417,
"count": 193379,
"self": 1265.202232089825,
"children": {
"SubprocessEnvManager._take_step": {
"total": 202.9445812255144,
"count": 193379,
"self": 5.794512763619423,
"children": {
"TorchPolicy.evaluate": {
"total": 197.150068461895,
"count": 187566,
"self": 197.150068461895
}
}
},
"workers": {
"total": 1.1130619226023555,
"count": 193379,
"self": 0.0,
"children": {
"worker_root": {
"total": 2436.4521194547415,
"count": 193379,
"is_parallel": true,
"self": 1298.8864110549912,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011889347806572914,
"count": 1,
"is_parallel": true,
"self": 0.00036335457116365433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008255802094936371,
"count": 8,
"is_parallel": true,
"self": 0.0008255802094936371
}
}
},
"UnityEnvironment.step": {
"total": 0.018491373397409916,
"count": 1,
"is_parallel": true,
"self": 0.00030610524117946625,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002986127510666847,
"count": 1,
"is_parallel": true,
"self": 0.0002986127510666847
},
"communicator.exchange": {
"total": 0.01699640415608883,
"count": 1,
"is_parallel": true,
"self": 0.01699640415608883
},
"steps_from_proto": {
"total": 0.0008902512490749359,
"count": 1,
"is_parallel": true,
"self": 0.00019835308194160461,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006918981671333313,
"count": 8,
"is_parallel": true,
"self": 0.0006918981671333313
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1137.5657083997503,
"count": 193378,
"is_parallel": true,
"self": 33.40197414159775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.14669872354716,
"count": 193378,
"is_parallel": true,
"self": 24.14669872354716
},
"communicator.exchange": {
"total": 983.1190599706024,
"count": 193378,
"is_parallel": true,
"self": 983.1190599706024
},
"steps_from_proto": {
"total": 96.89797556400299,
"count": 193378,
"is_parallel": true,
"self": 21.063353597186506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.83462196681648,
"count": 1547024,
"is_parallel": true,
"self": 75.83462196681648
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 964.9428614806384,
"count": 193379,
"self": 3.6503547243773937,
"children": {
"process_trajectory": {
"total": 189.14197521377355,
"count": 193379,
"self": 188.79272856842726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3492466453462839,
"count": 6,
"self": 0.3492466453462839
}
}
},
"_update_policy": {
"total": 772.1505315424874,
"count": 1388,
"self": 422.01692544762045,
"children": {
"TorchPPOOptimizer.update": {
"total": 350.133606094867,
"count": 68439,
"self": 350.133606094867
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.494803190231323e-07,
"count": 1,
"self": 5.494803190231323e-07
},
"TrainerController._save_models": {
"total": 0.0506419837474823,
"count": 1,
"self": 0.0010500717908143997,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0495919119566679,
"count": 1,
"self": 0.0495919119566679
}
}
}
}
}
}
}