{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5161076188087463,
"min": 0.5161076188087463,
"max": 1.4936848878860474,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15598.8359375,
"min": 15598.8359375,
"max": 45312.42578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47120046615600586,
"min": -0.13133865594863892,
"max": 0.47120046615600586,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 128.63772583007812,
"min": -31.521276473999023,
"max": 128.63772583007812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05474507436156273,
"min": 0.001904353848658502,
"max": 0.2407860904932022,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 14.945405006408691,
"min": 0.49894070625305176,
"max": 58.029449462890625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.066600032370641,
"min": 0.06507727222696773,
"max": 0.0736484283472583,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9990004855596148,
"min": 0.48489305820187617,
"max": 1.0310779968616162,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01751438698071676,
"min": 8.55934089587326e-05,
"max": 0.01751438698071676,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2627158047107514,
"min": 0.001112714316463524,
"max": 0.2627158047107514,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.420217526626666e-06,
"min": 7.420217526626666e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011130326289939999,
"min": 0.00011130326289939999,
"max": 0.0031441697519435,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247337333333333,
"min": 0.10247337333333333,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371005999999998,
"min": 1.3886848,
"max": 2.4004058,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000257089996,
"min": 0.000257089996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00385634994,
"min": 0.00385634994,
"max": 0.10483084434999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009520360268652439,
"min": 0.009520360268652439,
"max": 0.36685672402381897,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14280539751052856,
"min": 0.13471350073814392,
"max": 2.5679969787597656,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 412.85897435897436,
"min": 399.32876712328766,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32203.0,
"min": 15984.0,
"max": 32724.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5101871593640401,
"min": -1.0000000521540642,
"max": 1.5101871593640401,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.79459843039513,
"min": -31.999601677060127,
"max": 118.17599832266569,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5101871593640401,
"min": -1.0000000521540642,
"max": 1.5101871593640401,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.79459843039513,
"min": -31.999601677060127,
"max": 118.17599832266569,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.041070632850721434,
"min": 0.03989610851438763,
"max": 7.60607307497412,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2035093623562716,
"min": 2.7270330960163847,
"max": 121.69716919958591,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745563601",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745565636"
},
"total": 2034.5256311470002,
"count": 1,
"self": 0.7312690100004602,
"children": {
"run_training.setup": {
"total": 0.028805401999989044,
"count": 1,
"self": 0.028805401999989044
},
"TrainerController.start_learning": {
"total": 2033.7655567349998,
"count": 1,
"self": 1.2680037000461652,
"children": {
"TrainerController._reset_env": {
"total": 2.4302450720001616,
"count": 1,
"self": 2.4302450720001616
},
"TrainerController.advance": {
"total": 2029.9492480459537,
"count": 63461,
"self": 1.3134736679228354,
"children": {
"env_step": {
"total": 1372.316460849963,
"count": 63461,
"self": 1228.1729681169033,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.44138143306964,
"count": 63461,
"self": 4.471143138138359,
"children": {
"TorchPolicy.evaluate": {
"total": 138.97023829493128,
"count": 62559,
"self": 138.97023829493128
}
}
},
"workers": {
"total": 0.7021112999900652,
"count": 63461,
"self": 0.0,
"children": {
"worker_root": {
"total": 2029.0064228759952,
"count": 63461,
"is_parallel": true,
"self": 906.8849795879958,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020866039999418717,
"count": 1,
"is_parallel": true,
"self": 0.0006737880003129249,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014128159996289469,
"count": 8,
"is_parallel": true,
"self": 0.0014128159996289469
}
}
},
"UnityEnvironment.step": {
"total": 0.04674846900002194,
"count": 1,
"is_parallel": true,
"self": 0.0005282640001951222,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044892999994772254,
"count": 1,
"is_parallel": true,
"self": 0.00044892999994772254
},
"communicator.exchange": {
"total": 0.04420879799999966,
"count": 1,
"is_parallel": true,
"self": 0.04420879799999966
},
"steps_from_proto": {
"total": 0.0015624769998794363,
"count": 1,
"is_parallel": true,
"self": 0.00034545500011518016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012170219997642562,
"count": 8,
"is_parallel": true,
"self": 0.0012170219997642562
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1122.1214432879995,
"count": 63460,
"is_parallel": true,
"self": 30.820193375039935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.101115486982508,
"count": 63460,
"is_parallel": true,
"self": 23.101115486982508
},
"communicator.exchange": {
"total": 976.8406327590078,
"count": 63460,
"is_parallel": true,
"self": 976.8406327590078
},
"steps_from_proto": {
"total": 91.35950166696921,
"count": 63460,
"is_parallel": true,
"self": 18.09784919197523,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.26165247499398,
"count": 507680,
"is_parallel": true,
"self": 73.26165247499398
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 656.3193135280678,
"count": 63461,
"self": 2.4047042520110153,
"children": {
"process_trajectory": {
"total": 122.84559224605596,
"count": 63461,
"self": 122.60969056505587,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23590168100008668,
"count": 2,
"self": 0.23590168100008668
}
}
},
"_update_policy": {
"total": 531.0690170300009,
"count": 438,
"self": 293.4088703340183,
"children": {
"TorchPPOOptimizer.update": {
"total": 237.66014669598258,
"count": 22842,
"self": 237.66014669598258
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1259999155299738e-06,
"count": 1,
"self": 1.1259999155299738e-06
},
"TrainerController._save_models": {
"total": 0.1180587909998394,
"count": 1,
"self": 0.001492047999818169,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11656674300002123,
"count": 1,
"self": 0.11656674300002123
}
}
}
}
}
}
}