ppo-PyramidsRND / run_logs / timers.json
Sayyor's picture
1M timesteps, default
5411264 verified
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.44644874334335327,
"min": 0.3676552474498749,
"max": 1.5143574476242065,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13500.6103515625,
"min": 10988.48046875,
"max": 45939.546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.023671751841902733,
"min": -0.12484000623226166,
"max": 0.020460186526179314,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.823250770568848,
"min": -29.96160125732422,
"max": 5.033205986022949,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015064789913594723,
"min": 0.007779942825436592,
"max": 0.2985401451587677,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.7059383392333984,
"min": 1.8905260562896729,
"max": 70.75401306152344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06987025700126206,
"min": 0.06308633619039994,
"max": 0.07291689615127399,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.978183598017669,
"min": 0.48714370067945906,
"max": 1.020836546117836,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004308005329838892,
"min": 0.0003557545800103571,
"max": 0.006350466536295944,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.06031207461774449,
"min": 0.004269054960124285,
"max": 0.08134268601649333,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.26903329130714e-06,
"min": 7.26903329130714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010176646607829997,
"min": 0.00010176646607829997,
"max": 0.0032251465249512,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242297857142858,
"min": 0.10242297857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339217000000002,
"min": 1.3886848,
"max": 2.3591381,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002520555592857142,
"min": 0.0002520555592857142,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003528777829999999,
"min": 0.003528777829999999,
"max": 0.10751737512000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013996501453220844,
"min": 0.013996501453220844,
"max": 0.33533814549446106,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19595101475715637,
"min": 0.19595101475715637,
"max": 2.34736704826355,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 858.78125,
"min": 845.6,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27481.0,
"min": 15984.0,
"max": 33275.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.2573151940649206,
"min": -1.0000000521540642,
"max": -0.10974122255164034,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -8.49140140414238,
"min": -31.998001664876938,
"max": -3.7312015667557716,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.2573151940649206,
"min": -1.0000000521540642,
"max": -0.10974122255164034,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -8.49140140414238,
"min": -31.998001664876938,
"max": -3.7312015667557716,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12426557639529082,
"min": 0.12426557639529082,
"max": 6.772108065895736,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.100764021044597,
"min": 4.100764021044597,
"max": 108.35372905433178,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1721085353",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1721087429"
},
"total": 2076.213223665,
"count": 1,
"self": 0.7710439369998312,
"children": {
"run_training.setup": {
"total": 0.05299816499996268,
"count": 1,
"self": 0.05299816499996268
},
"TrainerController.start_learning": {
"total": 2075.389181563,
"count": 1,
"self": 1.3396246060306112,
"children": {
"TrainerController._reset_env": {
"total": 3.0947472789999892,
"count": 1,
"self": 3.0947472789999892
},
"TrainerController.advance": {
"total": 2070.816000836969,
"count": 63084,
"self": 1.4424802599965005,
"children": {
"env_step": {
"total": 1431.9340216029757,
"count": 63084,
"self": 1295.174599659891,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.94093703706358,
"count": 63084,
"self": 4.702591132075781,
"children": {
"TorchPolicy.evaluate": {
"total": 131.2383459049878,
"count": 62560,
"self": 131.2383459049878
}
}
},
"workers": {
"total": 0.8184849060210126,
"count": 63084,
"self": 0.0,
"children": {
"worker_root": {
"total": 2070.184102519953,
"count": 63084,
"is_parallel": true,
"self": 895.4354172239314,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0074982170000339465,
"count": 1,
"is_parallel": true,
"self": 0.006018389000246316,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014798279997876307,
"count": 8,
"is_parallel": true,
"self": 0.0014798279997876307
}
}
},
"UnityEnvironment.step": {
"total": 0.046383439000010185,
"count": 1,
"is_parallel": true,
"self": 0.0006040429999529806,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000494645000003402,
"count": 1,
"is_parallel": true,
"self": 0.000494645000003402
},
"communicator.exchange": {
"total": 0.04369731300005242,
"count": 1,
"is_parallel": true,
"self": 0.04369731300005242
},
"steps_from_proto": {
"total": 0.0015874380000013844,
"count": 1,
"is_parallel": true,
"self": 0.00033080699995480245,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001256631000046582,
"count": 8,
"is_parallel": true,
"self": 0.001256631000046582
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1174.7486852960214,
"count": 63083,
"is_parallel": true,
"self": 33.69908443803752,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.340683978965558,
"count": 63083,
"is_parallel": true,
"self": 24.340683978965558
},
"communicator.exchange": {
"total": 1016.8807618280055,
"count": 63083,
"is_parallel": true,
"self": 1016.8807618280055
},
"steps_from_proto": {
"total": 99.82815505101269,
"count": 63083,
"is_parallel": true,
"self": 20.340872805867775,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.48728224514491,
"count": 504664,
"is_parallel": true,
"self": 79.48728224514491
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.439498973997,
"count": 63084,
"self": 2.432558802996823,
"children": {
"process_trajectory": {
"total": 127.38925482500076,
"count": 63084,
"self": 127.08381082400047,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30544400100029634,
"count": 2,
"self": 0.30544400100029634
}
}
},
"_update_policy": {
"total": 507.61768534599946,
"count": 436,
"self": 302.58717557801504,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.03050976798443,
"count": 22776,
"self": 205.03050976798443
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2940004125994164e-06,
"count": 1,
"self": 1.2940004125994164e-06
},
"TrainerController._save_models": {
"total": 0.13880754699994213,
"count": 1,
"self": 0.002354190000005474,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13645335699993666,
"count": 1,
"self": 0.13645335699993666
}
}
}
}
}
}
}