{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6217910647392273,
"min": 0.6217910647392273,
"max": 1.3986022472381592,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18693.52734375,
"min": 18693.52734375,
"max": 42427.99609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 29952.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 29952.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.33308297395706177,
"min": -0.11171487718820572,
"max": 0.3903945982456207,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 87.26773834228516,
"min": -26.81157112121582,
"max": 102.28338623046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.026259221136569977,
"min": -0.007511988282203674,
"max": 0.4488699436187744,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.879915714263916,
"min": -1.9080450534820557,
"max": 106.3821792602539,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07076957303104878,
"min": 0.06499893968058233,
"max": 0.07492170639918852,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9907740224346828,
"min": 0.5030805370382343,
"max": 1.0225232154286157,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010361524677093604,
"min": 0.00019326318392288037,
"max": 0.012174115216667492,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14506134547931046,
"min": 0.0019326318392288036,
"max": 0.1704376130333449,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.50785464027143e-06,
"min": 7.50785464027143e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010510996496380003,
"min": 0.00010510996496380003,
"max": 0.0030198908933698,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250258571428572,
"min": 0.10250258571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350362,
"min": 1.3691136000000002,
"max": 2.3584612000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600083128571429,
"min": 0.0002600083128571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036401163800000007,
"min": 0.0036401163800000007,
"max": 0.10069235697999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009019111283123493,
"min": 0.008629250340163708,
"max": 0.5635762214660645,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12626755237579346,
"min": 0.12080950289964676,
"max": 3.945033550262451,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 511.8,
"min": 485.2295081967213,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30708.0,
"min": 15984.0,
"max": 32599.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1805796308790224,
"min": -1.0000000521540642,
"max": 1.2851966941942934,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 69.65419822186232,
"min": -32.000001668930054,
"max": 78.3969983458519,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1805796308790224,
"min": -1.0000000521540642,
"max": 1.2851966941942934,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 69.65419822186232,
"min": -32.000001668930054,
"max": 78.3969983458519,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047923821161729996,
"min": 0.04551308719413195,
"max": 10.685377827845514,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8275054485420696,
"min": 2.6397590572596528,
"max": 170.96604524552822,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747957016",
"python_version": "3.10.12 (main, Feb 4 2025, 14:57:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747959078"
},
"total": 2062.388096179,
"count": 1,
"self": 0.648155021000548,
"children": {
"run_training.setup": {
"total": 0.018212105999737105,
"count": 1,
"self": 0.018212105999737105
},
"TrainerController.start_learning": {
"total": 2061.721729052,
"count": 1,
"self": 1.2392506339456304,
"children": {
"TrainerController._reset_env": {
"total": 2.854934856,
"count": 1,
"self": 2.854934856
},
"TrainerController.advance": {
"total": 2057.5476565050544,
"count": 63446,
"self": 1.2557198249755857,
"children": {
"env_step": {
"total": 1390.047780130074,
"count": 63446,
"self": 1242.5126550110526,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.79199862501355,
"count": 63446,
"self": 4.454809547062723,
"children": {
"TorchPolicy.evaluate": {
"total": 142.33718907795082,
"count": 62562,
"self": 142.33718907795082
}
}
},
"workers": {
"total": 0.7431264940078108,
"count": 63446,
"self": 0.0,
"children": {
"worker_root": {
"total": 2057.2026753729388,
"count": 63446,
"is_parallel": true,
"self": 918.3083111049459,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020880949996353593,
"count": 1,
"is_parallel": true,
"self": 0.0006878979993416579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014001970002937014,
"count": 8,
"is_parallel": true,
"self": 0.0014001970002937014
}
}
},
"UnityEnvironment.step": {
"total": 0.06930246199999601,
"count": 1,
"is_parallel": true,
"self": 0.0005931760001658404,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004495159996622533,
"count": 1,
"is_parallel": true,
"self": 0.0004495159996622533
},
"communicator.exchange": {
"total": 0.06662056799996208,
"count": 1,
"is_parallel": true,
"self": 0.06662056799996208
},
"steps_from_proto": {
"total": 0.0016392020002058416,
"count": 1,
"is_parallel": true,
"self": 0.00034346900019954774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012957330000062939,
"count": 8,
"is_parallel": true,
"self": 0.0012957330000062939
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1138.8943642679928,
"count": 63445,
"is_parallel": true,
"self": 30.810982908969436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.615299728051014,
"count": 63445,
"is_parallel": true,
"self": 22.615299728051014
},
"communicator.exchange": {
"total": 991.3801436059744,
"count": 63445,
"is_parallel": true,
"self": 991.3801436059744
},
"steps_from_proto": {
"total": 94.08793802499804,
"count": 63445,
"is_parallel": true,
"self": 18.43567665198043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.65226137301761,
"count": 507560,
"is_parallel": true,
"self": 75.65226137301761
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.2441565500048,
"count": 63446,
"self": 2.322384149055779,
"children": {
"process_trajectory": {
"total": 123.12036692195215,
"count": 63446,
"self": 122.80594988995244,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31441703199971016,
"count": 2,
"self": 0.31441703199971016
}
}
},
"_update_policy": {
"total": 540.8014054789969,
"count": 431,
"self": 299.6271436849611,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.17426179403583,
"count": 22848,
"self": 241.17426179403583
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.799997885944322e-07,
"count": 1,
"self": 8.799997885944322e-07
},
"TrainerController._save_models": {
"total": 0.07988617699993483,
"count": 1,
"self": 0.0011133799998788163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07877279700005602,
"count": 1,
"self": 0.07877279700005602
}
}
}
}
}
}
}