{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6665168404579163,
"min": 0.6665168404579163,
"max": 1.5946495532989502,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20091.484375,
"min": 20091.484375,
"max": 48375.2890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989954.0,
"min": 29952.0,
"max": 989954.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.18970997631549835,
"min": -0.10716152936220169,
"max": 0.2471831738948822,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 48.94517517089844,
"min": -25.825927734375,
"max": 63.278892517089844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012041537091135979,
"min": 0.006680199410766363,
"max": 0.45380517840385437,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.1067166328430176,
"min": 1.6566894054412842,
"max": 107.55182647705078,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06943605918994374,
"min": 0.06585919048631608,
"max": 0.07194191388609512,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.48605241432960616,
"min": 0.2788896204778683,
"max": 0.562897801988069,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01099524283372808,
"min": 0.0001414999053914558,
"max": 0.01099524283372808,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.07696669983609657,
"min": 0.0009904993377401906,
"max": 0.08602394675108371,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.058011756757141e-06,
"min": 5.058011756757141e-06,
"max": 0.00019696640151680003,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.540608229729999e-05,
"min": 3.540608229729999e-05,
"max": 0.0012125400937299997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.09999999999999999,
"min": 0.09999999999999998,
"max": 0.1,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7,
"min": 0.3999999999999999,
"max": 0.8,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001361949614285714,
"min": 0.0001361949614285714,
"max": 0.00492431168,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009533647299999998,
"min": 0.0009533647299999998,
"max": 0.030322873000000004,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01803797110915184,
"min": 0.01787019893527031,
"max": 0.534096896648407,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12626579403877258,
"min": 0.12509138882160187,
"max": 2.136387586593628,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 613.4,
"min": 601.3877551020408,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30670.0,
"min": 15984.0,
"max": 34234.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9464479768276215,
"min": -1.0000000521540642,
"max": 1.0311673174099045,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 47.32239884138107,
"min": -32.000001668930054,
"max": 50.52719855308533,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9464479768276215,
"min": -1.0000000521540642,
"max": 1.0311673174099045,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 47.32239884138107,
"min": -32.000001668930054,
"max": 50.52719855308533,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.11382588352775201,
"min": 0.11363009977325493,
"max": 12.171295398846269,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.691294176387601,
"min": 4.879276555497199,
"max": 194.7407263815403,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749443559",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training_2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749445647"
},
"total": 2087.5942535270005,
"count": 1,
"self": 0.47591953500068485,
"children": {
"run_training.setup": {
"total": 0.020292706999498478,
"count": 1,
"self": 0.020292706999498478
},
"TrainerController.start_learning": {
"total": 2087.0980412850004,
"count": 1,
"self": 1.2929630827520668,
"children": {
"TrainerController._reset_env": {
"total": 3.044654459999947,
"count": 1,
"self": 3.044654459999947
},
"TrainerController.advance": {
"total": 2082.6721911832474,
"count": 63274,
"self": 1.338734979455694,
"children": {
"env_step": {
"total": 1396.3939620187084,
"count": 63274,
"self": 1246.1081086676086,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.5401656600543,
"count": 63274,
"self": 4.545523447043706,
"children": {
"TorchPolicy.evaluate": {
"total": 144.9946422130106,
"count": 62561,
"self": 144.9946422130106
}
}
},
"workers": {
"total": 0.7456876910455321,
"count": 63274,
"self": 0.0,
"children": {
"worker_root": {
"total": 2082.456540916074,
"count": 63274,
"is_parallel": true,
"self": 943.1106912942269,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002521911999792792,
"count": 1,
"is_parallel": true,
"self": 0.0007934859995657462,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017284260002270457,
"count": 8,
"is_parallel": true,
"self": 0.0017284260002270457
}
}
},
"UnityEnvironment.step": {
"total": 0.049979223999798705,
"count": 1,
"is_parallel": true,
"self": 0.0005230350006968365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004515149994404055,
"count": 1,
"is_parallel": true,
"self": 0.0004515149994404055
},
"communicator.exchange": {
"total": 0.047415748000275926,
"count": 1,
"is_parallel": true,
"self": 0.047415748000275926
},
"steps_from_proto": {
"total": 0.0015889259993855376,
"count": 1,
"is_parallel": true,
"self": 0.00033522400008223485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012537019993033027,
"count": 8,
"is_parallel": true,
"self": 0.0012537019993033027
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1139.3458496218473,
"count": 63273,
"is_parallel": true,
"self": 31.04758551323812,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.727993597838577,
"count": 63273,
"is_parallel": true,
"self": 22.727993597838577
},
"communicator.exchange": {
"total": 993.7852744339243,
"count": 63273,
"is_parallel": true,
"self": 993.7852744339243
},
"steps_from_proto": {
"total": 91.78499607684626,
"count": 63273,
"is_parallel": true,
"self": 17.978266948128294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.80672912871796,
"count": 506184,
"is_parallel": true,
"self": 73.80672912871796
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 684.9394941850833,
"count": 63274,
"self": 2.440142136817485,
"children": {
"process_trajectory": {
"total": 125.25541883626374,
"count": 63274,
"self": 125.05289813026411,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2025207059996319,
"count": 2,
"self": 0.2025207059996319
}
}
},
"_update_policy": {
"total": 557.243933212002,
"count": 231,
"self": 307.9904933010275,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.25343991097452,
"count": 23148,
"self": 249.25343991097452
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.320002962136641e-07,
"count": 1,
"self": 8.320002962136641e-07
},
"TrainerController._save_models": {
"total": 0.08823172700067516,
"count": 1,
"self": 0.0011536390011315234,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08707808799954364,
"count": 1,
"self": 0.08707808799954364
}
}
}
}
}
}
}