{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35942062735557556,
"min": 0.3515954911708832,
"max": 1.3989955186843872,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10679.10546875,
"min": 10514.111328125,
"max": 42439.9296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29961.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29961.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48153018951416016,
"min": -0.08552796393632889,
"max": 0.6536166667938232,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 131.93927001953125,
"min": -20.526710510253906,
"max": 185.62713623046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018950866535305977,
"min": 0.00459246477112174,
"max": 0.6789603233337402,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.192537307739258,
"min": 1.2353730201721191,
"max": 161.59255981445312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06881369972058261,
"min": 0.06457408542961016,
"max": 0.07419323876697126,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0322054958087392,
"min": 0.5935459101357701,
"max": 1.0629670523048844,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01545152069431626,
"min": 0.001212646079764528,
"max": 0.017490255732251676,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2317728104147439,
"min": 0.014112671656550826,
"max": 0.24486358025152347,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.513257495613329e-06,
"min": 7.513257495613329e-06,
"max": 0.0002948533892155375,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011269886243419994,
"min": 0.00011269886243419994,
"max": 0.0036333712888763003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250438666666667,
"min": 0.10250438666666667,
"max": 0.19828446249999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375658,
"min": 1.4779021000000003,
"max": 2.6111237,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026018822799999997,
"min": 0.00026018822799999997,
"max": 0.009828617803749998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039028234199999993,
"min": 0.0039028234199999993,
"max": 0.12113125763000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012292472645640373,
"min": 0.012292472645640373,
"max": 0.755624532699585,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18438708782196045,
"min": 0.17792156338691711,
"max": 6.04499626159668,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 373.11494252873564,
"min": 281.1276595744681,
"max": 990.5151515151515,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32461.0,
"min": 16376.0,
"max": 33283.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.488910329667316,
"min": -0.9308303504279165,
"max": 1.6975850945616022,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 129.5351986810565,
"min": -30.717401564121246,
"max": 159.5729988887906,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.488910329667316,
"min": -0.9308303504279165,
"max": 1.6975850945616022,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 129.5351986810565,
"min": -30.717401564121246,
"max": 159.5729988887906,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047472099230816085,
"min": 0.04102697923820976,
"max": 16.747456265942137,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.130072633080999,
"min": 3.7756197740964126,
"max": 284.70675652101636,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720957566",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720959918"
},
"total": 2351.6290351439998,
"count": 1,
"self": 0.5373872420000225,
"children": {
"run_training.setup": {
"total": 0.050762946000077136,
"count": 1,
"self": 0.050762946000077136
},
"TrainerController.start_learning": {
"total": 2351.040884956,
"count": 1,
"self": 1.6544132050371445,
"children": {
"TrainerController._reset_env": {
"total": 2.2374863840000216,
"count": 1,
"self": 2.2374863840000216
},
"TrainerController.advance": {
"total": 2347.0612872829633,
"count": 64063,
"self": 1.6963289699838242,
"children": {
"env_step": {
"total": 1699.997304216942,
"count": 64063,
"self": 1552.7933969559795,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.18590215594713,
"count": 64063,
"self": 5.214304289935853,
"children": {
"TorchPolicy.evaluate": {
"total": 140.97159786601128,
"count": 62561,
"self": 140.97159786601128
}
}
},
"workers": {
"total": 1.0180051050153907,
"count": 64063,
"self": 0.0,
"children": {
"worker_root": {
"total": 2345.194280150963,
"count": 64063,
"is_parallel": true,
"self": 928.5509097650106,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020690149999609275,
"count": 1,
"is_parallel": true,
"self": 0.0006410080002297036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001428006999731224,
"count": 8,
"is_parallel": true,
"self": 0.001428006999731224
}
}
},
"UnityEnvironment.step": {
"total": 0.04820630299991535,
"count": 1,
"is_parallel": true,
"self": 0.0006240759998945578,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004546990001017548,
"count": 1,
"is_parallel": true,
"self": 0.0004546990001017548
},
"communicator.exchange": {
"total": 0.04528044699986822,
"count": 1,
"is_parallel": true,
"self": 0.04528044699986822
},
"steps_from_proto": {
"total": 0.0018470810000508209,
"count": 1,
"is_parallel": true,
"self": 0.0003565350002645573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014905459997862636,
"count": 8,
"is_parallel": true,
"self": 0.0014905459997862636
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1416.6433703859525,
"count": 64062,
"is_parallel": true,
"self": 36.07008187405381,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.862057893971496,
"count": 64062,
"is_parallel": true,
"self": 24.862057893971496
},
"communicator.exchange": {
"total": 1249.9409761609488,
"count": 64062,
"is_parallel": true,
"self": 1249.9409761609488
},
"steps_from_proto": {
"total": 105.77025445697836,
"count": 64062,
"is_parallel": true,
"self": 22.659753215835508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.11050124114286,
"count": 512496,
"is_parallel": true,
"self": 83.11050124114286
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 645.3676540960375,
"count": 64063,
"self": 3.1393107670526206,
"children": {
"process_trajectory": {
"total": 134.5591547709805,
"count": 64063,
"self": 134.36237168297976,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19678308800075683,
"count": 2,
"self": 0.19678308800075683
}
}
},
"_update_policy": {
"total": 507.6691885580044,
"count": 457,
"self": 299.6890263719795,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.98016218602493,
"count": 22776,
"self": 207.98016218602493
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3779999790131114e-06,
"count": 1,
"self": 1.3779999790131114e-06
},
"TrainerController._save_models": {
"total": 0.08769670599940582,
"count": 1,
"self": 0.0014211039997462649,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08627560199965956,
"count": 1,
"self": 0.08627560199965956
}
}
}
}
}
}
}