{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.13689710199832916,
"min": 0.1177377700805664,
"max": 1.4125113487243652,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4100.341796875,
"min": 3518.946533203125,
"max": 42849.9453125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999985.0,
"min": 29987.0,
"max": 2999985.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999985.0,
"min": 29987.0,
"max": 2999985.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.871915340423584,
"min": -0.08985260128974915,
"max": 0.9122087955474854,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 271.1656799316406,
"min": -21.564624786376953,
"max": 280.048095703125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018306860700249672,
"min": -0.01786121539771557,
"max": 0.7513640522956848,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.69343376159668,
"min": -4.87611198425293,
"max": 178.82464599609375,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06929773124854774,
"min": 0.06350942469102197,
"max": 0.07448223320711993,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9701682374796684,
"min": 0.5835752889020469,
"max": 1.0866661710703434,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014570308731455052,
"min": 0.0009128543780996247,
"max": 0.026121248675566467,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20398432224037072,
"min": 0.011867106915295121,
"max": 0.25310028748936014,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5028137848095236e-06,
"min": 1.5028137848095236e-06,
"max": 0.0002984077130307625,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1039392987333332e-05,
"min": 2.1039392987333332e-05,
"max": 0.004010802063066,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050090476190476,
"min": 0.10050090476190476,
"max": 0.1994692375,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4070126666666667,
"min": 1.4070126666666667,
"max": 2.827554933333334,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.0040385714285714e-05,
"min": 6.0040385714285714e-05,
"max": 0.00994697682625,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008405654,
"min": 0.0008405654,
"max": 0.1336997066,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008786150254309177,
"min": 0.008670860901474953,
"max": 0.7231569290161133,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12300609797239304,
"min": 0.12139205634593964,
"max": 5.785255432128906,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 196.88888888888889,
"min": 192.7516339869281,
"max": 997.6875,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30124.0,
"min": 15891.0,
"max": 32976.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.803111099729351,
"min": -0.935956300701946,
"max": 1.805784302696683,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 275.8759982585907,
"min": -29.950601622462273,
"max": 276.2849983125925,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.803111099729351,
"min": -0.935956300701946,
"max": 1.805784302696683,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 275.8759982585907,
"min": -29.950601622462273,
"max": 276.2849983125925,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01800723419466902,
"min": 0.01800723419466902,
"max": 17.034468912519515,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.75510683178436,
"min": 2.567502475998481,
"max": 272.55150260031223,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1773754754",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1773762131"
},
"total": 7377.181409056,
"count": 1,
"self": 0.9136139219990582,
"children": {
"run_training.setup": {
"total": 0.03346269100029531,
"count": 1,
"self": 0.03346269100029531
},
"TrainerController.start_learning": {
"total": 7376.234332443,
"count": 1,
"self": 4.411734970826728,
"children": {
"TrainerController._reset_env": {
"total": 3.3494539970001824,
"count": 1,
"self": 3.3494539970001824
},
"TrainerController.advance": {
"total": 7368.355997679172,
"count": 195748,
"self": 4.338809334260986,
"children": {
"env_step": {
"total": 5374.1125881290245,
"count": 195748,
"self": 4916.786620469571,
"children": {
"SubprocessEnvManager._take_step": {
"total": 454.7656540121793,
"count": 195748,
"self": 14.076715744915418,
"children": {
"TorchPolicy.evaluate": {
"total": 440.6889382672639,
"count": 187555,
"self": 440.6889382672639
}
}
},
"workers": {
"total": 2.560313647273688,
"count": 195748,
"self": 0.0,
"children": {
"worker_root": {
"total": 7356.593362077077,
"count": 195748,
"is_parallel": true,
"self": 2795.657948138044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004086649999862857,
"count": 1,
"is_parallel": true,
"self": 0.002756465000402386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013301849994604709,
"count": 8,
"is_parallel": true,
"self": 0.0013301849994604709
}
}
},
"UnityEnvironment.step": {
"total": 0.050104006999845296,
"count": 1,
"is_parallel": true,
"self": 0.0005435799998849689,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004720000001725566,
"count": 1,
"is_parallel": true,
"self": 0.0004720000001725566
},
"communicator.exchange": {
"total": 0.04737354200005939,
"count": 1,
"is_parallel": true,
"self": 0.04737354200005939
},
"steps_from_proto": {
"total": 0.0017148849997283833,
"count": 1,
"is_parallel": true,
"self": 0.0003799290007009404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013349559990274429,
"count": 8,
"is_parallel": true,
"self": 0.0013349559990274429
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4560.9354139390325,
"count": 195747,
"is_parallel": true,
"self": 103.36898465292325,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 69.48728274718678,
"count": 195747,
"is_parallel": true,
"self": 69.48728274718678
},
"communicator.exchange": {
"total": 4064.4594844567846,
"count": 195747,
"is_parallel": true,
"self": 4064.4594844567846
},
"steps_from_proto": {
"total": 323.61966208213744,
"count": 195747,
"is_parallel": true,
"self": 68.88087494683668,
"children": {
"_process_rank_one_or_two_observation": {
"total": 254.73878713530075,
"count": 1565976,
"is_parallel": true,
"self": 254.73878713530075
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1989.9046002158861,
"count": 195748,
"self": 8.633077081159172,
"children": {
"process_trajectory": {
"total": 392.7209531667422,
"count": 195748,
"self": 392.09866482074176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6222883460004596,
"count": 6,
"self": 0.6222883460004596
}
}
},
"_update_policy": {
"total": 1588.5505699679848,
"count": 1406,
"self": 868.9630616727623,
"children": {
"TorchPPOOptimizer.update": {
"total": 719.5875082952225,
"count": 68397,
"self": 719.5875082952225
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1860010999953374e-06,
"count": 1,
"self": 1.1860010999953374e-06
},
"TrainerController._save_models": {
"total": 0.11714460999974108,
"count": 1,
"self": 0.0015571159983664984,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11558749400137458,
"count": 1,
"self": 0.11558749400137458
}
}
}
}
}
}
}