{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3500016927719116,
"min": 0.3344663083553314,
"max": 1.4809441566467285,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10416.05078125,
"min": 10050.0439453125,
"max": 44925.921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5601391196250916,
"min": -0.11361859738826752,
"max": 0.5731185674667358,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 154.59840393066406,
"min": -27.382081985473633,
"max": 158.75384521484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022462690249085426,
"min": -0.006063505541533232,
"max": 0.28878962993621826,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.199702262878418,
"min": -1.5704479217529297,
"max": 68.44314575195312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0671454301433793,
"min": 0.06355465978133903,
"max": 0.07365634562674177,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0071814521506894,
"min": 0.4850646444007918,
"max": 1.0696834848438133,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014299210030326828,
"min": 0.0007097883707957911,
"max": 0.015899259488066367,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2144881504549024,
"min": 0.008056258692009243,
"max": 0.22258963283292912,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.476457507880002e-06,
"min": 7.476457507880002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011214686261820002,
"min": 0.00011214686261820002,
"max": 0.0032253469248843996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249212000000002,
"min": 0.10249212000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373818000000004,
"min": 1.3691136000000002,
"max": 2.5275876999999998,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002589627880000001,
"min": 0.0002589627880000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038844418200000013,
"min": 0.0038844418200000013,
"max": 0.10752404843999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01431235857307911,
"min": 0.01431235857307911,
"max": 0.40245574712753296,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2146853804588318,
"min": 0.2064724862575531,
"max": 2.817190170288086,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 354.7857142857143,
"min": 325.8152173913044,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29802.0,
"min": 15984.0,
"max": 32490.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6440352681805106,
"min": -1.0000000521540642,
"max": 1.6440352681805106,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 139.7429977953434,
"min": -32.000001668930054,
"max": 148.40479864180088,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6440352681805106,
"min": -1.0000000521540642,
"max": 1.6440352681805106,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 139.7429977953434,
"min": -32.000001668930054,
"max": 148.40479864180088,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0526487674242269,
"min": 0.04941525829509463,
"max": 7.829933635890484,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.475145231059287,
"min": 4.475145231059287,
"max": 125.27893817424774,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754530489",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1754532728"
},
"total": 2238.885544115,
"count": 1,
"self": 0.8521220940001513,
"children": {
"run_training.setup": {
"total": 0.02871802699996806,
"count": 1,
"self": 0.02871802699996806
},
"TrainerController.start_learning": {
"total": 2238.004703994,
"count": 1,
"self": 1.3187972179352982,
"children": {
"TrainerController._reset_env": {
"total": 2.2851511809999465,
"count": 1,
"self": 2.2851511809999465
},
"TrainerController.advance": {
"total": 2234.284912933065,
"count": 63885,
"self": 1.3604620700998566,
"children": {
"env_step": {
"total": 1572.9085046990035,
"count": 63885,
"self": 1425.5180861770589,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.60185642795773,
"count": 63885,
"self": 4.615008509970721,
"children": {
"TorchPolicy.evaluate": {
"total": 141.986847917987,
"count": 62562,
"self": 141.986847917987
}
}
},
"workers": {
"total": 0.7885620939869113,
"count": 63885,
"self": 0.0,
"children": {
"worker_root": {
"total": 2233.2624382810172,
"count": 63885,
"is_parallel": true,
"self": 919.4604478279848,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00185215600004085,
"count": 1,
"is_parallel": true,
"self": 0.0006184229998780211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012337330001628288,
"count": 8,
"is_parallel": true,
"self": 0.0012337330001628288
}
}
},
"UnityEnvironment.step": {
"total": 0.05312682199996743,
"count": 1,
"is_parallel": true,
"self": 0.0005176849997496902,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044532600009006273,
"count": 1,
"is_parallel": true,
"self": 0.00044532600009006273
},
"communicator.exchange": {
"total": 0.050497333000066646,
"count": 1,
"is_parallel": true,
"self": 0.050497333000066646
},
"steps_from_proto": {
"total": 0.0016664780000610335,
"count": 1,
"is_parallel": true,
"self": 0.0003552230000423151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013112550000187184,
"count": 8,
"is_parallel": true,
"self": 0.0013112550000187184
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.8019904530324,
"count": 63884,
"is_parallel": true,
"self": 31.295020227038094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.59586669302712,
"count": 63884,
"is_parallel": true,
"self": 22.59586669302712
},
"communicator.exchange": {
"total": 1165.1614726629755,
"count": 63884,
"is_parallel": true,
"self": 1165.1614726629755
},
"steps_from_proto": {
"total": 94.74963086999173,
"count": 63884,
"is_parallel": true,
"self": 19.005382520060493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.74424834993124,
"count": 511072,
"is_parallel": true,
"self": 75.74424834993124
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 660.0159461639614,
"count": 63885,
"self": 2.528007536011046,
"children": {
"process_trajectory": {
"total": 124.04155651395104,
"count": 63885,
"self": 123.8045186359509,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23703787800013743,
"count": 2,
"self": 0.23703787800013743
}
}
},
"_update_policy": {
"total": 533.4463821139993,
"count": 442,
"self": 298.87416348097213,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.5722186330272,
"count": 22821,
"self": 234.5722186330272
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1219999578315765e-06,
"count": 1,
"self": 1.1219999578315765e-06
},
"TrainerController._save_models": {
"total": 0.11584154000001945,
"count": 1,
"self": 0.0018048380002255726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11403670199979388,
"count": 1,
"self": 0.11403670199979388
}
}
}
}
}
}
}