{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41224193572998047,
"min": 0.41224193572998047,
"max": 1.4232133626937866,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12347.470703125,
"min": 12347.470703125,
"max": 43174.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989886.0,
"min": 29952.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47863292694091797,
"min": -0.12098627537488937,
"max": 0.6040423512458801,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 128.75225830078125,
"min": -29.157691955566406,
"max": 167.31973266601562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02566806972026825,
"min": 0.0021338188089430332,
"max": 0.3815053105354309,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.90471076965332,
"min": 0.5803987383842468,
"max": 90.41675567626953,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06874033028309766,
"min": 0.06450550111152956,
"max": 0.0726938713029717,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9623646239633672,
"min": 0.477583357200713,
"max": 1.0716565478348867,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0143989032090065,
"min": 0.000642932614123657,
"max": 0.015834180997834273,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20158464492609102,
"min": 0.006924524550552579,
"max": 0.2303910442860797,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.289497570199999e-06,
"min": 7.289497570199999e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010205296598279998,
"min": 0.00010205296598279998,
"max": 0.0032544887151705,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024298,
"min": 0.1024298,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340172,
"min": 1.3691136000000002,
"max": 2.443934,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002527370200000001,
"min": 0.0002527370200000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003538318280000001,
"min": 0.003538318280000001,
"max": 0.10850446705,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011549736373126507,
"min": 0.011549736373126507,
"max": 0.3955830931663513,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16169631481170654,
"min": 0.16169631481170654,
"max": 2.7690815925598145,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 371.3125,
"min": 337.36046511627904,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29705.0,
"min": 15984.0,
"max": 32327.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5286624782718718,
"min": -1.0000000521540642,
"max": 1.6393813818346623,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.29299826174974,
"min": -32.000001668930054,
"max": 140.98679883778095,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5286624782718718,
"min": -1.0000000521540642,
"max": 1.6393813818346623,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.29299826174974,
"min": -32.000001668930054,
"max": 140.98679883778095,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044625752406500394,
"min": 0.04261604530621446,
"max": 7.929269975051284,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5700601925200317,
"min": 3.5700601925200317,
"max": 126.86831960082054,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1767842868",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1767845384"
},
"total": 2515.879346885,
"count": 1,
"self": 0.5265149400006521,
"children": {
"run_training.setup": {
"total": 0.026363607999883243,
"count": 1,
"self": 0.026363607999883243
},
"TrainerController.start_learning": {
"total": 2515.3264683369994,
"count": 1,
"self": 1.6670974100243257,
"children": {
"TrainerController._reset_env": {
"total": 1.6671148139998877,
"count": 1,
"self": 1.6671148139998877
},
"TrainerController.advance": {
"total": 2511.9032832849753,
"count": 63682,
"self": 1.8347520801785322,
"children": {
"env_step": {
"total": 1771.5350845779112,
"count": 63682,
"self": 1599.1162185718244,
"children": {
"SubprocessEnvManager._take_step": {
"total": 171.4326844760585,
"count": 63682,
"self": 5.206103397183142,
"children": {
"TorchPolicy.evaluate": {
"total": 166.22658107887537,
"count": 62564,
"self": 166.22658107887537
}
}
},
"workers": {
"total": 0.9861815300282615,
"count": 63682,
"self": 0.0,
"children": {
"worker_root": {
"total": 2510.5742345729695,
"count": 63682,
"is_parallel": true,
"self": 1041.5905730218997,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022360600000865816,
"count": 1,
"is_parallel": true,
"self": 0.0007206569998743362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015154030002122454,
"count": 8,
"is_parallel": true,
"self": 0.0015154030002122454
}
}
},
"UnityEnvironment.step": {
"total": 0.05520673399996667,
"count": 1,
"is_parallel": true,
"self": 0.0005632809993585397,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004887230002168508,
"count": 1,
"is_parallel": true,
"self": 0.0004887230002168508
},
"communicator.exchange": {
"total": 0.052289777000169124,
"count": 1,
"is_parallel": true,
"self": 0.052289777000169124
},
"steps_from_proto": {
"total": 0.0018649530002221582,
"count": 1,
"is_parallel": true,
"self": 0.0004017060000478523,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014632470001743059,
"count": 8,
"is_parallel": true,
"self": 0.0014632470001743059
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1468.9836615510699,
"count": 63681,
"is_parallel": true,
"self": 38.07368533003773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.960759052917183,
"count": 63681,
"is_parallel": true,
"self": 25.960759052917183
},
"communicator.exchange": {
"total": 1279.8783573930268,
"count": 63681,
"is_parallel": true,
"self": 1279.8783573930268
},
"steps_from_proto": {
"total": 125.07085977508814,
"count": 63681,
"is_parallel": true,
"self": 26.818076713208484,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.25278306187965,
"count": 509448,
"is_parallel": true,
"self": 98.25278306187965
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 738.5334466268855,
"count": 63682,
"self": 3.225358753792989,
"children": {
"process_trajectory": {
"total": 133.55798164208682,
"count": 63682,
"self": 133.28847864208637,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2695030000004408,
"count": 2,
"self": 0.2695030000004408
}
}
},
"_update_policy": {
"total": 601.7501062310057,
"count": 443,
"self": 333.61280337000426,
"children": {
"TorchPPOOptimizer.update": {
"total": 268.13730286100144,
"count": 22800,
"self": 268.13730286100144
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2209993656142615e-06,
"count": 1,
"self": 1.2209993656142615e-06
},
"TrainerController._save_models": {
"total": 0.08897160700053064,
"count": 1,
"self": 0.0012226310000187368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0877489760005119,
"count": 1,
"self": 0.0877489760005119
}
}
}
}
}
}
}