{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8922141790390015,
"min": 0.8922141790390015,
"max": 1.4813717603683472,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 27066.208984375,
"min": 27066.208984375,
"max": 44938.89453125,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89961.0,
"min": 29952.0,
"max": 89961.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89961.0,
"min": 29952.0,
"max": 89961.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09253223985433578,
"min": -0.13578973710536957,
"max": -0.09253223985433578,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -22.300270080566406,
"min": -32.182167053222656,
"max": -22.300270080566406,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.199475958943367,
"min": 0.199475958943367,
"max": 0.33898448944091797,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 48.073707580566406,
"min": 48.073707580566406,
"max": 80.33932495117188,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07189497155276138,
"min": 0.06806003932545993,
"max": 0.07194849411406049,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.7908446870803751,
"min": 0.5036394587984234,
"max": 0.7908446870803751,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0012177477248836357,
"min": 0.0012177477248836357,
"max": 0.006861717372968955,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.013395224973719993,
"min": 0.013395224973719993,
"max": 0.04803202161078268,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.062171282309092e-05,
"min": 7.062171282309092e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0007768388410540001,
"min": 0.0007768388410540001,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12354054545454547,
"min": 0.12354054545454547,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.3589460000000002,
"min": 1.2868480000000002,
"max": 1.55428,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.002361700490909092,
"min": 0.002361700490909092,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.02597870540000001,
"min": 0.02597870540000001,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.13629108667373657,
"min": 0.13629108667373657,
"max": 0.5466458797454834,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.499202013015747,
"min": 1.499202013015747,
"max": 3.826521158218384,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 984.53125,
"min": 984.53125,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31505.0,
"min": 15984.0,
"max": 31896.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8604313000105321,
"min": -1.0000000521540642,
"max": -0.8604313000105321,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -27.53380160033703,
"min": -29.926401674747467,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8604313000105321,
"min": -1.0000000521540642,
"max": -0.8604313000105321,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -27.53380160033703,
"min": -29.926401674747467,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.6182480880524963,
"min": 1.6182480880524963,
"max": 11.05203421600163,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 51.78393881767988,
"min": 51.78393881767988,
"max": 176.83254745602608,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682274464",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682274671"
},
"total": 206.27008753400014,
"count": 1,
"self": 0.4815255750002052,
"children": {
"run_training.setup": {
"total": 0.18821459499986304,
"count": 1,
"self": 0.18821459499986304
},
"TrainerController.start_learning": {
"total": 205.60034736400007,
"count": 1,
"self": 0.14155843599451146,
"children": {
"TrainerController._reset_env": {
"total": 4.280351689000099,
"count": 1,
"self": 4.280351689000099
},
"TrainerController.advance": {
"total": 201.05487934600524,
"count": 6288,
"self": 0.1638357480042032,
"children": {
"env_step": {
"total": 140.15368671299962,
"count": 6288,
"self": 128.0463472810086,
"children": {
"SubprocessEnvManager._take_step": {
"total": 12.01207622099696,
"count": 6288,
"self": 0.5344480469882456,
"children": {
"TorchPolicy.evaluate": {
"total": 11.477628174008714,
"count": 6281,
"self": 11.477628174008714
}
}
},
"workers": {
"total": 0.09526321099406232,
"count": 6288,
"self": 0.0,
"children": {
"worker_root": {
"total": 205.1043499409866,
"count": 6288,
"is_parallel": true,
"self": 89.41321146598625,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028012989998842386,
"count": 1,
"is_parallel": true,
"self": 0.0009190779999244114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018822209999598272,
"count": 8,
"is_parallel": true,
"self": 0.0018822209999598272
}
}
},
"UnityEnvironment.step": {
"total": 0.05221010299987938,
"count": 1,
"is_parallel": true,
"self": 0.0005575870000029681,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005302889999256877,
"count": 1,
"is_parallel": true,
"self": 0.0005302889999256877
},
"communicator.exchange": {
"total": 0.04929830700007187,
"count": 1,
"is_parallel": true,
"self": 0.04929830700007187
},
"steps_from_proto": {
"total": 0.0018239199998788536,
"count": 1,
"is_parallel": true,
"self": 0.00043421499981377565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001389705000065078,
"count": 8,
"is_parallel": true,
"self": 0.001389705000065078
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 115.69113847500034,
"count": 6287,
"is_parallel": true,
"self": 3.3406498739839208,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.556138471013128,
"count": 6287,
"is_parallel": true,
"self": 2.556138471013128
},
"communicator.exchange": {
"total": 99.62355481899317,
"count": 6287,
"is_parallel": true,
"self": 99.62355481899317
},
"steps_from_proto": {
"total": 10.170795311010124,
"count": 6287,
"is_parallel": true,
"self": 2.23230032399465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.938494987015474,
"count": 50296,
"is_parallel": true,
"self": 7.938494987015474
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 60.73735688500142,
"count": 6288,
"self": 0.19752208400723248,
"children": {
"process_trajectory": {
"total": 10.757370307993824,
"count": 6288,
"self": 10.757370307993824
},
"_update_policy": {
"total": 49.78246449300036,
"count": 32,
"self": 31.788009819000536,
"children": {
"TorchPPOOptimizer.update": {
"total": 17.994454673999826,
"count": 2331,
"self": 17.994454673999826
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0390001534688054e-06,
"count": 1,
"self": 1.0390001534688054e-06
},
"TrainerController._save_models": {
"total": 0.12355685400007133,
"count": 1,
"self": 0.0014182580000579037,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12213859600001342,
"count": 1,
"self": 0.12213859600001342
}
}
}
}
}
}
}