{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5763959884643555,
"min": 0.5622408986091614,
"max": 1.3968300819396973,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17245.767578125,
"min": 16806.3671875,
"max": 42374.23828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4049261212348938,
"min": -0.10468528419733047,
"max": 0.4049261212348938,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.90049743652344,
"min": -25.22915267944336,
"max": 106.90049743652344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.24247002601623535,
"min": -0.03663599118590355,
"max": 0.320518434047699,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 64.0120849609375,
"min": -9.48872184753418,
"max": 76.92442321777344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07053798469787104,
"min": 0.06549101879946426,
"max": 0.07320299164937774,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0580697704680655,
"min": 0.48791460004292597,
"max": 1.0600812420986283,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018471352609153682,
"min": 0.00027001568072351827,
"max": 0.018471352609153682,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.27707028913730525,
"min": 0.002970172487958701,
"max": 0.27707028913730525,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.529917490060001e-06,
"min": 7.529917490060001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011294876235090001,
"min": 0.00011294876235090001,
"max": 0.0033822524725826,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250994000000001,
"min": 0.10250994000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376491,
"min": 1.3691136000000002,
"max": 2.5274174000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000260743006,
"min": 0.000260743006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00391114509,
"min": 0.00391114509,
"max": 0.11276899825999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010115506127476692,
"min": 0.010115506127476692,
"max": 0.5617181658744812,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15173259377479553,
"min": 0.14311164617538452,
"max": 3.9320273399353027,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 516.8524590163935,
"min": 483.87301587301585,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31528.0,
"min": 15984.0,
"max": 33281.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3557233028113842,
"min": -1.0000000521540642,
"max": 1.3573428334461317,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 81.34339816868305,
"min": -32.000001668930054,
"max": 85.5125985071063,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3557233028113842,
"min": -1.0000000521540642,
"max": 1.3573428334461317,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 81.34339816868305,
"min": -32.000001668930054,
"max": 85.5125985071063,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05430124706823942,
"min": 0.05221880353547888,
"max": 11.78470384888351,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.258074824094365,
"min": 3.1267407851992175,
"max": 188.55526158213615,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674741185",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674743275"
},
"total": 2089.860621701,
"count": 1,
"self": 0.42422997300036513,
"children": {
"run_training.setup": {
"total": 0.10783573899993826,
"count": 1,
"self": 0.10783573899993826
},
"TrainerController.start_learning": {
"total": 2089.328555989,
"count": 1,
"self": 1.196517516026688,
"children": {
"TrainerController._reset_env": {
"total": 6.353618767999933,
"count": 1,
"self": 6.353618767999933
},
"TrainerController.advance": {
"total": 2081.6891635539732,
"count": 63496,
"self": 1.2519003628840437,
"children": {
"env_step": {
"total": 1393.8049555310583,
"count": 63496,
"self": 1289.479772422994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.56256311507036,
"count": 63496,
"self": 4.362399139105719,
"children": {
"TorchPolicy.evaluate": {
"total": 99.20016397596464,
"count": 62572,
"self": 33.44723045392652,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.75293352203812,
"count": 62572,
"self": 65.75293352203812
}
}
}
}
},
"workers": {
"total": 0.7626199929939048,
"count": 63496,
"self": 0.0,
"children": {
"worker_root": {
"total": 2084.90530761505,
"count": 63496,
"is_parallel": true,
"self": 895.8129332220565,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001924998000049527,
"count": 1,
"is_parallel": true,
"self": 0.0007326350000766979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011923629999728291,
"count": 8,
"is_parallel": true,
"self": 0.0011923629999728291
}
}
},
"UnityEnvironment.step": {
"total": 0.04960541400009788,
"count": 1,
"is_parallel": true,
"self": 0.0004935540000587935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004430670001056569,
"count": 1,
"is_parallel": true,
"self": 0.0004430670001056569
},
"communicator.exchange": {
"total": 0.04697357700001703,
"count": 1,
"is_parallel": true,
"self": 0.04697357700001703
},
"steps_from_proto": {
"total": 0.0016952159999163996,
"count": 1,
"is_parallel": true,
"self": 0.00044517299966173596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012500430002546636,
"count": 8,
"is_parallel": true,
"self": 0.0012500430002546636
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.0923743929934,
"count": 63495,
"is_parallel": true,
"self": 28.776364771000317,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.795484361053468,
"count": 63495,
"is_parallel": true,
"self": 22.795484361053468
},
"communicator.exchange": {
"total": 1034.4508777619567,
"count": 63495,
"is_parallel": true,
"self": 1034.4508777619567
},
"steps_from_proto": {
"total": 103.06964749898293,
"count": 63495,
"is_parallel": true,
"self": 21.90785651994952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.16179097903341,
"count": 507960,
"is_parallel": true,
"self": 81.16179097903341
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 686.6323076600306,
"count": 63496,
"self": 2.1705349660219326,
"children": {
"process_trajectory": {
"total": 147.64618364300418,
"count": 63496,
"self": 147.45648520600412,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18969843700006095,
"count": 2,
"self": 0.18969843700006095
}
}
},
"_update_policy": {
"total": 536.8155890510045,
"count": 440,
"self": 202.54091605095505,
"children": {
"TorchPPOOptimizer.update": {
"total": 334.27467300004946,
"count": 22785,
"self": 334.27467300004946
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1649999578366987e-06,
"count": 1,
"self": 1.1649999578366987e-06
},
"TrainerController._save_models": {
"total": 0.08925498600001447,
"count": 1,
"self": 0.0014598959996874328,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08779509000032704,
"count": 1,
"self": 0.08779509000032704
}
}
}
}
}
}
}