{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.449701726436615,
"min": 0.449701726436615,
"max": 1.4280096292495728,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13634.9560546875,
"min": 13634.9560546875,
"max": 43320.1015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989928.0,
"min": 29952.0,
"max": 989928.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40966179966926575,
"min": -0.09639322012662888,
"max": 0.4709737002849579,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 110.60868835449219,
"min": -23.327159881591797,
"max": 127.05229187011719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004913768731057644,
"min": -0.007100201211869717,
"max": 0.3013646900653839,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.3267176151275635,
"min": -1.874453067779541,
"max": 71.42343139648438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0681662009905858,
"min": 0.06526270886739191,
"max": 0.07360263431663408,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9543268138682013,
"min": 0.5011534766152222,
"max": 1.070489668144671,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014694889709398692,
"min": 0.0004235745644795499,
"max": 0.014769917707044873,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20572845593158168,
"min": 0.005930043902713698,
"max": 0.20677884789862822,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4118332437071475e-06,
"min": 7.4118332437071475e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010376566541190007,
"min": 0.00010376566541190007,
"max": 0.0036322630892457,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247057857142859,
"min": 0.10247057857142859,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345881000000003,
"min": 1.3886848,
"max": 2.6107543000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025681079928571444,
"min": 0.00025681079928571444,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035953511900000024,
"min": 0.0035953511900000024,
"max": 0.12109435457,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009917506016790867,
"min": 0.009917506016790867,
"max": 0.4148555099964142,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13884508609771729,
"min": 0.13884508609771729,
"max": 2.9039885997772217,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 404.85135135135135,
"min": 394.23376623376623,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29959.0,
"min": 15984.0,
"max": 32877.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4058999721262906,
"min": -1.0000000521540642,
"max": 1.5017131285643892,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.0365979373455,
"min": -30.666801653802395,
"max": 114.13019777089357,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4058999721262906,
"min": -1.0000000521540642,
"max": 1.5017131285643892,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.0365979373455,
"min": -30.666801653802395,
"max": 114.13019777089357,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04125636693670704,
"min": 0.04125636693670704,
"max": 8.458495073020458,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.052971153316321,
"min": 3.052971153316321,
"max": 135.33592116832733,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1668153153",
"python_version": "3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:04:10) \n[GCC 10.3.0]",
"command_line_arguments": "/opt/conda/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.3",
"end_time_seconds": "1668156146"
},
"total": 2993.159958376549,
"count": 1,
"self": 0.8303574761375785,
"children": {
"run_training.setup": {
"total": 0.17639728263020515,
"count": 1,
"self": 0.17639728263020515
},
"TrainerController.start_learning": {
"total": 2992.1532036177814,
"count": 1,
"self": 1.105168977752328,
"children": {
"TrainerController._reset_env": {
"total": 5.922881801612675,
"count": 1,
"self": 5.922881801612675
},
"TrainerController.advance": {
"total": 2984.981429251842,
"count": 63575,
"self": 1.0910676680505276,
"children": {
"env_step": {
"total": 1969.355714276433,
"count": 63575,
"self": 1874.520522115752,
"children": {
"SubprocessEnvManager._take_step": {
"total": 94.12087427638471,
"count": 63575,
"self": 3.902109225280583,
"children": {
"TorchPolicy.evaluate": {
"total": 90.21876505110413,
"count": 62564,
"self": 30.958266763947904,
"children": {
"TorchPolicy.sample_actions": {
"total": 59.260498287156224,
"count": 62564,
"self": 59.260498287156224
}
}
}
}
},
"workers": {
"total": 0.7143178842961788,
"count": 63575,
"self": 0.0,
"children": {
"worker_root": {
"total": 2987.8581345360726,
"count": 63575,
"is_parallel": true,
"self": 1236.599324947223,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0045655835419893265,
"count": 1,
"is_parallel": true,
"self": 0.0014189807698130608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031466027721762657,
"count": 8,
"is_parallel": true,
"self": 0.0031466027721762657
}
}
},
"UnityEnvironment.step": {
"total": 0.06641075946390629,
"count": 1,
"is_parallel": true,
"self": 0.0008928673341870308,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007423553615808487,
"count": 1,
"is_parallel": true,
"self": 0.0007423553615808487
},
"communicator.exchange": {
"total": 0.062159025110304356,
"count": 1,
"is_parallel": true,
"self": 0.062159025110304356
},
"steps_from_proto": {
"total": 0.002616511657834053,
"count": 1,
"is_parallel": true,
"self": 0.0006196461617946625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019968654960393906,
"count": 8,
"is_parallel": true,
"self": 0.0019968654960393906
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1751.2588095888495,
"count": 63574,
"is_parallel": true,
"self": 52.18478089570999,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.879492323845625,
"count": 63574,
"is_parallel": true,
"self": 36.879492323845625
},
"communicator.exchange": {
"total": 1508.8757964856923,
"count": 63574,
"is_parallel": true,
"self": 1508.8757964856923
},
"steps_from_proto": {
"total": 153.31873988360167,
"count": 63574,
"is_parallel": true,
"self": 34.31051422748715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 119.00822565611452,
"count": 508592,
"is_parallel": true,
"self": 119.00822565611452
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1014.5346473073587,
"count": 63575,
"self": 2.1868025586009026,
"children": {
"process_trajectory": {
"total": 130.35874030645937,
"count": 63575,
"self": 130.04567389562726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31306641083210707,
"count": 2,
"self": 0.31306641083210707
}
}
},
"_update_policy": {
"total": 881.9891044422984,
"count": 449,
"self": 215.5706182140857,
"children": {
"TorchPPOOptimizer.update": {
"total": 666.4184862282127,
"count": 22839,
"self": 666.4184862282127
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.927898645401e-07,
"count": 1,
"self": 9.927898645401e-07
},
"TrainerController._save_models": {
"total": 0.14372259378433228,
"count": 1,
"self": 0.008248788304626942,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13547380547970533,
"count": 1,
"self": 0.13547380547970533
}
}
}
}
}
}
}