{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5162696242332458,
"min": 0.5065971612930298,
"max": 1.4310581684112549,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15620.2529296875,
"min": 15136.185546875,
"max": 43412.58203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47977665066719055,
"min": -0.08716558665037155,
"max": 0.47977665066719055,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 128.58013916015625,
"min": -20.919740676879883,
"max": 131.5968017578125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003259980818256736,
"min": 0.000624600681476295,
"max": 0.2075844258069992,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.8736748695373535,
"min": 0.16426998376846313,
"max": 49.82026290893555,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06987569819016028,
"min": 0.06641642811935765,
"max": 0.07296061889095302,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9782597746622438,
"min": 0.4868670316182556,
"max": 1.0811522480216809,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015602979094962523,
"min": 0.0008585261054808566,
"max": 0.016056296210860767,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21844170732947532,
"min": 0.006429781960592329,
"max": 0.22478814695205077,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.508540354328572e-06,
"min": 7.508540354328572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010511956496060001,
"min": 0.00010511956496060001,
"max": 0.0032579493140169995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250281428571428,
"min": 0.10250281428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350394,
"min": 1.3691136000000002,
"max": 2.4859830000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600311471428572,
"min": 0.0002600311471428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00364043606,
"min": 0.00364043606,
"max": 0.10862970170000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011252226307988167,
"min": 0.011252226307988167,
"max": 0.38056954741477966,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15753117203712463,
"min": 0.15753117203712463,
"max": 2.663986921310425,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 389.875,
"min": 369.109756097561,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28071.0,
"min": 15984.0,
"max": 34248.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.443399986045228,
"min": -1.0000000521540642,
"max": 1.606492668753717,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 103.92479899525642,
"min": -32.000001668930054,
"max": 131.7323988378048,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.443399986045228,
"min": -1.0000000521540642,
"max": 1.606492668753717,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 103.92479899525642,
"min": -32.000001668930054,
"max": 131.7323988378048,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04558100700220772,
"min": 0.04527290439257674,
"max": 8.092846375890076,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.281832504158956,
"min": 3.281832504158956,
"max": 129.48554201424122,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679570433",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramidsss --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679572593"
},
"total": 2159.917392294,
"count": 1,
"self": 0.48542954399954397,
"children": {
"run_training.setup": {
"total": 0.16986497700003156,
"count": 1,
"self": 0.16986497700003156
},
"TrainerController.start_learning": {
"total": 2159.2620977730003,
"count": 1,
"self": 1.4806639760117832,
"children": {
"TrainerController._reset_env": {
"total": 9.095539525999982,
"count": 1,
"self": 9.095539525999982
},
"TrainerController.advance": {
"total": 2148.5884939039884,
"count": 63623,
"self": 1.575063752998176,
"children": {
"env_step": {
"total": 1513.0018573719872,
"count": 63623,
"self": 1398.312782671018,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.72518519099054,
"count": 63623,
"self": 5.127331352014039,
"children": {
"TorchPolicy.evaluate": {
"total": 108.5978538389765,
"count": 62560,
"self": 108.5978538389765
}
}
},
"workers": {
"total": 0.9638895099785714,
"count": 63623,
"self": 0.0,
"children": {
"worker_root": {
"total": 2154.0001449970337,
"count": 63623,
"is_parallel": true,
"self": 882.4952070300244,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005960800999957883,
"count": 1,
"is_parallel": true,
"self": 0.0038250769998739997,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002135724000083883,
"count": 8,
"is_parallel": true,
"self": 0.002135724000083883
}
}
},
"UnityEnvironment.step": {
"total": 0.05223239399998647,
"count": 1,
"is_parallel": true,
"self": 0.0005558769998970092,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004970600000433478,
"count": 1,
"is_parallel": true,
"self": 0.0004970600000433478
},
"communicator.exchange": {
"total": 0.04946768100001009,
"count": 1,
"is_parallel": true,
"self": 0.04946768100001009
},
"steps_from_proto": {
"total": 0.001711776000036025,
"count": 1,
"is_parallel": true,
"self": 0.0004149399999278103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012968360001082146,
"count": 8,
"is_parallel": true,
"self": 0.0012968360001082146
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1271.5049379670093,
"count": 63622,
"is_parallel": true,
"self": 32.04223340205226,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.09343461400897,
"count": 63622,
"is_parallel": true,
"self": 24.09343461400897
},
"communicator.exchange": {
"total": 1119.1327262899433,
"count": 63622,
"is_parallel": true,
"self": 1119.1327262899433
},
"steps_from_proto": {
"total": 96.23654366100459,
"count": 63622,
"is_parallel": true,
"self": 21.17441922804045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.06212443296414,
"count": 508976,
"is_parallel": true,
"self": 75.06212443296414
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 634.0115727790029,
"count": 63623,
"self": 2.915581818045098,
"children": {
"process_trajectory": {
"total": 123.76911720496037,
"count": 63623,
"self": 123.47360427996045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2955129249999118,
"count": 2,
"self": 0.2955129249999118
}
}
},
"_update_policy": {
"total": 507.3268737559975,
"count": 446,
"self": 323.3955655399922,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.93130821600528,
"count": 22824,
"self": 183.93130821600528
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1940001058974303e-06,
"count": 1,
"self": 1.1940001058974303e-06
},
"TrainerController._save_models": {
"total": 0.09739917300021261,
"count": 1,
"self": 0.0014674110002488305,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09593176199996378,
"count": 1,
"self": 0.09593176199996378
}
}
}
}
}
}
}