{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5694612860679626,
"min": 0.5239174365997314,
"max": 1.4975794553756714,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17320.734375,
"min": 15742.671875,
"max": 45430.5703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989951.0,
"min": 29952.0,
"max": 989951.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989951.0,
"min": 29952.0,
"max": 989951.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1762009859085083,
"min": -0.09496651589870453,
"max": 0.18040089309215546,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 44.05024719238281,
"min": -22.886930465698242,
"max": 45.82182693481445,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003915366251021624,
"min": 0.003915366251021624,
"max": 0.2877003252506256,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9788415431976318,
"min": 0.9788415431976318,
"max": 68.18497467041016,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06870179549350366,
"min": 0.06579528620452685,
"max": 0.07381045444939757,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9618251369090514,
"min": 0.4813551055448098,
"max": 1.0765650314763966,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007476409122731439,
"min": 0.000584549912423296,
"max": 0.007991574479092378,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10466972771824015,
"min": 0.004676399299386368,
"max": 0.11272402751880387,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.429447523549999e-06,
"min": 7.429447523549999e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010401226532969999,
"min": 0.00010401226532969999,
"max": 0.0033826178724608007,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247645000000001,
"min": 0.10247645000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346703,
"min": 1.3691136000000002,
"max": 2.5275392000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000257397355,
"min": 0.000257397355,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036035629700000003,
"min": 0.0036035629700000003,
"max": 0.11278116608000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012318690307438374,
"min": 0.012318690307438374,
"max": 0.29761990904808044,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17246165871620178,
"min": 0.17246165871620178,
"max": 2.0833394527435303,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 709.5714285714286,
"min": 683.0952380952381,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29802.0,
"min": 15984.0,
"max": 34000.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6712094827422074,
"min": -1.0000000521540642,
"max": 0.6712094827422074,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 28.19079827517271,
"min": -32.000001668930054,
"max": 28.98939846456051,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6712094827422074,
"min": -1.0000000521540642,
"max": 0.6712094827422074,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 28.19079827517271,
"min": -32.000001668930054,
"max": 28.98939846456051,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08912616325653203,
"min": 0.08912616325653203,
"max": 5.442231884226203,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.743298856774345,
"min": 3.743298856774345,
"max": 87.07571014761925,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673688121",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673690153"
},
"total": 2031.9834597520003,
"count": 1,
"self": 0.4895223400003488,
"children": {
"run_training.setup": {
"total": 0.10904394899989711,
"count": 1,
"self": 0.10904394899989711
},
"TrainerController.start_learning": {
"total": 2031.384893463,
"count": 1,
"self": 1.2080754300131957,
"children": {
"TrainerController._reset_env": {
"total": 6.6893554529997346,
"count": 1,
"self": 6.6893554529997346
},
"TrainerController.advance": {
"total": 2023.3951446569877,
"count": 63238,
"self": 1.2739905549856303,
"children": {
"env_step": {
"total": 1330.1659688840132,
"count": 63238,
"self": 1224.3303435270568,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.0760897539999,
"count": 63238,
"self": 4.238834321032755,
"children": {
"TorchPolicy.evaluate": {
"total": 100.83725543296714,
"count": 62557,
"self": 34.02560253702313,
"children": {
"TorchPolicy.sample_actions": {
"total": 66.81165289594401,
"count": 62557,
"self": 66.81165289594401
}
}
}
}
},
"workers": {
"total": 0.7595356029564755,
"count": 63238,
"self": 0.0,
"children": {
"worker_root": {
"total": 2026.799057861008,
"count": 63238,
"is_parallel": true,
"self": 900.8569802030834,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019359999996595434,
"count": 1,
"is_parallel": true,
"self": 0.0006426259992622363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001293374000397307,
"count": 8,
"is_parallel": true,
"self": 0.001293374000397307
}
}
},
"UnityEnvironment.step": {
"total": 0.04578937899987068,
"count": 1,
"is_parallel": true,
"self": 0.0005530349999389728,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045893500009697163,
"count": 1,
"is_parallel": true,
"self": 0.00045893500009697163
},
"communicator.exchange": {
"total": 0.04317329900004552,
"count": 1,
"is_parallel": true,
"self": 0.04317329900004552
},
"steps_from_proto": {
"total": 0.001604109999789216,
"count": 1,
"is_parallel": true,
"self": 0.0004367619994809502,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001167348000308266,
"count": 8,
"is_parallel": true,
"self": 0.001167348000308266
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1125.9420776579245,
"count": 63237,
"is_parallel": true,
"self": 28.479583087914307,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.867542925999714,
"count": 63237,
"is_parallel": true,
"self": 22.867542925999714
},
"communicator.exchange": {
"total": 972.798024445096,
"count": 63237,
"is_parallel": true,
"self": 972.798024445096
},
"steps_from_proto": {
"total": 101.79692719891455,
"count": 63237,
"is_parallel": true,
"self": 21.999529909985085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.79739728892946,
"count": 505896,
"is_parallel": true,
"self": 79.79739728892946
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 691.9551852179889,
"count": 63238,
"self": 2.223274586023763,
"children": {
"process_trajectory": {
"total": 147.74373264096585,
"count": 63238,
"self": 147.54920977596612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19452286499972615,
"count": 2,
"self": 0.19452286499972615
}
}
},
"_update_policy": {
"total": 541.9881779909992,
"count": 435,
"self": 209.78139281300673,
"children": {
"TorchPPOOptimizer.update": {
"total": 332.2067851779925,
"count": 22845,
"self": 332.2067851779925
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.799996405490674e-07,
"count": 1,
"self": 9.799996405490674e-07
},
"TrainerController._save_models": {
"total": 0.09231694299978699,
"count": 1,
"self": 0.0014531990000250516,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09086374399976194,
"count": 1,
"self": 0.09086374399976194
}
}
}
}
}
}
}