{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37292224168777466,
"min": 0.37246620655059814,
"max": 1.3754805326461792,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11050.431640625,
"min": 11050.431640625,
"max": 41726.578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989955.0,
"min": 29982.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989955.0,
"min": 29982.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48612505197525024,
"min": -0.13869871199131012,
"max": 0.6133142709732056,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 134.17051696777344,
"min": -33.010292053222656,
"max": 172.95462036132812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016411541029810905,
"min": -0.008420317433774471,
"max": 0.5284061431884766,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.529585361480713,
"min": -2.324007511138916,
"max": 125.76065826416016,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07120527930357833,
"min": 0.06547216631727115,
"max": 0.07188889242851729,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9968739102500966,
"min": 0.49763262148407267,
"max": 1.0783333864277593,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016681696769234554,
"min": 0.0016673494295696856,
"max": 0.019088655244905526,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23354375476928377,
"min": 0.016673494295696856,
"max": 0.26853223087339384,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.538840344228573e-06,
"min": 7.538840344228573e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010554376481920003,
"min": 0.00010554376481920003,
"max": 0.0037584553471815994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025129142857143,
"min": 0.1025129142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351808000000001,
"min": 1.3886848,
"max": 2.6528184000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026104013714285716,
"min": 0.00026104013714285716,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036545619200000004,
"min": 0.0036545619200000004,
"max": 0.12529655816,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011791789904236794,
"min": 0.011711543425917625,
"max": 0.630494236946106,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1650850623846054,
"min": 0.16396160423755646,
"max": 4.413459777832031,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 360.2967032967033,
"min": 284.24242424242425,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32787.0,
"min": 16781.0,
"max": 33477.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4379644241597918,
"min": -0.9999806972280625,
"max": 1.675345442361302,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 129.41679817438126,
"min": -30.99940161406994,
"max": 165.85919879376888,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4379644241597918,
"min": -0.9999806972280625,
"max": 1.675345442361302,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 129.41679817438126,
"min": -30.99940161406994,
"max": 165.85919879376888,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04431507484761015,
"min": 0.034351703864604594,
"max": 13.266721412539482,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9883567362849135,
"min": 3.4008186825958546,
"max": 225.5342640131712,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676897877",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676900260"
},
"total": 2383.227483924,
"count": 1,
"self": 0.4747036339999795,
"children": {
"run_training.setup": {
"total": 0.1049105930001133,
"count": 1,
"self": 0.1049105930001133
},
"TrainerController.start_learning": {
"total": 2382.647869697,
"count": 1,
"self": 1.4344966959952217,
"children": {
"TrainerController._reset_env": {
"total": 6.102086897000163,
"count": 1,
"self": 6.102086897000163
},
"TrainerController.advance": {
"total": 2375.026322136004,
"count": 64079,
"self": 1.5127171089548028,
"children": {
"env_step": {
"total": 1638.3365319040342,
"count": 64079,
"self": 1521.6726528949287,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.82173966606433,
"count": 64079,
"self": 4.698841425038609,
"children": {
"TorchPolicy.evaluate": {
"total": 111.12289824102572,
"count": 62553,
"self": 37.362957566052046,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.75994067497368,
"count": 62553,
"self": 73.75994067497368
}
}
}
}
},
"workers": {
"total": 0.8421393430412536,
"count": 64079,
"self": 0.0,
"children": {
"worker_root": {
"total": 2376.7403387370387,
"count": 64079,
"is_parallel": true,
"self": 974.0269204640201,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019297870001082629,
"count": 1,
"is_parallel": true,
"self": 0.0007733360002930567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011564509998152062,
"count": 8,
"is_parallel": true,
"self": 0.0011564509998152062
}
}
},
"UnityEnvironment.step": {
"total": 0.050934755999833214,
"count": 1,
"is_parallel": true,
"self": 0.0005380560000958212,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005216619999828254,
"count": 1,
"is_parallel": true,
"self": 0.0005216619999828254
},
"communicator.exchange": {
"total": 0.04811127899984058,
"count": 1,
"is_parallel": true,
"self": 0.04811127899984058
},
"steps_from_proto": {
"total": 0.001763758999913989,
"count": 1,
"is_parallel": true,
"self": 0.00046456500012936885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012991939997846202,
"count": 8,
"is_parallel": true,
"self": 0.0012991939997846202
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1402.7134182730185,
"count": 64078,
"is_parallel": true,
"self": 31.505873090928844,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.849377687031392,
"count": 64078,
"is_parallel": true,
"self": 23.849377687031392
},
"communicator.exchange": {
"total": 1240.398296004975,
"count": 64078,
"is_parallel": true,
"self": 1240.398296004975
},
"steps_from_proto": {
"total": 106.95987149008329,
"count": 64078,
"is_parallel": true,
"self": 25.72488716393991,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.23498432614338,
"count": 512624,
"is_parallel": true,
"self": 81.23498432614338
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 735.1770731230154,
"count": 64079,
"self": 2.666522053016706,
"children": {
"process_trajectory": {
"total": 169.32584390099873,
"count": 64079,
"self": 169.14003445499839,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18580944600034854,
"count": 2,
"self": 0.18580944600034854
}
}
},
"_update_policy": {
"total": 563.1847071689999,
"count": 458,
"self": 215.75514361703677,
"children": {
"TorchPPOOptimizer.update": {
"total": 347.42956355196316,
"count": 22785,
"self": 347.42956355196316
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.73000169324223e-07,
"count": 1,
"self": 9.73000169324223e-07
},
"TrainerController._save_models": {
"total": 0.08496299499984161,
"count": 1,
"self": 0.0014134480002212513,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08354954699962036,
"count": 1,
"self": 0.08354954699962036
}
}
}
}
}
}
}