{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5770847201347351,
"min": 0.5660080313682556,
"max": 1.3883445262908936,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17275.607421875,
"min": 16942.95703125,
"max": 42116.8203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989940.0,
"min": 29952.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989940.0,
"min": 29952.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.07570744305849075,
"min": -0.09891591221094131,
"max": 0.1704883724451065,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 18.699737548828125,
"min": -23.739818572998047,
"max": 42.792579650878906,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.029485192149877548,
"min": 0.010363946668803692,
"max": 0.4153425693511963,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.282842636108398,
"min": 2.580622673034668,
"max": 98.43618774414062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06801297728590867,
"min": 0.06449306839342517,
"max": 0.07312330311080391,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0201946592886302,
"min": 0.49027080017214925,
"max": 1.02248844557371,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0047914397175416856,
"min": 0.00016007385935074597,
"max": 0.006782654352023468,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.07187159576312528,
"min": 0.0020809601715596976,
"max": 0.09495716092832855,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.555957481380004e-06,
"min": 7.555957481380004e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011333936222070006,
"min": 0.00011333936222070006,
"max": 0.0035079773306742994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251862000000002,
"min": 0.10251862000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377793000000002,
"min": 1.3886848,
"max": 2.5693257000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002616101380000001,
"min": 0.0002616101380000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003924152070000002,
"min": 0.003924152070000002,
"max": 0.11695563743000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01113695465028286,
"min": 0.010713638737797737,
"max": 0.46452635526657104,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16705432534217834,
"min": 0.14999094605445862,
"max": 3.2516844272613525,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 757.0833333333334,
"min": 697.5581395348837,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27255.0,
"min": 15984.0,
"max": 33096.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.209032393186479,
"min": -1.0000000521540642,
"max": 0.5816487462782278,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 7.734198547899723,
"min": -31.99640166759491,
"max": 23.84759859740734,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.209032393186479,
"min": -1.0000000521540642,
"max": 0.5816487462782278,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 7.734198547899723,
"min": -31.99640166759491,
"max": 23.84759859740734,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08833901416762052,
"min": 0.08697170725592025,
"max": 9.378143298439682,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2685435242019594,
"min": 3.2685435242019594,
"max": 150.0502927750349,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673647380",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673649356"
},
"total": 1976.272478992,
"count": 1,
"self": 0.44335348800018437,
"children": {
"run_training.setup": {
"total": 0.10203313600004549,
"count": 1,
"self": 0.10203313600004549
},
"TrainerController.start_learning": {
"total": 1975.7270923679998,
"count": 1,
"self": 1.4683355319552902,
"children": {
"TrainerController._reset_env": {
"total": 6.649732567000001,
"count": 1,
"self": 6.649732567000001
},
"TrainerController.advance": {
"total": 1967.5087222200443,
"count": 63254,
"self": 1.573896195128782,
"children": {
"env_step": {
"total": 1289.8440378470295,
"count": 63254,
"self": 1173.714156854031,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.1585188869667,
"count": 63254,
"self": 4.595567188004452,
"children": {
"TorchPolicy.evaluate": {
"total": 110.56295169896225,
"count": 62585,
"self": 36.8060076708889,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.75694402807335,
"count": 62585,
"self": 73.75694402807335
}
}
}
}
},
"workers": {
"total": 0.9713621060318474,
"count": 63254,
"self": 0.0,
"children": {
"worker_root": {
"total": 1970.7505606779825,
"count": 63254,
"is_parallel": true,
"self": 906.5790841009145,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016638829999919835,
"count": 1,
"is_parallel": true,
"self": 0.0005936609998116182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010702220001803653,
"count": 8,
"is_parallel": true,
"self": 0.0010702220001803653
}
}
},
"UnityEnvironment.step": {
"total": 0.044372975999976916,
"count": 1,
"is_parallel": true,
"self": 0.0004954879998422257,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004397160000735312,
"count": 1,
"is_parallel": true,
"self": 0.0004397160000735312
},
"communicator.exchange": {
"total": 0.04183798700000807,
"count": 1,
"is_parallel": true,
"self": 0.04183798700000807
},
"steps_from_proto": {
"total": 0.0015997850000530889,
"count": 1,
"is_parallel": true,
"self": 0.00041227700012314017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011875079999299487,
"count": 8,
"is_parallel": true,
"self": 0.0011875079999299487
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1064.171476577068,
"count": 63253,
"is_parallel": true,
"self": 29.148565656089204,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.116438092022463,
"count": 63253,
"is_parallel": true,
"self": 23.116438092022463
},
"communicator.exchange": {
"total": 909.2082266200084,
"count": 63253,
"is_parallel": true,
"self": 909.2082266200084
},
"steps_from_proto": {
"total": 102.69824620894792,
"count": 63253,
"is_parallel": true,
"self": 23.200367395863623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.4978788130843,
"count": 506024,
"is_parallel": true,
"self": 79.4978788130843
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 676.090788177886,
"count": 63254,
"self": 2.7699347868474433,
"children": {
"process_trajectory": {
"total": 146.58389627902875,
"count": 63254,
"self": 146.26734279302877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31655348599997524,
"count": 2,
"self": 0.31655348599997524
}
}
},
"_update_policy": {
"total": 526.7369571120098,
"count": 452,
"self": 202.94920087802961,
"children": {
"TorchPPOOptimizer.update": {
"total": 323.7877562339802,
"count": 22758,
"self": 323.7877562339802
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2240002433827613e-06,
"count": 1,
"self": 1.2240002433827613e-06
},
"TrainerController._save_models": {
"total": 0.10030082499997661,
"count": 1,
"self": 0.001853711999956431,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09844711300002018,
"count": 1,
"self": 0.09844711300002018
}
}
}
}
}
}
}