{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.312359094619751,
"min": 0.3076399564743042,
"max": 1.4103870391845703,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9355.779296875,
"min": 9151.884765625,
"max": 42785.5,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5847235321998596,
"min": -0.14529451727867126,
"max": 0.6554327011108398,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.38369750976562,
"min": -34.43479919433594,
"max": 182.86572265625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.02807162143290043,
"min": -0.02807162143290043,
"max": 0.3776421546936035,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.747767448425293,
"min": -7.747767448425293,
"max": 89.50119018554688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06735456386134804,
"min": 0.06499023197878463,
"max": 0.07260351228221687,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9429638940588725,
"min": 0.5081308259747136,
"max": 1.084513437661443,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015652973032307015,
"min": 0.0009207465044709017,
"max": 0.015783666534763242,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2191416224522982,
"min": 0.011048958053650821,
"max": 0.23675499802144861,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.308054706871433e-06,
"min": 7.308054706871433e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010231276589620006,
"min": 0.00010231276589620006,
"max": 0.0035101667299444993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243598571428572,
"min": 0.10243598571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341038000000002,
"min": 1.3886848,
"max": 2.5700555,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025335497285714306,
"min": 0.00025335497285714306,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003546969620000003,
"min": 0.003546969620000003,
"max": 0.11702854445,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014090928249061108,
"min": 0.013619811274111271,
"max": 0.4901733696460724,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19727300107479095,
"min": 0.19727300107479095,
"max": 3.431213617324829,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 334.1304347826087,
"min": 296.7551020408163,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30740.0,
"min": 15984.0,
"max": 34759.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6429977814902317,
"min": -1.0000000521540642,
"max": 1.6711608116159733,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 149.51279811561108,
"min": -30.571601636707783,
"max": 162.91659806668758,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6429977814902317,
"min": -1.0000000521540642,
"max": 1.6711608116159733,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 149.51279811561108,
"min": -30.571601636707783,
"max": 162.91659806668758,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04798116156598553,
"min": 0.04212589473231952,
"max": 9.666238343343139,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.366285702504683,
"min": 4.128337683767313,
"max": 154.65981349349022,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677955155",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677957415"
},
"total": 2260.2522484300007,
"count": 1,
"self": 0.6597295740011759,
"children": {
"run_training.setup": {
"total": 0.11078627299957589,
"count": 1,
"self": 0.11078627299957589
},
"TrainerController.start_learning": {
"total": 2259.481732583,
"count": 1,
"self": 1.3405574429561966,
"children": {
"TrainerController._reset_env": {
"total": 6.035625127999992,
"count": 1,
"self": 6.035625127999992
},
"TrainerController.advance": {
"total": 2252.0112835490436,
"count": 64001,
"self": 1.3642974970434807,
"children": {
"env_step": {
"total": 1525.3255810859855,
"count": 64001,
"self": 1416.4180292928932,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.11524997618744,
"count": 64001,
"self": 4.578147036352675,
"children": {
"TorchPolicy.evaluate": {
"total": 103.53710293983477,
"count": 62555,
"self": 35.40323402802642,
"children": {
"TorchPolicy.sample_actions": {
"total": 68.13386891180835,
"count": 62555,
"self": 68.13386891180835
}
}
}
}
},
"workers": {
"total": 0.7923018169049101,
"count": 64001,
"self": 0.0,
"children": {
"worker_root": {
"total": 2254.8949098070407,
"count": 64001,
"is_parallel": true,
"self": 949.5855280807491,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001997071000005235,
"count": 1,
"is_parallel": true,
"self": 0.0006976089998715906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012994620001336443,
"count": 8,
"is_parallel": true,
"self": 0.0012994620001336443
}
}
},
"UnityEnvironment.step": {
"total": 0.0450351130002673,
"count": 1,
"is_parallel": true,
"self": 0.0004998100012016948,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004357609996077372,
"count": 1,
"is_parallel": true,
"self": 0.0004357609996077372
},
"communicator.exchange": {
"total": 0.042546124999716994,
"count": 1,
"is_parallel": true,
"self": 0.042546124999716994
},
"steps_from_proto": {
"total": 0.0015534169997408753,
"count": 1,
"is_parallel": true,
"self": 0.00040478900154994335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001148627998190932,
"count": 8,
"is_parallel": true,
"self": 0.001148627998190932
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1305.3093817262916,
"count": 64000,
"is_parallel": true,
"self": 30.47806334417055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.245464026982518,
"count": 64000,
"is_parallel": true,
"self": 22.245464026982518
},
"communicator.exchange": {
"total": 1163.5787640421095,
"count": 64000,
"is_parallel": true,
"self": 1163.5787640421095
},
"steps_from_proto": {
"total": 89.00709031302904,
"count": 64000,
"is_parallel": true,
"self": 21.044585022043975,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.96250529098506,
"count": 512000,
"is_parallel": true,
"self": 67.96250529098506
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 725.3214049660146,
"count": 64001,
"self": 2.4746005599463388,
"children": {
"process_trajectory": {
"total": 160.10395018006784,
"count": 64001,
"self": 159.91586251406807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1880876659997739,
"count": 2,
"self": 0.1880876659997739
}
}
},
"_update_policy": {
"total": 562.7428542260004,
"count": 455,
"self": 221.19335779999255,
"children": {
"TorchPPOOptimizer.update": {
"total": 341.54949642600786,
"count": 22803,
"self": 341.54949642600786
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.830000635702163e-07,
"count": 1,
"self": 9.830000635702163e-07
},
"TrainerController._save_models": {
"total": 0.09426548000010371,
"count": 1,
"self": 0.001551786999698379,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09271369300040533,
"count": 1,
"self": 0.09271369300040533
}
}
}
}
}
}
}