{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5037152171134949,
"min": 0.497599333524704,
"max": 1.4263094663619995,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15232.34765625,
"min": 15015.5576171875,
"max": 43268.5234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989967.0,
"min": 29952.0,
"max": 989967.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1878625750541687,
"min": -0.10486308485269547,
"max": 0.1878625750541687,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.71709442138672,
"min": -25.376867294311523,
"max": 47.71709442138672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018211640417575836,
"min": 0.018211640417575836,
"max": 0.2742055058479309,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.625756740570068,
"min": 4.623575687408447,
"max": 65.80931854248047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06701420206532237,
"min": 0.06459005830635177,
"max": 0.0731800542176607,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9381988289145131,
"min": 0.4887519262442714,
"max": 1.0900332310896677,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010057301091278314,
"min": 8.049325493823174e-05,
"max": 0.010057301091278314,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1408022152778964,
"min": 0.0010464123141970126,
"max": 0.1415116844503192,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.506311783642859e-06,
"min": 7.506311783642859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010508836497100002,
"min": 0.00010508836497100002,
"max": 0.0030208178930608,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250207142857146,
"min": 0.10250207142857146,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350290000000003,
"min": 1.3691136000000002,
"max": 2.3069392000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002599569357142857,
"min": 0.0002599569357142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036393971000000004,
"min": 0.0036393971000000004,
"max": 0.10072322608,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01698652282357216,
"min": 0.01698652282357216,
"max": 0.4382668435573578,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23781131207942963,
"min": 0.23781131207942963,
"max": 3.0678679943084717,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 613.5510204081633,
"min": 613.5510204081633,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30064.0,
"min": 15984.0,
"max": 33464.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9373101599666537,
"min": -1.0000000521540642,
"max": 0.9373101599666537,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 45.92819783836603,
"min": -32.000001668930054,
"max": 45.92819783836603,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9373101599666537,
"min": -1.0000000521540642,
"max": 0.9373101599666537,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 45.92819783836603,
"min": -32.000001668930054,
"max": 45.92819783836603,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10800737811119429,
"min": 0.10800737811119429,
"max": 8.478829085826874,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.29236152744852,
"min": 5.267810908728279,
"max": 135.66126537322998,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674486013",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674487946"
},
"total": 1932.461143146,
"count": 1,
"self": 0.42453969300004246,
"children": {
"run_training.setup": {
"total": 0.11299661699990793,
"count": 1,
"self": 0.11299661699990793
},
"TrainerController.start_learning": {
"total": 1931.923606836,
"count": 1,
"self": 1.2721438999326438,
"children": {
"TrainerController._reset_env": {
"total": 6.2162329760001285,
"count": 1,
"self": 6.2162329760001285
},
"TrainerController.advance": {
"total": 1924.3424215630675,
"count": 63171,
"self": 1.2553738860458452,
"children": {
"env_step": {
"total": 1273.0521833519924,
"count": 63171,
"self": 1167.9691354759245,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.29401548007536,
"count": 63171,
"self": 4.272069541085784,
"children": {
"TorchPolicy.evaluate": {
"total": 100.02194593898957,
"count": 62572,
"self": 33.32375104005246,
"children": {
"TorchPolicy.sample_actions": {
"total": 66.69819489893712,
"count": 62572,
"self": 66.69819489893712
}
}
}
}
},
"workers": {
"total": 0.7890323959925354,
"count": 63171,
"self": 0.0,
"children": {
"worker_root": {
"total": 1927.576609894999,
"count": 63171,
"is_parallel": true,
"self": 856.6135787110325,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017454440001074545,
"count": 1,
"is_parallel": true,
"self": 0.000617446999740423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011279970003670314,
"count": 8,
"is_parallel": true,
"self": 0.0011279970003670314
}
}
},
"UnityEnvironment.step": {
"total": 0.04241541300007157,
"count": 1,
"is_parallel": true,
"self": 0.0004876110001532652,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041146399985336757,
"count": 1,
"is_parallel": true,
"self": 0.00041146399985336757
},
"communicator.exchange": {
"total": 0.03996539300010227,
"count": 1,
"is_parallel": true,
"self": 0.03996539300010227
},
"steps_from_proto": {
"total": 0.0015509449999626668,
"count": 1,
"is_parallel": true,
"self": 0.0004073750003499299,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011435699996127369,
"count": 8,
"is_parallel": true,
"self": 0.0011435699996127369
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1070.9630311839665,
"count": 63170,
"is_parallel": true,
"self": 27.930029526866747,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.987605330059523,
"count": 63170,
"is_parallel": true,
"self": 21.987605330059523
},
"communicator.exchange": {
"total": 922.0458660540389,
"count": 63170,
"is_parallel": true,
"self": 922.0458660540389
},
"steps_from_proto": {
"total": 98.99953027300126,
"count": 63170,
"is_parallel": true,
"self": 21.75017382489773,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.24935644810353,
"count": 505360,
"is_parallel": true,
"self": 77.24935644810353
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.0348643250293,
"count": 63171,
"self": 2.1817376840110683,
"children": {
"process_trajectory": {
"total": 141.18130671101608,
"count": 63171,
"self": 140.99395276901578,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18735394200029987,
"count": 2,
"self": 0.18735394200029987
}
}
},
"_update_policy": {
"total": 506.67181993000213,
"count": 433,
"self": 189.15585672501538,
"children": {
"TorchPPOOptimizer.update": {
"total": 317.51596320498675,
"count": 22893,
"self": 317.51596320498675
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1639999684120994e-06,
"count": 1,
"self": 1.1639999684120994e-06
},
"TrainerController._save_models": {
"total": 0.09280723299980309,
"count": 1,
"self": 0.0018257040001117275,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09098152899969136,
"count": 1,
"self": 0.09098152899969136
}
}
}
}
}
}
}