{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4699142575263977,
"min": 0.3891666531562805,
"max": 1.500190019607544,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14142.5390625,
"min": 11687.453125,
"max": 45509.765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989986.0,
"min": 29952.0,
"max": 989986.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989986.0,
"min": 29952.0,
"max": 989986.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.018810128793120384,
"min": -0.10927955061197281,
"max": -0.0012816793750971556,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.552051067352295,
"min": -26.33637237548828,
"max": -0.31401145458221436,
"count": 33
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.1656452715396881,
"min": 0.1590929478406906,
"max": 0.5021917223930359,
"count": 33
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 40.08615493774414,
"min": 38.341400146484375,
"max": 120.52601623535156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06635392671498255,
"min": 0.06497024932366871,
"max": 0.07246138155990259,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9289549740097558,
"min": 0.4817415838474047,
"max": 1.0739379732192758,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0031029837457519555,
"min": 0.00011244172555845392,
"max": 0.003513037227047043,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.043441772440527375,
"min": 0.001461742432259901,
"max": 0.0491825211786586,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.737375992335715e-06,
"min": 7.737375992335715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010832326389270001,
"min": 0.00010832326389270001,
"max": 0.0031441946519352,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257909285714285,
"min": 0.10257909285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361073,
"min": 1.3691136000000002,
"max": 2.3480648000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026765137642857157,
"min": 0.00026765137642857157,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037471192700000017,
"min": 0.0037471192700000017,
"max": 0.10483167351999999,
"count": 33
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.07584772152038348,
"min": 0.07543217037649205,
"max": 0.42585953099772406,
"count": 33
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 1.0618681012853688,
"min": 1.0560503852708887,
"max": 2.9810167169840684,
"count": 33
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.06941799885564495,
"min": 0.06183912462577046,
"max": 0.7221576714221455,
"count": 33
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.9718519839790294,
"min": 0.8657477447607864,
"max": 5.055103699955018,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 950.96875,
"min": 868.4411764705883,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30431.0,
"min": 15984.0,
"max": 32491.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.5138813045341522,
"min": -1.0000000521540642,
"max": -0.22170592581524567,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.44420174509287,
"min": -32.000001668930054,
"max": -7.538001477718353,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.5138813045341522,
"min": -1.0000000521540642,
"max": -0.22170592581524567,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.44420174509287,
"min": -32.000001668930054,
"max": -7.538001477718353,
"count": 33
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 1.5551242381334305,
"min": 1.442157154354979,
"max": 6.1382381450384855,
"count": 33
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 49.763975620269775,
"min": 44.36399610340595,
"max": 176.31472471356392,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694165513",
"python_version": "3.9.17 (main, Jul 5 2023, 20:47:11) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Leonard\\anaconda3\\envs\\ml-agents\\Scripts\\mlagents-learn .\\config\\ppo\\Pyramids.yaml --run-id=Pyramids Training --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1694167332"
},
"total": 1819.0755296999998,
"count": 1,
"self": 0.010550699999839708,
"children": {
"run_training.setup": {
"total": 0.0960595999999998,
"count": 1,
"self": 0.0960595999999998
},
"TrainerController.start_learning": {
"total": 1818.9689194,
"count": 1,
"self": 1.1975100000179282,
"children": {
"TrainerController._reset_env": {
"total": 7.0293030000000005,
"count": 1,
"self": 7.0293030000000005
},
"TrainerController.advance": {
"total": 1810.639066099982,
"count": 63055,
"self": 1.1738500999983899,
"children": {
"env_step": {
"total": 959.0570887999919,
"count": 63055,
"self": 785.9034175999773,
"children": {
"SubprocessEnvManager._take_step": {
"total": 172.3645049000137,
"count": 63055,
"self": 3.497180999999017,
"children": {
"TorchPolicy.evaluate": {
"total": 168.8673239000147,
"count": 62564,
"self": 168.8673239000147
}
}
},
"workers": {
"total": 0.7891663000008791,
"count": 63055,
"self": 0.0,
"children": {
"worker_root": {
"total": 1811.6397657000075,
"count": 63055,
"is_parallel": true,
"self": 1111.0342455000198,
"children": {
"steps_from_proto": {
"total": 0.0010573000000002608,
"count": 1,
"is_parallel": true,
"self": 0.00021849999999989933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008388000000003615,
"count": 8,
"is_parallel": true,
"self": 0.0008388000000003615
}
}
},
"UnityEnvironment.step": {
"total": 700.6044628999878,
"count": 63055,
"is_parallel": true,
"self": 19.215686699962248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.143204700019632,
"count": 63055,
"is_parallel": true,
"self": 13.143204700019632
},
"communicator.exchange": {
"total": 615.1939641000101,
"count": 63055,
"is_parallel": true,
"self": 615.1939641000101
},
"steps_from_proto": {
"total": 53.05160739999575,
"count": 63055,
"is_parallel": true,
"self": 12.587725099981405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.46388230001435,
"count": 504440,
"is_parallel": true,
"self": 40.46388230001435
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 850.4081271999918,
"count": 63055,
"self": 1.8936161999887418,
"children": {
"process_trajectory": {
"total": 100.93181030000363,
"count": 63055,
"self": 100.69354100000345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23826930000018365,
"count": 2,
"self": 0.23826930000018365
}
}
},
"_update_policy": {
"total": 747.5827006999994,
"count": 430,
"self": 454.3094221999869,
"children": {
"TorchPPOOptimizer.update": {
"total": 293.27327850001245,
"count": 22770,
"self": 293.27327850001245
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999999525054591e-07,
"count": 1,
"self": 7.999999525054591e-07
},
"TrainerController._save_models": {
"total": 0.1030395000000226,
"count": 1,
"self": 0.007437600000002931,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09560190000001967,
"count": 1,
"self": 0.09560190000001967
}
}
}
}
}
}
}