{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27456164360046387,
"min": 0.2346637099981308,
"max": 1.4432616233825684,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8223.669921875,
"min": 7088.72119140625,
"max": 43782.78515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989959.0,
"min": 29952.0,
"max": 989959.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989959.0,
"min": 29952.0,
"max": 989959.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03341314196586609,
"min": -0.0949224978685379,
"max": 0.049000248312950134,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.219633102416992,
"min": -22.78139877319336,
"max": 11.613059043884277,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07474235445261002,
"min": 0.06047491356730461,
"max": 0.34305477142333984,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.386619567871094,
"min": 14.513978958129883,
"max": 82.67620086669922,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07038414100090797,
"min": 0.06357376825613725,
"max": 0.07260334071393244,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9853779740127115,
"min": 0.5082233849975271,
"max": 1.007853482342719,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004196856411519028,
"min": 0.00015104681027873414,
"max": 0.0065069984724155285,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.05875598976126639,
"min": 0.0016615149130660754,
"max": 0.05875598976126639,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.7106974298e-06,
"min": 7.7106974298e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001079497640172,
"min": 0.0001079497640172,
"max": 0.0035074280308573995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257019999999999,
"min": 0.10257019999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359828,
"min": 1.3886848,
"max": 2.5691426000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026676298000000013,
"min": 0.00026676298000000013,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037346817200000016,
"min": 0.0037346817200000016,
"max": 0.11693734574,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.06417929381132126,
"min": 0.060770995914936066,
"max": 0.5231438875198364,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.8985100984573364,
"min": 0.8507939577102661,
"max": 3.6620073318481445,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 815.3714285714286,
"min": 815.3714285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28538.0,
"min": 15984.0,
"max": 32796.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.13006861571754727,
"min": -1.0000000521540642,
"max": -0.13006861571754727,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -4.552401550114155,
"min": -30.999201625585556,
"max": -4.552401550114155,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.13006861571754727,
"min": -1.0000000521540642,
"max": -0.13006861571754727,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -4.552401550114155,
"min": -30.999201625585556,
"max": -4.552401550114155,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.529703208923872,
"min": 0.529703208923872,
"max": 10.622885194607079,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 18.53961231233552,
"min": 15.593072811840102,
"max": 169.96616311371326,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696518782",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696522089"
},
"total": 3306.8666176509996,
"count": 1,
"self": 0.5782094979995236,
"children": {
"run_training.setup": {
"total": 0.08869277600001624,
"count": 1,
"self": 0.08869277600001624
},
"TrainerController.start_learning": {
"total": 3306.199715377,
"count": 1,
"self": 2.380733610010793,
"children": {
"TrainerController._reset_env": {
"total": 2.1431991980000475,
"count": 1,
"self": 2.1431991980000475
},
"TrainerController.advance": {
"total": 3301.5657433599886,
"count": 63168,
"self": 2.264297309985068,
"children": {
"env_step": {
"total": 2111.384506741032,
"count": 63168,
"self": 1967.967775670939,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.95701035604384,
"count": 63168,
"self": 7.133578664976994,
"children": {
"TorchPolicy.evaluate": {
"total": 134.82343169106684,
"count": 62551,
"self": 134.82343169106684
}
}
},
"workers": {
"total": 1.4597207140491264,
"count": 63168,
"self": 0.0,
"children": {
"worker_root": {
"total": 3299.4390391000043,
"count": 63168,
"is_parallel": true,
"self": 1504.3459579060695,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003971734000060678,
"count": 1,
"is_parallel": true,
"self": 0.00106900100013263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029027329999280482,
"count": 8,
"is_parallel": true,
"self": 0.0029027329999280482
}
}
},
"UnityEnvironment.step": {
"total": 0.13179528000000573,
"count": 1,
"is_parallel": true,
"self": 0.0008020200002647471,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007340219999605324,
"count": 1,
"is_parallel": true,
"self": 0.0007340219999605324
},
"communicator.exchange": {
"total": 0.12692934999995487,
"count": 1,
"is_parallel": true,
"self": 0.12692934999995487
},
"steps_from_proto": {
"total": 0.003329887999825587,
"count": 1,
"is_parallel": true,
"self": 0.000439100999983566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002890786999842021,
"count": 8,
"is_parallel": true,
"self": 0.002890786999842021
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1795.0930811939347,
"count": 63167,
"is_parallel": true,
"self": 46.374895161032555,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.82302800595494,
"count": 63167,
"is_parallel": true,
"self": 25.82302800595494
},
"communicator.exchange": {
"total": 1582.0574128019434,
"count": 63167,
"is_parallel": true,
"self": 1582.0574128019434
},
"steps_from_proto": {
"total": 140.8377452250038,
"count": 63167,
"is_parallel": true,
"self": 29.51146327600304,
"children": {
"_process_rank_one_or_two_observation": {
"total": 111.32628194900076,
"count": 505336,
"is_parallel": true,
"self": 111.32628194900076
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1187.9169393089717,
"count": 63168,
"self": 4.50747085691728,
"children": {
"process_trajectory": {
"total": 149.77866815905054,
"count": 63168,
"self": 149.54671643805068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2319517209998594,
"count": 2,
"self": 0.2319517209998594
}
}
},
"_update_policy": {
"total": 1033.6308002930039,
"count": 444,
"self": 441.6069040699738,
"children": {
"TorchPPOOptimizer.update": {
"total": 592.0238962230301,
"count": 22716,
"self": 592.0238962230301
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4229999578674324e-06,
"count": 1,
"self": 1.4229999578674324e-06
},
"TrainerController._save_models": {
"total": 0.11003778600024816,
"count": 1,
"self": 0.0017367870004818542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10830099899976631,
"count": 1,
"self": 0.10830099899976631
}
}
}
}
}
}
}