{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8233450651168823,
"min": 0.7984223365783691,
"max": 1.3894301652908325,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 24766.21875,
"min": 23709.94921875,
"max": 42149.75390625,
"count": 20
},
"Pyramids.Step.mean": {
"value": 599937.0,
"min": 29987.0,
"max": 599937.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 599937.0,
"min": 29987.0,
"max": 599937.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.03990941867232323,
"min": -0.2289409339427948,
"max": 0.06493476778268814,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.737897872924805,
"min": -54.48794174194336,
"max": 15.973953247070312,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016215212643146515,
"min": 0.016164833679795265,
"max": 0.5946663022041321,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.9565117359161377,
"min": 3.9565117359161377,
"max": 141.53057861328125,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06947956437641868,
"min": 0.0666489455281856,
"max": 0.07340738674391135,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9727139012698615,
"min": 0.6074350104350869,
"max": 1.0201817682936247,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005103617892065649,
"min": 0.00011802643508214116,
"max": 0.009582505592833681,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.07145065048891909,
"min": 0.0010622379157392704,
"max": 0.08624255033550313,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.672918870964286e-06,
"min": 7.672918870964286e-06,
"max": 0.00029215083594972223,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001074208641935,
"min": 0.0001074208641935,
"max": 0.0027771610742796664,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255760714285715,
"min": 0.10255760714285715,
"max": 0.1973836111111111,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358065000000002,
"min": 1.4358065000000002,
"max": 1.9963236666666668,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002655049535714286,
"min": 0.0002655049535714286,
"max": 0.009738622750000002,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037170693500000003,
"min": 0.0037170693500000003,
"max": 0.0925794613,
"count": 20
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015156647190451622,
"min": 0.015006942674517632,
"max": 0.4557708203792572,
"count": 20
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2121930569410324,
"min": 0.21009719371795654,
"max": 4.101937294006348,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 856.5277777777778,
"min": 751.6285714285714,
"max": 999.0,
"count": 20
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30835.0,
"min": 16529.0,
"max": 32867.0,
"count": 20
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.23604576119354792,
"min": -1.0000000521540642,
"max": 0.21956567402396884,
"count": 20
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -8.261601641774178,
"min": -32.000001668930054,
"max": 7.684798590838909,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.23604576119354792,
"min": -1.0000000521540642,
"max": 0.21956567402396884,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -8.261601641774178,
"min": -32.000001668930054,
"max": 7.684798590838909,
"count": 20
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.13539667784103326,
"min": 0.11727566726372711,
"max": 8.225774183869362,
"count": 20
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.738883724436164,
"min": 4.104648354230449,
"max": 148.0639353096485,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1659852097",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1659853187"
},
"total": 1090.753050094,
"count": 1,
"self": 0.4787579189999178,
"children": {
"run_training.setup": {
"total": 0.04378409000003103,
"count": 1,
"self": 0.04378409000003103
},
"TrainerController.start_learning": {
"total": 1090.230508085,
"count": 1,
"self": 0.7525848999819118,
"children": {
"TrainerController._reset_env": {
"total": 9.69543510599999,
"count": 1,
"self": 9.69543510599999
},
"TrainerController.advance": {
"total": 1079.6859243150184,
"count": 37821,
"self": 0.8241268420299548,
"children": {
"env_step": {
"total": 670.4507810920011,
"count": 37821,
"self": 609.7955857590207,
"children": {
"SubprocessEnvManager._take_step": {
"total": 60.263676439977075,
"count": 37821,
"self": 2.62114733296994,
"children": {
"TorchPolicy.evaluate": {
"total": 57.642529107007135,
"count": 37554,
"self": 19.93510012000837,
"children": {
"TorchPolicy.sample_actions": {
"total": 37.707428986998764,
"count": 37554,
"self": 37.707428986998764
}
}
}
}
},
"workers": {
"total": 0.3915188930032514,
"count": 37821,
"self": 0.0,
"children": {
"worker_root": {
"total": 1087.9349839240058,
"count": 37821,
"is_parallel": true,
"self": 535.6470072710106,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005467732999989039,
"count": 1,
"is_parallel": true,
"self": 0.004173098000023856,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001294634999965183,
"count": 8,
"is_parallel": true,
"self": 0.001294634999965183
}
}
},
"UnityEnvironment.step": {
"total": 0.04794721700000082,
"count": 1,
"is_parallel": true,
"self": 0.0005048500000270906,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004711689999794544,
"count": 1,
"is_parallel": true,
"self": 0.0004711689999794544
},
"communicator.exchange": {
"total": 0.04523462899999231,
"count": 1,
"is_parallel": true,
"self": 0.04523462899999231
},
"steps_from_proto": {
"total": 0.001736569000001964,
"count": 1,
"is_parallel": true,
"self": 0.00043514500003993817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013014239999620258,
"count": 8,
"is_parallel": true,
"self": 0.0013014239999620258
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 552.2879766529952,
"count": 37820,
"is_parallel": true,
"self": 16.168420772010677,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.349130265982978,
"count": 37820,
"is_parallel": true,
"self": 13.349130265982978
},
"communicator.exchange": {
"total": 469.0925578330078,
"count": 37820,
"is_parallel": true,
"self": 469.0925578330078
},
"steps_from_proto": {
"total": 53.67786778199371,
"count": 37820,
"is_parallel": true,
"self": 13.07753974403198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.60032803796173,
"count": 302560,
"is_parallel": true,
"self": 40.60032803796173
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 408.41101638098735,
"count": 37821,
"self": 1.2168836389824946,
"children": {
"process_trajectory": {
"total": 91.3764676890047,
"count": 37821,
"self": 91.27221093400465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10425675500005127,
"count": 1,
"self": 0.10425675500005127
}
}
},
"_update_policy": {
"total": 315.81766505300016,
"count": 241,
"self": 123.1898380090172,
"children": {
"TorchPPOOptimizer.update": {
"total": 192.62782704398296,
"count": 13746,
"self": 192.62782704398296
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4269999155658297e-06,
"count": 1,
"self": 1.4269999155658297e-06
},
"TrainerController._save_models": {
"total": 0.09656233700002304,
"count": 1,
"self": 0.00172443800011024,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0948378989999128,
"count": 1,
"self": 0.0948378989999128
}
}
}
}
}
}
}