{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8341489434242249,
"min": 0.8341489434242249,
"max": 1.437943458557129,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 25024.46875,
"min": 25024.46875,
"max": 43621.453125,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479915.0,
"min": 29952.0,
"max": 479915.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479915.0,
"min": 29952.0,
"max": 479915.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09666759520769119,
"min": -0.08215032517910004,
"max": 0.25575530529022217,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 24.07023048400879,
"min": -19.71607780456543,
"max": 60.614009857177734,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025496231392025948,
"min": 0.02393612451851368,
"max": 0.438051700592041,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.348561763763428,
"min": 5.912222862243652,
"max": 103.81825256347656,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06605156904760832,
"min": 0.06594023411348938,
"max": 0.07174256868408216,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9247219666665165,
"min": 0.48902487655417143,
"max": 1.049707814323483,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007429880501692095,
"min": 0.0005903526648019358,
"max": 0.007429880501692095,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10401832702368932,
"min": 0.006493879312821293,
"max": 0.10984065934826502,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0946350160771434e-05,
"min": 2.0946350160771434e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0002932489022508001,
"min": 0.0002932489022508001,
"max": 0.0028505426498192,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10698208571428572,
"min": 0.10698208571428572,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4977492000000001,
"min": 1.3773696000000002,
"max": 2.1708157999999997,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007075103628571432,
"min": 0.0007075103628571432,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009905145080000004,
"min": 0.009905145080000004,
"max": 0.09504306191999999,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.021310335025191307,
"min": 0.021310335025191307,
"max": 0.399213045835495,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2983447015285492,
"min": 0.2983447015285492,
"max": 2.7944912910461426,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 740.9285714285714,
"min": 740.9285714285714,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31119.0,
"min": 15984.0,
"max": 32536.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.2587809179510389,
"min": -1.0000000521540642,
"max": 0.28630948598895756,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 10.868798553943634,
"min": -30.53460170328617,
"max": 12.024998411536217,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.2587809179510389,
"min": -1.0000000521540642,
"max": 0.28630948598895756,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 10.868798553943634,
"min": -30.53460170328617,
"max": 12.024998411536217,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.16385969481918783,
"min": 0.16385969481918783,
"max": 7.7894810270518064,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.882107182405889,
"min": 6.706708388694096,
"max": 124.6316964328289,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671645980",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671646938"
},
"total": 957.7829837869999,
"count": 1,
"self": 0.39308084499998586,
"children": {
"run_training.setup": {
"total": 0.10263272899987896,
"count": 1,
"self": 0.10263272899987896
},
"TrainerController.start_learning": {
"total": 957.287270213,
"count": 1,
"self": 0.5625604209719768,
"children": {
"TrainerController._reset_env": {
"total": 7.173057020999977,
"count": 1,
"self": 7.173057020999977
},
"TrainerController.advance": {
"total": 949.458850721028,
"count": 31602,
"self": 0.6291353279768828,
"children": {
"env_step": {
"total": 622.0962961320436,
"count": 31602,
"self": 570.3855794150318,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.34949984903437,
"count": 31602,
"self": 2.1962398760676933,
"children": {
"TorchPolicy.evaluate": {
"total": 49.15325997296668,
"count": 31306,
"self": 16.600076438946417,
"children": {
"TorchPolicy.sample_actions": {
"total": 32.55318353402026,
"count": 31306,
"self": 32.55318353402026
}
}
}
}
},
"workers": {
"total": 0.3612168679774186,
"count": 31602,
"self": 0.0,
"children": {
"worker_root": {
"total": 955.1745560310005,
"count": 31602,
"is_parallel": true,
"self": 433.83427060497934,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005371767000042382,
"count": 1,
"is_parallel": true,
"self": 0.0031480570003168395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022237099997255427,
"count": 8,
"is_parallel": true,
"self": 0.0022237099997255427
}
}
},
"UnityEnvironment.step": {
"total": 0.049859716000128174,
"count": 1,
"is_parallel": true,
"self": 0.0005234030002156942,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004931600001327752,
"count": 1,
"is_parallel": true,
"self": 0.0004931600001327752
},
"communicator.exchange": {
"total": 0.04724041499980558,
"count": 1,
"is_parallel": true,
"self": 0.04724041499980558
},
"steps_from_proto": {
"total": 0.0016027379999741243,
"count": 1,
"is_parallel": true,
"self": 0.00043804099959743326,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001164697000376691,
"count": 8,
"is_parallel": true,
"self": 0.001164697000376691
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 521.3402854260212,
"count": 31601,
"is_parallel": true,
"self": 13.810498772056462,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.13946816904604,
"count": 31601,
"is_parallel": true,
"self": 12.13946816904604
},
"communicator.exchange": {
"total": 447.251706652964,
"count": 31601,
"is_parallel": true,
"self": 447.251706652964
},
"steps_from_proto": {
"total": 48.13861183195468,
"count": 31601,
"is_parallel": true,
"self": 11.131099037987951,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.007512793966725,
"count": 252808,
"is_parallel": true,
"self": 37.007512793966725
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 326.7334192610076,
"count": 31602,
"self": 0.9996612920335792,
"children": {
"process_trajectory": {
"total": 72.72581856197553,
"count": 31602,
"self": 72.62151894397562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10429961799991361,
"count": 1,
"self": 0.10429961799991361
}
}
},
"_update_policy": {
"total": 253.0079394069985,
"count": 214,
"self": 97.37822288999178,
"children": {
"TorchPPOOptimizer.update": {
"total": 155.62971651700673,
"count": 11454,
"self": 155.62971651700673
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0670000847312622e-06,
"count": 1,
"self": 1.0670000847312622e-06
},
"TrainerController._save_models": {
"total": 0.09280098299996098,
"count": 1,
"self": 0.0013448259996948764,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0914561570002661,
"count": 1,
"self": 0.0914561570002661
}
}
}
}
}
}
}