{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21000109612941742,
"min": 0.20282530784606934,
"max": 1.4987908601760864,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 6464.673828125,
"min": 5945.21533203125,
"max": 47961.30859375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499937.0,
"min": 29965.0,
"max": 1499937.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499937.0,
"min": 29965.0,
"max": 1499937.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7420340180397034,
"min": -0.17377686500549316,
"max": 0.7843450903892517,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 123.91968536376953,
"min": -20.85322380065918,
"max": 139.03561401367188,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012078857980668545,
"min": -0.019199013710021973,
"max": 0.2037448137998581,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.017169237136841,
"min": -3.0718421936035156,
"max": 24.24563217163086,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06815258119584171,
"min": 0.06520804375645192,
"max": 0.07585374099660049,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9541361367417839,
"min": 0.4942462240869483,
"max": 1.0322288755132296,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015690698891164635,
"min": 0.0005699708628393532,
"max": 0.01702291942171391,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2196697844763049,
"min": 0.0062696794912328845,
"max": 0.23832087190399476,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 4.887884736728569e-06,
"min": 4.887884736728569e-06,
"max": 0.0004946118106014476,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.843038631419997e-05,
"min": 6.843038631419997e-05,
"max": 0.0056679849330697335,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10097755714285717,
"min": 0.10097755714285717,
"max": 0.19892236190476195,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4136858000000003,
"min": 1.3367888,
"max": 2.5335969333333335,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00010765795857142852,
"min": 0.00010765795857142852,
"max": 0.009892343954285714,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015072114199999992,
"min": 0.0015072114199999992,
"max": 0.11338633363999998,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008972442708909512,
"min": 0.008972442708909512,
"max": 0.3135761618614197,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.125614196062088,
"min": 0.125614196062088,
"max": 2.195033073425293,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 241.43650793650792,
"min": 221.6390977443609,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30421.0,
"min": 15984.0,
"max": 32212.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7268110842458786,
"min": -0.9999833926558495,
"max": 1.7783608810794085,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 217.5781966149807,
"min": -30.991601705551147,
"max": 236.52199718356133,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7268110842458786,
"min": -0.9999833926558495,
"max": 1.7783608810794085,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 217.5781966149807,
"min": -30.991601705551147,
"max": 236.52199718356133,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.022587337922376473,
"min": 0.02104095551924941,
"max": 4.860745341206591,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8460045782194356,
"min": 2.7697720539363218,
"max": 116.65788818895817,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674231325",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_3 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674234826"
},
"total": 3501.122817852,
"count": 1,
"self": 0.5005281789999572,
"children": {
"run_training.setup": {
"total": 0.10767898599999626,
"count": 1,
"self": 0.10767898599999626
},
"TrainerController.start_learning": {
"total": 3500.514610687,
"count": 1,
"self": 2.2532774779651845,
"children": {
"TrainerController._reset_env": {
"total": 10.534003471999995,
"count": 1,
"self": 10.534003471999995
},
"TrainerController.advance": {
"total": 3487.6434426440346,
"count": 97001,
"self": 2.3407994440258335,
"children": {
"env_step": {
"total": 2485.6283189029996,
"count": 97001,
"self": 2308.430676666088,
"children": {
"SubprocessEnvManager._take_step": {
"total": 175.7803666209516,
"count": 97001,
"self": 7.038472659936815,
"children": {
"TorchPolicy.evaluate": {
"total": 168.7418939610148,
"count": 93882,
"self": 56.8360830440385,
"children": {
"TorchPolicy.sample_actions": {
"total": 111.9058109169763,
"count": 93882,
"self": 111.9058109169763
}
}
}
}
},
"workers": {
"total": 1.4172756159600794,
"count": 97001,
"self": 0.0,
"children": {
"worker_root": {
"total": 3493.2578676929543,
"count": 97001,
"is_parallel": true,
"self": 1353.4987849779236,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005190994000031424,
"count": 1,
"is_parallel": true,
"self": 0.0028228090001789496,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002368184999852474,
"count": 8,
"is_parallel": true,
"self": 0.002368184999852474
}
}
},
"UnityEnvironment.step": {
"total": 0.0638987070000212,
"count": 1,
"is_parallel": true,
"self": 0.0005594220000375572,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000514979999991283,
"count": 1,
"is_parallel": true,
"self": 0.000514979999991283
},
"communicator.exchange": {
"total": 0.0599889229999917,
"count": 1,
"is_parallel": true,
"self": 0.0599889229999917
},
"steps_from_proto": {
"total": 0.0028353820000006635,
"count": 1,
"is_parallel": true,
"self": 0.0015007220000597954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013346599999408681,
"count": 8,
"is_parallel": true,
"self": 0.0013346599999408681
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2139.7590827150307,
"count": 97000,
"is_parallel": true,
"self": 45.53715751911068,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 35.538468998951544,
"count": 97000,
"is_parallel": true,
"self": 35.538468998951544
},
"communicator.exchange": {
"total": 1913.2542705909866,
"count": 97000,
"is_parallel": true,
"self": 1913.2542705909866
},
"steps_from_proto": {
"total": 145.42918560598156,
"count": 97000,
"is_parallel": true,
"self": 35.18899929605499,
"children": {
"_process_rank_one_or_two_observation": {
"total": 110.24018630992657,
"count": 776000,
"is_parallel": true,
"self": 110.24018630992657
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 999.674324297009,
"count": 97001,
"self": 4.681280775965433,
"children": {
"process_trajectory": {
"total": 203.42490704703954,
"count": 97001,
"self": 202.97044644403962,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45446060299991586,
"count": 5,
"self": 0.45446060299991586
}
}
},
"_update_policy": {
"total": 791.568136474004,
"count": 648,
"self": 299.4080673410608,
"children": {
"TorchPPOOptimizer.update": {
"total": 492.16006913294314,
"count": 34269,
"self": 492.16006913294314
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.769999683659989e-07,
"count": 1,
"self": 7.769999683659989e-07
},
"TrainerController._save_models": {
"total": 0.08388631600018925,
"count": 1,
"self": 0.0013932040005784074,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08249311199961085,
"count": 1,
"self": 0.08249311199961085
}
}
}
}
}
}
}