{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3662203252315521,
"min": 0.35252490639686584,
"max": 1.4821102619171143,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10963.171875,
"min": 10575.7470703125,
"max": 44961.296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.581268310546875,
"min": -0.06992906332015991,
"max": 0.6206590533256531,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.84878540039062,
"min": -16.922832489013672,
"max": 170.68124389648438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.029434269294142723,
"min": -0.029434269294142723,
"max": 0.6096308827400208,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.0944242477417,
"min": -8.0944242477417,
"max": 144.48251342773438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07414545548783194,
"min": 0.06402237467312565,
"max": 0.07414545548783194,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0380363768296472,
"min": 0.49986444515291195,
"max": 1.0927377134817193,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013442317180479654,
"min": 0.0024263131573948127,
"max": 0.016615065985423002,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18819244052671516,
"min": 0.029165011096455917,
"max": 0.23855689145007183,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.390983250657146e-06,
"min": 7.390983250657146e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010347376550920005,
"min": 0.00010347376550920005,
"max": 0.0037578970473676997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246362857142861,
"min": 0.10246362857142861,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344908000000005,
"min": 1.3886848,
"max": 2.6526323000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002561164942857143,
"min": 0.0002561164942857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003585630920000001,
"min": 0.003585630920000001,
"max": 0.12527796677,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011244603432714939,
"min": 0.010602102614939213,
"max": 0.6331111192703247,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1574244499206543,
"min": 0.14842943847179413,
"max": 4.4317779541015625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 355.125,
"min": 309.421568627451,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28410.0,
"min": 15984.0,
"max": 33018.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4948299762792885,
"min": -1.0000000521540642,
"max": 1.6271746943051788,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 119.58639810234308,
"min": -28.753001734614372,
"max": 164.43679846078157,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4948299762792885,
"min": -1.0000000521540642,
"max": 1.6271746943051788,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 119.58639810234308,
"min": -28.753001734614372,
"max": 164.43679846078157,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0406228395191647,
"min": 0.0352157667700393,
"max": 13.469349486753345,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.249827161533176,
"min": 3.232574724650476,
"max": 215.5095917880535,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692617401",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692619753"
},
"total": 2352.457404783,
"count": 1,
"self": 0.47630618900075206,
"children": {
"run_training.setup": {
"total": 0.06715090199986662,
"count": 1,
"self": 0.06715090199986662
},
"TrainerController.start_learning": {
"total": 2351.9139476919995,
"count": 1,
"self": 1.5221912529486872,
"children": {
"TrainerController._reset_env": {
"total": 4.95491548300015,
"count": 1,
"self": 4.95491548300015
},
"TrainerController.advance": {
"total": 2345.3390180540514,
"count": 64092,
"self": 1.4225067220868368,
"children": {
"env_step": {
"total": 1693.9282755859235,
"count": 64092,
"self": 1582.8220088259372,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.24129824408374,
"count": 64092,
"self": 4.769488856039516,
"children": {
"TorchPolicy.evaluate": {
"total": 105.47180938804422,
"count": 62559,
"self": 105.47180938804422
}
}
},
"workers": {
"total": 0.8649685159025466,
"count": 64092,
"self": 0.0,
"children": {
"worker_root": {
"total": 2346.6364319910535,
"count": 64092,
"is_parallel": true,
"self": 881.423678519021,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026366130000496923,
"count": 1,
"is_parallel": true,
"self": 0.0007060830002956209,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019305299997540715,
"count": 8,
"is_parallel": true,
"self": 0.0019305299997540715
}
}
},
"UnityEnvironment.step": {
"total": 0.050637370999993436,
"count": 1,
"is_parallel": true,
"self": 0.0005476369997268193,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005072009998912108,
"count": 1,
"is_parallel": true,
"self": 0.0005072009998912108
},
"communicator.exchange": {
"total": 0.04774446000010357,
"count": 1,
"is_parallel": true,
"self": 0.04774446000010357
},
"steps_from_proto": {
"total": 0.0018380730002718337,
"count": 1,
"is_parallel": true,
"self": 0.00034648499968170654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014915880005901272,
"count": 8,
"is_parallel": true,
"self": 0.0014915880005901272
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1465.2127534720325,
"count": 64091,
"is_parallel": true,
"self": 34.342037267894284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.65537818404846,
"count": 64091,
"is_parallel": true,
"self": 22.65537818404846
},
"communicator.exchange": {
"total": 1301.3639940570392,
"count": 64091,
"is_parallel": true,
"self": 1301.3639940570392
},
"steps_from_proto": {
"total": 106.85134396305057,
"count": 64091,
"is_parallel": true,
"self": 21.160973099130388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.69037086392018,
"count": 512728,
"is_parallel": true,
"self": 85.69037086392018
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 649.988235746041,
"count": 64092,
"self": 2.7474069830245753,
"children": {
"process_trajectory": {
"total": 111.35380878300975,
"count": 64092,
"self": 111.15067750301068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2031312799990701,
"count": 2,
"self": 0.2031312799990701
}
}
},
"_update_policy": {
"total": 535.8870199800067,
"count": 457,
"self": 348.2779830689865,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.6090369110202,
"count": 22788,
"self": 187.6090369110202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.139995770761743e-07,
"count": 1,
"self": 8.139995770761743e-07
},
"TrainerController._save_models": {
"total": 0.09782208799970249,
"count": 1,
"self": 0.0014795580000281916,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0963425299996743,
"count": 1,
"self": 0.0963425299996743
}
}
}
}
}
}
}