{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.26223844289779663,
"min": 0.26223844289779663,
"max": 1.5016237497329712,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7959.46142578125,
"min": 7959.46142578125,
"max": 45553.2578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29928.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29928.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7297298312187195,
"min": -0.11346393823623657,
"max": 0.7665205001831055,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 212.35137939453125,
"min": -26.890953063964844,
"max": 226.8900604248047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01334985252469778,
"min": 0.01334985252469778,
"max": 0.3806348145008087,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.8848071098327637,
"min": 3.8848071098327637,
"max": 90.21044921875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0669987091996951,
"min": 0.0664428077372981,
"max": 0.07716278828097388,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9379819287957313,
"min": 0.5401395179668171,
"max": 1.0918941104027908,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014252618569853005,
"min": 0.00025940326213510974,
"max": 0.01684501042884823,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19953665997794207,
"min": 0.0025940326213510973,
"max": 0.2358301460038752,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.2442347511549997e-05,
"min": 1.2442347511549997e-05,
"max": 0.0004919177159021714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00017419286516169995,
"min": 0.00017419286516169995,
"max": 0.005425921314815799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248845,
"min": 0.10248845,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348383,
"min": 1.3886848,
"max": 2.4851842000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025859615499999997,
"min": 0.00025859615499999997,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036203461699999993,
"min": 0.0036203461699999993,
"max": 0.10854990158000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011656478978693485,
"min": 0.011656478978693485,
"max": 0.4483734667301178,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16319070756435394,
"min": 0.16319070756435394,
"max": 3.1386141777038574,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 257.02564102564105,
"min": 251.8780487804878,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30072.0,
"min": 16855.0,
"max": 32810.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7245879145017986,
"min": -0.9999355356539449,
"max": 1.7368695414584616,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 200.05219808220863,
"min": -31.997201651334763,
"max": 211.01799830794334,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7245879145017986,
"min": -0.9999355356539449,
"max": 1.7368695414584616,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 200.05219808220863,
"min": -31.997201651334763,
"max": 211.01799830794334,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031132964077877315,
"min": 0.031131029292234557,
"max": 9.531955739154535,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6114238330337685,
"min": 3.4829676072113216,
"max": 162.0432475656271,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689089521",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689092241"
},
"total": 2720.112885132,
"count": 1,
"self": 0.576373902999876,
"children": {
"run_training.setup": {
"total": 0.03923694899998509,
"count": 1,
"self": 0.03923694899998509
},
"TrainerController.start_learning": {
"total": 2719.49727428,
"count": 1,
"self": 1.9650025169476066,
"children": {
"TrainerController._reset_env": {
"total": 4.513965122999991,
"count": 1,
"self": 4.513965122999991
},
"TrainerController.advance": {
"total": 2712.859167492052,
"count": 64298,
"self": 2.1395098960106225,
"children": {
"env_step": {
"total": 1945.0356857799825,
"count": 64298,
"self": 1776.7646584479621,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.10161059300117,
"count": 64298,
"self": 6.151154952996649,
"children": {
"TorchPolicy.evaluate": {
"total": 160.95045564000452,
"count": 62567,
"self": 160.95045564000452
}
}
},
"workers": {
"total": 1.1694167390192547,
"count": 64298,
"self": 0.0,
"children": {
"worker_root": {
"total": 2712.482012896996,
"count": 64298,
"is_parallel": true,
"self": 1084.8261057429881,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005266584000025887,
"count": 1,
"is_parallel": true,
"self": 0.003891221999992922,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013753620000329647,
"count": 8,
"is_parallel": true,
"self": 0.0013753620000329647
}
}
},
"UnityEnvironment.step": {
"total": 0.053036889999987125,
"count": 1,
"is_parallel": true,
"self": 0.0005849209999837512,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005356819999633444,
"count": 1,
"is_parallel": true,
"self": 0.0005356819999633444
},
"communicator.exchange": {
"total": 0.04966543000000456,
"count": 1,
"is_parallel": true,
"self": 0.04966543000000456
},
"steps_from_proto": {
"total": 0.0022508570000354666,
"count": 1,
"is_parallel": true,
"self": 0.0004485510000904469,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018023059999450197,
"count": 8,
"is_parallel": true,
"self": 0.0018023059999450197
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1627.6559071540078,
"count": 64297,
"is_parallel": true,
"self": 39.7200046249975,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.677611193987218,
"count": 64297,
"is_parallel": true,
"self": 27.677611193987218
},
"communicator.exchange": {
"total": 1429.4817182550298,
"count": 64297,
"is_parallel": true,
"self": 1429.4817182550298
},
"steps_from_proto": {
"total": 130.7765730799931,
"count": 64297,
"is_parallel": true,
"self": 26.47218245983322,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.3043906201599,
"count": 514376,
"is_parallel": true,
"self": 104.3043906201599
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 765.6839718160592,
"count": 64298,
"self": 3.553350266059283,
"children": {
"process_trajectory": {
"total": 155.50202164499655,
"count": 64298,
"self": 155.17933532099636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3226863240001876,
"count": 2,
"self": 0.3226863240001876
}
}
},
"_update_policy": {
"total": 606.6285999050033,
"count": 447,
"self": 383.05958918798564,
"children": {
"TorchPPOOptimizer.update": {
"total": 223.56901071701765,
"count": 22815,
"self": 223.56901071701765
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1940001058974303e-06,
"count": 1,
"self": 1.1940001058974303e-06
},
"TrainerController._save_models": {
"total": 0.15913795400001618,
"count": 1,
"self": 0.0016208260003622854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1575171279996539,
"count": 1,
"self": 0.1575171279996539
}
}
}
}
}
}
}