{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3946812152862549,
"min": 0.3940752148628235,
"max": 1.4105116128921509,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 11821.4921875,
"min": 11821.4921875,
"max": 42789.28125,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499966.0,
"min": 29911.0,
"max": 1499966.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499966.0,
"min": 29911.0,
"max": 1499966.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5393136739730835,
"min": -0.09371119737625122,
"max": 0.6017904877662659,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 148.31126403808594,
"min": -22.58439826965332,
"max": 166.7568817138672,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.001714419457130134,
"min": -0.016314681619405746,
"max": 0.45678767561912537,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.4714653491973877,
"min": -4.339705467224121,
"max": 108.25868225097656,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06489156403281911,
"min": 0.06399997184468832,
"max": 0.0737073020047083,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9084818964594674,
"min": 0.5159511140329581,
"max": 1.0552889572960944,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014863416312355516,
"min": 0.0014139563976802295,
"max": 0.015216988088685044,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2080878283729772,
"min": 0.019795389567523212,
"max": 0.21303783324159062,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.1061561075047606e-06,
"min": 3.1061561075047606e-06,
"max": 0.0002968231153446761,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.348618550506665e-05,
"min": 4.348618550506665e-05,
"max": 0.003738108953963733,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10103535238095238,
"min": 0.10103535238095238,
"max": 0.19894103809523814,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4144949333333332,
"min": 1.392587266666667,
"max": 2.646036266666667,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011343170285714284,
"min": 0.00011343170285714284,
"max": 0.009894209705714285,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015880438399999998,
"min": 0.0015880438399999998,
"max": 0.12461902304,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00824776478111744,
"min": 0.007796644698828459,
"max": 0.46392565965652466,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11546871066093445,
"min": 0.10915302485227585,
"max": 3.2474796772003174,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 342.144578313253,
"min": 323.3695652173913,
"max": 981.8620689655172,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28398.0,
"min": 16582.0,
"max": 33888.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.561426482556096,
"min": -0.9216849002422709,
"max": 1.6185158922929654,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 129.59839805215597,
"min": -30.415601707994938,
"max": 146.24739844352007,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.561426482556096,
"min": -0.9216849002422709,
"max": 1.6185158922929654,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 129.59839805215597,
"min": -30.415601707994938,
"max": 146.24739844352007,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029171536696221995,
"min": 0.026742858160540218,
"max": 8.885381640756831,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4212375457864255,
"min": 2.286547838244587,
"max": 151.05148789286613,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675870376",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675873712"
},
"total": 3335.8096577150004,
"count": 1,
"self": 0.8834582650006269,
"children": {
"run_training.setup": {
"total": 0.10842068500005553,
"count": 1,
"self": 0.10842068500005553
},
"TrainerController.start_learning": {
"total": 3334.8177787649997,
"count": 1,
"self": 2.170482805042411,
"children": {
"TrainerController._reset_env": {
"total": 7.130858233999788,
"count": 1,
"self": 7.130858233999788
},
"TrainerController.advance": {
"total": 3325.3794607669574,
"count": 95973,
"self": 2.1618146659002377,
"children": {
"env_step": {
"total": 2214.77028632192,
"count": 95973,
"self": 2041.3882573948258,
"children": {
"SubprocessEnvManager._take_step": {
"total": 172.02867989911692,
"count": 95973,
"self": 6.848380181962057,
"children": {
"TorchPolicy.evaluate": {
"total": 165.18029971715487,
"count": 93807,
"self": 54.853089362228275,
"children": {
"TorchPolicy.sample_actions": {
"total": 110.32721035492659,
"count": 93807,
"self": 110.32721035492659
}
}
}
}
},
"workers": {
"total": 1.3533490279774014,
"count": 95973,
"self": 0.0,
"children": {
"worker_root": {
"total": 3327.1302601279704,
"count": 95973,
"is_parallel": true,
"self": 1458.7129747580739,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017313180001110595,
"count": 1,
"is_parallel": true,
"self": 0.0006357609995575331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010955570005535264,
"count": 8,
"is_parallel": true,
"self": 0.0010955570005535264
}
}
},
"UnityEnvironment.step": {
"total": 0.0473355479998645,
"count": 1,
"is_parallel": true,
"self": 0.0005184159999771509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046341000006577815,
"count": 1,
"is_parallel": true,
"self": 0.00046341000006577815
},
"communicator.exchange": {
"total": 0.04473698499987222,
"count": 1,
"is_parallel": true,
"self": 0.04473698499987222
},
"steps_from_proto": {
"total": 0.001616736999949353,
"count": 1,
"is_parallel": true,
"self": 0.00042068100037795375,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011960559995713993,
"count": 8,
"is_parallel": true,
"self": 0.0011960559995713993
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1868.4172853698965,
"count": 95972,
"is_parallel": true,
"self": 46.66527073093812,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.08589690599456,
"count": 95972,
"is_parallel": true,
"self": 34.08589690599456
},
"communicator.exchange": {
"total": 1635.5554513140241,
"count": 95972,
"is_parallel": true,
"self": 1635.5554513140241
},
"steps_from_proto": {
"total": 152.11066641893967,
"count": 95972,
"is_parallel": true,
"self": 33.899499352441126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 118.21116706649855,
"count": 767776,
"is_parallel": true,
"self": 118.21116706649855
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1108.447359779137,
"count": 95973,
"self": 4.508595827169302,
"children": {
"process_trajectory": {
"total": 239.877084557972,
"count": 95973,
"self": 239.56419882397222,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31288573399979214,
"count": 3,
"self": 0.31288573399979214
}
}
},
"_update_policy": {
"total": 864.0616793939957,
"count": 690,
"self": 339.33986132902737,
"children": {
"TorchPPOOptimizer.update": {
"total": 524.7218180649684,
"count": 34131,
"self": 524.7218180649684
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5169998732744716e-06,
"count": 1,
"self": 1.5169998732744716e-06
},
"TrainerController._save_models": {
"total": 0.13697544200022094,
"count": 1,
"self": 0.0019302870005049044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13504515499971603,
"count": 1,
"self": 0.13504515499971603
}
}
}
}
}
}
}