{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27999401092529297,
"min": 0.2584760785102844,
"max": 1.4691399335861206,
"count": 45
},
"Pyramids.Policy.Entropy.sum": {
"value": 8489.41796875,
"min": 7729.46826171875,
"max": 44567.828125,
"count": 45
},
"Pyramids.Step.mean": {
"value": 1349874.0,
"min": 29952.0,
"max": 1349874.0,
"count": 45
},
"Pyramids.Step.sum": {
"value": 1349874.0,
"min": 29952.0,
"max": 1349874.0,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5843580961227417,
"min": -0.09230305254459381,
"max": 0.623251736164093,
"count": 45
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.2828369140625,
"min": -22.152732849121094,
"max": 177.0034942626953,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03263590857386589,
"min": -0.03559710457921028,
"max": 0.24793276190757751,
"count": 45
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.007511138916016,
"min": -9.753606796264648,
"max": 58.76006317138672,
"count": 45
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07196356706637223,
"min": 0.06517991559196483,
"max": 0.07228699586507605,
"count": 45
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0074899389292113,
"min": 0.4962817789593995,
"max": 1.04574709889358,
"count": 45
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014598498705752371,
"min": 0.000496700446871436,
"max": 0.014598498705752371,
"count": 45
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2043789818805332,
"min": 0.006457105809328668,
"max": 0.20839173290490481,
"count": 45
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.3119403245942855e-05,
"min": 3.3119403245942855e-05,
"max": 0.00029676708679192377,
"count": 45
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0004636716454432,
"min": 0.0004636716454432,
"max": 0.003822684125771999,
"count": 45
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.11103977142857144,
"min": 0.11103977142857144,
"max": 0.19892236190476195,
"count": 45
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5545568,
"min": 1.3924565333333336,
"max": 2.674228,
"count": 45
},
"Pyramids.Policy.Beta.mean": {
"value": 0.001112873165714286,
"min": 0.001112873165714286,
"max": 0.009892343954285714,
"count": 45
},
"Pyramids.Policy.Beta.sum": {
"value": 0.015580224320000002,
"min": 0.015580224320000002,
"max": 0.1274353772,
"count": 45
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008267933502793312,
"min": 0.00725268479436636,
"max": 0.2745141386985779,
"count": 45
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11575107276439667,
"min": 0.10153758525848389,
"max": 1.92159903049469,
"count": 45
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 336.34090909090907,
"min": 315.21590909090907,
"max": 999.0,
"count": 45
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29598.0,
"min": 15984.0,
"max": 33308.0,
"count": 45
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.549988615157252,
"min": -1.0000000521540642,
"max": 1.6620522551238537,
"count": 45
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 136.39899813383818,
"min": -29.54780162125826,
"max": 146.26059845089912,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.549988615157252,
"min": -1.0000000521540642,
"max": 1.6620522551238537,
"count": 45
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 136.39899813383818,
"min": -29.54780162125826,
"max": 146.26059845089912,
"count": 45
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027509241803355424,
"min": 0.0264230206602709,
"max": 5.217091434635222,
"count": 45
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4208132786952774,
"min": 2.246325553089264,
"max": 83.47346295416355,
"count": 45
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 45
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691750876",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691754662"
},
"total": 3786.0023300489997,
"count": 1,
"self": 1.0648708260000603,
"children": {
"run_training.setup": {
"total": 0.04659137900000587,
"count": 1,
"self": 0.04659137900000587
},
"TrainerController.start_learning": {
"total": 3784.890867844,
"count": 1,
"self": 2.9133006480246877,
"children": {
"TrainerController._reset_env": {
"total": 4.779982707000045,
"count": 1,
"self": 4.779982707000045
},
"TrainerController.advance": {
"total": 3776.907284288975,
"count": 87158,
"self": 2.778459381104767,
"children": {
"env_step": {
"total": 2721.368848492936,
"count": 87158,
"self": 2505.3714593169534,
"children": {
"SubprocessEnvManager._take_step": {
"total": 214.25981234506662,
"count": 87158,
"self": 8.566666551139974,
"children": {
"TorchPolicy.evaluate": {
"total": 205.69314579392665,
"count": 85406,
"self": 205.69314579392665
}
}
},
"workers": {
"total": 1.7375768309161685,
"count": 87157,
"self": 0.0,
"children": {
"worker_root": {
"total": 3774.935332657907,
"count": 87157,
"is_parallel": true,
"self": 1488.5201336329233,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002058512000076007,
"count": 1,
"is_parallel": true,
"self": 0.0006784310000966798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013800809999793273,
"count": 8,
"is_parallel": true,
"self": 0.0013800809999793273
}
}
},
"UnityEnvironment.step": {
"total": 0.10743021899997984,
"count": 1,
"is_parallel": true,
"self": 0.0006598019999728422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005733520000603676,
"count": 1,
"is_parallel": true,
"self": 0.0005733520000603676
},
"communicator.exchange": {
"total": 0.10393732999989425,
"count": 1,
"is_parallel": true,
"self": 0.10393732999989425
},
"steps_from_proto": {
"total": 0.0022597350000523875,
"count": 1,
"is_parallel": true,
"self": 0.0004465740000796359,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018131609999727516,
"count": 8,
"is_parallel": true,
"self": 0.0018131609999727516
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2286.415199024984,
"count": 87156,
"is_parallel": true,
"self": 58.757214693991955,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 40.65425757292178,
"count": 87156,
"is_parallel": true,
"self": 40.65425757292178
},
"communicator.exchange": {
"total": 1995.1682066750204,
"count": 87156,
"is_parallel": true,
"self": 1995.1682066750204
},
"steps_from_proto": {
"total": 191.8355200830498,
"count": 87156,
"is_parallel": true,
"self": 39.23901168239752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 152.59650840065228,
"count": 697248,
"is_parallel": true,
"self": 152.59650840065228
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1052.759976414934,
"count": 87157,
"self": 5.074748490041884,
"children": {
"process_trajectory": {
"total": 190.53597549689243,
"count": 87157,
"self": 190.2910874918923,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24488800500012076,
"count": 2,
"self": 0.24488800500012076
}
}
},
"_update_policy": {
"total": 857.1492524279997,
"count": 623,
"self": 557.0176884439649,
"children": {
"TorchPPOOptimizer.update": {
"total": 300.13156398403487,
"count": 31134,
"self": 300.13156398403487
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5129999155760743e-06,
"count": 1,
"self": 1.5129999155760743e-06
},
"TrainerController._save_models": {
"total": 0.29029868700035877,
"count": 1,
"self": 0.00207498400050099,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2882237029998578,
"count": 1,
"self": 0.2882237029998578
}
}
}
}
}
}
}