{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3739420175552368,
"min": 0.3739420175552368,
"max": 1.495103120803833,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11182.3623046875,
"min": 11182.3623046875,
"max": 45355.44921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5345433354377747,
"min": -0.11415775120258331,
"max": 0.5934386849403381,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.39578247070312,
"min": -27.626174926757812,
"max": 165.56939697265625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0681004673242569,
"min": 0.00030173620325513184,
"max": 0.2678124010562897,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 18.523326873779297,
"min": 0.08327919244766235,
"max": 64.27497863769531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07103471165070968,
"min": 0.06625999546252258,
"max": 0.07335674096199199,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0655206747606452,
"min": 0.496533855155576,
"max": 1.075208651020041,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017738853525324706,
"min": 0.00045383501612485274,
"max": 0.017738853525324706,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2660828028798706,
"min": 0.004538350161248528,
"max": 0.2660828028798706,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.0356512389445543e-05,
"min": 1.0356512389445543e-05,
"max": 0.0002951986437503818,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00015534768584168316,
"min": 0.00015534768584168316,
"max": 0.003260200906335742,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10345213861386139,
"min": 0.10345213861386139,
"max": 0.19839954738330975,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5517820792079209,
"min": 1.3694194059405942,
"max": 2.446043861386139,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00035486864752475256,
"min": 0.00035486864752475256,
"max": 0.009840114783592644,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.005323029712871288,
"min": 0.005323029712871288,
"max": 0.1086946830792079,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01026099268347025,
"min": 0.01026099268347025,
"max": 0.36318111419677734,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15391488373279572,
"min": 0.14727701246738434,
"max": 2.5422677993774414,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.73333333333335,
"min": 300.5108695652174,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31116.0,
"min": 15984.0,
"max": 34265.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.516644922283928,
"min": -1.0000000521540642,
"max": 1.679926860076125,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.9813980832696,
"min": -32.000001668930054,
"max": 156.23319798707962,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.516644922283928,
"min": -1.0000000521540642,
"max": 1.679926860076125,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.9813980832696,
"min": -32.000001668930054,
"max": 156.23319798707962,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.036877568334398605,
"min": 0.03481766418819891,
"max": 7.568356424570084,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2821035817614757,
"min": 3.238042769502499,
"max": 121.09370279312134,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686078224",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686080352"
},
"total": 2127.733873554,
"count": 1,
"self": 0.5389802049999162,
"children": {
"run_training.setup": {
"total": 0.04258437500004675,
"count": 1,
"self": 0.04258437500004675
},
"TrainerController.start_learning": {
"total": 2127.1523089740003,
"count": 1,
"self": 1.2252123499201844,
"children": {
"TrainerController._reset_env": {
"total": 4.2742059239999435,
"count": 1,
"self": 4.2742059239999435
},
"TrainerController.advance": {
"total": 2121.5621113370808,
"count": 64507,
"self": 1.295678842037887,
"children": {
"env_step": {
"total": 1482.3606137630104,
"count": 64507,
"self": 1376.121678082957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.48466621998034,
"count": 64507,
"self": 4.550730979999798,
"children": {
"TorchPolicy.evaluate": {
"total": 100.93393523998054,
"count": 63187,
"self": 100.93393523998054
}
}
},
"workers": {
"total": 0.7542694600730329,
"count": 64507,
"self": 0.0,
"children": {
"worker_root": {
"total": 2122.6909617180636,
"count": 64507,
"is_parallel": true,
"self": 854.4103461730783,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006046384000001126,
"count": 1,
"is_parallel": true,
"self": 0.004689967999865985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001356416000135141,
"count": 8,
"is_parallel": true,
"self": 0.001356416000135141
}
}
},
"UnityEnvironment.step": {
"total": 0.0456459110000651,
"count": 1,
"is_parallel": true,
"self": 0.001044063999984246,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047621999999591935,
"count": 1,
"is_parallel": true,
"self": 0.00047621999999591935
},
"communicator.exchange": {
"total": 0.04237429400006931,
"count": 1,
"is_parallel": true,
"self": 0.04237429400006931
},
"steps_from_proto": {
"total": 0.0017513330000156202,
"count": 1,
"is_parallel": true,
"self": 0.00033923400008006865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014120989999355515,
"count": 8,
"is_parallel": true,
"self": 0.0014120989999355515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1268.2806155449853,
"count": 64506,
"is_parallel": true,
"self": 33.00898678999283,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.36388331109424,
"count": 64506,
"is_parallel": true,
"self": 22.36388331109424
},
"communicator.exchange": {
"total": 1114.971855150925,
"count": 64506,
"is_parallel": true,
"self": 1114.971855150925
},
"steps_from_proto": {
"total": 97.93589029297311,
"count": 64506,
"is_parallel": true,
"self": 19.162654483896176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.77323580907694,
"count": 516048,
"is_parallel": true,
"self": 78.77323580907694
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.9058187320327,
"count": 64507,
"self": 2.376437568036863,
"children": {
"process_trajectory": {
"total": 104.33848106999073,
"count": 64507,
"self": 104.13532935599085,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20315171399988685,
"count": 2,
"self": 0.20315171399988685
}
}
},
"_update_policy": {
"total": 531.1909000940051,
"count": 448,
"self": 340.3390671060142,
"children": {
"TorchPPOOptimizer.update": {
"total": 190.85183298799086,
"count": 23088,
"self": 190.85183298799086
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1129995982628316e-06,
"count": 1,
"self": 1.1129995982628316e-06
},
"TrainerController._save_models": {
"total": 0.09077824999985751,
"count": 1,
"self": 0.001327382999988913,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0894508669998686,
"count": 1,
"self": 0.0894508669998686
}
}
}
}
}
}
}