{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3801942765712738,
"min": 0.3531356751918793,
"max": 1.4276145696640015,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11478.8251953125,
"min": 10492.3671875,
"max": 43308.1171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989995.0,
"min": 29952.0,
"max": 989995.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989995.0,
"min": 29952.0,
"max": 989995.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5224593877792358,
"min": -0.09584258496761322,
"max": 0.68194580078125,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.76617431640625,
"min": -23.223289489746094,
"max": 198.44622802734375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.001816640025936067,
"min": 0.001816640025936067,
"max": 0.6472456455230713,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5068425536155701,
"min": 0.5068425536155701,
"max": 153.397216796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06956627330648536,
"min": 0.06519799841038765,
"max": 0.0730428424058857,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.973927826290795,
"min": 0.4954061816622444,
"max": 1.0871616466365772,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017839861827572655,
"min": 0.00040312768468195667,
"max": 0.018584341096640047,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24975806558601718,
"min": 0.004434404531501524,
"max": 0.2787651164496007,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.232369017814287e-06,
"min": 7.232369017814287e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010125316624940002,
"min": 0.00010125316624940002,
"max": 0.0035089355303548992,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241075714285715,
"min": 0.10241075714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4337506,
"min": 1.3886848,
"max": 2.5696451,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002508346385714286,
"min": 0.0002508346385714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035116849400000007,
"min": 0.0035116849400000007,
"max": 0.11698754549000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013090507127344608,
"min": 0.013090507127344608,
"max": 0.6112498641014099,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18326710164546967,
"min": 0.18326710164546967,
"max": 4.278748989105225,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 351.18888888888887,
"min": 255.85046728971963,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31607.0,
"min": 15984.0,
"max": 32621.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.514642683893777,
"min": -1.0000000521540642,
"max": 1.7266166480603042,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.80319886654615,
"min": -31.998401671648026,
"max": 186.47459799051285,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.514642683893777,
"min": -1.0000000521540642,
"max": 1.7266166480603042,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.80319886654615,
"min": -31.998401671648026,
"max": 186.47459799051285,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047070560233570695,
"min": 0.0411717245186554,
"max": 12.333260266110301,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.189279860787792,
"min": 4.189279860787792,
"max": 197.33216425776482,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679759132",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679761516"
},
"total": 2384.3954895579996,
"count": 1,
"self": 0.4793556029994761,
"children": {
"run_training.setup": {
"total": 0.20132658300008188,
"count": 1,
"self": 0.20132658300008188
},
"TrainerController.start_learning": {
"total": 2383.714807372,
"count": 1,
"self": 1.6177493009854516,
"children": {
"TrainerController._reset_env": {
"total": 6.340245222000249,
"count": 1,
"self": 6.340245222000249
},
"TrainerController.advance": {
"total": 2375.6573717840142,
"count": 64136,
"self": 1.6464778620638754,
"children": {
"env_step": {
"total": 1722.4459355509834,
"count": 64136,
"self": 1601.9117928129394,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.5474888199833,
"count": 64136,
"self": 5.079077506954036,
"children": {
"TorchPolicy.evaluate": {
"total": 114.46841131302926,
"count": 62566,
"self": 114.46841131302926
}
}
},
"workers": {
"total": 0.9866539180607106,
"count": 64136,
"self": 0.0,
"children": {
"worker_root": {
"total": 2377.974952608998,
"count": 64136,
"is_parallel": true,
"self": 902.1959823460984,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019545959999049956,
"count": 1,
"is_parallel": true,
"self": 0.0005675769994013535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001387019000503642,
"count": 8,
"is_parallel": true,
"self": 0.001387019000503642
}
}
},
"UnityEnvironment.step": {
"total": 0.04948256699981357,
"count": 1,
"is_parallel": true,
"self": 0.000540748999810603,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005027580000387388,
"count": 1,
"is_parallel": true,
"self": 0.0005027580000387388
},
"communicator.exchange": {
"total": 0.046842109999943204,
"count": 1,
"is_parallel": true,
"self": 0.046842109999943204
},
"steps_from_proto": {
"total": 0.0015969500000210246,
"count": 1,
"is_parallel": true,
"self": 0.000352962999841111,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012439870001799136,
"count": 8,
"is_parallel": true,
"self": 0.0012439870001799136
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1475.7789702628997,
"count": 64135,
"is_parallel": true,
"self": 32.66858775274795,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.828478418076884,
"count": 64135,
"is_parallel": true,
"self": 23.828478418076884
},
"communicator.exchange": {
"total": 1322.8868224250855,
"count": 64135,
"is_parallel": true,
"self": 1322.8868224250855
},
"steps_from_proto": {
"total": 96.39508166698943,
"count": 64135,
"is_parallel": true,
"self": 20.856376408030428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.538705258959,
"count": 513080,
"is_parallel": true,
"self": 75.538705258959
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 651.5649583709669,
"count": 64136,
"self": 2.951220375955927,
"children": {
"process_trajectory": {
"total": 123.30545854101001,
"count": 64136,
"self": 123.0495682210103,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25589031999970757,
"count": 2,
"self": 0.25589031999970757
}
}
},
"_update_policy": {
"total": 525.308279454001,
"count": 453,
"self": 335.06011000804165,
"children": {
"TorchPPOOptimizer.update": {
"total": 190.24816944595932,
"count": 22782,
"self": 190.24816944595932
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.79000105871819e-07,
"count": 1,
"self": 9.79000105871819e-07
},
"TrainerController._save_models": {
"total": 0.09944008599995868,
"count": 1,
"self": 0.003862168000523525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09557791799943516,
"count": 1,
"self": 0.09557791799943516
}
}
}
}
}
}
}