{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4548112154006958,
"min": 0.4548112154006958,
"max": 1.4480611085891724,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13775.322265625,
"min": 13670.9609375,
"max": 43928.3828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989997.0,
"min": 29952.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989997.0,
"min": 29952.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36824795603752136,
"min": -0.10178116708993912,
"max": 0.42843231558799744,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 96.84921264648438,
"min": -24.529260635375977,
"max": 113.96299743652344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006934191100299358,
"min": 0.0028177027124911547,
"max": 0.34559914469718933,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8236922025680542,
"min": 0.7072433829307556,
"max": 81.90699768066406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06956796737377016,
"min": 0.06379592067394758,
"max": 0.07321309103909669,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9739515432327822,
"min": 0.5015818004693098,
"max": 1.070607882817664,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014214756165829871,
"min": 0.00025640409403211625,
"max": 0.014862789750276578,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1990065863216182,
"min": 0.0028204450343532785,
"max": 0.21029198639250052,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.517518922764284e-06,
"min": 7.517518922764284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010524526491869997,
"min": 0.00010524526491869997,
"max": 0.0032559047146985,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250580714285715,
"min": 0.10250580714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350813,
"min": 1.3691136000000002,
"max": 2.4021698000000002,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002603301335714285,
"min": 0.0002603301335714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036446218699999994,
"min": 0.0036446218699999994,
"max": 0.10855161985,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009032952599227428,
"min": 0.0086537916213274,
"max": 0.43593263626098633,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12646134197711945,
"min": 0.1211530864238739,
"max": 3.0515284538269043,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 471.4032258064516,
"min": 440.463768115942,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29227.0,
"min": 15984.0,
"max": 32924.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3011048914956265,
"min": -1.0000000521540642,
"max": 1.4725594002267588,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 79.36739838123322,
"min": -32.000001668930054,
"max": 101.60659861564636,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3011048914956265,
"min": -1.0000000521540642,
"max": 1.4725594002267588,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 79.36739838123322,
"min": -32.000001668930054,
"max": 101.60659861564636,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04429158767719646,
"min": 0.043352641953407,
"max": 8.53546473942697,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.701786848308984,
"min": 2.6599582975322846,
"max": 136.56743583083153,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686112265",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686114358"
},
"total": 2092.9848652600003,
"count": 1,
"self": 0.575282798000444,
"children": {
"run_training.setup": {
"total": 0.04292366000004222,
"count": 1,
"self": 0.04292366000004222
},
"TrainerController.start_learning": {
"total": 2092.366658802,
"count": 1,
"self": 1.4404065180688121,
"children": {
"TrainerController._reset_env": {
"total": 3.736506961000032,
"count": 1,
"self": 3.736506961000032
},
"TrainerController.advance": {
"total": 2087.089941646931,
"count": 63390,
"self": 1.3712998078508463,
"children": {
"env_step": {
"total": 1448.0797201420341,
"count": 63390,
"self": 1335.6835791899957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 111.57247465801902,
"count": 63390,
"self": 4.859888107013376,
"children": {
"TorchPolicy.evaluate": {
"total": 106.71258655100564,
"count": 62552,
"self": 106.71258655100564
}
}
},
"workers": {
"total": 0.8236662940194037,
"count": 63390,
"self": 0.0,
"children": {
"worker_root": {
"total": 2087.4207112570193,
"count": 63390,
"is_parallel": true,
"self": 866.3884227230128,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001830280000149287,
"count": 1,
"is_parallel": true,
"self": 0.0006319470003290917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011983329998201953,
"count": 8,
"is_parallel": true,
"self": 0.0011983329998201953
}
}
},
"UnityEnvironment.step": {
"total": 0.053344245000062074,
"count": 1,
"is_parallel": true,
"self": 0.0005557460001455183,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005415170001015213,
"count": 1,
"is_parallel": true,
"self": 0.0005415170001015213
},
"communicator.exchange": {
"total": 0.05046834399990985,
"count": 1,
"is_parallel": true,
"self": 0.05046834399990985
},
"steps_from_proto": {
"total": 0.001778637999905186,
"count": 1,
"is_parallel": true,
"self": 0.0003668269996524032,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014118110002527828,
"count": 8,
"is_parallel": true,
"self": 0.0014118110002527828
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1221.0322885340065,
"count": 63389,
"is_parallel": true,
"self": 32.81479635010237,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.436398587927215,
"count": 63389,
"is_parallel": true,
"self": 23.436398587927215
},
"communicator.exchange": {
"total": 1061.6281738520104,
"count": 63389,
"is_parallel": true,
"self": 1061.6281738520104
},
"steps_from_proto": {
"total": 103.15291974396655,
"count": 63389,
"is_parallel": true,
"self": 20.69779507699195,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.4551246669746,
"count": 507112,
"is_parallel": true,
"self": 82.4551246669746
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.6389216970458,
"count": 63390,
"self": 2.5722135250296105,
"children": {
"process_trajectory": {
"total": 111.34335536401795,
"count": 63390,
"self": 111.04837539201799,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2949799719999646,
"count": 2,
"self": 0.2949799719999646
}
}
},
"_update_policy": {
"total": 523.7233528079983,
"count": 442,
"self": 336.1868543580042,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.53649844999404,
"count": 22818,
"self": 187.53649844999404
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.269998943433166e-07,
"count": 1,
"self": 8.269998943433166e-07
},
"TrainerController._save_models": {
"total": 0.09980284900029801,
"count": 1,
"self": 0.0014069150001887465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09839593400010926,
"count": 1,
"self": 0.09839593400010926
}
}
}
}
}
}
}