{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3339800536632538,
"min": 0.3180413842201233,
"max": 1.5922272205352783,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3350.48779296875,
"min": 3129.52734375,
"max": 16304.40625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 999970.0,
"min": 9984.0,
"max": 999970.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 999970.0,
"min": 9984.0,
"max": 999970.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4776577651500702,
"min": -0.10876123607158661,
"max": 0.6380420327186584,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 44.42217254638672,
"min": -8.809659957885742,
"max": 61.252037048339844,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006431872025132179,
"min": -0.020481683313846588,
"max": 0.36574122309684753,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5981640815734863,
"min": -1.9662415981292725,
"max": 29.625038146972656,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07351361495675518,
"min": 0.05930584500339476,
"max": 0.07778544960698734,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.3675680747837759,
"min": 0.13289931900939778,
"max": 0.37685757594105473,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01662263655452989,
"min": 0.0003626126776461937,
"max": 0.02118562733521685,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08311318277264945,
"min": 0.0014504507105847749,
"max": 0.10592813667608425,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5334594888799946e-06,
"min": 1.5334594888799946e-06,
"max": 0.0002981568006144,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.667297444399973e-06,
"min": 7.667297444399973e-06,
"max": 0.0013571268476244,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051112,
"min": 0.10051112,
"max": 0.1993856,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5025556,
"min": 0.39717119999999995,
"max": 0.9523756000000001,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.106088799999984e-05,
"min": 6.106088799999984e-05,
"max": 0.00993862144,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0003053044399999992,
"min": 0.0003053044399999992,
"max": 0.04524232244,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011171339079737663,
"min": 0.010520762763917446,
"max": 0.5629435181617737,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.055856697261333466,
"min": 0.042083051055669785,
"max": 1.1258870363235474,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 315.73333333333335,
"min": 214.8,
"max": 999.0,
"count": 98
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 9472.0,
"min": 2304.0,
"max": 16868.0,
"count": 98
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6842666601141294,
"min": -1.0000000521540642,
"max": 1.7851999836308616,
"count": 98
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 50.52799980342388,
"min": -16.000000834465027,
"max": 62.481999427080154,
"count": 98
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6842666601141294,
"min": -1.0000000521540642,
"max": 1.7851999836308616,
"count": 98
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 50.52799980342388,
"min": -16.000000834465027,
"max": 62.481999427080154,
"count": 98
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.037274526238131024,
"min": 0.025683769491817138,
"max": 6.449226463213563,
"count": 98
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.1182357871439308,
"min": 0.8989319322135998,
"max": 103.18762341141701,
"count": 98
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687739943",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687742225"
},
"total": 2282.259003199,
"count": 1,
"self": 0.4362220529997103,
"children": {
"run_training.setup": {
"total": 0.0565557780000745,
"count": 1,
"self": 0.0565557780000745
},
"TrainerController.start_learning": {
"total": 2281.766225368,
"count": 1,
"self": 1.3898236320269461,
"children": {
"TrainerController._reset_env": {
"total": 5.278706085000067,
"count": 1,
"self": 5.278706085000067
},
"TrainerController.advance": {
"total": 2275.0002743049736,
"count": 64042,
"self": 1.336264221968122,
"children": {
"env_step": {
"total": 1630.5615501559826,
"count": 64042,
"self": 1519.7967926000317,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.97684039097408,
"count": 64042,
"self": 4.798707302971707,
"children": {
"TorchPolicy.evaluate": {
"total": 105.17813308800237,
"count": 62560,
"self": 105.17813308800237
}
}
},
"workers": {
"total": 0.7879171649767613,
"count": 64042,
"self": 0.0,
"children": {
"worker_root": {
"total": 2276.3691483099733,
"count": 64042,
"is_parallel": true,
"self": 870.0120739350468,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002789444999962143,
"count": 1,
"is_parallel": true,
"self": 0.0007860799998979928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00200336500006415,
"count": 8,
"is_parallel": true,
"self": 0.00200336500006415
}
}
},
"UnityEnvironment.step": {
"total": 0.04910212300001149,
"count": 1,
"is_parallel": true,
"self": 0.0005663149997872097,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005027030001656385,
"count": 1,
"is_parallel": true,
"self": 0.0005027030001656385
},
"communicator.exchange": {
"total": 0.046007228999997096,
"count": 1,
"is_parallel": true,
"self": 0.046007228999997096
},
"steps_from_proto": {
"total": 0.002025876000061544,
"count": 1,
"is_parallel": true,
"self": 0.0003906360002474685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016352399998140754,
"count": 8,
"is_parallel": true,
"self": 0.0016352399998140754
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1406.3570743749265,
"count": 64041,
"is_parallel": true,
"self": 34.05369571584629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.779807668019885,
"count": 64041,
"is_parallel": true,
"self": 22.779807668019885
},
"communicator.exchange": {
"total": 1246.07124977402,
"count": 64041,
"is_parallel": true,
"self": 1246.07124977402
},
"steps_from_proto": {
"total": 103.45232121704021,
"count": 64041,
"is_parallel": true,
"self": 20.107409374102872,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.34491184293734,
"count": 512328,
"is_parallel": true,
"self": 83.34491184293734
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 643.1024599270229,
"count": 64042,
"self": 2.6191588409747055,
"children": {
"process_trajectory": {
"total": 110.0185244910424,
"count": 64042,
"self": 109.81311750904274,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20540698199965846,
"count": 2,
"self": 0.20540698199965846
}
}
},
"_update_policy": {
"total": 530.4647765950058,
"count": 456,
"self": 339.18473643003426,
"children": {
"TorchPPOOptimizer.update": {
"total": 191.28004016497152,
"count": 22776,
"self": 191.28004016497152
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.303999852098059e-06,
"count": 1,
"self": 1.303999852098059e-06
},
"TrainerController._save_models": {
"total": 0.0974200419996123,
"count": 1,
"self": 0.0013721129994337389,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09604792900017856,
"count": 1,
"self": 0.09604792900017856
}
}
}
}
}
}
}