{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21803376078605652,
"min": 0.21662364900112152,
"max": 1.4206165075302124,
"count": 30
},
"Pyramids.Policy.Entropy.sum": {
"value": 10964.4814453125,
"min": 10848.5126953125,
"max": 71099.015625,
"count": 30
},
"Pyramids.Step.mean": {
"value": 1499891.0,
"min": 49920.0,
"max": 1499891.0,
"count": 30
},
"Pyramids.Step.sum": {
"value": 1499891.0,
"min": 49920.0,
"max": 1499891.0,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7389137148857117,
"min": -0.10044143348932266,
"max": 0.7556094527244568,
"count": 30
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 361.32879638671875,
"min": -40.17657470703125,
"max": 365.7149658203125,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0072410693392157555,
"min": -0.013385334052145481,
"max": 0.2099233865737915,
"count": 30
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.5408828258514404,
"min": -6.478501796722412,
"max": 83.75942993164062,
"count": 30
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.09671019162676574,
"min": 0.09561453901167023,
"max": 0.10274666010945611,
"count": 30
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 2.3210445990423776,
"min": 1.201505965621133,
"max": 2.4291341004927287,
"count": 30
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0166195064054115,
"min": 0.00011141922623391655,
"max": 0.016987197312703844,
"count": 30
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.39886815372987594,
"min": 0.002005546072210498,
"max": 0.40769273550489227,
"count": 30
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.083298305600001e-06,
"min": 5.083298305600001e-06,
"max": 0.00029477120174293327,
"count": 30
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00012199915933440003,
"min": 0.00012199915933440003,
"max": 0.0054030778989742,
"count": 30
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1016944,
"min": 0.1016944,
"max": 0.19825706666666668,
"count": 30
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4406656,
"min": 2.3406848,
"max": 4.119861,
"count": 30
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00017927056000000008,
"min": 0.00017927056000000008,
"max": 0.00982588096,
"count": 30
},
"Pyramids.Policy.Beta.sum": {
"value": 0.004302493440000002,
"min": 0.004302493440000002,
"max": 0.18015247742,
"count": 30
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006612860132008791,
"min": 0.0065802112221717834,
"max": 0.17313165962696075,
"count": 30
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15870864689350128,
"min": 0.1541585624217987,
"max": 2.077579975128174,
"count": 30
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 250.318407960199,
"min": 244.3681592039801,
"max": 999.0,
"count": 30
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 50314.0,
"min": 47500.0,
"max": 53710.0,
"count": 30
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7098726229881174,
"min": -1.0000000521540642,
"max": 1.7459819845110178,
"count": 30
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 343.6843972206116,
"min": -48.00000250339508,
"max": 349.19639690220356,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7098726229881174,
"min": -1.0000000521540642,
"max": 1.7459819845110178,
"count": 30
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 343.6843972206116,
"min": -48.00000250339508,
"max": 349.19639690220356,
"count": 30
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.017370152949912714,
"min": 0.017370152949912714,
"max": 2.668248741654679,
"count": 30
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4914007429324556,
"min": 3.413188502270714,
"max": 128.0759395994246,
"count": 30
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722501682",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722506105"
},
"total": 4423.624909221,
"count": 1,
"self": 1.0443064859991864,
"children": {
"run_training.setup": {
"total": 0.05891033299985793,
"count": 1,
"self": 0.05891033299985793
},
"TrainerController.start_learning": {
"total": 4422.521692402001,
"count": 1,
"self": 2.981850718919304,
"children": {
"TrainerController._reset_env": {
"total": 1.9860108200000468,
"count": 1,
"self": 1.9860108200000468
},
"TrainerController.advance": {
"total": 4417.405389072081,
"count": 96535,
"self": 2.719442416977472,
"children": {
"env_step": {
"total": 2947.3605758921044,
"count": 96535,
"self": 2713.4800765888504,
"children": {
"SubprocessEnvManager._take_step": {
"total": 232.23741196217088,
"count": 96535,
"self": 8.728964415245173,
"children": {
"TorchPolicy.evaluate": {
"total": 223.5084475469257,
"count": 93821,
"self": 223.5084475469257
}
}
},
"workers": {
"total": 1.6430873410831737,
"count": 96535,
"self": 0.0,
"children": {
"worker_root": {
"total": 4412.891079741996,
"count": 96535,
"is_parallel": true,
"self": 1920.5098699368173,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026321049999751267,
"count": 1,
"is_parallel": true,
"self": 0.0009111029994528508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001721002000522276,
"count": 8,
"is_parallel": true,
"self": 0.001721002000522276
}
}
},
"UnityEnvironment.step": {
"total": 0.07009186100003717,
"count": 1,
"is_parallel": true,
"self": 0.0007183509997048532,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.004292113999781577,
"count": 1,
"is_parallel": true,
"self": 0.004292113999781577
},
"communicator.exchange": {
"total": 0.06309020000026067,
"count": 1,
"is_parallel": true,
"self": 0.06309020000026067
},
"steps_from_proto": {
"total": 0.001991196000290074,
"count": 1,
"is_parallel": true,
"self": 0.0004326120001678646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015585840001222095,
"count": 8,
"is_parallel": true,
"self": 0.0015585840001222095
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2492.3812098051785,
"count": 96534,
"is_parallel": true,
"self": 57.98847198854128,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 41.114058780062805,
"count": 96534,
"is_parallel": true,
"self": 41.114058780062805
},
"communicator.exchange": {
"total": 2218.048739766714,
"count": 96534,
"is_parallel": true,
"self": 2218.048739766714
},
"steps_from_proto": {
"total": 175.22993926986055,
"count": 96534,
"is_parallel": true,
"self": 38.033532697245846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 137.1964065726147,
"count": 772272,
"is_parallel": true,
"self": 137.1964065726147
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1467.3253707629988,
"count": 96535,
"self": 4.990377789036756,
"children": {
"process_trajectory": {
"total": 217.8541875569631,
"count": 96535,
"self": 217.478935797963,
"children": {
"RLTrainer._checkpoint": {
"total": 0.37525175900009344,
"count": 3,
"self": 0.37525175900009344
}
}
},
"_update_policy": {
"total": 1244.480805416999,
"count": 666,
"self": 613.9784468071198,
"children": {
"TorchPPOOptimizer.update": {
"total": 630.5023586098791,
"count": 69405,
"self": 630.5023586098791
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5670002539991401e-06,
"count": 1,
"self": 1.5670002539991401e-06
},
"TrainerController._save_models": {
"total": 0.14844022400029644,
"count": 1,
"self": 0.002288038000187953,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14615218600010849,
"count": 1,
"self": 0.14615218600010849
}
}
}
}
}
}
}