{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6437311172485352,
"min": 0.589735746383667,
"max": 1.416475534439087,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19456.12890625,
"min": 17871.3515625,
"max": 42970.203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29932.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29932.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.10817890614271164,
"min": -0.11651916056871414,
"max": 0.42530879378318787,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 26.612010955810547,
"min": -27.964597702026367,
"max": 113.98275756835938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -2.345491409301758,
"min": -6.10516357421875,
"max": 0.7415123581886292,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -576.9909057617188,
"min": -1587.342529296875,
"max": 175.73843383789062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06801167780017633,
"min": 0.06424098359263827,
"max": 0.07313633597782339,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9521634892024687,
"min": 0.5671326788547745,
"max": 1.0412892848835327,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 1.6556923260940994,
"min": 0.00020093872507229304,
"max": 4.762759036118431,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 23.179692565317392,
"min": 0.0026122034259398095,
"max": 66.67862650565803,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.636640311628573e-06,
"min": 7.636640311628573e-06,
"max": 0.00029520945159685,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010691296436280003,
"min": 0.00010691296436280003,
"max": 0.0033825284724906,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025455142857143,
"min": 0.1025455142857143,
"max": 0.19840314999999997,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356372000000002,
"min": 1.4356372000000002,
"max": 2.5275094000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002642968771428573,
"min": 0.0002642968771428573,
"max": 0.009840474685,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003700156280000002,
"min": 0.003700156280000002,
"max": 0.11277818906,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010438172146677971,
"min": 0.010438172146677971,
"max": 0.5317188501358032,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1461344063282013,
"min": 0.1461344063282013,
"max": 4.253750801086426,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 737.6666666666666,
"min": 428.4848484848485,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30982.0,
"min": 15964.0,
"max": 32595.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.40496663021899404,
"min": -0.9999500522390008,
"max": 1.493170126827795,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 17.00859846919775,
"min": -31.998401671648026,
"max": 100.04239849746227,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.40496663021899404,
"min": -0.9999500522390008,
"max": 1.493170126827795,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 17.00859846919775,
"min": -31.998401671648026,
"max": 100.04239849746227,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07992272592881429,
"min": 0.058543632732618055,
"max": 11.119983524084091,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3567544890102,
"min": 3.066087754406908,
"max": 177.91973638534546,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744080477",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744083718"
},
"total": 3240.9060520580006,
"count": 1,
"self": 0.6403814500008593,
"children": {
"run_training.setup": {
"total": 0.031606990000000224,
"count": 1,
"self": 0.031606990000000224
},
"TrainerController.start_learning": {
"total": 3240.2340636179997,
"count": 1,
"self": 2.3978963479539743,
"children": {
"TrainerController._reset_env": {
"total": 4.642176391000248,
"count": 1,
"self": 4.642176391000248
},
"TrainerController.advance": {
"total": 3233.096982283045,
"count": 63571,
"self": 2.527847131189901,
"children": {
"env_step": {
"total": 2121.3593254409366,
"count": 63571,
"self": 1952.393461039646,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.60579144116355,
"count": 63571,
"self": 7.028123955101364,
"children": {
"TorchPolicy.evaluate": {
"total": 160.57766748606218,
"count": 62582,
"self": 160.57766748606218
}
}
},
"workers": {
"total": 1.3600729601271269,
"count": 63571,
"self": 0.0,
"children": {
"worker_root": {
"total": 3232.524142706056,
"count": 63571,
"is_parallel": true,
"self": 1457.1967202419896,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00427604899959988,
"count": 1,
"is_parallel": true,
"self": 0.0015468520000467834,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027291969995530962,
"count": 8,
"is_parallel": true,
"self": 0.0027291969995530962
}
}
},
"UnityEnvironment.step": {
"total": 0.06599142000004576,
"count": 1,
"is_parallel": true,
"self": 0.0006435420004891057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000536389999979292,
"count": 1,
"is_parallel": true,
"self": 0.000536389999979292
},
"communicator.exchange": {
"total": 0.06270738099965456,
"count": 1,
"is_parallel": true,
"self": 0.06270738099965456
},
"steps_from_proto": {
"total": 0.0021041069999228057,
"count": 1,
"is_parallel": true,
"self": 0.0004156020004302263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016885049994925794,
"count": 8,
"is_parallel": true,
"self": 0.0016885049994925794
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1775.3274224640663,
"count": 63570,
"is_parallel": true,
"self": 45.2519477369824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.362613214020257,
"count": 63570,
"is_parallel": true,
"self": 30.362613214020257
},
"communicator.exchange": {
"total": 1574.09652253602,
"count": 63570,
"is_parallel": true,
"self": 1574.09652253602
},
"steps_from_proto": {
"total": 125.6163389770436,
"count": 63570,
"is_parallel": true,
"self": 27.742648085165456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 97.87369089187814,
"count": 508560,
"is_parallel": true,
"self": 97.87369089187814
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1109.2098097109183,
"count": 63571,
"self": 4.715605532756399,
"children": {
"process_trajectory": {
"total": 165.89719800016928,
"count": 63571,
"self": 165.57008659516987,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32711140499941394,
"count": 2,
"self": 0.32711140499941394
}
}
},
"_update_policy": {
"total": 938.5970061779926,
"count": 451,
"self": 367.0871505170544,
"children": {
"TorchPPOOptimizer.update": {
"total": 571.5098556609382,
"count": 22803,
"self": 571.5098556609382
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2090004020137712e-06,
"count": 1,
"self": 1.2090004020137712e-06
},
"TrainerController._save_models": {
"total": 0.09700738700030342,
"count": 1,
"self": 0.001938148000590445,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09506923899971298,
"count": 1,
"self": 0.09506923899971298
}
}
}
}
}
}
}