{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4368692636489868,
"min": 0.4308827519416809,
"max": 1.6023768186569214,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 8751.365234375,
"min": 8659.01953125,
"max": 32201.365234375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 999892.0,
"min": 19968.0,
"max": 999892.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 999892.0,
"min": 19968.0,
"max": 999892.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07685001194477081,
"min": -0.1043524369597435,
"max": -0.0594039112329483,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -12.296002388000488,
"min": -16.851215362548828,
"max": -9.564029693603516,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0173623226583004,
"min": 0.01718338020145893,
"max": 0.3498693108558655,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7779715061187744,
"min": 2.7560315132141113,
"max": 55.62921905517578,
"count": 50
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.07259224355220795,
"min": 0.032525256276130676,
"max": 0.2536250948905945,
"count": 50
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 11.61475944519043,
"min": 5.171515464782715,
"max": 40.580013275146484,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06893045877513941,
"min": 0.05804945588942594,
"max": 0.07661458291947895,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.13786091755027882,
"min": 0.06736144984897692,
"max": 0.15150092454213235,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 6.862680124299913e-05,
"min": 1.0468886299008772e-05,
"max": 0.015604345289628126,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.00013725360248599826,
"min": 1.9393958666569233e-05,
"max": 0.015604345289628126,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.240098919999993e-06,
"min": 3.240098919999993e-06,
"max": 0.00029631360122879996,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.480197839999986e-06,
"min": 6.480197839999986e-06,
"max": 0.0005819136060287999,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10162000000000002,
"min": 0.10162000000000002,
"max": 0.24815680000000004,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.20324000000000003,
"min": 0.12249219999999995,
"max": 0.4909568000000001,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 2.069199999999997e-05,
"min": 2.069199999999997e-05,
"max": 0.00098783488,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 4.138399999999994e-05,
"min": 4.138399999999994e-05,
"max": 0.0019403148800000002,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01641818881034851,
"min": 0.014551613479852676,
"max": 0.8168574571609497,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.03283637762069702,
"min": 0.017075607553124428,
"max": 0.8168574571609497,
"count": 50
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.06454861147794873,
"min": 0.06454861147794873,
"max": 0.5022600194853213,
"count": 50
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 0.12909722295589746,
"min": 0.07133469521068037,
"max": 0.5236991447788002,
"count": 50
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.0685774822505967,
"min": 0.06479755593463779,
"max": 0.9341130413942866,
"count": 50
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.1371549645011934,
"min": 0.07756975268324216,
"max": 1.1713232168373668,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 889.15,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 15039.0,
"max": 31968.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9997000526636839,
"min": -1.0000000521540642,
"max": -0.47678952193573904,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -19.994001053273678,
"min": -31.99920167028904,
"max": -9.059000916779041,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9997000526636839,
"min": -1.0000000521540642,
"max": -0.47678952193573904,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -19.994001053273678,
"min": -31.99920167028904,
"max": -9.059000916779041,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.16850452576763927,
"min": 0.1461322949693093,
"max": 17.824592078104615,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3700905153527856,
"min": 2.6850122353062034,
"max": 285.19347324967384,
"count": 50
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 0.6733389327302575,
"min": 0.6435650361134954,
"max": 3.64256305526942,
"count": 50
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 13.46677865460515,
"min": 11.854416705667973,
"max": 107.61907610297203,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1766092466",
"python_version": "3.10.12 (main, Nov 4 2025, 08:48:33) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1766096460"
},
"total": 3993.6023549690003,
"count": 1,
"self": 0.811768376001055,
"children": {
"run_training.setup": {
"total": 0.04085086099985347,
"count": 1,
"self": 0.04085086099985347
},
"TrainerController.start_learning": {
"total": 3992.7497357319994,
"count": 1,
"self": 2.3739707189783985,
"children": {
"TrainerController._reset_env": {
"total": 3.183051900999999,
"count": 1,
"self": 3.183051900999999
},
"TrainerController.advance": {
"total": 3986.9343676280205,
"count": 62985,
"self": 2.4794799581022744,
"children": {
"env_step": {
"total": 2064.8888257369913,
"count": 62985,
"self": 1874.1203932320477,
"children": {
"SubprocessEnvManager._take_step": {
"total": 189.3285083650244,
"count": 62985,
"self": 7.631458312866471,
"children": {
"TorchPolicy.evaluate": {
"total": 181.69705005215792,
"count": 62557,
"self": 181.69705005215792
}
}
},
"workers": {
"total": 1.439924139919185,
"count": 62985,
"self": 0.0,
"children": {
"worker_root": {
"total": 3984.631569267093,
"count": 62985,
"is_parallel": true,
"self": 2292.9114663560745,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0035667199999807053,
"count": 1,
"is_parallel": true,
"self": 0.0013955640006315662,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002171155999349139,
"count": 8,
"is_parallel": true,
"self": 0.002171155999349139
}
}
},
"UnityEnvironment.step": {
"total": 0.07281096699989575,
"count": 1,
"is_parallel": true,
"self": 0.0007347069999923406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005616630000986333,
"count": 1,
"is_parallel": true,
"self": 0.0005616630000986333
},
"communicator.exchange": {
"total": 0.06920479400014301,
"count": 1,
"is_parallel": true,
"self": 0.06920479400014301
},
"steps_from_proto": {
"total": 0.0023098029996617697,
"count": 1,
"is_parallel": true,
"self": 0.00047800899938010843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018317940002816613,
"count": 8,
"is_parallel": true,
"self": 0.0018317940002816613
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1691.7201029110183,
"count": 62984,
"is_parallel": true,
"self": 46.09102295205912,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.606381755063012,
"count": 62984,
"is_parallel": true,
"self": 31.606381755063012
},
"communicator.exchange": {
"total": 1467.7338906199088,
"count": 62984,
"is_parallel": true,
"self": 1467.7338906199088
},
"steps_from_proto": {
"total": 146.28880758398736,
"count": 62984,
"is_parallel": true,
"self": 29.585931012516085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 116.70287657147128,
"count": 503872,
"is_parallel": true,
"self": 116.70287657147128
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1919.566061932927,
"count": 62985,
"self": 4.029967697950269,
"children": {
"process_trajectory": {
"total": 210.87578020897672,
"count": 62985,
"self": 210.34872139497747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5270588139992469,
"count": 2,
"self": 0.5270588139992469
}
}
},
"_update_policy": {
"total": 1704.660314026,
"count": 94,
"self": 794.2426236510391,
"children": {
"TorchPPOOptimizer.update": {
"total": 910.4176903749608,
"count": 23187,
"self": 910.4176903749608
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4810002539888956e-06,
"count": 1,
"self": 1.4810002539888956e-06
},
"TrainerController._save_models": {
"total": 0.2583440030002748,
"count": 1,
"self": 0.011337405000631406,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24700659799964342,
"count": 1,
"self": 0.24700659799964342
}
}
}
}
}
}
}