{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.5013519525527954,
"min": 1.5013519525527954,
"max": 1.5013519525527954,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45545.01171875,
"min": 45545.01171875,
"max": 45545.01171875,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.11159086227416992,
"min": -0.11159086227416992,
"max": -0.11159086227416992,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -26.44703483581543,
"min": -26.44703483581543,
"max": -26.44703483581543,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.16744586825370789,
"min": 0.16744586825370789,
"max": 0.16744586825370789,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 39.684669494628906,
"min": 39.684669494628906,
"max": 39.684669494628906,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07356957144961575,
"min": 0.07356957144961575,
"max": 0.07356957144961575,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5149870001473102,
"min": 0.5149870001473102,
"max": 0.5149870001473102,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005684913074515065,
"min": 0.005684913074515065,
"max": 0.005684913074515065,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.03979439152160545,
"min": 0.03979439152160545,
"max": 0.03979439152160545,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00020301260375771423,
"min": 0.00020301260375771423,
"max": 0.00020301260375771423,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0014210882263039997,
"min": 0.0014210882263039997,
"max": 0.0014210882263039997,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16767085714285712,
"min": 0.16767085714285712,
"max": 0.16767085714285712,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1736959999999999,
"min": 1.1736959999999999,
"max": 1.1736959999999999,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006770318628571428,
"min": 0.006770318628571428,
"max": 0.006770318628571428,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0473922304,
"min": 0.0473922304,
"max": 0.0473922304,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.4472079277038574,
"min": 0.4472079277038574,
"max": 0.4472079277038574,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 3.130455493927002,
"min": 3.130455493927002,
"max": 3.130455493927002,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 8.904934572055936,
"min": 8.904934572055936,
"max": 8.904934572055936,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 142.47895315289497,
"min": 142.47895315289497,
"max": 142.47895315289497,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753777235",
"python_version": "3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753777334"
},
"total": 98.86324001999992,
"count": 1,
"self": 0.47673020499951235,
"children": {
"run_training.setup": {
"total": 0.024873321000086435,
"count": 1,
"self": 0.024873321000086435
},
"TrainerController.start_learning": {
"total": 98.36163649400032,
"count": 1,
"self": 0.07326060000241341,
"children": {
"TrainerController._reset_env": {
"total": 2.7142901619999975,
"count": 1,
"self": 2.7142901619999975
},
"TrainerController.advance": {
"total": 95.44732628099791,
"count": 3132,
"self": 0.07490508898081316,
"children": {
"env_step": {
"total": 63.220539555017695,
"count": 3132,
"self": 55.186422894012594,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7.988736288005384,
"count": 3132,
"self": 0.24691419701684936,
"children": {
"TorchPolicy.evaluate": {
"total": 7.741822090988535,
"count": 3131,
"self": 7.741822090988535
}
}
},
"workers": {
"total": 0.0453803729997162,
"count": 3132,
"self": 0.0,
"children": {
"worker_root": {
"total": 97.98650231598458,
"count": 3132,
"is_parallel": true,
"self": 48.54224929897555,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028152419999969425,
"count": 1,
"is_parallel": true,
"self": 0.0009677139992163575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001847528000780585,
"count": 8,
"is_parallel": true,
"self": 0.001847528000780585
}
}
},
"UnityEnvironment.step": {
"total": 0.049129712999729236,
"count": 1,
"is_parallel": true,
"self": 0.000597325999478926,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004791530000147759,
"count": 1,
"is_parallel": true,
"self": 0.0004791530000147759
},
"communicator.exchange": {
"total": 0.04605626000011398,
"count": 1,
"is_parallel": true,
"self": 0.04605626000011398
},
"steps_from_proto": {
"total": 0.001996974000121554,
"count": 1,
"is_parallel": true,
"self": 0.00047791100087124505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001519062999250309,
"count": 8,
"is_parallel": true,
"self": 0.001519062999250309
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 49.44425301700903,
"count": 3131,
"is_parallel": true,
"self": 1.6099424830340467,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.1492256459951022,
"count": 3131,
"is_parallel": true,
"self": 1.1492256459951022
},
"communicator.exchange": {
"total": 41.757451673980995,
"count": 3131,
"is_parallel": true,
"self": 41.757451673980995
},
"steps_from_proto": {
"total": 4.927633213998888,
"count": 3131,
"is_parallel": true,
"self": 1.0086358209341597,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.9189973930647284,
"count": 25048,
"is_parallel": true,
"self": 3.9189973930647284
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 32.151881636999406,
"count": 3132,
"self": 0.09715488901201752,
"children": {
"process_trajectory": {
"total": 6.160585111987075,
"count": 3132,
"self": 6.160585111987075
},
"_update_policy": {
"total": 25.894141636000313,
"count": 13,
"self": 14.320586725993962,
"children": {
"TorchPPOOptimizer.update": {
"total": 11.57355491000635,
"count": 1119,
"self": 11.57355491000635
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.759998308960348e-07,
"count": 1,
"self": 8.759998308960348e-07
},
"TrainerController._save_models": {
"total": 0.12675857500016718,
"count": 1,
"self": 0.0013849150004716648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12537365999969552,
"count": 1,
"self": 0.12537365999969552
}
}
}
}
}
}
}