{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5803764462471008,
"min": 0.5803764462471008,
"max": 1.4619883298873901,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17318.43359375,
"min": 17318.43359375,
"max": 44350.87890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989925.0,
"min": 29951.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989925.0,
"min": 29951.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3750361204147339,
"min": -0.16430480778217316,
"max": 0.4063592553138733,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 100.1346435546875,
"min": -38.94023895263672,
"max": 108.4979248046875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.26886966824531555,
"min": -0.26886966824531555,
"max": 0.2690623104572296,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -71.78820037841797,
"min": -71.78820037841797,
"max": 63.76776885986328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07055515987061274,
"min": 0.06539901213492828,
"max": 0.07301834890033095,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9877722381885784,
"min": 0.5658426696711216,
"max": 1.0437948404578492,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.02433275010705637,
"min": 0.00023663470108917386,
"max": 0.02433275010705637,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3406585014987892,
"min": 0.00307625111415926,
"max": 0.3406585014987892,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.694347435250003e-06,
"min": 7.694347435250003e-06,
"max": 0.0002948401142199625,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010772086409350005,
"min": 0.00010772086409350005,
"max": 0.003609052596982499,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256475000000001,
"min": 0.10256475000000001,
"max": 0.1982800375,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359065000000002,
"min": 1.4359065000000002,
"max": 2.5691352000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002662185250000001,
"min": 0.0002662185250000001,
"max": 0.00982817574625,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037270593500000017,
"min": 0.0037270593500000017,
"max": 0.12031144825,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0084510687738657,
"min": 0.0084510687738657,
"max": 0.4080495536327362,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1183149665594101,
"min": 0.1183149665594101,
"max": 3.2643964290618896,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 438.51428571428573,
"min": 436.484375,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30696.0,
"min": 16366.0,
"max": 33431.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4471599789602416,
"min": -0.9999000523239374,
"max": 1.4697281058179215,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 101.30119852721691,
"min": -31.996801674365997,
"max": 101.30119852721691,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4471599789602416,
"min": -0.9999000523239374,
"max": 1.4697281058179215,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 101.30119852721691,
"min": -31.996801674365997,
"max": 101.30119852721691,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.038820630348605584,
"min": 0.038820630348605584,
"max": 8.869967715705142,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.717444124402391,
"min": 2.5553170590719674,
"max": 150.78945116698742,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746725387",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746727645"
},
"total": 2258.185667812,
"count": 1,
"self": 0.7877206930002103,
"children": {
"run_training.setup": {
"total": 0.0201007259997823,
"count": 1,
"self": 0.0201007259997823
},
"TrainerController.start_learning": {
"total": 2257.377846393,
"count": 1,
"self": 1.4069917488927786,
"children": {
"TrainerController._reset_env": {
"total": 2.3323682850000296,
"count": 1,
"self": 2.3323682850000296
},
"TrainerController.advance": {
"total": 2253.5213766921065,
"count": 63515,
"self": 1.557367698894268,
"children": {
"env_step": {
"total": 1575.4248510660927,
"count": 63515,
"self": 1415.3999103753295,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.20398924991332,
"count": 63515,
"self": 4.833780697835209,
"children": {
"TorchPolicy.evaluate": {
"total": 154.3702085520781,
"count": 62581,
"self": 154.3702085520781
}
}
},
"workers": {
"total": 0.8209514408499672,
"count": 63515,
"self": 0.0,
"children": {
"worker_root": {
"total": 2252.041455205072,
"count": 63515,
"is_parallel": true,
"self": 955.0309135189596,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019464819997665472,
"count": 1,
"is_parallel": true,
"self": 0.0006318720006674994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013146099990990479,
"count": 8,
"is_parallel": true,
"self": 0.0013146099990990479
}
}
},
"UnityEnvironment.step": {
"total": 0.04828335599995626,
"count": 1,
"is_parallel": true,
"self": 0.0005464880000545236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047194199987643515,
"count": 1,
"is_parallel": true,
"self": 0.00047194199987643515
},
"communicator.exchange": {
"total": 0.04571673100008411,
"count": 1,
"is_parallel": true,
"self": 0.04571673100008411
},
"steps_from_proto": {
"total": 0.0015481949999411881,
"count": 1,
"is_parallel": true,
"self": 0.0003486870000415365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011995079998996516,
"count": 8,
"is_parallel": true,
"self": 0.0011995079998996516
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.0105416861124,
"count": 63514,
"is_parallel": true,
"self": 32.87425865816385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.154283554895756,
"count": 63514,
"is_parallel": true,
"self": 24.154283554895756
},
"communicator.exchange": {
"total": 1140.1200869389295,
"count": 63514,
"is_parallel": true,
"self": 1140.1200869389295
},
"steps_from_proto": {
"total": 99.86191253412335,
"count": 63514,
"is_parallel": true,
"self": 20.66197956395581,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.19993297016754,
"count": 508112,
"is_parallel": true,
"self": 79.19993297016754
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 676.5391579271195,
"count": 63515,
"self": 2.6959643292188957,
"children": {
"process_trajectory": {
"total": 132.25129217489211,
"count": 63515,
"self": 132.009942817891,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24134935700112692,
"count": 2,
"self": 0.24134935700112692
}
}
},
"_update_policy": {
"total": 541.5919014230085,
"count": 453,
"self": 300.7717996940578,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.8201017289507,
"count": 22773,
"self": 240.8201017289507
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2119999155402184e-06,
"count": 1,
"self": 1.2119999155402184e-06
},
"TrainerController._save_models": {
"total": 0.11710845500056166,
"count": 1,
"self": 0.0017254379999940284,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11538301700056763,
"count": 1,
"self": 0.11538301700056763
}
}
}
}
}
}
}