{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21545448899269104,
"min": 0.1969103068113327,
"max": 0.21545448899269104,
"count": 4
},
"Pyramids.Policy.Entropy.sum": {
"value": 6408.478515625,
"min": 2177.090576171875,
"max": 6408.478515625,
"count": 4
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 225.359375,
"min": 225.359375,
"max": 257.05172413793105,
"count": 4
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28846.0,
"min": 7918.0,
"max": 30203.0,
"count": 4
},
"Pyramids.Step.mean": {
"value": 2099900.0,
"min": 2009900.0,
"max": 2099900.0,
"count": 4
},
"Pyramids.Step.sum": {
"value": 2099900.0,
"min": 2009900.0,
"max": 2099900.0,
"count": 4
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7720959782600403,
"min": 0.7012885808944702,
"max": 0.7720959782600403,
"count": 4
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 230.0845947265625,
"min": 65.98175811767578,
"max": 230.0845947265625,
"count": 4
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003978192340582609,
"min": -0.0076828706078231335,
"max": 0.017121044918894768,
"count": 4
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.1855013370513916,
"min": -0.7221898436546326,
"max": 4.999345302581787,
"count": 4
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.760317814384782,
"min": 1.7155666507780551,
"max": 1.760317814384782,
"count": 4
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 227.08099805563688,
"min": 58.081999987363815,
"max": 227.08099805563688,
"count": 4
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.760317814384782,
"min": 1.7155666507780551,
"max": 1.760317814384782,
"count": 4
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 227.08099805563688,
"min": 58.081999987363815,
"max": 227.08099805563688,
"count": 4
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.024505979789223215,
"min": 0.024505979789223215,
"max": 0.02954132916056551,
"count": 4
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1612713928097946,
"min": 0.9748638622986618,
"max": 3.3699953574396204,
"count": 4
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07079494727492733,
"min": 0.06862178926563096,
"max": 0.07079494727492733,
"count": 4
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9911292618489825,
"min": 0.2801392069280458,
"max": 1.0293268389844645,
"count": 4
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017137390891938206,
"min": 0.013357302560810544,
"max": 0.017137390891938206,
"count": 4
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2399234724871349,
"min": 0.053429210243242174,
"max": 0.2399234724871349,
"count": 4
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0387523816734685e-06,
"min": 2.0387523816734685e-06,
"max": 1.3541988343178574e-05,
"count": 4
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.8542533343428557e-05,
"min": 2.8542533343428557e-05,
"max": 0.00015146492094071423,
"count": 4
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10067955102040818,
"min": 0.10067955102040818,
"max": 0.1045139642857143,
"count": 4
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4095137142857146,
"min": 0.4180558571428572,
"max": 1.5321639999999999,
"count": 4
},
"Pyramids.Policy.Beta.mean": {
"value": 7.788714693877549e-05,
"min": 7.788714693877549e-05,
"max": 0.0004609450321428573,
"count": 4
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0010904200571428568,
"min": 0.0010904200571428568,
"max": 0.005183736928571427,
"count": 4
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010521610267460346,
"min": 0.010521610267460346,
"max": 0.011539807543158531,
"count": 4
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1473025381565094,
"min": 0.046159230172634125,
"max": 0.16239240765571594,
"count": 4
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726394389",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726394770"
},
"total": 380.67846820200066,
"count": 1,
"self": 0.5830978819994925,
"children": {
"run_training.setup": {
"total": 0.06438857400098641,
"count": 1,
"self": 0.06438857400098641
},
"TrainerController.start_learning": {
"total": 380.0309817460002,
"count": 1,
"self": 0.2517459590399085,
"children": {
"TrainerController._reset_env": {
"total": 2.010494397000002,
"count": 1,
"self": 2.010494397000002
},
"TrainerController.advance": {
"total": 377.66390660696015,
"count": 6604,
"self": 0.2699393370312464,
"children": {
"env_step": {
"total": 269.1196077229979,
"count": 6604,
"self": 252.33229802090318,
"children": {
"SubprocessEnvManager._take_step": {
"total": 16.63636370207496,
"count": 6604,
"self": 0.6823030201030633,
"children": {
"TorchPolicy.evaluate": {
"total": 15.954060681971896,
"count": 6294,
"self": 15.954060681971896
}
}
},
"workers": {
"total": 0.15094600001975778,
"count": 6604,
"self": 0.0,
"children": {
"worker_root": {
"total": 379.17492600895093,
"count": 6604,
"is_parallel": true,
"self": 146.1772384590131,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023413670005538734,
"count": 1,
"is_parallel": true,
"self": 0.0007692789986322168,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015720880019216565,
"count": 8,
"is_parallel": true,
"self": 0.0015720880019216565
}
}
},
"UnityEnvironment.step": {
"total": 0.06171483600155625,
"count": 1,
"is_parallel": true,
"self": 0.0007473580008081626,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000539325001227553,
"count": 1,
"is_parallel": true,
"self": 0.000539325001227553
},
"communicator.exchange": {
"total": 0.058162569999694824,
"count": 1,
"is_parallel": true,
"self": 0.058162569999694824
},
"steps_from_proto": {
"total": 0.0022655829998257104,
"count": 1,
"is_parallel": true,
"self": 0.00045411900464387145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001811463995181839,
"count": 8,
"is_parallel": true,
"self": 0.001811463995181839
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 232.99768754993784,
"count": 6603,
"is_parallel": true,
"self": 4.91209454291311,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.0470732619778573,
"count": 6603,
"is_parallel": true,
"self": 3.0470732619778573
},
"communicator.exchange": {
"total": 211.42684691702016,
"count": 6603,
"is_parallel": true,
"self": 211.42684691702016
},
"steps_from_proto": {
"total": 13.611672828026713,
"count": 6603,
"is_parallel": true,
"self": 2.9652305290510412,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10.646442298975671,
"count": 52824,
"is_parallel": true,
"self": 10.646442298975671
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 108.27435954693101,
"count": 6604,
"self": 0.4769260489647422,
"children": {
"process_trajectory": {
"total": 17.824004556974614,
"count": 6604,
"self": 17.824004556974614
},
"_update_policy": {
"total": 89.97342894099165,
"count": 47,
"self": 36.01618036099717,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.957248579994484,
"count": 2256,
"self": 53.957248579994484
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0769999789772555e-06,
"count": 1,
"self": 1.0769999789772555e-06
},
"TrainerController._save_models": {
"total": 0.10483370600013586,
"count": 1,
"self": 0.002437962999465526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10239574300067034,
"count": 1,
"self": 0.10239574300067034
}
}
}
}
}
}
}