{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.46655145287513733,
"min": 0.4543718099594116,
"max": 0.5169422030448914,
"count": 13
},
"Pyramids.Policy.Entropy.sum": {
"value": 13989.0791015625,
"min": 13529.375,
"max": 15318.03125,
"count": 13
},
"Pyramids.Step.mean": {
"value": 1589898.0,
"min": 1229962.0,
"max": 1589898.0,
"count": 13
},
"Pyramids.Step.sum": {
"value": 1589898.0,
"min": 1229962.0,
"max": 1589898.0,
"count": 13
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3393390476703644,
"min": 0.1938980519771576,
"max": 0.3393390476703644,
"count": 13
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 89.24617004394531,
"min": 49.25010681152344,
"max": 89.24617004394531,
"count": 13
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 9.462202072143555,
"min": 8.912569046020508,
"max": 18.159835815429688,
"count": 13
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2488.55908203125,
"min": 2335.093017578125,
"max": 4739.71728515625,
"count": 13
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 489.4375,
"min": 436.26785714285717,
"max": 678.8723404255319,
"count": 13
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31324.0,
"min": 24431.0,
"max": 32105.0,
"count": 13
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2646126727026605,
"min": 0.554914856210668,
"max": 1.4208178309989827,
"count": 13
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 79.67059838026762,
"min": 26.080998241901398,
"max": 79.67059838026762,
"count": 13
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2646126727026605,
"min": 0.554914856210668,
"max": 1.4208178309989827,
"count": 13
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 79.67059838026762,
"min": 26.080998241901398,
"max": 79.67059838026762,
"count": 13
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0364661766160723,
"min": 0.0364661766160723,
"max": 0.07318291784279009,
"count": 13
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.297369126812555,
"min": 2.137254884570666,
"max": 3.439597138611134,
"count": 13
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06579424443415996,
"min": 0.06579424443415996,
"max": 0.07123254773753053,
"count": 13
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9211194220782394,
"min": 0.9211194220782394,
"max": 1.0418033949584544,
"count": 13
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 9.290047738257616,
"min": 9.28028309158459,
"max": 41.81478864327073,
"count": 13
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 130.06066833560664,
"min": 129.92396328218425,
"max": 585.4070410057902,
"count": 13
},
"Pyramids.Policy.LearningRate.mean": {
"value": 4.629741313928572e-06,
"min": 4.629741313928572e-06,
"max": 7.203146884667855e-05,
"count": 13
},
"Pyramids.Policy.LearningRate.sum": {
"value": 6.481637839500001e-05,
"min": 6.481637839500001e-05,
"max": 0.0010084405638534997,
"count": 13
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1015432142857143,
"min": 0.1015432142857143,
"max": 0.1240104642857143,
"count": 13
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4216050000000002,
"min": 1.4216050000000002,
"max": 1.7361465000000003,
"count": 13
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00016416710714285722,
"min": 0.00016416710714285722,
"max": 0.002408645382142857,
"count": 13
},
"Pyramids.Policy.Beta.sum": {
"value": 0.002298339500000001,
"min": 0.002298339500000001,
"max": 0.03372103535,
"count": 13
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00714080547913909,
"min": 0.007051652763038874,
"max": 0.010782291181385517,
"count": 13
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09997127950191498,
"min": 0.09872313588857651,
"max": 0.1509520709514618,
"count": 13
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750182615",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750184077"
},
"total": 1461.9023671020004,
"count": 1,
"self": 0.6293885169998248,
"children": {
"run_training.setup": {
"total": 0.02867036400039069,
"count": 1,
"self": 0.02867036400039069
},
"TrainerController.start_learning": {
"total": 1461.2443082210002,
"count": 1,
"self": 1.119093739008349,
"children": {
"TrainerController._reset_env": {
"total": 3.476560967000296,
"count": 1,
"self": 3.476560967000296
},
"TrainerController.advance": {
"total": 1456.5675661149917,
"count": 25604,
"self": 1.1881691179823974,
"children": {
"env_step": {
"total": 1002.010742176064,
"count": 25604,
"self": 929.6181848720043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.73163796901736,
"count": 25604,
"self": 3.546071992996076,
"children": {
"TorchPolicy.evaluate": {
"total": 68.18556597602128,
"count": 25050,
"self": 68.18556597602128
}
}
},
"workers": {
"total": 0.6609193350423084,
"count": 25604,
"self": 0.0,
"children": {
"worker_root": {
"total": 1457.1067589910745,
"count": 25604,
"is_parallel": true,
"self": 611.0327728740058,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028622559993891628,
"count": 1,
"is_parallel": true,
"self": 0.0011329169983582688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001729339001030894,
"count": 8,
"is_parallel": true,
"self": 0.001729339001030894
}
}
},
"UnityEnvironment.step": {
"total": 0.16418239399990853,
"count": 1,
"is_parallel": true,
"self": 0.0007876739991843351,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006024300000717631,
"count": 1,
"is_parallel": true,
"self": 0.0006024300000717631
},
"communicator.exchange": {
"total": 0.15549212699988857,
"count": 1,
"is_parallel": true,
"self": 0.15549212699988857
},
"steps_from_proto": {
"total": 0.00730016300076386,
"count": 1,
"is_parallel": true,
"self": 0.005706316000214429,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001593847000549431,
"count": 8,
"is_parallel": true,
"self": 0.001593847000549431
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 846.0739861170687,
"count": 25603,
"is_parallel": true,
"self": 19.921130432023347,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.73654646396335,
"count": 25603,
"is_parallel": true,
"self": 13.73654646396335
},
"communicator.exchange": {
"total": 756.7903139829805,
"count": 25603,
"is_parallel": true,
"self": 756.7903139829805
},
"steps_from_proto": {
"total": 55.625995238101495,
"count": 25603,
"is_parallel": true,
"self": 12.366150976169592,
"children": {
"_process_rank_one_or_two_observation": {
"total": 43.2598442619319,
"count": 204824,
"is_parallel": true,
"self": 43.2598442619319
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 453.36865482094527,
"count": 25604,
"self": 2.35918888696051,
"children": {
"process_trajectory": {
"total": 73.63783944298757,
"count": 25604,
"self": 73.51909879798768,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1187406449998889,
"count": 1,
"self": 0.1187406449998889
}
}
},
"_update_policy": {
"total": 377.3716264909972,
"count": 189,
"self": 149.91948618401057,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.45214030698662,
"count": 9093,
"self": 227.45214030698662
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.810002327663824e-07,
"count": 1,
"self": 8.810002327663824e-07
},
"TrainerController._save_models": {
"total": 0.0810865189996548,
"count": 1,
"self": 0.0025666270003057434,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07851989199934906,
"count": 1,
"self": 0.07851989199934906
}
}
}
}
}
}
}