{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.655445396900177,
"min": 0.655445396900177,
"max": 1.4176071882247925,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19768.232421875,
"min": 19768.232421875,
"max": 43004.53125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989997.0,
"min": 29963.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989997.0,
"min": 29963.0,
"max": 989997.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2514427602291107,
"min": -0.14635920524597168,
"max": 0.2514427602291107,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 64.87223052978516,
"min": -34.833492279052734,
"max": 64.87223052978516,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.035746317356824875,
"min": -0.010690957307815552,
"max": 0.18452799320220947,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.222549438476562,
"min": -2.6299755573272705,
"max": 44.47124481201172,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06714938138945861,
"min": 0.06612086555195311,
"max": 0.07338122287236702,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9400913394524205,
"min": 0.5438926249545616,
"max": 1.0596665246024106,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011030341665044784,
"min": 0.00013982592196504798,
"max": 0.011030341665044784,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.15442478331062698,
"min": 0.0018177369855456238,
"max": 0.15442478331062698,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.467247510949999e-06,
"min": 7.467247510949999e-06,
"max": 0.00029530522656492496,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010454146515329999,
"min": 0.00010454146515329999,
"max": 0.003757882347372599,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248905,
"min": 0.10248905,
"max": 0.198435075,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348467,
"min": 1.4348467,
"max": 2.6526273999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258656095,
"min": 0.000258656095,
"max": 0.009843663992499998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036211853300000003,
"min": 0.0036211853300000003,
"max": 0.12527747726,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010004131123423576,
"min": 0.010004131123423576,
"max": 0.33446940779685974,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14005783200263977,
"min": 0.14005783200263977,
"max": 2.675755262374878,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 555.2830188679245,
"min": 555.2830188679245,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29430.0,
"min": 16634.0,
"max": 32649.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.067233936263705,
"min": -0.9997226312275855,
"max": 1.067233936263705,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 56.563398621976376,
"min": -30.991401568055153,
"max": 56.563398621976376,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.067233936263705,
"min": -0.9997226312275855,
"max": 1.067233936263705,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 56.563398621976376,
"min": -30.991401568055153,
"max": 56.563398621976376,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05873983116564561,
"min": 0.05873983116564561,
"max": 5.799143501940896,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.113211051779217,
"min": 3.113211051779217,
"max": 98.58543953299522,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748088067",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748090171"
},
"total": 2103.8608982200003,
"count": 1,
"self": 0.5365540239999973,
"children": {
"run_training.setup": {
"total": 0.02037519599980442,
"count": 1,
"self": 0.02037519599980442
},
"TrainerController.start_learning": {
"total": 2103.3039690000005,
"count": 1,
"self": 1.2213424582150765,
"children": {
"TrainerController._reset_env": {
"total": 2.9237130430001343,
"count": 1,
"self": 2.9237130430001343
},
"TrainerController.advance": {
"total": 2099.0692066687843,
"count": 63334,
"self": 1.370157490753627,
"children": {
"env_step": {
"total": 1423.9974822810318,
"count": 63334,
"self": 1281.0893795877955,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.19684980215243,
"count": 63334,
"self": 4.423648956115358,
"children": {
"TorchPolicy.evaluate": {
"total": 137.77320084603707,
"count": 62562,
"self": 137.77320084603707
}
}
},
"workers": {
"total": 0.7112528910838591,
"count": 63334,
"self": 0.0,
"children": {
"worker_root": {
"total": 2098.6614896981146,
"count": 63334,
"is_parallel": true,
"self": 924.087690531188,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003341496999382798,
"count": 1,
"is_parallel": true,
"self": 0.0007734750006420654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025680219987407327,
"count": 8,
"is_parallel": true,
"self": 0.0025680219987407327
}
}
},
"UnityEnvironment.step": {
"total": 0.0829476369999611,
"count": 1,
"is_parallel": true,
"self": 0.0005766739996033721,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000451656999757688,
"count": 1,
"is_parallel": true,
"self": 0.000451656999757688
},
"communicator.exchange": {
"total": 0.08032644800005073,
"count": 1,
"is_parallel": true,
"self": 0.08032644800005073
},
"steps_from_proto": {
"total": 0.0015928580005493131,
"count": 1,
"is_parallel": true,
"self": 0.0003439269994487404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012489310011005728,
"count": 8,
"is_parallel": true,
"self": 0.0012489310011005728
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1174.5737991669266,
"count": 63333,
"is_parallel": true,
"self": 31.573477332977745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.961314369964384,
"count": 63333,
"is_parallel": true,
"self": 22.961314369964384
},
"communicator.exchange": {
"total": 1026.6548601169798,
"count": 63333,
"is_parallel": true,
"self": 1026.6548601169798
},
"steps_from_proto": {
"total": 93.38414734700473,
"count": 63333,
"is_parallel": true,
"self": 18.217979260664833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.1661680863399,
"count": 506664,
"is_parallel": true,
"self": 75.1661680863399
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 673.7015668969989,
"count": 63334,
"self": 2.485136288083595,
"children": {
"process_trajectory": {
"total": 127.06155294191012,
"count": 63334,
"self": 126.86573518590922,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19581775600090623,
"count": 2,
"self": 0.19581775600090623
}
}
},
"_update_policy": {
"total": 544.1548776670052,
"count": 457,
"self": 303.26414153099813,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.89073613600704,
"count": 22734,
"self": 240.89073613600704
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.280003385152668e-07,
"count": 1,
"self": 8.280003385152668e-07
},
"TrainerController._save_models": {
"total": 0.08970600200063927,
"count": 1,
"self": 0.0015405420008391957,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08816545999980008,
"count": 1,
"self": 0.08816545999980008
}
}
}
}
}
}
}