{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3604281544685364,
"min": 0.35506731271743774,
"max": 1.3652037382125854,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10760.943359375,
"min": 10690.6689453125,
"max": 41414.8203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6344459652900696,
"min": -0.08143594861030579,
"max": 0.683842658996582,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 182.08599853515625,
"min": -19.62606430053711,
"max": 197.9730682373047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006639602594077587,
"min": -0.00932199228554964,
"max": 0.5745494365692139,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9055659770965576,
"min": -2.7033777236938477,
"max": 136.168212890625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06701613959578229,
"min": 0.06477685024116758,
"max": 0.07397381106940364,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0052420939367344,
"min": 0.5044700328779845,
"max": 1.1016671022539501,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01677165952601677,
"min": 0.000265572416849074,
"max": 0.01677165952601677,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25157489289025153,
"min": 0.0034524414190379616,
"max": 0.25157489289025153,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.545497484866665e-06,
"min": 7.545497484866665e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011318246227299998,
"min": 0.00011318246227299998,
"max": 0.003508375130541699,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251513333333336,
"min": 0.10251513333333336,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377270000000003,
"min": 1.3886848,
"max": 2.5724949000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026126181999999997,
"min": 0.00026126181999999997,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039189273,
"min": 0.0039189273,
"max": 0.11696888417,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011511927470564842,
"min": 0.011511927470564842,
"max": 0.5825320482254028,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17267891764640808,
"min": 0.1636643409729004,
"max": 4.077724456787109,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.2718446601942,
"min": 284.48571428571427,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30310.0,
"min": 15984.0,
"max": 33552.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.69250290182609,
"min": -1.0000000521540642,
"max": 1.69250290182609,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 174.32779888808727,
"min": -30.412801668047905,
"max": 176.8181977123022,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.69250290182609,
"min": -1.0000000521540642,
"max": 1.69250290182609,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 174.32779888808727,
"min": -30.412801668047905,
"max": 176.8181977123022,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03463746619010494,
"min": 0.034246506050389834,
"max": 12.726239141076803,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5676590175808087,
"min": 3.5676590175808087,
"max": 203.61982625722885,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1772201830",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1772204363"
},
"total": 2533.417895585,
"count": 1,
"self": 0.4765445760003786,
"children": {
"run_training.setup": {
"total": 0.02320981100001518,
"count": 1,
"self": 0.02320981100001518
},
"TrainerController.start_learning": {
"total": 2532.918141198,
"count": 1,
"self": 1.6995406228716092,
"children": {
"TrainerController._reset_env": {
"total": 2.2266405200002737,
"count": 1,
"self": 2.2266405200002737
},
"TrainerController.advance": {
"total": 2528.9064843501287,
"count": 64180,
"self": 1.7568639410874312,
"children": {
"env_step": {
"total": 1832.0136413650039,
"count": 64180,
"self": 1658.006302359966,
"children": {
"SubprocessEnvManager._take_step": {
"total": 173.0047364910556,
"count": 64180,
"self": 5.144711034152806,
"children": {
"TorchPolicy.evaluate": {
"total": 167.8600254569028,
"count": 62552,
"self": 167.8600254569028
}
}
},
"workers": {
"total": 1.0026025139823105,
"count": 64180,
"self": 0.0,
"children": {
"worker_root": {
"total": 2525.526496044095,
"count": 64180,
"is_parallel": true,
"self": 1000.0451453080323,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017015050002555654,
"count": 1,
"is_parallel": true,
"self": 0.0005535790005524177,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011479259997031477,
"count": 8,
"is_parallel": true,
"self": 0.0011479259997031477
}
}
},
"UnityEnvironment.step": {
"total": 0.047545370999614534,
"count": 1,
"is_parallel": true,
"self": 0.0005856800003130047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045096799976818147,
"count": 1,
"is_parallel": true,
"self": 0.00045096799976818147
},
"communicator.exchange": {
"total": 0.04487906899976224,
"count": 1,
"is_parallel": true,
"self": 0.04487906899976224
},
"steps_from_proto": {
"total": 0.001629653999771108,
"count": 1,
"is_parallel": true,
"self": 0.0003286799997113121,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013009740000597958,
"count": 8,
"is_parallel": true,
"self": 0.0013009740000597958
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1525.4813507360627,
"count": 64179,
"is_parallel": true,
"self": 36.97438997812105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.329459816044164,
"count": 64179,
"is_parallel": true,
"self": 24.329459816044164
},
"communicator.exchange": {
"total": 1349.7770538788764,
"count": 64179,
"is_parallel": true,
"self": 1349.7770538788764
},
"steps_from_proto": {
"total": 114.40044706302115,
"count": 64179,
"is_parallel": true,
"self": 24.65027404372404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.75017301929711,
"count": 513432,
"is_parallel": true,
"self": 89.75017301929711
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 695.1359790440374,
"count": 64180,
"self": 3.3073379071229283,
"children": {
"process_trajectory": {
"total": 131.23861532890896,
"count": 64180,
"self": 131.05917522790878,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17944010100018204,
"count": 2,
"self": 0.17944010100018204
}
}
},
"_update_policy": {
"total": 560.5900258080055,
"count": 452,
"self": 308.3852236099806,
"children": {
"TorchPPOOptimizer.update": {
"total": 252.20480219802494,
"count": 22767,
"self": 252.20480219802494
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0059993655886501e-06,
"count": 1,
"self": 1.0059993655886501e-06
},
"TrainerController._save_models": {
"total": 0.08547469899986027,
"count": 1,
"self": 0.0010729289997470914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08440177000011317,
"count": 1,
"self": 0.08440177000011317
}
}
}
}
}
}
}