{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4244535565376282,
"min": 0.42294439673423767,
"max": 1.4268078804016113,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12801.51953125,
"min": 12695.0986328125,
"max": 43283.64453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6406899690628052,
"min": -0.10473007708787918,
"max": 0.6406899690628052,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 185.15939331054688,
"min": -25.239948272705078,
"max": 185.15939331054688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004117829725146294,
"min": -0.004117829725146294,
"max": 0.2629777193069458,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.1900527477264404,
"min": -1.1900527477264404,
"max": 63.64060974121094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06812349290475159,
"min": 0.0635300489248599,
"max": 0.07394954918615908,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9537289006665222,
"min": 0.5119657118626785,
"max": 1.1092432377923862,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01707030946382482,
"min": 0.00045798211453801645,
"max": 0.01707030946382482,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23898433249354747,
"min": 0.005953767488994214,
"max": 0.23898433249354747,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.461761798492859e-06,
"min": 7.461761798492859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010446466517890002,
"min": 0.00010446466517890002,
"max": 0.0037585870471376994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248722142857145,
"min": 0.10248722142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348211000000002,
"min": 1.3886848,
"max": 2.6528623,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002584734207142858,
"min": 0.0002584734207142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003618627890000001,
"min": 0.003618627890000001,
"max": 0.12530094377,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008988547138869762,
"min": 0.008988547138869762,
"max": 0.46923691034317017,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12583966553211212,
"min": 0.12583966553211212,
"max": 3.284658432006836,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 274.6857142857143,
"min": 274.6857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28842.0,
"min": 15984.0,
"max": 33948.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7259523696842647,
"min": -1.0000000521540642,
"max": 1.7259523696842647,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 181.2249988168478,
"min": -28.767001517117023,
"max": 181.2249988168478,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7259523696842647,
"min": -1.0000000521540642,
"max": 1.7259523696842647,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 181.2249988168478,
"min": -28.767001517117023,
"max": 181.2249988168478,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02522160971005048,
"min": 0.02522160971005048,
"max": 9.687635842710733,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6482690195553005,
"min": 2.6482690195553005,
"max": 155.00217348337173,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733566941",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1733569149"
},
"total": 2208.0239664769997,
"count": 1,
"self": 0.4898817559997042,
"children": {
"run_training.setup": {
"total": 0.054378752000047825,
"count": 1,
"self": 0.054378752000047825
},
"TrainerController.start_learning": {
"total": 2207.479705969,
"count": 1,
"self": 1.308461412977067,
"children": {
"TrainerController._reset_env": {
"total": 2.258942081999976,
"count": 1,
"self": 2.258942081999976
},
"TrainerController.advance": {
"total": 2203.823205378023,
"count": 64159,
"self": 1.375892323086191,
"children": {
"env_step": {
"total": 1515.3065768289348,
"count": 64159,
"self": 1371.8963863879121,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.62680482900646,
"count": 64159,
"self": 4.471776400049748,
"children": {
"TorchPolicy.evaluate": {
"total": 138.1550284289567,
"count": 62565,
"self": 138.1550284289567
}
}
},
"workers": {
"total": 0.7833856120162181,
"count": 64159,
"self": 0.0,
"children": {
"worker_root": {
"total": 2202.6516069509753,
"count": 64159,
"is_parallel": true,
"self": 944.7722812009272,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002068687000019054,
"count": 1,
"is_parallel": true,
"self": 0.0006445750002512796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014241119997677743,
"count": 8,
"is_parallel": true,
"self": 0.0014241119997677743
}
}
},
"UnityEnvironment.step": {
"total": 0.048407217999965724,
"count": 1,
"is_parallel": true,
"self": 0.0006296639999163745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005142730001352902,
"count": 1,
"is_parallel": true,
"self": 0.0005142730001352902
},
"communicator.exchange": {
"total": 0.04568080600006397,
"count": 1,
"is_parallel": true,
"self": 0.04568080600006397
},
"steps_from_proto": {
"total": 0.001582474999850092,
"count": 1,
"is_parallel": true,
"self": 0.0003315469998597109,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001250927999990381,
"count": 8,
"is_parallel": true,
"self": 0.001250927999990381
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1257.879325750048,
"count": 64158,
"is_parallel": true,
"self": 32.200059572999635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.5322458620019,
"count": 64158,
"is_parallel": true,
"self": 22.5322458620019
},
"communicator.exchange": {
"total": 1109.1566616949722,
"count": 64158,
"is_parallel": true,
"self": 1109.1566616949722
},
"steps_from_proto": {
"total": 93.9903586200744,
"count": 64158,
"is_parallel": true,
"self": 18.44389080215774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.54646781791666,
"count": 513264,
"is_parallel": true,
"self": 75.54646781791666
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 687.1407362260022,
"count": 64159,
"self": 2.486621429931347,
"children": {
"process_trajectory": {
"total": 133.81213751606788,
"count": 64159,
"self": 133.5776646530678,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23447286300006454,
"count": 2,
"self": 0.23447286300006454
}
}
},
"_update_policy": {
"total": 550.841977280003,
"count": 457,
"self": 308.6473247049812,
"children": {
"TorchPPOOptimizer.update": {
"total": 242.1946525750218,
"count": 22794,
"self": 242.1946525750218
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.93999947240809e-07,
"count": 1,
"self": 9.93999947240809e-07
},
"TrainerController._save_models": {
"total": 0.08909610199998497,
"count": 1,
"self": 0.002040319000116142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08705578299986882,
"count": 1,
"self": 0.08705578299986882
}
}
}
}
}
}
}