{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.285113662481308,
"min": 0.28019753098487854,
"max": 1.3373417854309082,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8553.41015625,
"min": 8320.74609375,
"max": 40569.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29977.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29977.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.742624044418335,
"min": -0.07317552715539932,
"max": 0.7502455711364746,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 217.58883666992188,
"min": -17.708477020263672,
"max": 222.07269287109375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01739008165895939,
"min": 0.00175029959063977,
"max": 0.38777002692222595,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.095293998718262,
"min": 0.5075868964195251,
"max": 93.06480407714844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06926023140355635,
"min": 0.06414183754840795,
"max": 0.07220410677733038,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9696432396497888,
"min": 0.5552216753219285,
"max": 1.0607346540200524,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01638117568689354,
"min": 0.001232118952374212,
"max": 0.017923484782971042,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22933645961650956,
"min": 0.012321189523742119,
"max": 0.26189641419235465,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.532154632171433e-06,
"min": 7.532154632171433e-06,
"max": 0.00029522051409316247,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010545016485040006,
"min": 0.00010545016485040006,
"max": 0.0036337453887515998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251068571428572,
"min": 0.10251068571428572,
"max": 0.1984068375,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351496000000001,
"min": 1.4351496000000001,
"max": 2.6173617,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000260817502857143,
"min": 0.000260817502857143,
"max": 0.00984084306625,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003651445040000002,
"min": 0.003651445040000002,
"max": 0.12114371516,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012743408791720867,
"min": 0.012743408791720867,
"max": 0.6665088534355164,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1784077286720276,
"min": 0.1784077286720276,
"max": 5.332070827484131,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 248.86614173228347,
"min": 247.01612903225808,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31606.0,
"min": 16776.0,
"max": 33431.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7346190318820023,
"min": -1.0000000521540642,
"max": 1.7472543688981157,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 218.56199801713228,
"min": -31.00000161677599,
"max": 218.56199801713228,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7346190318820023,
"min": -1.0000000521540642,
"max": 1.7472543688981157,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 218.56199801713228,
"min": -31.00000161677599,
"max": 218.56199801713228,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0327601416957184,
"min": 0.0327601416957184,
"max": 14.381607474649654,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.127777853660518,
"min": 3.966959278041031,
"max": 244.4873270690441,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756378652",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ../config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756380981"
},
"total": 2328.278660369,
"count": 1,
"self": 0.4762808940004106,
"children": {
"run_training.setup": {
"total": 0.023940635999679216,
"count": 1,
"self": 0.023940635999679216
},
"TrainerController.start_learning": {
"total": 2327.778438839,
"count": 1,
"self": 1.339578540047114,
"children": {
"TrainerController._reset_env": {
"total": 2.0610387349997836,
"count": 1,
"self": 2.0610387349997836
},
"TrainerController.advance": {
"total": 2324.3017410129532,
"count": 64495,
"self": 1.3548746797541753,
"children": {
"env_step": {
"total": 1651.3001822041106,
"count": 64495,
"self": 1501.537429327977,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.98576050510655,
"count": 64495,
"self": 4.51671116405214,
"children": {
"TorchPolicy.evaluate": {
"total": 144.46904934105441,
"count": 62558,
"self": 144.46904934105441
}
}
},
"workers": {
"total": 0.7769923710270632,
"count": 64495,
"self": 0.0,
"children": {
"worker_root": {
"total": 2323.150059353868,
"count": 64495,
"is_parallel": true,
"self": 933.2224880878898,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002264876000026561,
"count": 1,
"is_parallel": true,
"self": 0.0007253339999806485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015395420000459126,
"count": 8,
"is_parallel": true,
"self": 0.0015395420000459126
}
}
},
"UnityEnvironment.step": {
"total": 0.04912001799993959,
"count": 1,
"is_parallel": true,
"self": 0.0005158320000191452,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004982979999113013,
"count": 1,
"is_parallel": true,
"self": 0.0004982979999113013
},
"communicator.exchange": {
"total": 0.04642198200008352,
"count": 1,
"is_parallel": true,
"self": 0.04642198200008352
},
"steps_from_proto": {
"total": 0.0016839059999256278,
"count": 1,
"is_parallel": true,
"self": 0.0003655319997051265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013183740002205013,
"count": 8,
"is_parallel": true,
"self": 0.0013183740002205013
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1389.9275712659783,
"count": 64494,
"is_parallel": true,
"self": 31.355824202882104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.015810393014817,
"count": 64494,
"is_parallel": true,
"self": 23.015810393014817
},
"communicator.exchange": {
"total": 1240.005566531072,
"count": 64494,
"is_parallel": true,
"self": 1240.005566531072
},
"steps_from_proto": {
"total": 95.55037013900937,
"count": 64494,
"is_parallel": true,
"self": 19.16597075935124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.38439937965813,
"count": 515952,
"is_parallel": true,
"self": 76.38439937965813
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 671.6466841290885,
"count": 64495,
"self": 2.6764614840049035,
"children": {
"process_trajectory": {
"total": 128.1584318350774,
"count": 64495,
"self": 127.9594408980779,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19899093699950754,
"count": 2,
"self": 0.19899093699950754
}
}
},
"_update_policy": {
"total": 540.8117908100062,
"count": 457,
"self": 299.36157837494466,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.45021243506153,
"count": 22803,
"self": 241.45021243506153
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.240003808168694e-07,
"count": 1,
"self": 8.240003808168694e-07
},
"TrainerController._save_models": {
"total": 0.0760797269995237,
"count": 1,
"self": 0.0014419099989027018,
"children": {
"RLTrainer._checkpoint": {
"total": 0.074637817000621,
"count": 1,
"self": 0.074637817000621
}
}
}
}
}
}
}