{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.36424240469932556,
"min": 0.3634914755821228,
"max": 1.437780499458313,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10903.9609375,
"min": 10864.033203125,
"max": 43616.5078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.42119160294532776,
"min": -0.1006065085530281,
"max": 0.5419298410415649,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 112.87934875488281,
"min": -24.4473819732666,
"max": 148.48876953125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.021614430472254753,
"min": 0.003182812128216028,
"max": 0.34326279163360596,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.792667388916016,
"min": 0.8307139873504639,
"max": 81.35327911376953,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06824893290675911,
"min": 0.06488914567974255,
"max": 0.07420356765100125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9554850606946275,
"min": 0.5140171498401445,
"max": 1.083827710603752,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01361347139304382,
"min": 0.0011377399571849821,
"max": 0.01583745261676758,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19058859950261348,
"min": 0.008333442692036307,
"max": 0.22172433663474614,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.509268925514284e-06,
"min": 7.509268925514284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010512976495719998,
"min": 0.00010512976495719998,
"max": 0.003490238236587299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250305714285715,
"min": 0.10250305714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350428000000002,
"min": 1.3691136000000002,
"max": 2.5273622999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600554085714285,
"min": 0.0002600554085714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036407757199999995,
"min": 0.0036407757199999995,
"max": 0.11635492873,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014448952861130238,
"min": 0.014448952861130238,
"max": 0.5017713308334351,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20228533446788788,
"min": 0.20228533446788788,
"max": 3.512399196624756,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 449.953125,
"min": 355.0352941176471,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28797.0,
"min": 15984.0,
"max": 33984.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.366606129362033,
"min": -1.0000000521540642,
"max": 1.5978988049661411,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.82939840853214,
"min": -32.000001668930054,
"max": 135.821398422122,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.366606129362033,
"min": -1.0000000521540642,
"max": 1.5978988049661411,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.82939840853214,
"min": -32.000001668930054,
"max": 135.821398422122,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06704652177241559,
"min": 0.05639902749578195,
"max": 10.183888606727123,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.3580239152070135,
"min": 4.3580239152070135,
"max": 162.94221770763397,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714570670",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714572852"
},
"total": 2182.457483486,
"count": 1,
"self": 0.5264585850000003,
"children": {
"run_training.setup": {
"total": 0.0824290089999522,
"count": 1,
"self": 0.0824290089999522
},
"TrainerController.start_learning": {
"total": 2181.848595892,
"count": 1,
"self": 1.4722383540015471,
"children": {
"TrainerController._reset_env": {
"total": 2.8434281009999722,
"count": 1,
"self": 2.8434281009999722
},
"TrainerController.advance": {
"total": 2177.4435083869985,
"count": 63687,
"self": 1.5559316448807294,
"children": {
"env_step": {
"total": 1557.0082382080118,
"count": 63687,
"self": 1420.8476999979666,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.2553475830441,
"count": 63687,
"self": 4.82105018203265,
"children": {
"TorchPolicy.evaluate": {
"total": 130.43429740101146,
"count": 62551,
"self": 130.43429740101146
}
}
},
"workers": {
"total": 0.9051906270010477,
"count": 63687,
"self": 0.0,
"children": {
"worker_root": {
"total": 2176.4886837319773,
"count": 63687,
"is_parallel": true,
"self": 882.1648216628832,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031218469998748333,
"count": 1,
"is_parallel": true,
"self": 0.0008277280003312626,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022941189995435707,
"count": 8,
"is_parallel": true,
"self": 0.0022941189995435707
}
}
},
"UnityEnvironment.step": {
"total": 0.05646011799990447,
"count": 1,
"is_parallel": true,
"self": 0.0006773219997739943,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005608550000033574,
"count": 1,
"is_parallel": true,
"self": 0.0005608550000033574
},
"communicator.exchange": {
"total": 0.053333191000092484,
"count": 1,
"is_parallel": true,
"self": 0.053333191000092484
},
"steps_from_proto": {
"total": 0.0018887500000346336,
"count": 1,
"is_parallel": true,
"self": 0.00043748900043283356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014512609996018,
"count": 8,
"is_parallel": true,
"self": 0.0014512609996018
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1294.323862069094,
"count": 63686,
"is_parallel": true,
"self": 35.03410613913138,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.697665524041895,
"count": 63686,
"is_parallel": true,
"self": 24.697665524041895
},
"communicator.exchange": {
"total": 1130.8548296899926,
"count": 63686,
"is_parallel": true,
"self": 1130.8548296899926
},
"steps_from_proto": {
"total": 103.73726071592819,
"count": 63686,
"is_parallel": true,
"self": 21.17665471689088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.5606059990373,
"count": 509488,
"is_parallel": true,
"self": 82.5606059990373
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 618.879338534106,
"count": 63687,
"self": 2.754453555162854,
"children": {
"process_trajectory": {
"total": 130.40506966494422,
"count": 63687,
"self": 130.21081508594443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19425457899978937,
"count": 2,
"self": 0.19425457899978937
}
}
},
"_update_policy": {
"total": 485.7198153139989,
"count": 450,
"self": 284.9946631780622,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.7251521359367,
"count": 22812,
"self": 200.7251521359367
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0079993444378488e-06,
"count": 1,
"self": 1.0079993444378488e-06
},
"TrainerController._save_models": {
"total": 0.08942004200071096,
"count": 1,
"self": 0.0014415350005947403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08797850700011622,
"count": 1,
"self": 0.08797850700011622
}
}
}
}
}
}
}