{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4383748471736908,
"min": 0.4383748471736908,
"max": 1.5374524593353271,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13193.3291015625,
"min": 13193.3291015625,
"max": 46640.15625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989936.0,
"min": 29952.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989936.0,
"min": 29952.0,
"max": 989936.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5079846978187561,
"min": -0.22263436019420624,
"max": 0.5079846978187561,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 140.2037811279297,
"min": -52.76434326171875,
"max": 140.2037811279297,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015380463562905788,
"min": -0.0013826994691044092,
"max": 0.4471650719642639,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.2450079917907715,
"min": -0.33737868070602417,
"max": 105.97811889648438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06767598536163051,
"min": 0.06614082330828643,
"max": 0.0750729314328585,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0151397804244577,
"min": 0.5082534573853134,
"max": 1.0510210400600188,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01583725335205802,
"min": 0.000370919254713319,
"max": 0.01583725335205802,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23755880028087029,
"min": 0.004821950311273147,
"max": 0.23755880028087029,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.510277496606667e-06,
"min": 7.510277496606667e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001126541624491,
"min": 0.0001126541624491,
"max": 0.0032528627157125,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250339333333332,
"min": 0.10250339333333332,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375508999999998,
"min": 1.3886848,
"max": 2.4019303,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026008899400000005,
"min": 0.00026008899400000005,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039013349100000005,
"min": 0.0039013349100000005,
"max": 0.10845032125,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008669668808579445,
"min": 0.008669668808579445,
"max": 0.2906513214111328,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13004502654075623,
"min": 0.12576048076152802,
"max": 2.0345592498779297,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 365.94117647058823,
"min": 365.94117647058823,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31105.0,
"min": 15984.0,
"max": 32724.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5199325370580652,
"min": -1.0000000521540642,
"max": 1.5303839832544326,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 130.7141981869936,
"min": -30.999201610684395,
"max": 130.7141981869936,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5199325370580652,
"min": -1.0000000521540642,
"max": 1.5303839832544326,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 130.7141981869936,
"min": -30.999201610684395,
"max": 130.7141981869936,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03288487576457217,
"min": 0.03288487576457217,
"max": 5.715780191123486,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8280993157532066,
"min": 2.705640456362744,
"max": 91.45248305797577,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743006412",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743008095"
},
"total": 1682.4043333689997,
"count": 1,
"self": 0.3308606430000509,
"children": {
"run_training.setup": {
"total": 0.022087654999722872,
"count": 1,
"self": 0.022087654999722872
},
"TrainerController.start_learning": {
"total": 1682.051385071,
"count": 1,
"self": 1.0957553690504938,
"children": {
"TrainerController._reset_env": {
"total": 2.2655211070000405,
"count": 1,
"self": 2.2655211070000405
},
"TrainerController.advance": {
"total": 1678.6008556439483,
"count": 63587,
"self": 1.1251304790248469,
"children": {
"env_step": {
"total": 1094.0155611188661,
"count": 63587,
"self": 960.9409637176518,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.39931852602876,
"count": 63587,
"self": 4.0109017480922375,
"children": {
"TorchPolicy.evaluate": {
"total": 128.38841677793653,
"count": 62549,
"self": 128.38841677793653
}
}
},
"workers": {
"total": 0.6752788751855405,
"count": 63587,
"self": 0.0,
"children": {
"worker_root": {
"total": 1679.970926014007,
"count": 63587,
"is_parallel": true,
"self": 805.1095039979891,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002073827000003803,
"count": 1,
"is_parallel": true,
"self": 0.0006601839991162706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014136430008875323,
"count": 8,
"is_parallel": true,
"self": 0.0014136430008875323
}
}
},
"UnityEnvironment.step": {
"total": 0.034926434000226436,
"count": 1,
"is_parallel": true,
"self": 0.0003410270005588245,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00026866500002142857,
"count": 1,
"is_parallel": true,
"self": 0.00026866500002142857
},
"communicator.exchange": {
"total": 0.033327857999665866,
"count": 1,
"is_parallel": true,
"self": 0.033327857999665866
},
"steps_from_proto": {
"total": 0.0009888839999803167,
"count": 1,
"is_parallel": true,
"self": 0.00025690799975564005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007319760002246767,
"count": 8,
"is_parallel": true,
"self": 0.0007319760002246767
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 874.861422016018,
"count": 63586,
"is_parallel": true,
"self": 22.39549254911799,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.804825727955176,
"count": 63586,
"is_parallel": true,
"self": 15.804825727955176
},
"communicator.exchange": {
"total": 767.4515109929325,
"count": 63586,
"is_parallel": true,
"self": 767.4515109929325
},
"steps_from_proto": {
"total": 69.20959274601228,
"count": 63586,
"is_parallel": true,
"self": 14.190171379028016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.01942136698426,
"count": 508688,
"is_parallel": true,
"self": 55.01942136698426
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 583.4601640460573,
"count": 63587,
"self": 2.069307583149566,
"children": {
"process_trajectory": {
"total": 109.05045299190988,
"count": 63587,
"self": 108.84455155191017,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20590143999970678,
"count": 2,
"self": 0.20590143999970678
}
}
},
"_update_policy": {
"total": 472.3404034709979,
"count": 443,
"self": 255.27660092694668,
"children": {
"TorchPPOOptimizer.update": {
"total": 217.0638025440512,
"count": 22824,
"self": 217.0638025440512
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.80000698089134e-07,
"count": 1,
"self": 8.80000698089134e-07
},
"TrainerController._save_models": {
"total": 0.08925207100037369,
"count": 1,
"self": 0.0014386470002136775,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08781342400016001,
"count": 1,
"self": 0.08781342400016001
}
}
}
}
}
}
}