{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11878236383199692,
"min": 0.10809889435768127,
"max": 1.2392959594726562,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 3557.769287109375,
"min": 3248.155517578125,
"max": 37595.28125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0937093123793602,
"min": -0.13980858027935028,
"max": -0.03965657576918602,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -22.490234375,
"min": -33.1346321105957,
"max": -9.517578125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.29258492588996887,
"min": 0.2259417027235031,
"max": 0.48964154720306396,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 70.22038269042969,
"min": 54.45195007324219,
"max": 117.51396942138672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0701524821032039,
"min": 0.06535336099897435,
"max": 0.07161272738445458,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9821347494448547,
"min": 0.5008668755187824,
"max": 0.9821347494448547,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0005013788562184888,
"min": 0.000373782523542247,
"max": 0.008696133524944761,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.007019303987058843,
"min": 0.004859172806049211,
"max": 0.06087293467461333,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.347826122185713e-06,
"min": 7.347826122185713e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010286956571059998,
"min": 0.00010286956571059998,
"max": 0.0031375841541387,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244924285714285,
"min": 0.10244924285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342894,
"min": 1.3691136000000002,
"max": 2.3458613,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025467936142857143,
"min": 0.00025467936142857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035655110599999997,
"min": 0.0035655110599999997,
"max": 0.10461154386999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.2888457477092743,
"min": 0.179472878575325,
"max": 0.5697922110557556,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 4.043840408325195,
"min": 2.153674602508545,
"max": 4.191043376922607,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 928.0,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27972.0,
"min": 15984.0,
"max": 32726.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9996500518172979,
"min": -1.0000000521540642,
"max": -0.6978154314252046,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -27.990201450884342,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9996500518172979,
"min": -1.0000000521540642,
"max": -0.6978154314252046,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -27.990201450884342,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 2.846828587885414,
"min": 1.9769958045799285,
"max": 10.185381253249943,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 79.71120046079159,
"min": 56.77791683236137,
"max": 162.9661000519991,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739006243",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739008490"
},
"total": 2246.997819224,
"count": 1,
"self": 0.5393444309997903,
"children": {
"run_training.setup": {
"total": 0.024433104999843636,
"count": 1,
"self": 0.024433104999843636
},
"TrainerController.start_learning": {
"total": 2246.4340416880004,
"count": 1,
"self": 1.5748731400240104,
"children": {
"TrainerController._reset_env": {
"total": 3.960077573000035,
"count": 1,
"self": 3.960077573000035
},
"TrainerController.advance": {
"total": 2240.8027684699764,
"count": 62994,
"self": 1.6008817979759442,
"children": {
"env_step": {
"total": 1513.275015226022,
"count": 62994,
"self": 1335.5422932179872,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.80688636500167,
"count": 62994,
"self": 5.35005148505752,
"children": {
"TorchPolicy.evaluate": {
"total": 171.45683487994415,
"count": 62540,
"self": 171.45683487994415
}
}
},
"workers": {
"total": 0.9258356430332242,
"count": 62994,
"self": 0.0,
"children": {
"worker_root": {
"total": 2240.684774630029,
"count": 62994,
"is_parallel": true,
"self": 1032.195184332002,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0068483129998639924,
"count": 1,
"is_parallel": true,
"self": 0.004990184000007503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018581289998564898,
"count": 8,
"is_parallel": true,
"self": 0.0018581289998564898
}
}
},
"UnityEnvironment.step": {
"total": 0.05441686400013168,
"count": 1,
"is_parallel": true,
"self": 0.0006464980001510412,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000482693000094514,
"count": 1,
"is_parallel": true,
"self": 0.000482693000094514
},
"communicator.exchange": {
"total": 0.051234337999858326,
"count": 1,
"is_parallel": true,
"self": 0.051234337999858326
},
"steps_from_proto": {
"total": 0.0020533350000278006,
"count": 1,
"is_parallel": true,
"self": 0.0005514749998383195,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001501860000189481,
"count": 8,
"is_parallel": true,
"self": 0.001501860000189481
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1208.489590298027,
"count": 62993,
"is_parallel": true,
"self": 34.57345123904065,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.367545844980214,
"count": 62993,
"is_parallel": true,
"self": 25.367545844980214
},
"communicator.exchange": {
"total": 1042.905064199027,
"count": 62993,
"is_parallel": true,
"self": 1042.905064199027
},
"steps_from_proto": {
"total": 105.6435290149791,
"count": 62993,
"is_parallel": true,
"self": 21.54192881193353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.10160020304556,
"count": 503944,
"is_parallel": true,
"self": 84.10160020304556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 725.9268714459783,
"count": 62994,
"self": 2.6566088199870137,
"children": {
"process_trajectory": {
"total": 139.04254404098697,
"count": 62994,
"self": 138.7163282629865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.32621577800046,
"count": 2,
"self": 0.32621577800046
}
}
},
"_update_policy": {
"total": 584.2277185850044,
"count": 423,
"self": 322.1025937070101,
"children": {
"TorchPPOOptimizer.update": {
"total": 262.12512487799427,
"count": 22842,
"self": 262.12512487799427
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0199996722803917e-06,
"count": 1,
"self": 1.0199996722803917e-06
},
"TrainerController._save_models": {
"total": 0.0963214850003169,
"count": 1,
"self": 0.0013519990002350823,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09496948600008182,
"count": 1,
"self": 0.09496948600008182
}
}
}
}
}
}
}