{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35756173729896545,
"min": 0.35254934430122375,
"max": 1.5066057443618774,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10846.9931640625,
"min": 10533.35546875,
"max": 45704.390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29952.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3797052502632141,
"min": -0.09596633166074753,
"max": 0.44829073548316956,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 98.7233657836914,
"min": -23.223852157592773,
"max": 121.48678588867188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025256171822547913,
"min": -0.025717997923493385,
"max": 0.16532054543495178,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.5666046142578125,
"min": -6.455217361450195,
"max": 39.18096923828125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06525615897927699,
"min": 0.06472599649264123,
"max": 0.07560378394245512,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9135862257098779,
"min": 0.49793026089315684,
"max": 1.0584529751943716,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010093762087871298,
"min": 0.001006706680580413,
"max": 0.011073197464914603,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14131266923019817,
"min": 0.01308718684754537,
"max": 0.1607818079307132,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.557526052285716e-06,
"min": 7.557526052285716e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010580536473200002,
"min": 0.00010580536473200002,
"max": 0.0036333937888688,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251914285714284,
"min": 0.10251914285714284,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352679999999998,
"min": 1.3886848,
"max": 2.611131200000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002616623714285715,
"min": 0.0002616623714285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036632732000000012,
"min": 0.0036632732000000012,
"max": 0.12113200688,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007462288718670607,
"min": 0.007418777793645859,
"max": 0.2794453501701355,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.10447204113006592,
"min": 0.10386288911104202,
"max": 1.9561173915863037,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 470.8448275862069,
"min": 426.3421052631579,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27309.0,
"min": 15984.0,
"max": 34753.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2489457429718163,
"min": -1.0000000521540642,
"max": 1.4361786430080732,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 73.68779883533716,
"min": -28.84100167453289,
"max": 107.71339822560549,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2489457429718163,
"min": -1.0000000521540642,
"max": 1.4361786430080732,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 73.68779883533716,
"min": -28.84100167453289,
"max": 107.71339822560549,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0364751645415169,
"min": 0.03459986108022609,
"max": 5.91924156434834,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.152034707949497,
"min": 2.152034707949497,
"max": 94.70786502957344,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704604761",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704607068"
},
"total": 2307.772790555,
"count": 1,
"self": 0.47722021399977166,
"children": {
"run_training.setup": {
"total": 0.052840837000076135,
"count": 1,
"self": 0.052840837000076135
},
"TrainerController.start_learning": {
"total": 2307.242729504,
"count": 1,
"self": 1.6562200650323575,
"children": {
"TrainerController._reset_env": {
"total": 1.9882843140001114,
"count": 1,
"self": 1.9882843140001114
},
"TrainerController.advance": {
"total": 2303.5135463529678,
"count": 63598,
"self": 1.6787391680154542,
"children": {
"env_step": {
"total": 1641.2701765309607,
"count": 63598,
"self": 1494.9501485319668,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.3066577480506,
"count": 63598,
"self": 5.14844194699981,
"children": {
"TorchPolicy.evaluate": {
"total": 140.1582158010508,
"count": 62569,
"self": 140.1582158010508
}
}
},
"workers": {
"total": 1.0133702509433533,
"count": 63598,
"self": 0.0,
"children": {
"worker_root": {
"total": 2301.512000554993,
"count": 63598,
"is_parallel": true,
"self": 937.824281582014,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017246590000468132,
"count": 1,
"is_parallel": true,
"self": 0.0005495890002293891,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001175069999817424,
"count": 8,
"is_parallel": true,
"self": 0.001175069999817424
}
}
},
"UnityEnvironment.step": {
"total": 0.05288674600001286,
"count": 1,
"is_parallel": true,
"self": 0.0006310139997367514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005027370000334486,
"count": 1,
"is_parallel": true,
"self": 0.0005027370000334486
},
"communicator.exchange": {
"total": 0.05003439800020715,
"count": 1,
"is_parallel": true,
"self": 0.05003439800020715
},
"steps_from_proto": {
"total": 0.0017185970000355155,
"count": 1,
"is_parallel": true,
"self": 0.0003517459997510741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013668510002844414,
"count": 8,
"is_parallel": true,
"self": 0.0013668510002844414
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1363.687718972979,
"count": 63597,
"is_parallel": true,
"self": 36.65025568389842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.640481193030837,
"count": 63597,
"is_parallel": true,
"self": 26.640481193030837
},
"communicator.exchange": {
"total": 1192.0340881270204,
"count": 63597,
"is_parallel": true,
"self": 1192.0340881270204
},
"steps_from_proto": {
"total": 108.3628939690293,
"count": 63597,
"is_parallel": true,
"self": 22.704955567909792,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.65793840111951,
"count": 508776,
"is_parallel": true,
"self": 85.65793840111951
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 660.5646306539918,
"count": 63598,
"self": 3.1169219970433915,
"children": {
"process_trajectory": {
"total": 134.2382949799469,
"count": 63598,
"self": 134.03501752594707,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20327745399981723,
"count": 2,
"self": 0.20327745399981723
}
}
},
"_update_policy": {
"total": 523.2094136770015,
"count": 452,
"self": 309.7505712369955,
"children": {
"TorchPPOOptimizer.update": {
"total": 213.45884244000604,
"count": 22815,
"self": 213.45884244000604
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3599997146229725e-06,
"count": 1,
"self": 1.3599997146229725e-06
},
"TrainerController._save_models": {
"total": 0.0846774120000191,
"count": 1,
"self": 0.0015345730002991331,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08314283899971997,
"count": 1,
"self": 0.08314283899971997
}
}
}
}
}
}
}