{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.46165579557418823,
"min": 0.46165579557418823,
"max": 1.4438221454620361,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13975.244140625,
"min": 13924.521484375,
"max": 43799.7890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989973.0,
"min": 29928.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989973.0,
"min": 29928.0,
"max": 989973.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.618240475654602,
"min": -0.19759468734264374,
"max": 0.618240475654602,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.1985321044922,
"min": -46.82994079589844,
"max": 176.1985321044922,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01467522419989109,
"min": -0.0504487082362175,
"max": 0.3410690724849701,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.182438850402832,
"min": -12.612176895141602,
"max": 82.19764709472656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06757753466444119,
"min": 0.0650555044826534,
"max": 0.07531298675034956,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9460854853021766,
"min": 0.48097637014553196,
"max": 1.0976017205102835,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014768230875812017,
"min": 0.0007413514506228313,
"max": 0.016588016223083278,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20675523226136824,
"min": 0.010378920308719638,
"max": 0.2488202433462492,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.253754724971427e-06,
"min": 7.253754724971427e-06,
"max": 0.00029523394444582856,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010155256614959997,
"min": 0.00010155256614959997,
"max": 0.0036322645892451996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241788571428571,
"min": 0.10241788571428571,
"max": 0.19841131428571426,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338504,
"min": 1.3888791999999999,
"max": 2.6107548,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025154678285714283,
"min": 0.00025154678285714283,
"max": 0.009841290297142856,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00352165496,
"min": 0.00352165496,
"max": 0.12109440452000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011843138374388218,
"min": 0.011843138374388218,
"max": 0.5239995121955872,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1658039391040802,
"min": 0.1658039391040802,
"max": 3.667996644973755,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.47872340425533,
"min": 294.47872340425533,
"max": 992.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27681.0,
"min": 16599.0,
"max": 32243.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7059789315650338,
"min": -0.8677063011564314,
"max": 1.7059789315650338,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.0679984986782,
"min": -28.273601770401,
"max": 165.08299835026264,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7059789315650338,
"min": -0.8677063011564314,
"max": 1.7059789315650338,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.0679984986782,
"min": -28.273601770401,
"max": 165.08299835026264,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03613186541770119,
"min": 0.03613186541770119,
"max": 9.697137282175177,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4325272146816133,
"min": 3.4325272146816133,
"max": 164.851333796978,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713720014",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713722186"
},
"total": 2171.542801079,
"count": 1,
"self": 0.8780618380001215,
"children": {
"run_training.setup": {
"total": 0.04486124400000335,
"count": 1,
"self": 0.04486124400000335
},
"TrainerController.start_learning": {
"total": 2170.619877997,
"count": 1,
"self": 1.7395002469861538,
"children": {
"TrainerController._reset_env": {
"total": 2.1644791970000483,
"count": 1,
"self": 2.1644791970000483
},
"TrainerController.advance": {
"total": 2166.6068785320135,
"count": 63889,
"self": 1.8939756769882479,
"children": {
"env_step": {
"total": 1460.3876735180354,
"count": 63889,
"self": 1341.2771155020112,
"children": {
"SubprocessEnvManager._take_step": {
"total": 117.9739057519871,
"count": 63889,
"self": 4.470343421028531,
"children": {
"TorchPolicy.evaluate": {
"total": 113.50356233095857,
"count": 62558,
"self": 113.50356233095857
}
}
},
"workers": {
"total": 1.1366522640370817,
"count": 63889,
"self": 0.0,
"children": {
"worker_root": {
"total": 2166.65908952502,
"count": 63889,
"is_parallel": true,
"self": 950.7275624980227,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006299468999998226,
"count": 1,
"is_parallel": true,
"self": 0.003081528999928196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032179400000700298,
"count": 8,
"is_parallel": true,
"self": 0.0032179400000700298
}
}
},
"UnityEnvironment.step": {
"total": 0.042284953999967456,
"count": 1,
"is_parallel": true,
"self": 0.0005700389999105937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043862000001126944,
"count": 1,
"is_parallel": true,
"self": 0.00043862000001126944
},
"communicator.exchange": {
"total": 0.0398655950000375,
"count": 1,
"is_parallel": true,
"self": 0.0398655950000375
},
"steps_from_proto": {
"total": 0.0014107000000080916,
"count": 1,
"is_parallel": true,
"self": 0.00029277100009039714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011179289999176945,
"count": 8,
"is_parallel": true,
"self": 0.0011179289999176945
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1215.9315270269974,
"count": 63888,
"is_parallel": true,
"self": 36.85755013992639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 18.969451179998885,
"count": 63888,
"is_parallel": true,
"self": 18.969451179998885
},
"communicator.exchange": {
"total": 1066.5555063650193,
"count": 63888,
"is_parallel": true,
"self": 1066.5555063650193
},
"steps_from_proto": {
"total": 93.54901934205287,
"count": 63888,
"is_parallel": true,
"self": 19.784483338020607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.76453600403227,
"count": 511104,
"is_parallel": true,
"self": 73.76453600403227
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 704.3252293369897,
"count": 63889,
"self": 3.4192386629981684,
"children": {
"process_trajectory": {
"total": 119.6919217289921,
"count": 63889,
"self": 119.4966711559918,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19525057300029403,
"count": 2,
"self": 0.19525057300029403
}
}
},
"_update_policy": {
"total": 581.2140689449994,
"count": 453,
"self": 236.18842300199657,
"children": {
"TorchPPOOptimizer.update": {
"total": 345.02564594300287,
"count": 22770,
"self": 345.02564594300287
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5400000847876072e-06,
"count": 1,
"self": 1.5400000847876072e-06
},
"TrainerController._save_models": {
"total": 0.10901848100002098,
"count": 1,
"self": 0.004576508999889484,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1044419720001315,
"count": 1,
"self": 0.1044419720001315
}
}
}
}
}
}
}