{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7192228436470032,
"min": 0.6814202070236206,
"max": 1.4821686744689941,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21542.162109375,
"min": 20529.828125,
"max": 44963.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989914.0,
"min": 29952.0,
"max": 989914.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.17974357306957245,
"min": -0.11352578550577164,
"max": 0.17974357306957245,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 45.295379638671875,
"min": -27.35971450805664,
"max": 45.295379638671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.03648325428366661,
"min": -0.03648325428366661,
"max": 0.2792997360229492,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -9.193779945373535,
"min": -9.193779945373535,
"max": 66.19403839111328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06685801928549717,
"min": 0.06685801928549717,
"max": 0.07325031839157482,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9360122699969604,
"min": 0.5117345317733546,
"max": 1.0873268810973966,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010475908567342162,
"min": 0.00011163550611655083,
"max": 0.010475908567342162,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14666271994279026,
"min": 0.0014512615795151608,
"max": 0.14666271994279026,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2750118607428615e-06,
"min": 7.2750118607428615e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010185016605040006,
"min": 0.00010185016605040006,
"max": 0.0035087843304052995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242497142857145,
"min": 0.10242497142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339496000000003,
"min": 1.3886848,
"max": 2.5695947000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025225464571428585,
"min": 0.00025225464571428585,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035315650400000015,
"min": 0.0035315650400000015,
"max": 0.11698251052999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010733752511441708,
"min": 0.010410493239760399,
"max": 0.402604341506958,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15027253329753876,
"min": 0.14976488053798676,
"max": 2.818230390548706,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 652.1739130434783,
"min": 652.1739130434783,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30000.0,
"min": 15984.0,
"max": 33145.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8259347448854343,
"min": -1.0000000521540642,
"max": 0.8259347448854343,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 37.99299826472998,
"min": -31.99640168249607,
"max": 37.99299826472998,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8259347448854343,
"min": -1.0000000521540642,
"max": 0.8259347448854343,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 37.99299826472998,
"min": -31.99640168249607,
"max": 37.99299826472998,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07305579967847418,
"min": 0.07305579967847418,
"max": 7.632940627634525,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3605667852098122,
"min": 3.193130294501316,
"max": 122.1270500421524,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729301449",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729304361"
},
"total": 2911.9007037360007,
"count": 1,
"self": 0.5796403280000959,
"children": {
"run_training.setup": {
"total": 0.07114815099976113,
"count": 1,
"self": 0.07114815099976113
},
"TrainerController.start_learning": {
"total": 2911.249915257001,
"count": 1,
"self": 2.3053217980186673,
"children": {
"TrainerController._reset_env": {
"total": 2.3905235980000725,
"count": 1,
"self": 2.3905235980000725
},
"TrainerController.advance": {
"total": 2906.477463718984,
"count": 63234,
"self": 2.40347424270567,
"children": {
"env_step": {
"total": 1867.8115031531297,
"count": 63234,
"self": 1715.4886679779047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.99164593033493,
"count": 63234,
"self": 6.562723601292419,
"children": {
"TorchPolicy.evaluate": {
"total": 144.4289223290425,
"count": 62556,
"self": 144.4289223290425
}
}
},
"workers": {
"total": 1.3311892448900835,
"count": 63234,
"self": 0.0,
"children": {
"worker_root": {
"total": 2904.586713236903,
"count": 63234,
"is_parallel": true,
"self": 1356.1549520307517,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033131310001408565,
"count": 1,
"is_parallel": true,
"self": 0.001110160998905485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022029700012353715,
"count": 8,
"is_parallel": true,
"self": 0.0022029700012353715
}
}
},
"UnityEnvironment.step": {
"total": 0.06242869700054143,
"count": 1,
"is_parallel": true,
"self": 0.0008265260003099684,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00053550400025415,
"count": 1,
"is_parallel": true,
"self": 0.00053550400025415
},
"communicator.exchange": {
"total": 0.05896357700021326,
"count": 1,
"is_parallel": true,
"self": 0.05896357700021326
},
"steps_from_proto": {
"total": 0.0021030899997640518,
"count": 1,
"is_parallel": true,
"self": 0.000502521000271372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016005689994926797,
"count": 8,
"is_parallel": true,
"self": 0.0016005689994926797
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1548.4317612061514,
"count": 63233,
"is_parallel": true,
"self": 47.44071844912651,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.797714527941025,
"count": 63233,
"is_parallel": true,
"self": 29.797714527941025
},
"communicator.exchange": {
"total": 1345.511532765906,
"count": 63233,
"is_parallel": true,
"self": 1345.511532765906
},
"steps_from_proto": {
"total": 125.68179546317788,
"count": 63233,
"is_parallel": true,
"self": 27.0220816505489,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.65971381262898,
"count": 505864,
"is_parallel": true,
"self": 98.65971381262898
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1036.2624863231486,
"count": 63234,
"self": 4.322292622318855,
"children": {
"process_trajectory": {
"total": 160.63538537981822,
"count": 63234,
"self": 160.4148888968184,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22049648299980618,
"count": 2,
"self": 0.22049648299980618
}
}
},
"_update_policy": {
"total": 871.3048083210115,
"count": 449,
"self": 362.9493422448995,
"children": {
"TorchPPOOptimizer.update": {
"total": 508.35546607611195,
"count": 22761,
"self": 508.35546607611195
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6189987945836037e-06,
"count": 1,
"self": 1.6189987945836037e-06
},
"TrainerController._save_models": {
"total": 0.0766045229993324,
"count": 1,
"self": 0.0018705669990595197,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07473395600027288,
"count": 1,
"self": 0.07473395600027288
}
}
}
}
}
}
}