{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.44157305359840393,
"min": 0.43676286935806274,
"max": 1.4378165006637573,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13141.2138671875,
"min": 13123.8505859375,
"max": 43617.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29921.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29921.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4675433337688446,
"min": -0.10235287994146347,
"max": 0.5306376814842224,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.63932800292969,
"min": -24.871749877929688,
"max": 145.3947296142578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.061313942074775696,
"min": -0.007736389525234699,
"max": 0.35186150670051575,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 16.738706588745117,
"min": -2.088825225830078,
"max": 83.39117431640625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07140147745598195,
"min": 0.06625515024300993,
"max": 0.07321805168272126,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9996206843837474,
"min": 0.5125263617790489,
"max": 1.0655628848762717,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01638331957933371,
"min": 0.0005011507156734543,
"max": 0.016899993951018488,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22936647411067193,
"min": 0.005095331658197552,
"max": 0.2534999092652773,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.659740303928572e-06,
"min": 7.659740303928572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010723636425500001,
"min": 0.00010723636425500001,
"max": 0.0035085839304720996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255321428571429,
"min": 0.10255321428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.435745,
"min": 1.3886848,
"max": 2.5695279000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026506610714285714,
"min": 0.00026506610714285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037109255,
"min": 0.0037109255,
"max": 0.11697583720999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013357214629650116,
"min": 0.013357214629650116,
"max": 0.528098464012146,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18700100481510162,
"min": 0.18700100481510162,
"max": 3.6966891288757324,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 372.5,
"min": 372.5,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29055.0,
"min": 16848.0,
"max": 33148.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.502891117824784,
"min": -0.9999500517733395,
"max": 1.5760023581484954,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 118.72839830815792,
"min": -31.998401656746864,
"max": 132.3841980844736,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.502891117824784,
"min": -0.9999500517733395,
"max": 1.5760023581484954,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 118.72839830815792,
"min": -31.998401656746864,
"max": 132.3841980844736,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05053089526308777,
"min": 0.05053089526308777,
"max": 10.316037229755345,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.991940725783934,
"min": 3.991940725783934,
"max": 175.37263290584087,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739968749",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739971199"
},
"total": 2449.8467404450003,
"count": 1,
"self": 0.8238253650001752,
"children": {
"run_training.setup": {
"total": 0.026107305999971686,
"count": 1,
"self": 0.026107305999971686
},
"TrainerController.start_learning": {
"total": 2448.996807774,
"count": 1,
"self": 1.6614750589847063,
"children": {
"TrainerController._reset_env": {
"total": 3.244699923999974,
"count": 1,
"self": 3.244699923999974
},
"TrainerController.advance": {
"total": 2443.933636672015,
"count": 63625,
"self": 1.654925191050097,
"children": {
"env_step": {
"total": 1710.9046968079656,
"count": 63625,
"self": 1532.2881324569491,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.6585783690174,
"count": 63625,
"self": 5.351975674020309,
"children": {
"TorchPolicy.evaluate": {
"total": 172.30660269499708,
"count": 62572,
"self": 172.30660269499708
}
}
},
"workers": {
"total": 0.9579859819990588,
"count": 63625,
"self": 0.0,
"children": {
"worker_root": {
"total": 2443.126842369968,
"count": 63625,
"is_parallel": true,
"self": 1041.6887557779805,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006266750000008869,
"count": 1,
"is_parallel": true,
"self": 0.004704338000351527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015624119996573427,
"count": 8,
"is_parallel": true,
"self": 0.0015624119996573427
}
}
},
"UnityEnvironment.step": {
"total": 0.06719748900002287,
"count": 1,
"is_parallel": true,
"self": 0.000636998000118183,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005791820000240477,
"count": 1,
"is_parallel": true,
"self": 0.0005791820000240477
},
"communicator.exchange": {
"total": 0.05666760099995827,
"count": 1,
"is_parallel": true,
"self": 0.05666760099995827
},
"steps_from_proto": {
"total": 0.009313707999922372,
"count": 1,
"is_parallel": true,
"self": 0.007664094999881854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016496130000405174,
"count": 8,
"is_parallel": true,
"self": 0.0016496130000405174
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1401.4380865919875,
"count": 63624,
"is_parallel": true,
"self": 35.447663888977786,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.13110265003786,
"count": 63624,
"is_parallel": true,
"self": 26.13110265003786
},
"communicator.exchange": {
"total": 1229.5082169019938,
"count": 63624,
"is_parallel": true,
"self": 1229.5082169019938
},
"steps_from_proto": {
"total": 110.35110315097802,
"count": 63624,
"is_parallel": true,
"self": 23.07206488892814,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.27903826204988,
"count": 508992,
"is_parallel": true,
"self": 87.27903826204988
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 731.3740146729994,
"count": 63625,
"self": 3.002050928956578,
"children": {
"process_trajectory": {
"total": 141.10566598804337,
"count": 63625,
"self": 140.7835109210431,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3221550670002671,
"count": 2,
"self": 0.3221550670002671
}
}
},
"_update_policy": {
"total": 587.2662977559994,
"count": 446,
"self": 324.0806416729464,
"children": {
"TorchPPOOptimizer.update": {
"total": 263.185656083053,
"count": 22806,
"self": 263.185656083053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3300000318849925e-06,
"count": 1,
"self": 1.3300000318849925e-06
},
"TrainerController._save_models": {
"total": 0.15699478900023678,
"count": 1,
"self": 0.002125228000295465,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1548695609999413,
"count": 1,
"self": 0.1548695609999413
}
}
}
}
}
}
}