{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.5344569683074951,
"min": 1.5344569683074951,
"max": 1.5344569683074951,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 46549.28515625,
"min": 46549.28515625,
"max": 46549.28515625,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.040529459714889526,
"min": -0.040529459714889526,
"max": -0.040529459714889526,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -9.60548210144043,
"min": -9.60548210144043,
"max": -9.60548210144043,
"count": 1
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.01069763395935297,
"min": 0.01069763395935297,
"max": 0.01069763395935297,
"count": 1
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 2.53533935546875,
"min": 2.53533935546875,
"max": 2.53533935546875,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07218139797353726,
"min": 0.07218139797353726,
"max": 0.07218139797353726,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5052697858147608,
"min": 0.5052697858147608,
"max": 0.5052697858147608,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006008170108746815,
"min": 0.006008170108746815,
"max": 0.006008170108746815,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0420571907612277,
"min": 0.0420571907612277,
"max": 0.0420571907612277,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00020301260375771423,
"min": 0.00020301260375771423,
"max": 0.00020301260375771423,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0014210882263039997,
"min": 0.0014210882263039997,
"max": 0.0014210882263039997,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16767085714285712,
"min": 0.16767085714285712,
"max": 0.16767085714285712,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1736959999999999,
"min": 1.1736959999999999,
"max": 1.1736959999999999,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006770318628571428,
"min": 0.006770318628571428,
"max": 0.006770318628571428,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0473922304,
"min": 0.0473922304,
"max": 0.0473922304,
"count": 1
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.45153971315072533,
"min": 0.45153971315072533,
"max": 0.45153971315072533,
"count": 1
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 3.160777992055077,
"min": 3.160777992055077,
"max": 3.160777992055077,
"count": 1
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.7131189181116121,
"min": 0.7131189181116121,
"max": 0.7131189181116121,
"count": 1
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 4.991832426781285,
"min": 4.991832426781285,
"max": 4.991832426781285,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 6.557371728122234,
"min": 6.557371728122234,
"max": 6.557371728122234,
"count": 1
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 104.91794764995575,
"min": 104.91794764995575,
"max": 104.91794764995575,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1761210483",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/content/myenv/bin/mlagents-learn ./ml-agents/config/ppo/Pyramids.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1761210587"
},
"total": 104.13527862399997,
"count": 1,
"self": 0.7503009429997292,
"children": {
"run_training.setup": {
"total": 0.018744918000038524,
"count": 1,
"self": 0.018744918000038524
},
"TrainerController.start_learning": {
"total": 103.3662327630002,
"count": 1,
"self": 0.0603248660004283,
"children": {
"TrainerController._reset_env": {
"total": 3.353776130000142,
"count": 1,
"self": 3.353776130000142
},
"TrainerController.advance": {
"total": 99.76844716399955,
"count": 3166,
"self": 0.06313490799902866,
"children": {
"env_step": {
"total": 63.39057403800007,
"count": 3166,
"self": 55.823666054992145,
"children": {
"SubprocessEnvManager._take_step": {
"total": 7.526520903999199,
"count": 3166,
"self": 0.22845426899607446,
"children": {
"TorchPolicy.evaluate": {
"total": 7.298066635003124,
"count": 3164,
"self": 7.298066635003124
}
}
},
"workers": {
"total": 0.04038707900872396,
"count": 3166,
"self": 0.0,
"children": {
"worker_root": {
"total": 102.8410500759976,
"count": 3166,
"is_parallel": true,
"self": 52.53286563799952,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005567378000023382,
"count": 1,
"is_parallel": true,
"self": 0.0038096079997558263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017577700002675556,
"count": 8,
"is_parallel": true,
"self": 0.0017577700002675556
}
}
},
"UnityEnvironment.step": {
"total": 0.050386253999931796,
"count": 1,
"is_parallel": true,
"self": 0.0005206569999245403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004592340001181583,
"count": 1,
"is_parallel": true,
"self": 0.0004592340001181583
},
"communicator.exchange": {
"total": 0.04750612899988482,
"count": 1,
"is_parallel": true,
"self": 0.04750612899988482
},
"steps_from_proto": {
"total": 0.001900234000004275,
"count": 1,
"is_parallel": true,
"self": 0.00048702900016905915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014132049998352159,
"count": 8,
"is_parallel": true,
"self": 0.0014132049998352159
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 50.30818443799808,
"count": 3165,
"is_parallel": true,
"self": 1.627714770978855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.1167298590098653,
"count": 3165,
"is_parallel": true,
"self": 1.1167298590098653
},
"communicator.exchange": {
"total": 42.20733412700565,
"count": 3165,
"is_parallel": true,
"self": 42.20733412700565
},
"steps_from_proto": {
"total": 5.356405681003707,
"count": 3165,
"is_parallel": true,
"self": 1.123587368985227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 4.23281831201848,
"count": 25320,
"is_parallel": true,
"self": 4.23281831201848
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 36.31473821800046,
"count": 3166,
"self": 0.08105861100079892,
"children": {
"process_trajectory": {
"total": 5.444911317999413,
"count": 3166,
"self": 5.444911317999413
},
"_update_policy": {
"total": 30.788768289000245,
"count": 14,
"self": 19.62381536099838,
"children": {
"TorchPPOOptimizer.update": {
"total": 11.164952928001867,
"count": 1167,
"self": 11.164952928001867
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4719998944201507e-06,
"count": 1,
"self": 1.4719998944201507e-06
},
"TrainerController._save_models": {
"total": 0.18368313100017986,
"count": 1,
"self": 0.0020902960002331383,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18159283499994672,
"count": 1,
"self": 0.18159283499994672
}
}
}
}
}
}
}