{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404548168182373,
"min": 1.404548168182373,
"max": 1.4283303022384644,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69702.109375,
"min": 69479.2734375,
"max": 78059.546875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.60849056603773,
"min": 73.0701754385965,
"max": 394.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49359.0,
"min": 48662.0,
"max": 50038.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999937.0,
"min": 49497.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999937.0,
"min": 49497.0,
"max": 1999937.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4457247257232666,
"min": 0.011310546658933163,
"max": 2.5358903408050537,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1555.48095703125,
"min": 1.4251289367675781,
"max": 1734.549072265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8374005292759,
"min": 1.9182565957307816,
"max": 4.089399206210505,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2440.5867366194725,
"min": 241.70033106207848,
"max": 2695.6147460341454,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8374005292759,
"min": 1.9182565957307816,
"max": 4.089399206210505,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2440.5867366194725,
"min": 241.70033106207848,
"max": 2695.6147460341454,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016602272058662493,
"min": 0.014811632278118244,
"max": 0.019846152761359308,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04980681617598748,
"min": 0.029623264556236487,
"max": 0.05800713046337478,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05617684941324922,
"min": 0.02339305852850278,
"max": 0.06471687679489455,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16853054823974767,
"min": 0.04684987136473258,
"max": 0.18574739334483942,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.903848698749996e-06,
"min": 3.903848698749996e-06,
"max": 0.00029533125155625003,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1711546096249987e-05,
"min": 1.1711546096249987e-05,
"max": 0.00084428686857105,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10130125,
"min": 0.10130125,
"max": 0.19844375000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30390375,
"min": 0.20773845,
"max": 0.58142895,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.493237499999992e-05,
"min": 7.493237499999992e-05,
"max": 0.004922343124999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022479712499999977,
"min": 0.00022479712499999977,
"max": 0.014073304605,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753435371",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753437861"
},
"total": 2489.98708954,
"count": 1,
"self": 0.5338031060000503,
"children": {
"run_training.setup": {
"total": 0.022515256000133377,
"count": 1,
"self": 0.022515256000133377
},
"TrainerController.start_learning": {
"total": 2489.4307711779998,
"count": 1,
"self": 4.561985415966774,
"children": {
"TrainerController._reset_env": {
"total": 4.043477012000039,
"count": 1,
"self": 4.043477012000039
},
"TrainerController.advance": {
"total": 2480.7184066070326,
"count": 233104,
"self": 4.925619916285996,
"children": {
"env_step": {
"total": 1987.2538871188212,
"count": 233104,
"self": 1558.1198193257003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 426.37795754012313,
"count": 233104,
"self": 15.853140970169761,
"children": {
"TorchPolicy.evaluate": {
"total": 410.5248165699534,
"count": 222886,
"self": 410.5248165699534
}
}
},
"workers": {
"total": 2.7561102529978143,
"count": 233104,
"self": 0.0,
"children": {
"worker_root": {
"total": 2482.1576618910326,
"count": 233104,
"is_parallel": true,
"self": 1213.3868869060836,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009001010000702081,
"count": 1,
"is_parallel": true,
"self": 0.00023007400000096823,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006700270000692399,
"count": 2,
"is_parallel": true,
"self": 0.0006700270000692399
}
}
},
"UnityEnvironment.step": {
"total": 0.02935159699995893,
"count": 1,
"is_parallel": true,
"self": 0.00028836599994974677,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018193500000052154,
"count": 1,
"is_parallel": true,
"self": 0.00018193500000052154
},
"communicator.exchange": {
"total": 0.028100767000069027,
"count": 1,
"is_parallel": true,
"self": 0.028100767000069027
},
"steps_from_proto": {
"total": 0.0007805289999396336,
"count": 1,
"is_parallel": true,
"self": 0.0003216529999008344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045887600003879925,
"count": 2,
"is_parallel": true,
"self": 0.00045887600003879925
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1268.770774984949,
"count": 233103,
"is_parallel": true,
"self": 36.62354131887946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.54846202088743,
"count": 233103,
"is_parallel": true,
"self": 82.54846202088743
},
"communicator.exchange": {
"total": 1060.97566498603,
"count": 233103,
"is_parallel": true,
"self": 1060.97566498603
},
"steps_from_proto": {
"total": 88.62310665915197,
"count": 233103,
"is_parallel": true,
"self": 32.706827480141555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.91627917901042,
"count": 466206,
"is_parallel": true,
"self": 55.91627917901042
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 488.53889957192564,
"count": 233104,
"self": 6.687137063834143,
"children": {
"process_trajectory": {
"total": 173.533441807092,
"count": 233104,
"self": 172.2312014810916,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3022403260004012,
"count": 10,
"self": 1.3022403260004012
}
}
},
"_update_policy": {
"total": 308.3183207009995,
"count": 97,
"self": 244.27346191199763,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.04485878900186,
"count": 2910,
"self": 64.04485878900186
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.919999683916103e-07,
"count": 1,
"self": 9.919999683916103e-07
},
"TrainerController._save_models": {
"total": 0.10690115100032926,
"count": 1,
"self": 0.0016243690001829236,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10527678200014634,
"count": 1,
"self": 0.10527678200014634
}
}
}
}
}
}
}