{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 0.6007463335990906,
"min": 0.6007463335990906,
"max": 0.6007463335990906,
"count": 1
},
"Huggy.Policy.Entropy.sum": {
"value": 4443.720703125,
"min": 4443.720703125,
"max": 4443.720703125,
"count": 1
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 167.78571428571428,
"min": 167.78571428571428,
"max": 167.78571428571428,
"count": 1
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 2349.0,
"min": 2349.0,
"max": 2349.0,
"count": 1
},
"Huggy.Step.mean": {
"value": 99925.0,
"min": 99925.0,
"max": 99925.0,
"count": 1
},
"Huggy.Step.sum": {
"value": 99925.0,
"min": 99925.0,
"max": 99925.0,
"count": 1
},
"Huggy.Policy.ExtrinsicValue.mean": {
"value": 355.0265808105469,
"min": 355.0265808105469,
"max": 355.0265808105469,
"count": 1
},
"Huggy.Policy.ExtrinsicValue.sum": {
"value": 4615.345703125,
"min": 4615.345703125,
"max": 4615.345703125,
"count": 1
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 1.1283439581210797,
"min": 1.1283439581210797,
"max": 1.1283439581210797,
"count": 1
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 14.668471455574036,
"min": 14.668471455574036,
"max": 14.668471455574036,
"count": 1
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 1.1283439581210797,
"min": 1.1283439581210797,
"max": 1.1283439581210797,
"count": 1
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 14.668471455574036,
"min": 14.668471455574036,
"max": 14.668471455574036,
"count": 1
},
"Huggy.Losses.PolicyLoss.mean": {
"value": -377.51686883773505,
"min": -377.51686883773505,
"max": -377.51686883773505,
"count": 1
},
"Huggy.Losses.PolicyLoss.sum": {
"value": -736912.9279712588,
"min": -736912.9279712588,
"max": -736912.9279712588,
"count": 1
},
"Huggy.Losses.ValueLoss.mean": {
"value": 37.55478162918095,
"min": 37.55478162918095,
"max": 37.55478162918095,
"count": 1
},
"Huggy.Losses.ValueLoss.sum": {
"value": 73306.93374016121,
"min": 73306.93374016121,
"max": 73306.93374016121,
"count": 1
},
"Huggy.Losses.Q1Loss.mean": {
"value": 32.88027379275666,
"min": 32.88027379275666,
"max": 32.88027379275666,
"count": 1
},
"Huggy.Losses.Q1Loss.sum": {
"value": 64182.294443461,
"min": 64182.294443461,
"max": 64182.294443461,
"count": 1
},
"Huggy.Losses.Q2Loss.mean": {
"value": 30.215098221913532,
"min": 30.215098221913532,
"max": 30.215098221913532,
"count": 1
},
"Huggy.Losses.Q2Loss.sum": {
"value": 58979.87172917522,
"min": 58979.87172917522,
"max": 58979.87172917522,
"count": 1
},
"Huggy.Policy.ContinuousEntropyCoeff.mean": {
"value": 0.6101228481604988,
"min": 0.6101228481604988,
"max": 0.6101228481604988,
"count": 1
},
"Huggy.Policy.ContinuousEntropyCoeff.sum": {
"value": 1190.9597996092937,
"min": 1190.9597996092937,
"max": 1190.9597996092937,
"count": 1
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.5855999999999999,
"min": 0.5855999999999999,
"max": 0.5855999999999999,
"count": 1
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738934928",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --resume --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738935067"
},
"total": 138.4005149980003,
"count": 1,
"self": 0.05426449300102831,
"children": {
"run_training.setup": {
"total": 0.020437452999431116,
"count": 1,
"self": 0.020437452999431116
},
"TrainerController.start_learning": {
"total": 138.32581305199983,
"count": 1,
"self": 0.019194586986486684,
"children": {
"TrainerController._reset_env": {
"total": 2.163946042000134,
"count": 1,
"self": 2.163946042000134
},
"TrainerController.advance": {
"total": 135.7644718560141,
"count": 962,
"self": 0.016848251021656324,
"children": {
"env_step": {
"total": 8.339853386981304,
"count": 962,
"self": 6.3132783790006215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2.0163242189964876,
"count": 962,
"self": 0.06530384300913283,
"children": {
"TorchPolicy.evaluate": {
"total": 1.9510203759873548,
"count": 954,
"self": 1.9510203759873548
}
}
},
"workers": {
"total": 0.010250788984194514,
"count": 962,
"self": 0.0,
"children": {
"worker_root": {
"total": 134.3363206410104,
"count": 962,
"is_parallel": true,
"self": 129.18815153601554,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008550060001653037,
"count": 1,
"is_parallel": true,
"self": 0.0002193910004280042,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006356149997372995,
"count": 2,
"is_parallel": true,
"self": 0.0006356149997372995
}
}
},
"UnityEnvironment.step": {
"total": 0.029458187999807706,
"count": 1,
"is_parallel": true,
"self": 0.00026676699962990824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001875770003607613,
"count": 1,
"is_parallel": true,
"self": 0.0001875770003607613
},
"communicator.exchange": {
"total": 0.028169782000077248,
"count": 1,
"is_parallel": true,
"self": 0.028169782000077248
},
"steps_from_proto": {
"total": 0.000834061999739788,
"count": 1,
"is_parallel": true,
"self": 0.0003398689996174653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004941930001223227,
"count": 2,
"is_parallel": true,
"self": 0.0004941930001223227
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5.1481691049948495,
"count": 961,
"is_parallel": true,
"self": 0.14491284300765983,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.332936723979401,
"count": 961,
"is_parallel": true,
"self": 0.332936723979401
},
"communicator.exchange": {
"total": 4.328824988002452,
"count": 961,
"is_parallel": true,
"self": 4.328824988002452
},
"steps_from_proto": {
"total": 0.34149455000533635,
"count": 961,
"is_parallel": true,
"self": 0.1215183390177117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.21997621098762465,
"count": 1922,
"is_parallel": true,
"self": 0.21997621098762465
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 127.40777021801114,
"count": 962,
"self": 0.02621321100559726,
"children": {
"process_trajectory": {
"total": 0.23487424600170925,
"count": 962,
"self": 0.23487424600170925
},
"_update_policy": {
"total": 127.14668276100383,
"count": 698,
"self": 0.004324537019783747,
"children": {
"OffPolicyTrainer._update_policy": {
"total": 127.14235822398405,
"count": 698,
"self": 54.006551052007126,
"children": {
"TorchSACOptimizer.update": {
"total": 73.13580717197692,
"count": 3639,
"self": 73.13580717197692
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6889998732949607e-06,
"count": 1,
"self": 1.6889998732949607e-06
},
"TrainerController._save_models": {
"total": 0.3781988779992389,
"count": 1,
"self": 0.005318793999322224,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3728800839999167,
"count": 1,
"self": 0.3728800839999167
}
}
}
}
}
}
}