{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4086658954620361,
"min": 1.4086658954620361,
"max": 1.4284313917160034,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70051.546875,
"min": 67395.2890625,
"max": 78332.2421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.71367521367522,
"min": 86.61295971978984,
"max": 429.53846153846155,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49474.0,
"min": 48812.0,
"max": 50256.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999929.0,
"min": 49900.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999929.0,
"min": 49900.0,
"max": 1999929.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3669047355651855,
"min": 0.047567326575517654,
"max": 2.462758779525757,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1107.71142578125,
"min": 5.517809867858887,
"max": 1371.111328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5733100386002126,
"min": 1.8522965010127117,
"max": 4.002329950114243,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1672.3090980648994,
"min": 214.86639411747456,
"max": 2182.159847378731,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5733100386002126,
"min": 1.8522965010127117,
"max": 4.002329950114243,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1672.3090980648994,
"min": 214.86639411747456,
"max": 2182.159847378731,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01847977898044822,
"min": 0.013450066396682005,
"max": 0.018974830085709174,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05543933694134466,
"min": 0.027684305377745962,
"max": 0.05692449025712752,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04592331945896149,
"min": 0.021643135510385034,
"max": 0.05664320923388005,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13776995837688447,
"min": 0.04328627102077007,
"max": 0.16992962770164013,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2975489008500015e-06,
"min": 3.2975489008500015e-06,
"max": 0.0002952597015801001,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.892646702550004e-06,
"min": 9.892646702550004e-06,
"max": 0.0008437558687480499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10109915000000001,
"min": 0.10109915000000001,
"max": 0.19841990000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30329745,
"min": 0.2073969,
"max": 0.5812519500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.484758500000005e-05,
"min": 6.484758500000005e-05,
"max": 0.004921153010000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019454275500000015,
"min": 0.00019454275500000015,
"max": 0.014064472305000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739140175",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739142262"
},
"total": 2087.116442301,
"count": 1,
"self": 0.3214538890001677,
"children": {
"run_training.setup": {
"total": 0.02034465499991711,
"count": 1,
"self": 0.02034465499991711
},
"TrainerController.start_learning": {
"total": 2086.774643757,
"count": 1,
"self": 3.991031791957539,
"children": {
"TrainerController._reset_env": {
"total": 2.138240708000012,
"count": 1,
"self": 2.138240708000012
},
"TrainerController.advance": {
"total": 2080.530850262043,
"count": 232067,
"self": 4.256772581978112,
"children": {
"env_step": {
"total": 1633.4379589690245,
"count": 232067,
"self": 1239.9881275549658,
"children": {
"SubprocessEnvManager._take_step": {
"total": 390.82023900599404,
"count": 232067,
"self": 14.544256193935212,
"children": {
"TorchPolicy.evaluate": {
"total": 376.2759828120588,
"count": 223048,
"self": 376.2759828120588
}
}
},
"workers": {
"total": 2.6295924080646955,
"count": 232067,
"self": 0.0,
"children": {
"worker_root": {
"total": 2079.4403283130564,
"count": 232067,
"is_parallel": true,
"self": 1082.804870860039,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009553429999868968,
"count": 1,
"is_parallel": true,
"self": 0.00035587799993663793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005994650000502588,
"count": 2,
"is_parallel": true,
"self": 0.0005994650000502588
}
}
},
"UnityEnvironment.step": {
"total": 0.028072909000002255,
"count": 1,
"is_parallel": true,
"self": 0.00025990200003889186,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001615259999425689,
"count": 1,
"is_parallel": true,
"self": 0.0001615259999425689
},
"communicator.exchange": {
"total": 0.02715216599995074,
"count": 1,
"is_parallel": true,
"self": 0.02715216599995074
},
"steps_from_proto": {
"total": 0.0004993150000700552,
"count": 1,
"is_parallel": true,
"self": 0.00015519999999469292,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00034411500007536233,
"count": 2,
"is_parallel": true,
"self": 0.00034411500007536233
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 996.6354574530174,
"count": 232066,
"is_parallel": true,
"self": 27.077954687094348,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 52.468536558006804,
"count": 232066,
"is_parallel": true,
"self": 52.468536558006804
},
"communicator.exchange": {
"total": 855.1657719319788,
"count": 232066,
"is_parallel": true,
"self": 855.1657719319788
},
"steps_from_proto": {
"total": 61.92319427593736,
"count": 232066,
"is_parallel": true,
"self": 23.667851503958673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.25534277197869,
"count": 464132,
"is_parallel": true,
"self": 38.25534277197869
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 442.83611871103994,
"count": 232067,
"self": 6.138126941121641,
"children": {
"process_trajectory": {
"total": 149.0205879649194,
"count": 232067,
"self": 147.72959939191992,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2909885729994812,
"count": 10,
"self": 1.2909885729994812
}
}
},
"_update_policy": {
"total": 287.6774038049989,
"count": 97,
"self": 225.99300024599574,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.68440355900316,
"count": 2910,
"self": 61.68440355900316
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.06899960883311e-06,
"count": 1,
"self": 1.06899960883311e-06
},
"TrainerController._save_models": {
"total": 0.1145199259999572,
"count": 1,
"self": 0.0018093240000780497,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11271060199987915,
"count": 1,
"self": 0.11271060199987915
}
}
}
}
}
}
}