|
|
| """
|
| 将PyTorch模型转换为TensorRT Engine格式
|
|
|
| 支持YOLO模型转换为.engine格式以提高推理速度
|
| """
|
|
|
| import os
|
| import sys
|
| from pathlib import Path
|
| import argparse
|
| import shutil
|
|
|
def convert_pt_to_engine(model_path, output_path=None, img_size=640, batch_size=1, device=0, fp16=True):
    """Export a PyTorch YOLO checkpoint (.pt) to a TensorRT engine (.engine).

    Args:
        model_path: path to the input PyTorch checkpoint (.pt).
        output_path: desired path for the resulting .engine file; when None,
            defaults to "<model_stem>.engine" in the current directory.
        img_size: model input size (imgsz) baked into the engine.
        batch_size: batch dimension baked into the engine.
        device: GPU device id used for the export.
        fp16: build a half-precision engine (recommended on T4).

    Returns:
        True when an engine file was produced and exists on disk,
        False on any failure (missing package, missing model, export error).
    """
    # Import lazily so a missing package yields a helpful hint instead of a crash.
    try:
        from ultralytics import YOLO
        print("Ultralytics已加载")
    except ImportError:
        print("错误: 无法导入ultralytics")
        print("请确保在正确的环境中运行: mamba activate procedure")
        return False

    if not os.path.exists(model_path):
        print(f"错误: 模型文件不存在 - {model_path}")
        return False

    if output_path is None:
        model_name = Path(model_path).stem
        output_path = f"{model_name}.engine"

    print(f"开始转换模型: {model_path}")
    print(f"输出路径: {output_path}")
    print(f"输入尺寸: {img_size}")
    print(f"批次大小: {batch_size}")
    print(f"FP16: {fp16}")
    print("-" * 50)
    print("注意:TensorRT engine 通常与生成它的 GPU 架构绑定,建议在目标 GPU(例如 T4)上生成 engine。")

    try:
        print("加载PyTorch模型...")
        model = YOLO(model_path)

        print("开始转换为 TensorRT Engine (.engine)...")
        export_result = model.export(
            format='engine',
            imgsz=img_size,
            batch=batch_size,
            device=device,
            half=bool(fp16),
            verbose=True
        )

        if export_result:
            print("✅ 转换成功!")
            print(f"导出结果: {export_result}")

            # Ultralytics writes the engine next to the .pt file; copy it to the
            # requested output path when that differs. Best-effort: on failure we
            # fall back to the exporter's own output location.
            try:
                export_path = Path(str(export_result))
                out_path = Path(output_path)
                if export_path.exists():
                    if out_path.resolve() != export_path.resolve():
                        out_path.parent.mkdir(parents=True, exist_ok=True)
                        shutil.copy2(export_path, out_path)
                        print(f"✅ 已写出: {out_path}")
                        final_path = out_path
                    else:
                        final_path = export_path
                else:
                    final_path = export_result
            except Exception as e:
                print(f"⚠️ 写出到指定路径失败,将使用导出结果路径:{e}")
                final_path = export_result

            # Only report success if the engine file really exists on disk.
            if isinstance(final_path, (str, Path)) and os.path.exists(str(final_path)):
                file_size = os.path.getsize(str(final_path)) / (1024 * 1024)
                print(f"文件大小: {file_size:.1f} MB")
                return True

        # Reached when export returned a falsy result or the file is missing;
        # always return an explicit bool instead of falling through to None.
        print("❌ 转换失败")
        return False

    except Exception as e:
        print(f"❌ 转换过程中出错: {e}")
        import traceback
        traceback.print_exc()
        return False
|
|
|
def main():
    """CLI entry point: parse arguments and run the .pt → .engine export.

    Returns None in all cases; prints success/failure guidance to stdout.
    """
    print("PyTorch → TensorRT Engine 转换工具")
    print("=" * 50)

    ap = argparse.ArgumentParser(description="将 YOLO .pt 模型导出为 TensorRT .engine(推荐在目标 GPU 上生成)")
    ap.add_argument("--model", default="d:/code/AA-python/best_light3.pt", help="输入模型路径 (.pt)")
    ap.add_argument("--out", default=None, help="输出 .engine 路径(默认:<model_stem>.engine)")
    ap.add_argument("--imgsz", type=int, default=640, help="输入尺寸 imgsz(默认 640)")
    ap.add_argument("--batch", type=int, default=1, help="batch(默认 1)")
    ap.add_argument("--device", default="0", help="device(默认 0;也可写 cpu,但 engine 需要 GPU)")
    ap.add_argument("--fp16", action="store_true", help="启用 FP16(T4 推荐)")
    ap.add_argument("--fp32", action="store_true", help="强制 FP32(不推荐)")
    args = ap.parse_args()

    # FP16 is the default; --fp32 disables it, and an explicit --fp16 wins
    # when both flags are supplied.
    fp16 = True
    if args.fp32:
        fp16 = False
    if args.fp16:
        fp16 = True

    if not os.path.exists(args.model):
        print(f"错误: 找不到模型文件 '{args.model}'")
        print("请确保模型文件在当前目录中")
        return

    print(f"输入模型: {args.model}")
    print(f"模型大小: {os.path.getsize(args.model) / (1024*1024):.1f} MB")

    success = convert_pt_to_engine(
        model_path=args.model,
        output_path=args.out,
        img_size=args.imgsz,
        batch_size=args.batch,
        device=args.device,
        fp16=fp16,
    )

    if success:
        print("\n🎉 转换完成!")
        print("你现在可以使用 .engine 文件在 T4 上进行 FP16 推理了")
        print("\n使用示例:")
        print("from ultralytics import YOLO")
        print("model = YOLO('person.engine')")
        print("results = model('image.jpg')")
        print("\n部署提示:将 gengyishi/config.py 里的 DETECT_MODEL 改为 'person.engine'(或你指定的输出文件名)")
    else:
        print("\n❌ 转换失败")
        print("请检查错误信息并重试")
|
|
|
# Script entry point: run the converter, exiting quietly on Ctrl+C and
# printing a full traceback for any other unexpected failure.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # User interrupted (Ctrl+C) — report without a traceback.
        print("\n用户中断")
    except Exception as e:
        # Last-resort handler so the script never dies silently.
        print(f"\n程序异常: {e}")
        import traceback
        traceback.print_exc()
|
|
|