#!/usr/bin/env python3
"""
LoRA (Low-Rank Adaptation) 模块用于SAM3微调
参考: https://arxiv.org/abs/2106.09685
"""
import math
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
class LoRALinear(nn.Module):
"""
LoRA线性层
将原始线性层分解为: W = W_0 + BA
其中 B ∈ R^{d×r}, A ∈ R^{r×k}, r << min(d, k)
"""
def __init__(
self,
original_layer: nn.Linear,
rank: int = 4,
alpha: float = 1.0,
dropout: float = 0.0,
):
super().__init__()
self.original_layer = original_layer
self.rank = rank
self.alpha = alpha
self.scaling = alpha / rank
in_features = original_layer.in_features
out_features = original_layer.out_features
        # LoRA parameters
self.lora_A = nn.Parameter(torch.zeros(rank, in_features))
self.lora_B = nn.Parameter(torch.zeros(out_features, rank))
        # Initialization
nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
nn.init.zeros_(self.lora_B)
# Dropout
self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
        # Freeze the original layer
for param in self.original_layer.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Original forward pass
result = self.original_layer(x)
        # Make sure the LoRA parameters are on the same device as the input.
        # Move the underlying tensors in place instead of re-wrapping them in new
        # nn.Parameter objects, which would silently detach them from any optimizer
        # that already holds references to these parameters.
        if self.lora_A.device != x.device:
            self.lora_A.data = self.lora_A.data.to(x.device)
            self.lora_B.data = self.lora_B.data.to(x.device)
        # LoRA branch: computed in fp32 (autocast disabled) for numerical stability
        x_dropout = self.dropout(x)
        with torch.amp.autocast('cuda', enabled=False):
x_fp32 = x_dropout.float()
lora_A = self.lora_A.float()
lora_B = self.lora_B.float()
lora_out = x_fp32 @ lora_A.T @ lora_B.T
lora_out = lora_out.to(result.dtype)
return result + lora_out * self.scaling
    def merge_weights(self):
        """Merge the LoRA weights into the original layer (e.g. for inference)."""
        if self.rank > 0:
            delta = (self.lora_B @ self.lora_A) * self.scaling
            self.original_layer.weight.data += delta.to(self.original_layer.weight.dtype)
    def unmerge_weights(self):
        """Remove the previously merged LoRA weights from the original layer."""
        if self.rank > 0:
            delta = (self.lora_B @ self.lora_A) * self.scaling
            self.original_layer.weight.data -= delta.to(self.original_layer.weight.dtype)
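# Added usage sketch (not part of the original SAM3 code): checks that
# merge_weights()/unmerge_weights() are consistent with the LoRA forward pass,
# i.e. after merging, the wrapped nn.Linear alone reproduces the LoRA-adjusted
# output. The function name _demo_merge_roundtrip is an illustrative addition.
def _demo_merge_roundtrip() -> None:
    layer = nn.Linear(16, 16)
    lora = LoRALinear(layer, rank=4, alpha=8)
    # lora_B starts at zero, so perturb it to make the merge observable.
    nn.init.normal_(lora.lora_B, std=0.02)
    w0 = layer.weight.detach().clone()
    x = torch.randn(3, 16)
    with torch.no_grad():
        y_lora = lora(x)
        lora.merge_weights()
        y_merged = lora.original_layer(x)
        lora.unmerge_weights()
    assert torch.allclose(y_lora, y_merged, atol=1e-5)
    assert torch.allclose(layer.weight, w0, atol=1e-6)
    print("LoRALinear merge/unmerge round trip OK")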
class LoRAConv2d(nn.Module):
"""
LoRA卷积层
使用与原始卷积相同的stride和padding
"""
def __init__(
self,
original_layer: nn.Conv2d,
rank: int = 4,
alpha: float = 1.0,
dropout: float = 0.0,
):
super().__init__()
self.original_layer = original_layer
self.rank = rank
self.alpha = alpha
self.scaling = alpha / rank
in_channels = original_layer.in_channels
out_channels = original_layer.out_channels
kernel_size = original_layer.kernel_size
stride = original_layer.stride
padding = original_layer.padding
        # LoRA parameters:
        # lora_A: down-projection (in_ch -> rank) via a 1x1 convolution
        # lora_B: up-projection (rank -> out_ch) using the original kernel size,
        #         stride, and padding so the spatial dimensions stay aligned
self.lora_A = nn.Conv2d(
in_channels, rank, kernel_size=1, stride=1, padding=0, bias=False
)
        self.lora_B = nn.Conv2d(
            rank, out_channels, kernel_size=kernel_size, stride=stride,
            padding=padding, dilation=original_layer.dilation, bias=False,
        )
        # Initialization
nn.init.kaiming_uniform_(self.lora_A.weight, a=math.sqrt(5))
nn.init.zeros_(self.lora_B.weight)
self.dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
        # Freeze the original layer
for param in self.original_layer.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.original_layer(x)
        # Make sure the LoRA layers are on the same device as the input
if self.lora_A.weight.device != x.device:
self.lora_A = self.lora_A.to(x.device)
self.lora_B = self.lora_B.to(x.device)
x_dropout = self.dropout(x)
        # Compute the LoRA branch in fp32 (autocast disabled), then cast back to the output dtype
with torch.amp.autocast('cuda', enabled=False):
x_fp32 = x_dropout.float()
h = self.lora_A(x_fp32)
lora_out = self.lora_B(h)
lora_out = lora_out.to(result.dtype)
return result + lora_out * self.scaling
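# Added sketch (not part of the original module): a quick shape check showing that
# the LoRA branch of LoRAConv2d follows the stride/padding of the wrapped
# convolution, so its output can be added to the original output. The function
# name _demo_conv_shapes is an illustrative addition.
def _demo_conv_shapes() -> None:
    conv = nn.Conv2d(8, 16, kernel_size=3, stride=2, padding=1)
    lora = LoRAConv2d(conv, rank=4, alpha=8)
    x = torch.randn(1, 8, 32, 32)
    y_ref = conv(x)
    y_lora = lora(x)
    # Both paths must produce the same spatial resolution (here 16x16).
    assert y_lora.shape == y_ref.shape
    print(f"LoRAConv2d shape check OK: {tuple(y_lora.shape)}")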
def apply_lora_to_model(
model: nn.Module,
rank: int = 4,
alpha: float = 1.0,
dropout: float = 0.0,
target_modules: Optional[List[str]] = None,
exclude_modules: Optional[List[str]] = None,
) -> nn.Module:
"""
将LoRA应用到模型的指定层
Args:
model: 原始模型
rank: LoRA秩
alpha: LoRA缩放因子
dropout: LoRA dropout
target_modules: 目标模块名称列表 (如 ['q_proj', 'v_proj'])
exclude_modules: 排除的模块名称列表
Returns:
应用了LoRA的模型
"""
if target_modules is None:
        # Default targets: the Q/K/V projections of attention layers
target_modules = [
'q_proj', 'k_proj', 'v_proj',
'qkv', 'to_q', 'to_k', 'to_v',
'query', 'key', 'value',
]
if exclude_modules is None:
exclude_modules = []
    lora_count = 0
    # Snapshot the module list so the tree is not mutated while iterating over it
    for name, module in list(model.named_modules()):
        # Skip excluded modules
        should_exclude = any(ex in name for ex in exclude_modules)
        if should_exclude:
            continue
        # Check whether this module is a target
        is_target = any(target in name for target in target_modules)
if is_target:
if isinstance(module, nn.Linear):
                # Resolve the parent module and attribute name
parent_name = '.'.join(name.split('.')[:-1])
attr_name = name.split('.')[-1]
parent = model
if parent_name:
for part in parent_name.split('.'):
parent = getattr(parent, part)
                # Replace the linear layer with a LoRA wrapper
lora_layer = LoRALinear(module, rank=rank, alpha=alpha, dropout=dropout)
setattr(parent, attr_name, lora_layer)
lora_count += 1
elif isinstance(module, nn.Conv2d):
parent_name = '.'.join(name.split('.')[:-1])
attr_name = name.split('.')[-1]
parent = model
if parent_name:
for part in parent_name.split('.'):
parent = getattr(parent, part)
lora_layer = LoRAConv2d(module, rank=rank, alpha=alpha, dropout=dropout)
setattr(parent, attr_name, lora_layer)
lora_count += 1
print(f"Applied LoRA to {lora_count} layers")
return model
def get_lora_params(model: nn.Module) -> List[nn.Parameter]:
"""获取所有LoRA参数"""
lora_params = []
for name, param in model.named_parameters():
if 'lora_' in name:
lora_params.append(param)
return lora_params
def get_trainable_params(model: nn.Module) -> Tuple[List[nn.Parameter], List[str]]:
"""获取所有可训练参数"""
trainable_params = []
trainable_names = []
for name, param in model.named_parameters():
if param.requires_grad:
trainable_params.append(param)
trainable_names.append(name)
return trainable_params, trainable_names
def freeze_model_except_lora(model: nn.Module):
"""冻结除LoRA外的所有参数"""
for name, param in model.named_parameters():
if 'lora_' not in name:
param.requires_grad = False
else:
param.requires_grad = True
def count_parameters(model: nn.Module) -> Dict[str, int]:
"""统计参数数量"""
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
frozen = total - trainable
return {
'total': total,
'trainable': trainable,
'frozen': frozen,
'trainable_ratio': trainable / total if total > 0 else 0,
}
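# Added sketch (not part of the original module): wires apply_lora_to_model,
# freeze_model_except_lora and count_parameters together on a toy model. The
# _ToyAttention class and _demo_apply_lora name are illustrative additions, not
# SAM3 components.
class _ToyAttention(nn.Module):
    def __init__(self, dim: int = 64):
        super().__init__()
        self.q_proj = nn.Linear(dim, dim)
        self.k_proj = nn.Linear(dim, dim)
        self.v_proj = nn.Linear(dim, dim)
        self.out_proj = nn.Linear(dim, dim)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)
        attn = torch.softmax(q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1]), dim=-1)
        return self.out_proj(attn @ v)
def _demo_apply_lora() -> None:
    model = _ToyAttention()
    # q/k/v_proj match the default targets; out_proj is left untouched.
    model = apply_lora_to_model(model, rank=4, alpha=8)
    freeze_model_except_lora(model)
    stats = count_parameters(model)
    print(f"Toy model trainable ratio: {stats['trainable_ratio']:.4f}")
    y = model(torch.randn(2, 10, 64))
    assert y.shape == (2, 10, 64)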
def save_lora_weights(model: nn.Module, path: str):
"""只保存LoRA权重"""
lora_state_dict = {}
for name, param in model.named_parameters():
if 'lora_' in name:
lora_state_dict[name] = param.data
torch.save(lora_state_dict, path)
print(f"Saved LoRA weights to {path}")
def load_lora_weights(model: nn.Module, path: str):
"""加载LoRA权重"""
lora_state_dict = torch.load(path, map_location='cpu')
model_state_dict = model.state_dict()
for name, param in lora_state_dict.items():
if name in model_state_dict:
model_state_dict[name] = param
model.load_state_dict(model_state_dict, strict=False)
print(f"Loaded LoRA weights from {path}")
if __name__ == "__main__":
    # Quick self-test
    print("Testing LoRA module...")
    # Test the linear layer
linear = nn.Linear(768, 768)
lora_linear = LoRALinear(linear, rank=8, alpha=16)
x = torch.randn(2, 768)
y = lora_linear(x)
print(f"LoRA Linear output shape: {y.shape}")
    # Test the conv layer
conv = nn.Conv2d(256, 256, 3, padding=1)
lora_conv = LoRAConv2d(conv, rank=8, alpha=16)
x = torch.randn(2, 256, 32, 32)
y = lora_conv(x)
print(f"LoRA Conv output shape: {y.shape}")
    # Test parameter counting
model = nn.Sequential(
lora_linear,
nn.ReLU(),
nn.Linear(768, 10)
)
stats = count_parameters(model)
print(f"Parameters: {stats}")