import torch, math
import torch.nn as nn
import bitsandbytes as bnb
from torch.nn import functional as F
from torch._lowrank import svd_lowrank
import functools

def rwkv_quantize(quant_type, weight):
    # Quantize a weight tensor with bitsandbytes on the GPU and return the packed
    # weights together with the quantization state needed to undo it later.
    if quant_type == '4bit':
        qweight, qstate = bnb.functional.quantize_4bit((weight.data).to('cuda'))
    elif quant_type == 'nf4':
        qweight, qstate = bnb.functional.quantize_nf4((weight.data).to('cuda'))
    elif quant_type == 'fp4':
        qweight, qstate = bnb.functional.quantize_fp4((weight.data).to('cuda'))
    elif quant_type == 'int8':
        qweight, qstate = bnb.functional.quantize((weight.data).to('cuda'))
    return qweight, qstate

def rwkv_dequantize(quant_type, weight, qstate):
    # Inverse of rwkv_quantize: reconstruct a dense weight from the packed
    # representation and its saved quantization state.
    if quant_type == '4bit':
        deweight = bnb.functional.dequantize_4bit(weight.data, quant_state=qstate)
    elif quant_type == 'nf4':
        deweight = bnb.functional.dequantize_nf4(weight.data, quant_state=qstate)
    elif quant_type == 'fp4':
        deweight = bnb.functional.dequantize_fp4(weight.data, quant_state=qstate)
    elif quant_type == 'int8':
        deweight = bnb.functional.dequantize(weight.data, state=qstate)
    return deweight
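
# Round-trip sketch (illustrative, not part of the original module; shown
# commented out). Requires a CUDA device, since the helpers above move tensors
# to 'cuda' before calling bitsandbytes.
#
#   w = torch.randn(1024, 1024, dtype=torch.bfloat16)
#   qw, qs = rwkv_quantize('nf4', w)          # packed 4-bit weights + quant state
#   w_hat = rwkv_dequantize('nf4', qw, qs)    # approximate reconstruction of w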

LORA_CONFIG = {
    "r": 0,
    "alpha": 0,
    "dropout": 0,
    "parts": {"att", "ln", "time", "ffn"},
    "quant": False,
}

class LoraLinear(nn.Module):
    def __init__(self, in_features: int, out_features: int, bias: bool):
        super().__init__()
        self.weight = nn.Parameter(torch.empty((out_features, in_features)))
        assert bias == False, "Biased LoraLinear not supported"
        r, alpha, dropout = LORA_CONFIG["r"], LORA_CONFIG["alpha"], LORA_CONFIG["dropout"]
        # Low-rank factors: A is (r x in_features), B is (out_features x r).
        self.lora_A = nn.Parameter(torch.empty(r, in_features))
        self.lora_B = nn.Parameter(torch.empty(out_features, r))
        self.lora_dropout = nn.Dropout(dropout)
        self.scaling = alpha / r
        self.r = r
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        # B starts at zero so the LoRA delta is zero at initialization.
        nn.init.zeros_(self.lora_B)
        self.pissa = False
        self.is_quant = False
    def pissa_load(self, init_A, init_B):
        # Re-apply a previously computed PiSSA split: the saved factors carry the
        # principal components, so their product is subtracted from the base weight.
        self.pissa = True
        self.weight.data = self.weight.data - init_B @ init_A

    def pissa_init(self, svd_niter):
        # PiSSA initialization: put the top-r singular directions of the weight
        # into lora_A / lora_B and keep only the residual in the base weight.
        self.pissa = True
        Ur, Sr, Vr = svd_lowrank(self.weight.data, self.r, niter=svd_niter)
        Vhr = Vr.t()
        lora_A = torch.diag(torch.sqrt(Sr)) @ Vhr
        lora_B = Ur @ torch.diag(torch.sqrt(Sr))
        self.lora_A.data = lora_A
        self.lora_B.data = lora_B
        self.weight.data = self.weight.data - lora_B @ lora_A

    def quant(self, quant_type):
        # Quantize the (residual) base weight in place; the quant state is kept
        # so forward() can dequantize on the fly.
        self.is_quant = True
        self.quant_type = quant_type
        self.weight.data, self.qstate = rwkv_quantize(self.quant_type, (self.weight.data).to('cuda'))

    def forward(self, x):
        if self.is_quant:
            if self.pissa:
                # PiSSA path: no dropout and no extra scaling, since lora_A / lora_B
                # already carry the singular values of the original weight.
                return (
                    F.linear(x, rwkv_dequantize(self.quant_type, self.weight.data, self.qstate).to(torch.bfloat16)) +
                    F.linear(F.linear(x, self.lora_A), self.lora_B))
            return (
                F.linear(x, rwkv_dequantize(self.quant_type, self.weight.data, self.qstate)) + self.scaling *
                F.linear(F.linear(self.lora_dropout(x), self.lora_A), self.lora_B))
        if self.pissa:
            return (
                F.linear(x, self.weight) +
                F.linear(F.linear(x, self.lora_A), self.lora_B))
        return (
            F.linear(x, self.weight) + self.scaling *
            F.linear(F.linear(self.lora_dropout(x), self.lora_A), self.lora_B))
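
# PiSSA usage sketch (illustrative, not part of the original module; shown
# commented out). pissa_init() is expected to run before quant(), so that the
# quantized base weight is the residual left after removing the principal
# singular directions. The layer names below are hypothetical.
#
#   layer.pissa_init(svd_niter=4)
#   init_A, init_B = layer.lora_A.data.clone(), layer.lora_B.data.clone()
#   layer.quant('nf4')                        # optional: quantize the residual weight
#   ...
#   fresh_layer.pissa_load(init_A, init_B)    # re-apply the same split elsewhere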

class QuantLinear(nn.Module):
    # Plain linear layer whose weight can be stored quantized and is dequantized
    # on every forward pass (no LoRA factors).
    def __init__(self, in_features: int, out_features: int, bias: bool):
        super().__init__()
        self.weight = nn.Parameter(torch.empty((out_features, in_features)))
        assert bias == False, "Biased QuantLinear not supported"
        self.is_quant = False

    def quant(self, quant_type):
        self.is_quant = True
        self.quant_type = quant_type
        #self.dummy_tensor = nn.Parameter(torch.zeros(1))
        self.weight.data, self.qstate = rwkv_quantize(self.quant_type, (self.weight.data).to('cuda'))

    def forward(self, x):
        if self.is_quant:
            return F.linear(x, rwkv_dequantize(self.quant_type, self.weight.data, self.qstate).to(torch.bfloat16))
        else:
            return F.linear(x, self.weight)

@functools.wraps(LoraLinear)
def make_linear_att(*args, **kwargs):
    # Attention projections: LoRA-wrapped when "att" is among the trained parts
    # and r > 0; otherwise a QuantLinear (if quantization is enabled) or a plain nn.Linear.
    if "att" in LORA_CONFIG["parts"] and LORA_CONFIG["r"] > 0:
        return LoraLinear(*args, **kwargs)
    elif LORA_CONFIG["quant"]:
        return QuantLinear(*args, **kwargs)
    else:
        return nn.Linear(*args, **kwargs)

@functools.wraps(LoraLinear)
def make_linear_ffn(*args, **kwargs):
    # Same selection logic as make_linear_att, keyed on the "ffn" part instead.
    if "ffn" in LORA_CONFIG["parts"] and LORA_CONFIG["r"] > 0:
        return LoraLinear(*args, **kwargs)
    elif LORA_CONFIG["quant"]:
        return QuantLinear(*args, **kwargs)
    else:
        return nn.Linear(*args, **kwargs)
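
# Usage sketch (illustrative, not part of the original module). The sizes and
# hyper-parameters below are arbitrary example values; the real ones come from
# the RWKV model config and training script. Assumes bitsandbytes imports
# cleanly, since it is loaded at module import time.
if __name__ == "__main__":
    LORA_CONFIG.update({"r": 8, "alpha": 16, "dropout": 0.01})
    layer = make_linear_att(512, 512, bias=False)   # LoraLinear, since r > 0 and "att" is enabled
    x = torch.randn(2, 512)
    y = layer(x)                                    # base projection + scaled LoRA delta
    print(type(layer).__name__, tuple(y.shape))     # -> LoraLinear (2, 512)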