---
datasets:
- abideen/Cosmopedia-100k-pretrain
tags:
- Mistral
- 1bit
- bitnet
- abideen
---
This is my first attempt at converting a float16 model to 1.58-bit BitNet form; the quantized weights take only the ternary values {-1, 0, 1}, which works out to log2(3) ≈ 1.58 bits per weight. I used alpindale/Mistral-7B-v0.2-hf as the base model, trained on the abideen/Cosmopedia-100k-pretrain dataset, and followed abideen's Google Colab project to make this.

# Example inference code from abideen's Colab project

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.models.llama.modeling_llama import *
from torch import nn
import torch.nn.functional as F

# Load the pretrained BitNet checkpoint
model_id = "liminerity/Bitnet-Mistral.0.2-70M"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

def activation_quant(x):
    # Per-token absmax quantization of activations to 8 bits
    scale = 127.0 / x.abs().max(dim=-1, keepdim=True).values.clamp_(min=1e-5)
    y = (x * scale).round().clamp_(-128, 127)
    y = y / scale
    return y

def weight_quant(w):
    # Ternary quantization of weights to {-1, 0, 1} with absmean scaling
    scale = 1.0 / w.abs().mean().clamp_(min=1e-5)
    u = (w * scale).round().clamp_(-1, 1)
    u = u / scale
    return u

class BitLinear(nn.Linear):
    def forward(self, x):
        w = self.weight  # weight tensor with shape [d, k]
        x = x.to(w.device)
        # BitLinear applies its own RMSNorm (the decoder layer's
        # input_layernorm is removed during conversion below)
        RMSNorm = LlamaRMSNorm(x.shape[-1]).to(w.device)
        x_norm = RMSNorm(x)
        # Straight-Through Estimator (STE) trick: use quantized values in the
        # forward pass but full-precision gradients, via detach()
        x_quant = x_norm + (activation_quant(x_norm) - x_norm).detach()
        w_quant = w + (weight_quant(w) - w).detach()
        y = F.linear(x_quant, w_quant)
        return y

def convert_to_bitnet(model, copy_weights):
    for name, module in model.named_modules():
        # Replace the linear layers inside attention and MLP blocks with BitLinear
        if isinstance(module, LlamaSdpaAttention) or isinstance(module, LlamaMLP):
            for child_name, child_module in module.named_children():
                if isinstance(child_module, nn.Linear):
                    bitlinear = BitLinear(child_module.in_features,
                                          child_module.out_features,
                                          child_module.bias is not None).to(device="cuda:0")
                    if copy_weights:
                        bitlinear.weight = child_module.weight
                        if child_module.bias is not None:
                            bitlinear.bias = child_module.bias
                    setattr(module, child_name, bitlinear)
        # Remove redundant input_layernorms (BitLinear normalizes internally)
        elif isinstance(module, LlamaDecoderLayer):
            for child_name, child_module in module.named_children():
                if isinstance(child_module, LlamaRMSNorm) and child_name == "input_layernorm":
                    setattr(module, child_name, nn.Identity().to(device="cuda:0"))

convert_to_bitnet(model, copy_weights=True)
model.to(device="cuda:0")

prompt = "What is Machine Learning?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generate_ids = model.generate(inputs.input_ids, max_length=50)
print(tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0])
```
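
As a quick sanity check of what the two quantizers actually produce, here is a minimal sketch. It assumes the block above has already been run (so `weight_quant` and `activation_quant` are defined); the toy tensor values are purely illustrative.

```python
import torch

# weight_quant maps every entry onto a ternary grid {-s, 0, +s},
# where s = mean(|w|) is the absmean scale
w = torch.tensor([[0.4, -1.2, 0.05], [0.9, -0.3, 2.1]])
wq = weight_quant(w)
print(wq)                 # every entry is -s, 0, or +s
print(wq.abs().unique())  # at most two magnitudes: 0 and s

# activation_quant snaps each row of x onto its own 8-bit absmax grid
x = torch.randn(2, 8)
xq = activation_quant(x)
print(xq)  # values lie on a uniform 256-level grid per row
```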
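
If you want to keep a converted model around, a minimal sketch using Hugging Face's standard serialization follows; the output directory name is just an illustrative placeholder. Because `BitLinear` subclasses `nn.Linear`, the weights serialize as ordinary Linear tensors, which is why the example above re-applies `convert_to_bitnet` right after `from_pretrained`.

```python
# Hypothetical output directory; pick any path you like
save_dir = "bitnet-mistral-0.2-70m"
model.save_pretrained(save_dir)
tokenizer.save_pretrained(save_dir)

# Later: reload and re-apply the conversion, mirroring the example above
# (reload may warn about newly initialized input_layernorm weights;
# convert_to_bitnet replaces those layers with nn.Identity anyway)
model = AutoModelForCausalLM.from_pretrained(save_dir)
convert_to_bitnet(model, copy_weights=True)
model.to(device="cuda:0")
```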