karl committed on
Commit ·
fdcd239
1
Parent(s): 80ea4ee
Deduplication bugfixes: it turns out safetensors does not support deduplicated tensor files.
Browse files- _safetensors.py +31 -16
- run_test.py +3 -3
- scripts/compare_safetensors.py +4 -3
_safetensors.py
CHANGED
|
@@ -1,30 +1,35 @@
|
|
| 1 |
# ran into memory issues with safetensors. this code moves by them.
|
| 2 |
-
import contextlib, json, mmap, os
|
| 3 |
|
| 4 |
import torch
|
| 5 |
|
| 6 |
from _bighash import hash
|
| 7 |
|
| 8 |
class WritingSafeTensors:
|
| 9 |
-
def __init__(self, name, file_size=16*1024*1024*1024, **metadata):
|
| 10 |
self.name = name.removesuffix('.safetensors')
|
| 11 |
self.metadata = metadata
|
| 12 |
self.file = self.File(self.name + '.safetensors')
|
| 13 |
self.files = {self.file.filename:self.file}
|
| 14 |
self.file_size = file_size
|
| 15 |
self.weight_map = {}
|
| 16 |
-
|
|
|
|
|
|
|
| 17 |
def add(self, name, tensor):
|
| 18 |
-
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
if image_of is not name:
|
| 21 |
-
self.file.undo(name, tensor
|
| 22 |
-
|
| 23 |
-
assert imaged_hash == tensor_hash
|
| 24 |
else:
|
| 25 |
print(name, '...')
|
| 26 |
if self.file.size >= self.file_size:
|
| 27 |
-
self.file.undo(name, tensor
|
| 28 |
ct = len(self.files)
|
| 29 |
if len(self.files) == 1:
|
| 30 |
self.file.rename(f'{self.name}-{ct:05}.safetensors')
|
|
@@ -106,7 +111,7 @@ class WritingSafeTensors:
|
|
| 106 |
os.truncate(self.filename, new_capacity)
|
| 107 |
self.mmapview = memoryview(mmap.mmap(self.fd, new_capacity))
|
| 108 |
self.capacity = new_capacity
|
| 109 |
-
def add(self, name, tensor, image_of=None):
|
| 110 |
length = tensor.numel() * tensor.dtype.itemsize
|
| 111 |
if image_of is None:
|
| 112 |
self._reserve(length)
|
|
@@ -115,6 +120,7 @@ class WritingSafeTensors:
|
|
| 115 |
self.mmapview[start : end],
|
| 116 |
dtype=tensor.dtype, count=tensor.numel(),
|
| 117 |
).view(tensor.shape or [1])[:] = tensor
|
|
|
|
| 118 |
assert end >= self.size
|
| 119 |
self.size = end
|
| 120 |
else:
|
|
@@ -127,7 +133,10 @@ class WritingSafeTensors:
|
|
| 127 |
).view(tensor.shape)).all()
|
| 128 |
|
| 129 |
tensor.flatten()
|
| 130 |
-
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
self.header[name] = {
|
| 133 |
'dtype':
|
|
@@ -144,12 +153,18 @@ class WritingSafeTensors:
|
|
| 144 |
[start, end],
|
| 145 |
}
|
| 146 |
return tensor_hash
|
| 147 |
-
def undo(self, name, tensor
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
del self.header[name]
|
|
|
|
|
|
|
| 153 |
def set_metadata(self, **metadata):
|
| 154 |
m = self.header['__metadata__']
|
| 155 |
for k, v in metadata.items():
|
|
|
|
| 1 |
# ran into memory issues with safetensors. this code moves by them.
|
| 2 |
+
import contextlib, json, mmap, os, warnings
|
| 3 |
|
| 4 |
import torch
|
| 5 |
|
| 6 |
from _bighash import hash
|
| 7 |
|
| 8 |
class WritingSafeTensors:
|
| 9 |
+
def __init__(self, name, file_size=16*1024*1024*1024, deduplicate=False, **metadata):
|
| 10 |
self.name = name.removesuffix('.safetensors')
|
| 11 |
self.metadata = metadata
|
| 12 |
self.file = self.File(self.name + '.safetensors')
|
| 13 |
self.files = {self.file.filename:self.file}
|
| 14 |
self.file_size = file_size
|
| 15 |
self.weight_map = {}
|
| 16 |
+
if deduplicate:
|
| 17 |
+
warnings.warn('Safetensors deduplication enabled. The file will not be readable with the official library without https://github.com/huggingface/safetensors/pull/586', stacklevel=2)
|
| 18 |
+
self.hash_map = {} if deduplicate else None
|
| 19 |
def add(self, name, tensor):
|
| 20 |
+
if self.hash_map is None:
|
| 21 |
+
self.file.add(name, tensor, return_hash=False)
|
| 22 |
+
image_of = name
|
| 23 |
+
else:
|
| 24 |
+
tensor_hash = self.file.add(name, tensor, return_hash=True)
|
| 25 |
+
image_of = self.hash_map.setdefault(tensor_hash, name)
|
| 26 |
if image_of is not name:
|
| 27 |
+
self.file.undo(name, tensor)
|
| 28 |
+
self.weight_map[image_of].add(name, tensor, return_hash=False, image_of=image_of)
|
|
|
|
| 29 |
else:
|
| 30 |
print(name, '...')
|
| 31 |
if self.file.size >= self.file_size:
|
| 32 |
+
self.file.undo(name, tensor)
|
| 33 |
ct = len(self.files)
|
| 34 |
if len(self.files) == 1:
|
| 35 |
self.file.rename(f'{self.name}-{ct:05}.safetensors')
|
|
|
|
| 111 |
os.truncate(self.filename, new_capacity)
|
| 112 |
self.mmapview = memoryview(mmap.mmap(self.fd, new_capacity))
|
| 113 |
self.capacity = new_capacity
|
| 114 |
+
def add(self, name, tensor, return_hash, image_of=None):
|
| 115 |
length = tensor.numel() * tensor.dtype.itemsize
|
| 116 |
if image_of is None:
|
| 117 |
self._reserve(length)
|
|
|
|
| 120 |
self.mmapview[start : end],
|
| 121 |
dtype=tensor.dtype, count=tensor.numel(),
|
| 122 |
).view(tensor.shape or [1])[:] = tensor
|
| 123 |
+
#assert len(self.header)<2 or max(list(self.header.items())[1:], key=lambda item:item[1]['data_offsets'])[1]['data_offsets'][-1] == self.size
|
| 124 |
assert end >= self.size
|
| 125 |
self.size = end
|
| 126 |
else:
|
|
|
|
| 133 |
).view(tensor.shape)).all()
|
| 134 |
|
| 135 |
tensor.flatten()
|
| 136 |
+
if return_hash:
|
| 137 |
+
tensor_hash = hash(self.mmapview[start : end])
|
| 138 |
+
else:
|
| 139 |
+
tensor_hash = None
|
| 140 |
|
| 141 |
self.header[name] = {
|
| 142 |
'dtype':
|
|
|
|
| 153 |
[start, end],
|
| 154 |
}
|
| 155 |
return tensor_hash
|
| 156 |
+
def undo(self, name, tensor):
|
| 157 |
+
last_name = None
|
| 158 |
+
last_header = None
|
| 159 |
+
#max_name, max_header = max(list(self.header.items())[1:], key = lambda item: item[1]['data_offsets'][-1])
|
| 160 |
+
#assert max_name == name
|
| 161 |
+
#assert max_header['data_offsets'][-1] == self.size
|
| 162 |
+
length = tensor.numel() * tensor.dtype.itemsize
|
| 163 |
+
assert [self.size - length, self.size] == self.header[name]['data_offsets']
|
| 164 |
+
self.size -= length
|
| 165 |
del self.header[name]
|
| 166 |
+
#max_name, max_header = max(list(self.header.items())[1:], key = lambda item: item[1]['data_offsets'][-1])
|
| 167 |
+
#assert max_header['data_offsets'][-1] == self.size
|
| 168 |
def set_metadata(self, **metadata):
|
| 169 |
m = self.header['__metadata__']
|
| 170 |
for k, v in metadata.items():
|
run_test.py
CHANGED
|
@@ -2,6 +2,7 @@
|
|
| 2 |
import os, sys
|
| 3 |
|
| 4 |
STORE_WEIGHTS = False
|
|
|
|
| 5 |
FAKE_H100 = False
|
| 6 |
TORCH_DTYPE = 'float64'
|
| 7 |
USE_GPU = False
|
|
@@ -9,7 +10,7 @@ DEVICE_MAP = 'auto'
|
|
| 9 |
model_id, revision = sys.argv[1:]
|
| 10 |
user, model = model_id.split('/')
|
| 11 |
prompt = 'Once upon a time,'
|
| 12 |
-
fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.safetensors'
|
| 13 |
|
| 14 |
import torch, numpy as np, random
|
| 15 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
|
@@ -51,6 +52,7 @@ pipe = transformers.pipeline('text-generation', model=model, config=config, toke
|
|
| 51 |
|
| 52 |
SafeTensors = WritingSafeTensors(
|
| 53 |
fn,
|
|
|
|
| 54 |
prompt = prompt,
|
| 55 |
store_weights = STORE_WEIGHTS,
|
| 56 |
use_gpu = USE_GPU,
|
|
@@ -82,8 +84,6 @@ def add_if_tensor(name, tensor):
|
|
| 82 |
def hook(module, inputs, kwinputs, outputs):
|
| 83 |
global IDX
|
| 84 |
prefix = module_prefixes[module]
|
| 85 |
-
if not prefix:
|
| 86 |
-
import pdb; pdb.set_trace()
|
| 87 |
HAS_HF_HOOK = hasattr(module, '_hf_hook')
|
| 88 |
if HAS_HF_HOOK:
|
| 89 |
inputs, kwinputs = module._hf_hook.pre_forward(module, *inputs, **kwinputs)
|
|
|
|
| 2 |
import os, sys
|
| 3 |
|
| 4 |
STORE_WEIGHTS = False
|
| 5 |
+
DEDUPLICATE_SAFETENSORS = True
|
| 6 |
FAKE_H100 = False
|
| 7 |
TORCH_DTYPE = 'float64'
|
| 8 |
USE_GPU = False
|
|
|
|
| 10 |
model_id, revision = sys.argv[1:]
|
| 11 |
user, model = model_id.split('/')
|
| 12 |
prompt = 'Once upon a time,'
|
| 13 |
+
fn = f'{user}_{model}_{revision}.{"logits-and-weights" if STORE_WEIGHTS else "logits"}.{"DEDUPLICATED.safetensors" if DEDUPLICATE_SAFETENSORS else "safetensors"}'
|
| 14 |
|
| 15 |
import torch, numpy as np, random
|
| 16 |
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
|
|
|
|
| 52 |
|
| 53 |
SafeTensors = WritingSafeTensors(
|
| 54 |
fn,
|
| 55 |
+
deduplicate = DEDUPLICATE_SAFETENSORS,
|
| 56 |
prompt = prompt,
|
| 57 |
store_weights = STORE_WEIGHTS,
|
| 58 |
use_gpu = USE_GPU,
|
|
|
|
| 84 |
def hook(module, inputs, kwinputs, outputs):
|
| 85 |
global IDX
|
| 86 |
prefix = module_prefixes[module]
|
|
|
|
|
|
|
| 87 |
HAS_HF_HOOK = hasattr(module, '_hf_hook')
|
| 88 |
if HAS_HF_HOOK:
|
| 89 |
inputs, kwinputs = module._hf_hook.pre_forward(module, *inputs, **kwinputs)
|
scripts/compare_safetensors.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
-
import safetensors
|
| 3 |
|
| 4 |
def compare(*fns):
|
| 5 |
global files, mismatching_keys, avgs, dists, errs
|
|
@@ -9,13 +9,14 @@ def compare(*fns):
|
|
| 9 |
assert set(files[0].keys()) == set(files[1].keys())
|
| 10 |
|
| 11 |
print('dtypes ...')
|
| 12 |
-
dtypes = {k: [
|
| 13 |
dtypes = {k: [min(dts, key=lambda dt: dt.itemsize),max(dts, key=lambda dt: dt.itemsize)] for k, dts in dtypes.items()}
|
| 14 |
mismatching_dtypes = [k for k, dts in dtypes.items() if dts[0] is not dts[1]]
|
|
|
|
| 15 |
print('midpoints ...')
|
| 16 |
avgs = {k:((files[0].get_tensor(k) + files[1].get_tensor(k))/2).to(dtypes[k][0]) for k in files[0].keys()}
|
| 17 |
print('dists ...')
|
| 18 |
-
dists = {k:(files[0].get_tensor(k).to(
|
| 19 |
|
| 20 |
print('keys ...')
|
| 21 |
mismatching_keys = [k for k, d in dists.items() if (d!=0).any()]
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
+
import safetensors.torch, torch # any tensor library would work
|
| 3 |
|
| 4 |
def compare(*fns):
|
| 5 |
global files, mismatching_keys, avgs, dists, errs
|
|
|
|
| 9 |
assert set(files[0].keys()) == set(files[1].keys())
|
| 10 |
|
| 11 |
print('dtypes ...')
|
| 12 |
+
dtypes = {k: [safetensors.torch._TYPES[file.get_slice(k).get_dtype()] for file in files] for k in files[0].keys()}
|
| 13 |
dtypes = {k: [min(dts, key=lambda dt: dt.itemsize),max(dts, key=lambda dt: dt.itemsize)] for k, dts in dtypes.items()}
|
| 14 |
mismatching_dtypes = [k for k, dts in dtypes.items() if dts[0] is not dts[1]]
|
| 15 |
+
cmp_dtypes = {k: torch.int8 if dts[0] is torch.bool else dts[0] for k, dts in dtypes.items()}
|
| 16 |
print('midpoints ...')
|
| 17 |
avgs = {k:((files[0].get_tensor(k) + files[1].get_tensor(k))/2).to(dtypes[k][0]) for k in files[0].keys()}
|
| 18 |
print('dists ...')
|
| 19 |
+
dists = {k:(files[0].get_tensor(k).to(cmp_dtypes[k]) - files[1].get_tensor(k).to(cmp_dtypes[k])).abs() for k in files[0].keys()}
|
| 20 |
|
| 21 |
print('keys ...')
|
| 22 |
mismatching_keys = [k for k, d in dists.items() if (d!=0).any()]
|