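"""Compare two safetensors files (single files or sharded .json index files)
tensor by tensor: report dtype and value mismatches and, when embedding and
LM-head outputs are present, summarise how far apart the two dumps are."""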
import safetensors.torch, torch
|
|
def safe_open(filename, framework):
    # A .json argument is treated as a sharded-checkpoint index file; anything
    # else is opened directly as a single safetensors file.
    if filename.endswith('.json'):
        class IndexFile:
            def __init__(self, filename, framework):
                import json
                with open(filename) as fh:
                    index = json.load(fh)
                # Open every shard once, then map each tensor name to the handle
                # of the shard that contains it. Shard paths are taken verbatim
                # from the index, so run from the checkpoint directory.
                files = {
                    file: safetensors.safe_open(file, framework=framework)
                    for file in index['weight_map'].values()
                }
                self.weight_map = {k: files[v] for k, v in index['weight_map'].items()}
            def get_tensor(self, name):
                return self.weight_map[name].get_tensor(name)
            def get_slice(self, name):
                return self.weight_map[name].get_slice(name)
            def keys(self):
                return self.weight_map.keys()
        return IndexFile(filename, framework=framework)
    else:
        return safetensors.safe_open(filename, framework=framework)
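
# For reference, an index .json is expected to follow the Hugging Face
# sharded-checkpoint convention (*.safetensors.index.json); a minimal sketch,
# with illustrative key and shard names:
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "model.embed_tokens.output": "dump-00001-of-00002.safetensors",
#       "lm_head.output": "dump-00002-of-00002.safetensors"
#     }
#   }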
|
|
def compare(*fns):
    # Results are kept in module-level globals so they can be inspected
    # interactively after compare() returns.
    global files, mismatching_keys, avgs, dists, errs

    files = [safe_open(fn, framework='pt') for fn in fns]

    assert set(files[0].keys()) == set(files[1].keys())

    print('dtypes ...')
    # safetensors.torch._TYPES maps safetensors dtype strings (e.g. 'F16') to torch
    # dtypes; record the narrowest and widest dtype seen for each key.
    dtypes = {k: [safetensors.torch._TYPES[file.get_slice(k).get_dtype()] for file in files] for k in files[0].keys()}
    dtypes = {k: [min(dts, key=lambda dt: dt.itemsize), max(dts, key=lambda dt: dt.itemsize)] for k, dts in dtypes.items()}
    mismatching_dtypes = [k for k, dts in dtypes.items() if dts[0] is not dts[1]]
    # Compare in the narrower dtype; bool is promoted to int8 because torch does
    # not support subtracting bool tensors.
    cmp_dtypes = {k: torch.int8 if dts[0] is torch.bool else dts[0] for k, dts in dtypes.items()}
    print('midpoints ...')
    avgs = {k: ((files[0].get_tensor(k) + files[1].get_tensor(k)) / 2).to(dtypes[k][0]) for k in files[0].keys()}
    print('dists ...')
    dists = {k: (files[0].get_tensor(k).to(cmp_dtypes[k]) - files[1].get_tensor(k).to(cmp_dtypes[k])).abs() for k in files[0].keys()}

    print('keys ...')
    mismatching_keys = [k for k, d in dists.items() if (d != 0).any()]

    print(f'{len(mismatching_keys)/len(files[0].keys())*100:.2f}% keys mismatch')
    print(f'{len(mismatching_dtypes)/len(files[0].keys())*100:.2f}% dtypes mismatch')
|
|
|
|
    # The dumps are expected to hold per-module outputs keyed as '<module>.output';
    # locate the embedding and LM-head entries.
    embed_name = [x for x in ['model.embed_tokens'] if x + '.output' in dtypes][0]
    head_name = [x for x in ['lm_head'] if x + '.output' in dtypes][0]

    print('input embed dist:', dists[embed_name + '.output'].sum().item())

    print('output head dist:', dists[head_name + '.output'].sum().item())

    # For each file, print the 3 most and 3 least likely tokens at every position
    # of the first sequence in the batch (assuming a [batch, seq, vocab] layout).
    for idx in range(2):
        head_tokens = files[idx].get_tensor(head_name + '.output')[0].softmax(dim=-1).sort(dim=-1, descending=True)
        print('file', idx, 'tokens: ', end='')
        for range_idx, range_ in enumerate([range(3), range(-3, 0)]):
            if range_idx > 0:
                print('.. ', end='')
            for rank in range_:
                for token in range(head_tokens.values.shape[-2]):
                    if range_idx == 0:
                        print(f'{head_tokens.indices[token][rank]}({head_tokens.values[token][rank]*100:.3f}%) ', end='')
                    else:
                        print(f'{head_tokens.indices[token][rank]}({head_tokens.values[token][rank]*100:.3e}%) ', end='')
        print()
|
|
|
|
|
|
if __name__ == '__main__':
    import sys
    assert len(sys.argv[1:]) == 2, 'expected exactly two files to compare'
    compare(*sys.argv[1:])
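
# Example invocation (hypothetical file names; the script itself is assumed to
# be saved as compare.py):
#
#   python compare.py dump_a.safetensors dump_b.safetensors
#   python compare.py dump_a.safetensors.index.json dump_b.safetensors
#
# Alternatively, import it and call compare() from a REPL, then inspect the
# module-level globals it fills in (files, dists, avgs, mismatching_keys):
#
#   >>> import compare
#   >>> compare.compare('dump_a.safetensors', 'dump_b.safetensors')
#   >>> compare.dists['lm_head.output'].max()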
|
|
|
|