2026peng qbo-odp committed on
Commit
b6f2549
·
0 Parent(s):

Duplicate from qbo-odp/deep1b

Browse files

Co-authored-by: hgf <qbo-odp@users.noreply.huggingface.co>

Files changed (4) hide show
  1. .gitattributes +56 -0
  2. README.md +20 -0
  3. base.10M.fbin +3 -0
  4. loading.py +23 -0
.gitattributes ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
56
+ base.10M.fbin filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license:
3
+ - apache-2.0
4
+ pretty_name: qbo-odp/deep1B
5
+ size_categories:
6
+ - 1M<n<10M
7
+ source_datasets:
8
+ - original
9
+ tags:
10
+ - deep1b
+ - vector
+ - vector search
11
+ task_categories:
12
+ - feature-extraction
13
+ ---
14
+
15
+ ## deep1B
16
+
17
+ deep1B data, copied from [https://research.yandex.com/blog/benchmarks-for-billion-scale-similarity-search](https://research.yandex.com/blog/benchmarks-for-billion-scale-similarity-search), published:
18
+ ```
19
+ Babenko A, Lempitsky V. Efficient indexing of billion-scale datasets of deep descriptors[C]//Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2016: 2055-2063.
20
+ ```
base.10M.fbin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:290b341abc7ba570541e013fd20fd6d38c5873490ef395b512de5ed8dee18ce7
3
+ size 3840000008
loading.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import numpy as np

def read_fbin(filename, start_idx=0, chunk_size=None):
    """Read a *.fbin file that contains float32 vectors.

    File layout: an int32 vector count, an int32 dimension, then
    ``nvecs * dim`` float32 values.

    Args:
        :param filename (str): path to *.fbin file
        :param start_idx (int): start reading vectors from this index
        :param chunk_size (int): number of vectors to read.
            If None, read all vectors from start_idx to the end.
    Returns:
        Array of float32 vectors (numpy.ndarray) of shape (n, dim)
    """
    with open(filename, "rb") as f:
        header = np.fromfile(f, count=2, dtype=np.int32)
        # Convert to Python ints: keeping these as np.int32 made
        # `start_idx * 4 * dim` overflow in int32 arithmetic for byte
        # offsets past 2 GiB (this file is ~3.84 GB, so real chunked
        # reads hit that range).
        nvecs, dim = int(header[0]), int(header[1])
        # Never request more vectors than the file actually holds past
        # start_idx; otherwise np.fromfile returns a short array and
        # reshape raises.
        available = nvecs - start_idx
        count = available if chunk_size is None else min(chunk_size, available)
        arr = np.fromfile(f, count=count * dim, dtype=np.float32,
                          offset=start_idx * 4 * dim)
    return arr.reshape(count, dim)


# read data
# chunk_size = 50000
# dataset = read_fbin('deep1B/base.10M.fbin', chunk_size=chunk_size)