File size: 1,216 Bytes
59d7a28
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
from datasets import concatenate_datasets, load_dataset
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
from vicinity import Backend, Metric, Vicinity

print("Collecting Scandinavian wikipedia")
# Wikipedia dump configs to pull from the wikimedia/wikipedia dataset,
# keyed by the human-readable language label we tag rows with.
language_dumps = {
    "danish": "20231101.da",
    "norwegian_bokmål": "20231101.no",
    "norwegian_nynorsk": "20231101.nn",
    "swedish": "20231101.sv",
}
per_language_splits = []
for language in tqdm(language_dumps, desc="Going through languages"):
    split = load_dataset("wikimedia/wikipedia", language_dumps[language], split="train")
    # Stamp every article with the language it was loaded from.
    # NOTE: .map runs eagerly here, so the lambda sees the current `language`.
    split = split.map(lambda row: {**row, "language": language})
    per_language_splits.append(split)
combined = concatenate_datasets(per_language_splits)

print("Encoding texts")
model = SentenceTransformer("static-similarity-mrl-multilingual-v1")
titles = combined["title"]
bodies = combined["text"]
# One document per article: title, blank line, then the body text.
documents = [f"{title}\n\n {body}" for title, body in zip(titles, bodies)]
vectors = model.encode(documents, show_progress_bar=True, batch_size=256)

print("Building vector store")
# Cosine-metric HNSW index over the embeddings, backed by FAISS; each
# vector's payload is the full dataset row (title, text, language, ...).
store = Vicinity.from_vectors_and_items(
    vectors=vectors,
    items=list(combined),
    metric=Metric.COSINE,
    backend_type=Backend.FAISS,
    index_type="hnsw",
)

print("Saving vector store")
store.save("scandi_wiki_vector_store")

print("DONE")