from datasets import concatenate_datasets, load_dataset
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
from vicinity import Backend, Metric, Vicinity
| print("Collecting Scandinavian wikipedia") | |
subsets = {
    "danish": "20231101.da",
    "norwegian_bokmål": "20231101.no",
    "norwegian_nynorsk": "20231101.nn",
    "swedish": "20231101.sv",
}
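# Download each language's articles and tag every row with its language,
# so search results can be traced back to the right Wikipedia.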
datasets = []
for lang in tqdm(subsets, desc="Loading languages"):
    ds = load_dataset("wikimedia/wikipedia", subsets[lang], split="train")
    ds = ds.map(lambda example: {**example, "language": lang})
    datasets.append(ds)
dataset = concatenate_datasets(datasets)
| print("Encoding texts") | |
encoder = SentenceTransformer("sentence-transformers/static-similarity-mrl-multilingual-v1")
texts = [
    f"{title}\n\n{content}"
    for title, content in zip(dataset["title"], dataset["text"])
]
embeddings = encoder.encode(texts, show_progress_bar=True, batch_size=256)
| print("Building vector store") | |
store = Vicinity.from_vectors_and_items(
    vectors=embeddings,
    items=list(dataset),
    metric=Metric.COSINE,
    index_type="hnsw",
    backend_type=Backend.FAISS,
)
| print("Saving vector store") | |
| store.save("scandi_wiki_vector_store") | |
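
# Quick sanity check: reload the store and run a single query. (A sketch, not
# part of the original pipeline: it assumes vicinity's `Vicinity.load` and
# `store.query(vectors, k)` API, and the Danish query string "Who was Niels
# Bohr?" is just an illustrative example.)
store = Vicinity.load("scandi_wiki_vector_store")
query_embedding = encoder.encode(["Hvem var Niels Bohr?"])
for item, score in store.query(query_embedding, k=5)[0]:
    print(item["language"], item["title"], score)
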
| print("DONE") | |