import random
import logging
from datasets import load_dataset, Dataset, DatasetDict
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
    SentenceTransformerModelCardData,
)
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers, MultiDatasetBatchSamplers
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.models.StaticEmbedding import StaticEmbedding

from transformers import AutoTokenizer

logging.basicConfig(
    format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO
)
random.seed(12)


def load_train_eval_datasets():
    """
    Either load the train and eval datasets from disk, or load them via the `datasets` library and save them to disk.

    Upon saving to disk, we quit() so that the datasets are not kept in memory before training;
    rerunning the script will then load them from disk.
    """
    try:
        train_dataset = DatasetDict.load_from_disk("datasets/train_dataset")
        eval_dataset = DatasetDict.load_from_disk("datasets/eval_dataset")
        return train_dataset, eval_dataset
    except FileNotFoundError:
        print("Loading gooaq dataset...")
        gooaq_dataset = load_dataset("sentence-transformers/gooaq", split="train")
        gooaq_dataset_dict = gooaq_dataset.train_test_split(test_size=10_000, seed=12)
        gooaq_train_dataset: Dataset = gooaq_dataset_dict["train"]
        gooaq_eval_dataset: Dataset = gooaq_dataset_dict["test"]
        print("Loaded gooaq dataset.")

        print("Loading msmarco dataset...")
        msmarco_dataset = load_dataset(
            "sentence-transformers/msmarco-co-condenser-margin-mse-sym-mnrl-mean-v1", "triplet", split="train"
        )
        msmarco_dataset_dict = msmarco_dataset.train_test_split(test_size=10_000, seed=12)
        msmarco_train_dataset: Dataset = msmarco_dataset_dict["train"]
        msmarco_eval_dataset: Dataset = msmarco_dataset_dict["test"]
        print("Loaded msmarco dataset.")

        print("Loading squad dataset...")
        squad_dataset = load_dataset("sentence-transformers/squad", split="train")
        squad_dataset_dict = squad_dataset.train_test_split(test_size=10_000, seed=12)
        squad_train_dataset: Dataset = squad_dataset_dict["train"]
        squad_eval_dataset: Dataset = squad_dataset_dict["test"]
        print("Loaded squad dataset.")

        print("Loading s2orc dataset...")
        s2orc_dataset = load_dataset("sentence-transformers/s2orc", "title-abstract-pair", split="train[:100000]")
        s2orc_dataset_dict = s2orc_dataset.train_test_split(test_size=10_000, seed=12)
        s2orc_train_dataset: Dataset = s2orc_dataset_dict["train"]
        s2orc_eval_dataset: Dataset = s2orc_dataset_dict["test"]
        print("Loaded s2orc dataset.")

        print("Loading allnli dataset...")
        allnli_train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
        allnli_eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
        print("Loaded allnli dataset.")

        print("Loading paq dataset...")
        paq_dataset = load_dataset("sentence-transformers/paq", split="train")
        paq_dataset_dict = paq_dataset.train_test_split(test_size=10_000, seed=12)
        paq_train_dataset: Dataset = paq_dataset_dict["train"]
        paq_eval_dataset: Dataset = paq_dataset_dict["test"]
        print("Loaded paq dataset.")

        print("Loading trivia_qa dataset...")
        trivia_qa = load_dataset("sentence-transformers/trivia-qa", split="train")
        trivia_qa_dataset_dict = trivia_qa.train_test_split(test_size=5_000, seed=12)
        trivia_qa_train_dataset: Dataset = trivia_qa_dataset_dict["train"]
        trivia_qa_eval_dataset: Dataset = trivia_qa_dataset_dict["test"]
        print("Loaded trivia_qa dataset.")

        print("Loading msmarco_10m dataset...")
        msmarco_10m_dataset = load_dataset("bclavie/msmarco-10m-triplets", split="train")
        msmarco_10m_dataset_dict = msmarco_10m_dataset.train_test_split(test_size=10_000, seed=12)
        msmarco_10m_train_dataset: Dataset = msmarco_10m_dataset_dict["train"]
        msmarco_10m_eval_dataset: Dataset = msmarco_10m_dataset_dict["test"]
        print("Loaded msmarco_10m dataset.")

        print("Loading swim_ir dataset...")
        swim_ir_dataset = load_dataset("nthakur/swim-ir-monolingual", "en", split="train").select_columns(
            ["query", "text"]
        )
        swim_ir_dataset_dict = swim_ir_dataset.train_test_split(test_size=10_000, seed=12)
        swim_ir_train_dataset: Dataset = swim_ir_dataset_dict["train"]
        swim_ir_eval_dataset: Dataset = swim_ir_dataset_dict["test"]
        print("Loaded swim_ir dataset.")

        print("Loading pubmedqa dataset...")
        pubmedqa_dataset = load_dataset("sentence-transformers/pubmedqa", "triplet-20", split="train")
        pubmedqa_dataset_dict = pubmedqa_dataset.train_test_split(test_size=100, seed=12)
        pubmedqa_train_dataset: Dataset = pubmedqa_dataset_dict["train"]
        pubmedqa_eval_dataset: Dataset = pubmedqa_dataset_dict["test"]
        print("Loaded pubmedqa dataset.")

        print("Loading miracl dataset...")
        miracl_dataset = load_dataset("sentence-transformers/miracl", "en-triplet-all", split="train")
        miracl_dataset_dict = miracl_dataset.train_test_split(test_size=10_000, seed=12)
        miracl_train_dataset: Dataset = miracl_dataset_dict["train"]
        miracl_eval_dataset: Dataset = miracl_dataset_dict["test"]
        print("Loaded miracl dataset.")

        print("Loading mldr dataset...")
        mldr_dataset = load_dataset("sentence-transformers/mldr", "en-triplet-all", split="train")
        mldr_dataset_dict = mldr_dataset.train_test_split(test_size=10_000, seed=12)
        mldr_train_dataset: Dataset = mldr_dataset_dict["train"]
        mldr_eval_dataset: Dataset = mldr_dataset_dict["test"]
        print("Loaded mldr dataset.")

        print("Loading mr_tydi dataset...")
        mr_tydi_dataset = load_dataset("sentence-transformers/mr-tydi", "en-triplet-all", split="train")
        mr_tydi_dataset_dict = mr_tydi_dataset.train_test_split(test_size=10_000, seed=12)
        mr_tydi_train_dataset: Dataset = mr_tydi_dataset_dict["train"]
        mr_tydi_eval_dataset: Dataset = mr_tydi_dataset_dict["test"]
        print("Loaded mr_tydi dataset.")

        train_dataset = DatasetDict({
            "gooaq": gooaq_train_dataset,
            "msmarco": msmarco_train_dataset,
            "squad": squad_train_dataset,
            "s2orc": s2orc_train_dataset,
            "allnli": allnli_train_dataset,
            "paq": paq_train_dataset,
            "trivia_qa": trivia_qa_train_dataset,
            "msmarco_10m": msmarco_10m_train_dataset,
            "swim_ir": swim_ir_train_dataset,
            "pubmedqa": pubmedqa_train_dataset,
            "miracl": miracl_train_dataset,
            "mldr": mldr_train_dataset,
            "mr_tydi": mr_tydi_train_dataset,
        })
        eval_dataset = DatasetDict({
            "gooaq": gooaq_eval_dataset,
            "msmarco": msmarco_eval_dataset,
            "squad": squad_eval_dataset,
            "s2orc": s2orc_eval_dataset,
            "allnli": allnli_eval_dataset,
            "paq": paq_eval_dataset,
            "trivia_qa": trivia_qa_eval_dataset,
            "msmarco_10m": msmarco_10m_eval_dataset,
            "swim_ir": swim_ir_eval_dataset,
            "pubmedqa": pubmedqa_eval_dataset,
            "miracl": miracl_eval_dataset,
            "mldr": mldr_eval_dataset,
            "mr_tydi": mr_tydi_eval_dataset,
        })

        train_dataset.save_to_disk("datasets/train_dataset")
        eval_dataset.save_to_disk("datasets/eval_dataset")

        # The `train_test_split` calls have pulled much of the data into memory, while we want it
        # memory-mapped from disk. So we quit() here; running the script again will load the saved
        # datasets from disk.
        quit()


def main():
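    # 1. Load a model to finetune: randomly initialized static token embeddings over the
    # bert-base-uncased vocabulary, with (optional) model card data for the generated README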
    static_embedding = StaticEmbedding(
        AutoTokenizer.from_pretrained("google-bert/bert-base-uncased"), embedding_dim=1024
    )
    model = SentenceTransformer(
        modules=[static_embedding],
        model_card_data=SentenceTransformerModelCardData(
            language="en",
            license="apache-2.0",
            model_name="Static Embeddings with BERT uncased tokenizer finetuned on various datasets",
        ),
    )
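
    # 2. Load the 13 training & evaluation datasets, either from disk or freshly split from the Hub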
    train_dataset, eval_dataset = load_train_eval_datasets()
    print(train_dataset)
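
    # 3. Define a loss function: MultipleNegativesRankingLoss (in-batch negatives), wrapped in
    # MatryoshkaLoss so embeddings can later be truncated to fewer dimensions with little quality loss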
    loss = MultipleNegativesRankingLoss(model)
    loss = MatryoshkaLoss(model, loss, matryoshka_dims=[32, 64, 128, 256, 512, 1024])
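
    # 4. Specify the training arguments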
    run_name = "static-retrieval-mrl-en-v1"
    args = SentenceTransformerTrainingArguments(
        # Required parameter:
        output_dir=f"models/{run_name}",
        # Optional training parameters:
        num_train_epochs=1,
        per_device_train_batch_size=2048,
        per_device_eval_batch_size=2048,
        learning_rate=2e-1,
        warmup_ratio=0.1,
        fp16=False,  # Set to True instead of bf16=True if your GPU doesn't support BF16
        bf16=True,
        batch_sampler=BatchSamplers.NO_DUPLICATES,  # MultipleNegativesRankingLoss benefits from no duplicates within a batch
        multi_dataset_batch_sampler=MultiDatasetBatchSamplers.PROPORTIONAL,  # Sample from each dataset in proportion to its size
        # Optional tracking/debugging parameters:
        eval_strategy="steps",
        eval_steps=250,
        save_strategy="steps",
        save_steps=250,
        save_total_limit=2,
        logging_steps=250,
        logging_first_step=True,
        run_name=run_name,  # Used in W&B if `wandb` is installed
    )
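
    # 5. Create an evaluator & evaluate the base model on NanoBEIR, small subsets of the BEIR retrieval benchmarks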
    evaluator = NanoBEIREvaluator()
    evaluator(model)
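
    # 6. Create a trainer & train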
    trainer = SentenceTransformerTrainer(
        model=model,
        args=args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        loss=loss,
        evaluator=evaluator,
    )
    trainer.train()
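
    # 7. Evaluate the trained model on NanoBEIR once more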
    evaluator(model)
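
    # 8. Save the trained model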
    model.save_pretrained(f"models/{run_name}/final")
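
    # 9. (Optional) Push the trained model to the Hugging Face Hub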
    model.push_to_hub(run_name, private=True)


if __name__ == "__main__":
    main()