from __future__ import annotations

import json
import os
import random
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Literal

import datasets


@dataclass
class JAQKETHelper:
    CITATION = """\
@article{鈴木正敏2020jaqket,
  title={JAQKET: クイズを題材にした日本語 QA データセットの構築},
  author={鈴木正敏 and 鈴木潤 and 松田耕史 and 西田京介 and 井之上直也},
  journal={言語処理学会第 26 回年次大会},
  pages={237--240},
  year={2020}
}
"""

    DESCRIPTION = """\
JAQKET (JApanese Questions on Knowledge of EnTities) is a QA dataset created from quiz questions.
The original dataset provides a set of questions and a set of Wikipedia passages (corpus).
Each question has question text and a set of candidates; the answer is among the candidates.
Each Wikipedia passage has a title and text, where titles are consistent with the names of the answer candidates.
In the retrieval task for embedding model evaluation, the candidates are omitted,
and the model is required to retrieve the passages in the corpus that are most relevant to the question text.
"""

    HOMEPAGE_URL = "https://sites.google.com/view/project-aio/competition1"
    LICENSE = "CC BY-SA 4.0 DEED"
    URL = {
        "train": "https://jaqket.s3.ap-northeast-1.amazonaws.com/data/aio_01/train_questions.json",
        "dev": "https://jaqket.s3.ap-northeast-1.amazonaws.com/data/aio_01/dev1_questions.json",
        "test": "https://jaqket.s3.ap-northeast-1.amazonaws.com/data/aio_01/dev2_questions.json",
        "corpus": "https://jaqket.s3.ap-northeast-1.amazonaws.com/data/aio_01/candidate_entities.json.gz",
    }

    @staticmethod
    def load_jsonl(filename: str | Path) -> list[dict]:
        # The question files are read as JSON Lines (one JSON object per line).
        data = []
        with open(filename, "r") as fin:
            for line in fin:
                data.append(json.loads(line.strip()))
        return data

    def query_split_generator(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        dataset_dirs = dl_manager.download_and_extract(self.URL)
        train = self.load_jsonl(dataset_dirs["train"])
        dev = self.load_jsonl(dataset_dirs["dev"])
        test = self.load_jsonl(dataset_dirs["test"])

        def format_query(example: dict) -> dict:
            answer = example["answer_entity"]
            if not isinstance(answer, list):
                answer = [answer]
            query = {
                "qid": example["qid"],
                "query": example["question"],
                "relevant_docs": answer,
            }
            return query

        train = [format_query(q) for q in train]
        dev = [format_query(q) for q in dev]
        test = [format_query(q) for q in test]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": train},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": dev},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": test},
            ),
        ]

    def corpus_generator(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        dataset_dirs = dl_manager.download_and_extract(self.URL)
        corpus = self.load_jsonl(dataset_dirs["corpus"])

        def format_passage(example: dict) -> dict:
            return {
                "docid": example["title"],
                "title": example["title"],
                "text": example["text"],
            }

        corpus = [format_passage(line) for line in corpus]
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": corpus},
            ),
        ]
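
    # Illustrative sketch (placeholders, not actual JAQKET records) of the shapes
    # this helper emits. Queries reference relevant corpus entries by passage title,
    # which also serves as the docid:
    #
    #   query record:  {"qid": "...", "query": "<question text>", "relevant_docs": ["<answer entity>"]}
    #   corpus record: {"docid": "<article title>", "title": "<article title>", "text": "<article text>"}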


@dataclass
class MrTyDiHelper:
    CITATION = """\
@article{mrtydi,
  title={{Mr. TyDi}: A Multi-lingual Benchmark for Dense Retrieval},
  author={Xinyu Zhang and Xueguang Ma and Peng Shi and Jimmy Lin},
  year={2021},
  journal={arXiv:2108.08787},
}
"""

    DESCRIPTION = """\
Mr. TyDi is a multi-lingual benchmark dataset built on TyDi, covering eleven typologically diverse languages.
It is designed for monolingual retrieval, specifically to evaluate ranking with learned dense representations.
"""

    HOMEPAGE_URL = "https://github.com/castorini/mr.tydi"
    LICENSE = "Apache-2.0"

    @staticmethod
    def query_split_generator() -> list[datasets.SplitGenerator]:
        data = datasets.load_dataset(
            "castorini/mr-tydi", "japanese", trust_remote_code=True
        )

        def format_query(example: dict) -> dict:
            return {
                "qid": example["query_id"],
                "query": example["query"],
                "relevant_docs": [d["docid"] for d in example["positive_passages"]],
            }

        train = [format_query(q) for q in data["train"]]
        dev = [format_query(q) for q in data["dev"]]
        test = [format_query(q) for q in data["test"]]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": train},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": dev},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": test},
            ),
        ]

    @staticmethod
    def corpus_generator() -> list[datasets.SplitGenerator]:
        corpus = datasets.load_dataset(
            "castorini/mr-tydi-corpus", "japanese", trust_remote_code=True
        )["train"]
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": corpus},
            ),
        ]


@dataclass
class JaGovFaqs22kHelper:
    HOMEPAGE_URL = "https://huggingface.co/datasets/matsuxr/JaGovFaqs-22k"
    CITATION = ""
    DESCRIPTION = """\
This dataset was built by manually extracting the FAQ entries published on the websites of Japanese
government agencies and reformatting them as an instruction dataset.
"""
    VERSION = "1.0.0"
    LICENSE = """\
Most Japanese government websites follow the Government Standard Terms of Use (version 2.0), which is stated
to be compatible with CC-BY-4.0 (International). See https://www.digital.go.jp/copyright-policy for reference.
The copyright holders of this dataset are therefore the agencies listed in its copyright field, and the
license is CC-BY-4.0 (International). The dataset creator claims no copyright.
"""

    def __init__(self, config: JaGovFaqs22kConfig) -> None:
        dataset = datasets.load_dataset("matsuxr/JaGovFaqs-22k", trust_remote_code=True)

        def preprocess(example: dict, idx: int) -> dict:
            example["idx"] = idx + 1
            example["Question"] = example["Question"].strip()
            example["Answer"] = example["Answer"].strip()
            return example

        dataset = dataset.map(preprocess, with_indices=True)
        queries = dataset.select_columns(["Question", "idx"]).rename_columns(
            {"Question": "query", "idx": "relevant_docs"},
        )
        self.corpus = dataset.select_columns(["idx", "Answer"]).rename_columns(
            {"idx": "docid", "Answer": "text"},
        )

        if config.shuffle:
            # `shuffle` returns a new dataset rather than shuffling in place,
            # so the result has to be assigned back.
            queries = queries.shuffle(seed=config.seed)
        queries = queries["train"].train_test_split(test_size=1 - config.train_ratio)
        devtest = queries.pop("test").train_test_split(
            test_size=1 - config.dev_ratio / (1 - config.train_ratio)
        )
        queries["dev"] = devtest.pop("train")
        queries["test"] = devtest.pop("test")
        self.queries = queries

        def format_relevant_docs(example: dict) -> dict:
            if not isinstance(example["relevant_docs"], list):
                example["relevant_docs"] = [example["relevant_docs"]]
            return example

        self.queries = self.queries.map(format_relevant_docs)
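
    # Worked example of the split arithmetic above, using the defaults from
    # JaGovFaqs22kConfig (train_ratio=0.7, dev_ratio=0.15):
    #   first split:  test_size = 1 - 0.7 = 0.3          -> 70% train, 30% held out
    #   second split: test_size = 1 - 0.15 / 0.3 = 0.5   -> the held-out 30% is halved
    # which yields the intended 70% / 15% / 15% train/dev/test proportions.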

    def query_split_generator(self) -> list[datasets.SplitGenerator]:
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": self.queries["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": self.queries["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": self.queries["test"]},
            ),
        ]

    def corpus_generator(self) -> list[datasets.SplitGenerator]:
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": self.corpus["train"]},
            ),
        ]


@dataclass
class NLPJournalHelper:
    HOMEPAGE_URL = "https://www.anlp.jp/resource/journal_latex/index.html"
    CITATION = ""
    DESCRIPTION = """\
This corpus is a collection of the LaTeX source files of papers published in the journal of the
Association for Natural Language Processing, "Journal of Natural Language Processing" (自然言語処理).
See https://www.anlp.jp/resource/journal_latex/Readme.txt for details.
"""
    VERSION = "2020.3.16"
    LICENSE = "CC-BY 4.0"
    URL = "https://www.anlp.jp/resource/journal_latex/NLP_LATEX_CORPUS.zip"

    def __init__(
        self, config: NLPJournalConfig, dl_manager: datasets.DownloadManager
    ) -> None:
        self.config = config
        self.dl_manager = dl_manager

        docids, titles, abstracts, introductions, articles = self.load_papers()
        self.titles = titles
        self.docids = docids
        self.abstracts = abstracts
        self.introductions = introductions
        self.articles = articles

    @staticmethod
    def load_txt(filename: str) -> str:
        """Read a text file, trying several encodings in turn.

        Encodings commonly used for Japanese academic papers are tried first.
        """
        encodings_to_try = [
            "iso2022_jp",
            "utf-8",
            "cp932",
            "shift-jis",
            "euc-jp",
            "latin-1",
        ]

        for encoding in encodings_to_try:
            try:
                with open(filename, "r", encoding=encoding) as fin:
                    content = "\n".join([line.strip() for line in fin.readlines()]).strip()
                if encoding != "utf-8" and encoding != "iso2022_jp":
                    print(f"Warning: read {filename} with {encoding} encoding")
                return content
            except UnicodeDecodeError:
                continue
            except Exception as e:
                print(f"Error reading {filename} with {encoding}: {e}")
                continue

        # Last resort: decode as UTF-8 and replace undecodable bytes.
        try:
            with open(filename, "r", encoding="utf-8", errors="replace") as fin:
                content = "\n".join([line.strip() for line in fin.readlines()]).strip()
            print(f"Warning: used character replacement when reading {filename}")
            return content
        except Exception as e:
            print(f"Fatal error reading {filename}: {e}")
            return ""

    @staticmethod
    def extract_from_command(
        command: str, text: str, start: int = 0
    ) -> tuple[str, tuple[int, int]]:
        """Extract text in a command.
        Example: extract `hello world` from `\\title{hello world}`
        when the command is `title`.

        Args:
            command (str): command. For example, `title`, `author`, `section*`.
            text (str): All text (Mathpix md format) of the paper.
            start (int): Search from text[start].

        Returns:
            tuple[str, tuple[int, int]]: the extracted texts, and the start and end index
                of the whole command (e.g., the span indices of `\\title{hello world}`).
        """
        regular_expression = r"\\" + re.escape(command) + r"\{([^}]*)\}"
        text = text[start:]
        match = re.search(regular_expression, text)
        if not match:
            return "", (-1, -1)

        span = match.span(0)

        # The regex above stops at the first `}`, so walk forward from the opening
        # brace and track the nesting level to recover the full argument.
        nested_level = 1
        extracted = ""
        p = span[0] + 1 + len(command) + 1  # position right after `\command{`
        while nested_level > 0:
            char = text[p]
            if char == "{":
                nested_level += 1
                extracted += char
            elif char == "}":
                nested_level -= 1
                if nested_level > 0:
                    extracted += char
            else:
                extracted += char
            p += 1

        return extracted.strip(), (span[0] + start, p + start)
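
    # For instance (illustrative input, not from the corpus), the brace-tracking walk
    # above recovers a full argument even when it contains nested braces, which the
    # initial regex alone would truncate:
    #
    #   NLPJournalHelper.extract_from_command("title", r"\title{A {nested} example}")[0]
    #   -> "A {nested} example"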

    def extract_sections(self, text: str) -> dict[str, str]:
        """Extract sections, given `\\section*{..}` indicating a section.

        Args:
            text (str): All text (Mathpix md format) of the paper.

        Returns:
            dict[str, str]: a dictionary of section title and its texts.
        """
        spans = []
        results = {}
        start = end = 0
        while True:
            section_title, (start, end) = self.extract_from_command(
                "section", text, end
            )
            if start == -1:
                break
            spans.append([section_title, start, end])

        for i in range(len(spans) - 1):
            this_section = spans[i]
            next_section = spans[i + 1]
            section_title = this_section[0]
            section_text = text[this_section[2] + 1 : next_section[1]].strip()
            results[section_title] = section_text

        last_span = spans[-1]
        last_section_title = last_span[0]
        results[last_section_title] = text[last_span[2] + 1 :].strip()
        return results

    def parse_papers(self, files: list) -> list[dict]:
        data = []
        for file in files:
            try:
                text = self.load_txt(file)
                text = re.sub(r"\\" + re.escape("section "), r"\\section", text)
                jabs = self.extract_from_command("jabstract", text)[0]
                if not jabs:
                    continue
                title = self.extract_from_command("title", text)[0]
                if not title:
                    title = self.extract_from_command("jtitle", text)[0]
                if not title:
                    continue
                sections = self.extract_sections(text)

                # Use the first extracted section as the introduction. Papers whose
                # first section title starts with ASCII letters or digits (most
                # likely English-language papers) are skipped.
                intro = list(sections.items())[0]
                if re.match(r"([a-z]|[A-Z]|[0-9]|\s)+", intro[0]) is not None:
                    continue
                title = re.sub(r"\s+", "", title)
                jabs = re.sub(r"\s+", "", jabs)
                intro = {
                    "section_title": re.sub(r"\s+", "", intro[0]),
                    "text": re.sub(r"\s+", "", intro[1]),
                }
                article = ""
                for sec_title, sec_text in sections.items():
                    sec_title = re.sub(r"\s+", "", sec_title)
                    sec_title = "\\section{" + sec_title + "}"
                    sec_text = re.sub(r"\s+", "", sec_text)
                    article += f"{sec_title}\n{sec_text}\n"

                data.append(
                    {
                        "filename": file,
                        "title": title,
                        "abstract": jabs,
                        "introduction": intro,
                        "article": article,
                    }
                )
            except Exception as e:
                print(f"{file} failed due to {e}")
        return data

    def load_papers(self) -> tuple[list]:
        dataset_dir = (
            Path(self.dl_manager.download_and_extract(self.URL)) / "NLP_LATEX_CORPUS"
        )
        all_tex_files: list[Path] = []
        for dir, _, fs in os.walk(dataset_dir):
            for f in fs:
                if f.endswith(".tex"):
                    all_tex_files.append(Path(dir) / f)
        papers = self.parse_papers(all_tex_files)

        docids = []
        titles = []
        abstracts = []
        introductions = []
        articles = []

        for paper in papers:
            title = paper["title"]
            docid = str(paper["filename"]).split("/")[-1].replace(".tex", "")
            abstract = paper["abstract"]
            introduction = paper["introduction"]["text"]
            article = paper["article"]

            titles.append(title)
            docids.append(docid)
            abstracts.append(abstract)
            introductions.append(introduction)
            articles.append(article)

        return docids, titles, abstracts, introductions, articles

    def get_query_corpus(
        self,
        query: Literal["title", "abstract"],
        corpus: Literal["abstract", "introduction", "article"],
    ) -> tuple[list[dict], list[dict]]:
        queries = []
        corpora = []

        if query == "title" and corpus == "abstract":
            for i, (docid, title, abstract) in enumerate(
                zip(self.docids, self.titles, self.abstracts)
            ):
                queries.append(
                    {
                        "qid": i + 1,
                        "query": title,
                        "relevant_docs": docid,
                    }
                )
                corpora.append(
                    {
                        "docid": docid,
                        "text": abstract,
                    }
                )
            if self.config.shuffle:
                random.seed(self.config.seed)
                random.shuffle(corpora)

        if query == "title" and corpus == "introduction":
            for i, (docid, title, introduction) in enumerate(
                zip(self.docids, self.titles, self.introductions)
            ):
                queries.append(
                    {
                        "qid": i + 1,
                        "query": title,
                        "relevant_docs": docid,
                    }
                )
                corpora.append(
                    {
                        "docid": docid,
                        "text": introduction,
                    }
                )
            if self.config.shuffle:
                random.seed(self.config.seed)
                random.shuffle(corpora)

        if query == "abstract" and corpus == "introduction":
            for i, (docid, abstract, introduction) in enumerate(
                zip(self.docids, self.abstracts, self.introductions)
            ):
                queries.append(
                    {
                        "qid": i + 1,
                        "query": abstract,
                        "relevant_docs": docid,
                    }
                )
                corpora.append(
                    {
                        "docid": docid,
                        "text": introduction,
                    }
                )
            if self.config.shuffle:
                random.seed(self.config.seed)
                random.shuffle(corpora)

        if query == "abstract" and corpus == "article":
            for i, (docid, abstract, article) in enumerate(
                zip(self.docids, self.abstracts, self.articles)
            ):
                queries.append(
                    {
                        "qid": i + 1,
                        "query": abstract,
                        "relevant_docs": docid,
                    }
                )
                corpora.append(
                    {
                        "docid": docid,
                        "text": article,
                    }
                )
            if self.config.shuffle:
                random.seed(self.config.seed)
                random.shuffle(corpora)

        return queries, corpora

    def query_generator(
        self,
        query: Literal["title", "abstract"],
        corpus: Literal["abstract", "introduction", "article"],
    ) -> list[datasets.SplitGenerator]:
        queries, _ = self.get_query_corpus(query, corpus)
        if self.config.dev_ratio > 0:
            n_dev = int(len(queries) * self.config.dev_ratio)
            dev = queries[:n_dev]
            test = queries[n_dev:]
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data": dev},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data": test},
                ),
            ]
        else:
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data": queries},
                ),
            ]

    def corpus_generator(
        self,
        query: Literal["title", "abstract"],
        corpus: Literal["abstract", "introduction", "article"],
    ) -> list[datasets.SplitGenerator]:
        _, corpora = self.get_query_corpus(query, corpus)
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": corpora},
            ),
        ]


@dataclass
class JaCWIRRetrievalHelper:
    HOMEPAGE_URL = "https://huggingface.co/datasets/hotchpotch/JaCWIR"
    CITATION = """\
@misc{yuichi-tateno-2024-jacwir,
  url={https://huggingface.co/datasets/hotchpotch/JaCWIR},
  title={JaCWIR: Japanese Casual Web IR - 日本語情報検索評価のための小規模でカジュアルなWebタイトルと概要のデータセット},
  author={Yuichi Tateno}
}
"""
    DESCRIPTION = """\
JaCWIR is a small Japanese information retrieval evaluation dataset consisting of 5000 question texts and
roughly 500,000 short web-page entries (the page title plus either the opening sentences or a summary such
as the meta description). Each question was created from one of the 500,000 web pages, and that page is
treated as the positive example for the question.
"""
    VERSION = "1.0.0"
    LICENSE = ""

    def __init__(self, config: JaCWIRRetrievalConfig) -> None:
        self.config = config

    def query_split_generator(self) -> list[datasets.SplitGenerator]:
        queries = datasets.load_dataset(
            "hotchpotch/JaCWIR", "eval", trust_remote_code=True
        )["eval"]
        devtest = queries.train_test_split(
            test_size=1 - self.config.dev_ratio,
            shuffle=self.config.shuffle,
            seed=self.config.seed,
        )
        self.queries = datasets.DatasetDict(
            {"dev": devtest.pop("train"), "test": devtest.pop("test")}
        )

        def format_query(query: dict) -> dict:
            relevant_docs = query.pop("positive")
            if not isinstance(relevant_docs, list):
                relevant_docs = [relevant_docs]
            query.pop("negatives")
            return {
                "query": query.pop("query"),
                "relevant_docs": relevant_docs,
            }

        self.queries = self.queries.map(format_query)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": self.queries["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": self.queries["test"]},
            ),
        ]
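
    # Note on the split above: `train_test_split(test_size=1 - dev_ratio)` keeps
    # `dev_ratio` of the queries as the "train" part, which becomes the dev split,
    # and the rest as test. With the default dev_ratio of 0.2 and the roughly 5000
    # JaCWIR questions, that is about 1000 dev and 4000 test queries.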

    def corpus_generator(self) -> list[datasets.SplitGenerator]:
        corpus = datasets.load_dataset(
            "hotchpotch/JaCWIR", "collection", trust_remote_code=True
        )["collection"]

        def format_corpus(line: dict) -> dict:
            if self.config.without_title:
                text = line.pop("description")
                line.pop("title")
            else:
                text = line.pop("title") + ": " + line.pop("description")
            line.pop("link")
            line.pop("date")
            return {
                "docid": line.pop("doc_id"),
                "text": text,
            }

        self.corpus = corpus.map(format_corpus)
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": self.corpus},
            ),
        ]


@dataclass
class MIRACLRetrievalHelper:
    HOMEPAGE_URL = "https://huggingface.co/datasets/miracl/miracl"
    CITATION = """\
@article{10.1162/tacl_a_00595,
  author = {Zhang, Xinyu and Thakur, Nandan and Ogundepo, Odunayo and Kamalloo, Ehsan and Alfonso-Hermelo, David and Li, Xiaoguang and Liu, Qun and Rezagholizadeh, Mehdi and Lin, Jimmy},
  title = "{MIRACL: A Multilingual Retrieval Dataset Covering 18 Diverse Languages}",
  journal = {Transactions of the Association for Computational Linguistics},
  volume = {11},
  pages = {1114-1131},
  year = {2023},
  month = {09},
  issn = {2307-387X},
  doi = {10.1162/tacl_a_00595},
  url = {https://doi.org/10.1162/tacl_a_00595},
  eprint = {https://direct.mit.edu/tacl/article-pdf/doi/10.1162/tacl_a_00595/2157340/tacl_a_00595.pdf},
}
"""
    DESCRIPTION = """\
MIRACL 🌍🙌🌏 (Multilingual Information Retrieval Across a Continuum of Languages) is a multilingual retrieval
dataset that focuses on search across 18 different languages, which collectively encompass over three billion
native speakers around the world.
"""
    VERSION = "1.0.0"
    LICENSE = "Apache-2.0"

    def __init__(self, config: MIRACLRetrievalConfig) -> None:
        self.config = config

    def query_split_generator(self) -> list[datasets.SplitGenerator]:
        queries = datasets.load_dataset("miracl/miracl", "ja", trust_remote_code=True)
        queries_traindev = queries["train"].train_test_split(
            test_size=self.config.dev_ratio,
            shuffle=self.config.shuffle,
            seed=self.config.seed,
        )
        queries = datasets.DatasetDict(
            {
                "train": queries_traindev.pop("train"),
                "dev": queries_traindev.pop("test"),
                "test": queries["dev"],
            }
        )

        def format_query(query: dict) -> dict:
            query.pop("query_id")
            positive_passages = query.pop("positive_passages")
            query.pop("negative_passages")
            return {
                "query": query.pop("query"),
                "relevant_docs": [doc["docid"] for doc in positive_passages],
            }

        self.queries = queries.map(format_query)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": self.queries["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": self.queries["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": self.queries["test"]},
            ),
        ]
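
    # Note on the split above, given the defaults from MIRACLRetrievalConfig
    # (dev_ratio=0.3): the original MIRACL-ja `train` split is re-split into 70%
    # train / 30% dev, and the original `dev` split is used unchanged as the test set.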

    def corpus_generator(self) -> list[datasets.SplitGenerator]:
        corpus = datasets.load_dataset(
            "miracl/miracl-corpus", "ja", trust_remote_code=True
        )["train"]

        def format_corpus(line: dict) -> dict:
            if not self.config.without_title:
                text = line.pop("title") + ": " + line.pop("text")
            else:
                text = line.pop("text")
                line.pop("title")
            return {"docid": line["docid"], "text": text}

        self.corpus: datasets.Dataset = corpus.map(format_corpus)
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": self.corpus},
            ),
        ]


@dataclass
class MLDRRetrievalHelper:
    HOMEPAGE_URL = "https://huggingface.co/datasets/Shitao/MLDR"
    CITATION = """\
@misc{bge-m3,
  title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings \
Through Self-Knowledge Distillation},
  author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
  year={2024},
  eprint={2402.03216},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
"""
    DESCRIPTION = """\
MLDR is a Multilingual Long-Document Retrieval dataset built on Wikipedia, Wudao and mC4,
covering 13 typologically diverse languages. Specifically, we sample lengthy articles
from the Wikipedia, Wudao and mC4 datasets and randomly choose paragraphs from them. Then we
use GPT-3.5 to generate questions based on these paragraphs. The generated question and
the sampled article constitute a new text pair in the dataset.
"""
    VERSION = "1.0.0"
    LICENSE = "MIT"

    def __init__(self, config: MLDRRetrievalConfig) -> None:
        self.config = config

    def query_split_generator(self) -> list[datasets.SplitGenerator]:
        queries = datasets.load_dataset("Shitao/MLDR", "ja", trust_remote_code=True)

        def format_query(query: dict) -> dict:
            query.pop("query_id")
            positive_passages = query.pop("positive_passages")
            query.pop("negative_passages")
            return {
                "query": query.pop("query"),
                "relevant_docs": [doc["docid"] for doc in positive_passages],
            }

        self.queries = queries.map(format_query)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": self.queries["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": self.queries["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": self.queries["test"]},
            ),
        ]

    def corpus_generator(self) -> list[datasets.SplitGenerator]:
        corpus = datasets.load_dataset(
            "Shitao/MLDR", "corpus-ja", split="corpus", trust_remote_code=True
        )
        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"data": corpus},
            ),
        ]


class JAQKETConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "jaqket",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = JAQKETHelper.DESCRIPTION,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )


class MrTyDiConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "mrtydi",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = MrTyDiHelper.DESCRIPTION,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )


class NLPJournalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "nlp_journal",
        version: datasets.Version | str | None = datasets.Version("1.1.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = NLPJournalHelper.DESCRIPTION,
        dev_ratio: float = 0.2,
        shuffle: bool = True,
        seed: int = 42,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.dev_ratio = dev_ratio
        self.shuffle = shuffle
        self.seed = seed


class JaGovFaqs22kConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "jagovfaqs_22k",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = JaGovFaqs22kHelper.DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.7,
        dev_ratio: float = 0.15,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.train_ratio = train_ratio
        self.dev_ratio = dev_ratio


class JaCWIRRetrievalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "jacwir-retrieval",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = JaCWIRRetrievalHelper.DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        without_title: bool = False,
        dev_ratio: float = 0.2,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.without_title = without_title
        self.dev_ratio = dev_ratio


class MIRACLRetrievalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "miracl-retrieval",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = MIRACLRetrievalHelper.DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        dev_ratio: float = 0.3,
        without_title: bool = False,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.dev_ratio = dev_ratio
        self.without_title = without_title


class MLDRRetrievalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "mldr-retrieval",
        version: datasets.Version | str | None = datasets.Version("1.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = MLDRRetrievalHelper.DESCRIPTION,
    ):
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
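

# Minimal sketch of how one of the helper/config pairs above would typically be
# wired into a `datasets.GeneratorBasedBuilder`. This is an illustration only:
# the class name `JaqketRetrieval` and the omitted feature schema are assumptions,
# not part of this module, and the actual builder is defined elsewhere.
#
# class JaqketRetrieval(datasets.GeneratorBasedBuilder):
#     BUILDER_CONFIGS = [JAQKETConfig()]
#
#     def _info(self) -> datasets.DatasetInfo:
#         return datasets.DatasetInfo(
#             description=JAQKETHelper.DESCRIPTION,
#             citation=JAQKETHelper.CITATION,
#             homepage=JAQKETHelper.HOMEPAGE_URL,
#             license=JAQKETHelper.LICENSE,
#         )
#
#     def _split_generators(self, dl_manager):
#         return JAQKETHelper().query_split_generator(dl_manager)
#
#     def _generate_examples(self, data):
#         # `data` is the list passed via gen_kwargs in the SplitGenerators above.
#         for i, example in enumerate(data):
#             yield i, example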