| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
"""
This loader is inspired by the MedNLI implementation: https://huggingface.co/datasets/bigbio/mednli/blob/main/mednli.py

The files comprising this dataset must be on the user's local machine in a single directory that is
passed to `datasets.load_dataset` via the `data_dir` kwarg. This loader script will read the archive
files directly (i.e. the user should not uncompress, untar or unzip any of the files). For example,
if `data_dir` is `"testdataset"` it should contain the following files:

testdataset
├── testdataset.zip
"""
|
|
import csv
import os
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple

import datasets
|
|
# Metadata flags consumed by the BigBio dataset registry/tooling.
_LANGUAGES = ["English"]
_PUBMED = False  # not sourced from PubMed
_LOCAL = True  # data must be supplied locally via the `data_dir` kwarg (see module docstring)


_DATASETNAME = "testdataset"
_DISPLAYNAME = "TESTDATASET"


_DESCRIPTION = """\
Test Dataset
"""


_CITATION = ""  # no published citation for this test dataset


_HOMEPAGE = "https://www.synapse.org/"


_LICENSE = "other"


# Empty: nothing is downloaded — files are read from the local `data_dir`.
_URLS = {}


_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"
|
|
|
|
@dataclass
class BigBioConfig(datasets.BuilderConfig):
    """BuilderConfig for BigBio datasets.

    Extends ``datasets.BuilderConfig`` with BigBio-specific metadata.
    All fields default to ``None`` and are filled in per-config in
    ``BUILDER_CONFIGS``, so they are annotated as ``Optional``.
    """

    # Unique config name, e.g. "default" or "testdataset_te".
    name: Optional[str] = None
    # Dataset version for this config.
    version: Optional[datasets.Version] = None
    # Human-readable description of the config.
    description: Optional[str] = None
    # Which schema this config yields: "source" or a BigBio schema id.
    schema: Optional[str] = None
    # Identifier of the underlying subset within the dataset.
    subset_id: Optional[str] = None
|
|
|
|
class TestDataset(datasets.GeneratorBasedBuilder):
    """Loader for the local test dataset (train/dev/test CSV splits in `data_dir`)."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="default",
            version=SOURCE_VERSION,
            description="test dataset source schema",
            schema="source",
            subset_id="testdataset",
        ),
        BigBioConfig(
            name="testdataset_te",
            version=BIGBIO_VERSION,
            description="test dataset BigBio schema",
            schema="testdataset_te",
            subset_id="testdataset",
        ),
    ]

    # Must name one of the configs in BUILDER_CONFIGS; the previous value
    # "testdataset_source" did not exist and made the default load fail.
    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata.

        Both configs currently share the same single-column feature schema.
        """
        features = datasets.Features(
            {
                "test": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Map the three CSV files under `data_dir` to train/test/validation splits.

        Raises:
            ValueError: if the user did not pass `data_dir` to `load_dataset`
                (this is a local-only dataset; nothing is downloaded).
        """
        data_dir = self.config.data_dir
        if data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.csv"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.csv"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.csv"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: str, split: str):
        """Yield (key, example) pairs from one CSV split file.

        Required by GeneratorBasedBuilder; it was missing, which made every
        build fail with NotImplementedError. NOTE(review): assumes the CSVs
        have a header row with a "test" column matching the feature schema —
        confirm against the actual data files.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                yield idx, {"test": row.get("test", "")}
|
|
| |
| |
| |
| |
| |
| |
| |
|
|