| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """A dataset of 10K filings from SEC EDGAR system.""" |
|
|
|
|
| import json |
| import datasets |
|
|
# Human-readable summary shown on the dataset card / in DatasetInfo.
_DESCRIPTION = """
The dataset contains annual filings (10K) of all publicly traded firms from 1993-2020. The table data is stripped but all text is retained.
This dataset allows easy access to the EDGAR-CORPUS dataset based on the paper EDGAR-CORPUS: Billions of Tokens Make The World Go Round (See References in README.md for details).
"""

# License identifier reported in the dataset metadata.
_LICENSE = "apache-2.0"

# Version string shared by every builder configuration below.
_VERSION = "1.0.0"
|
|
| _FEATURES = [ |
| "filename", |
| "cik", |
| "year", |
| "section_1", |
| "section_1A", |
| "section_1B", |
| "section_2", |
| "section_3", |
| "section_4", |
| "section_5", |
| "section_6", |
| "section_7", |
| "section_7A", |
| "section_8", |
| "section_9", |
| "section_9A", |
| "section_9B", |
| "section_10", |
| "section_11", |
| "section_12", |
| "section_13", |
| "section_14", |
| "section_15", |
| ] |
|
|
| _URLS = {"full":"", **{"year_"+str(year):str(year)+"/" for year in range(1993,2021,1)}} |
|
|
class EdgarCorpus(datasets.GeneratorBasedBuilder):
    """Builder for the EDGAR-CORPUS dataset of 10-K SEC filings.

    A single "full" configuration spans 1993-2020, and one "year_<YYYY>"
    configuration exists for each individual year in that range.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="full",
            version=datasets.Version(_VERSION),
            description="The full dataset from 1993-2020",
        ),
        *[
            datasets.BuilderConfig(
                name=f"year_{year}",
                version=datasets.Version(_VERSION),
                # Fixed typo: "containg" -> "containing".
                description=f"The dataset containing only the year {year}",
            )
            for year in range(1993, 2021)
        ],
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        """Return the dataset metadata; every feature is a string column."""
        features = datasets.Features(
            {item: datasets.Value("string") for item in _FEATURES}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
            # Keep the reported version in sync with the builder configs
            # (previously omitted).
            version=datasets.Version(_VERSION),
        )

    def _split_generators(self, dl_manager):
        """Download the per-year JSONL shards and declare the three splits.

        Args:
            dl_manager: the `datasets` download manager used to fetch files.

        Returns:
            A list of `SplitGenerator`s for train / validation / test.
        """
        if self.config.name == "full":
            # The "full" config aggregates every per-year shard; the "full"
            # entry itself carries no files of its own, so drop it.
            urls = {key: base for key, base in _URLS.items() if key != "full"}
        else:
            urls = {self.config.name: _URLS[self.config.name]}

        # Expand each base URL into its three per-split JSONL files, e.g.
        # "year_1993" -> {"year_1993_train": "1993/train.jsonl", ...}.
        urls = {
            f"{key}_{split}": f"{base}{split}.jsonl"
            for split in ("train", "test", "validate")
            for key, base in urls.items()
        }

        data_dir = dl_manager.download_and_extract(urls)

        # Regroup the downloaded paths by split name (split names never
        # collide with the "year_<YYYY>" / "full" config-name prefixes).
        filepaths = {
            split: {k: v for k, v in data_dir.items() if split in k}
            for split in ("train", "test", "validate")
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepaths["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": filepaths["validate"], "split": "validate"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepaths["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from every JSONL file of a split.

        Args:
            filepath: mapping of shard name -> local JSONL path for this split.
            split: split name ("train" / "validate" / "test"); unused here but
                part of the gen_kwargs contract.

        Yields:
            The filing's "filename" field as the unique key, and a dict with
            one string value per entry in ``_FEATURES``.
        """
        for _, path in filepath.items():
            with open(path, encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    yield data["filename"], {item: data[item] for item in _FEATURES}