| {"kapilchauhan--processed_bert_dataset_free_speech": { |
| "description": "This corpus contains preprocessed posts from the Reddit dataset.\nThe dataset consists of 3,848,330 posts with an average length of 270 words for content,\nand 28 words for the summary.\n\nFeatures includes strings: author, body, normalizedBody, content, summary, subreddit, subreddit_id.\nContent is used as document and summary is used as summary.", |
| "citation": "@inproceedings{volske-etal-2017-tl,\n title = {TL;DR: Mining {R}eddit to Learn Automatic Summarization},\n author = {V{\"o}lske, Michael and Potthast, Martin and Syed, Shahbaz and Stein, Benno},\n booktitle = {Proceedings of the Workshop on New Frontiers in Summarization},\n month = {sep},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W17-4508},\n doi = {10.18653/v1/W17-4508},\n pages = {59--63},\n abstract = {Recent advances in automatic text summarization have used deep neural networks to generate high-quality abstractive summaries, but the performance of these models strongly depends on large amounts of suitable training data. We propose a new method for mining social media for author-provided summaries, taking advantage of the common practice of appending a {``}TL;DR{''} to long posts. A case study using a large Reddit crawl yields the Webis-TLDR-17 dataset, complementing existing corpora primarily from the news genre. Our technique is likely applicable to other social media sites and general web crawls.},\n}", |
| "homepage": "https://github.com/webis-de/webis-tldr-17-corpus", |
| "license": "", |
| "features": { |
| "author": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "body": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "normalizedBody": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "subreddit": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "subreddit_id": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "id": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "summary": { |
| "feature": { |
| "dtype": "string", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "input_ids": { |
| "feature": { |
| "dtype": "int32", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "token_type_ids": { |
| "feature": { |
| "dtype": "int8", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "attention_mask": { |
| "feature": { |
| "dtype": "int8", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| }, |
| "special_tokens_mask": { |
| "feature": { |
| "dtype": "int8", |
| "id": null, |
| "_type": "Value" |
| }, |
| "length": -1, |
| "id": null, |
| "_type": "Sequence" |
| } |
| }, |
| "post_processed": null, |
| "supervised_keys": null, |
| "task_templates": null, |
| "builder_name": null, |
| "config_name": null, |
| "version": null, |
| "splits": { |
| "train": { |
| "name": "train", |
| "num_bytes": 1570623293.0, |
| "num_examples": 78093, |
| "dataset_name": "processed_bert_dataset_free_speech" |
| } |
| }, |
| "download_checksums": null, |
| "download_size": 253310712, |
| "post_processing_size": null, |
| "dataset_size": 1570623293.0, |
| "size_in_bytes": 1823934005.0 |
| }} |