---
# Hugging Face dataset card metadata for a preprocessed (BERT-tokenized)
# version of the Jigsaw toxic-comment dataset. Reconstructed into valid
# block-style YAML from a whitespace-collapsed original.
license: apache-2.0
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
dataset_info:
  source_dataset: jigsaw-toxic-comment-classification-challenge
  processed_by: Koushik (https://huggingface.co/datasets/Koushim)
  tokenizer: bert-base-uncased
  label_format: float multi-label binary vector
  # Order matches the per-label float columns in `features` below.
  label_columns:
    - toxicity
    - severe_toxicity
    - obscene
    - threat
    - insult
    - identity_attack
    - sexual_explicit
  features:
    - name: text
      dtype: string
    - name: toxicity
      dtype: float32
    - name: severe_toxicity
      dtype: float32
    - name: obscene
      dtype: float32
    - name: threat
      dtype: float32
    - name: insult
      dtype: float32
    - name: identity_attack
      dtype: float32
    - name: sexual_explicit
      dtype: float32
    # Pre-assembled multi-label target vector plus BERT tokenizer outputs.
    - name: labels
      sequence: float64
    - name: input_ids
      sequence: int32
    - name: token_type_ids
      sequence: int8
    - name: attention_mask
      sequence: int8
  splits:
    - name: train
      num_bytes: 2110899324
      num_examples: 1804874
    - name: validation
      num_bytes: 113965680
      num_examples: 97320
    - name: test
      num_bytes: 113712324
      num_examples: 97320
  download_size: 693905946
  dataset_size: 2338577328
annotations_creators:
  - crowdsourced
language_creators:
  - found
language:
  - en
multilinguality:
  - monolingual
pretty_name: Processed Jigsaw Toxic Comment Classification
tags:
  - text classification
  - toxicity
  - multi-label classification
  - NLP
  - BERT
  - hate speech
size_categories:
  # NOTE(review): original read a bare "1M" — "<n<10M" was most likely lost to
  # markup stripping. Restored to the canonical HF bucket for ~2M examples;
  # confirm against the published card.
  - 1M<n<10M