Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
json
Sub-tasks:
hate-speech-detection
Languages:
English
Size:
10K - 100K
License:
Commit
·
0a7913c
1
Parent(s):
eb5f680
Upload Dataset
Browse files- .gitattributes +3 -0
- fstdt_quotes.py +48 -0
- test.json +3 -0
- train.json +3 -0
- validation.json +3 -0
.gitattributes
CHANGED
|
@@ -52,3 +52,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
test.json filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
train.json filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
validation.json filter=lfs diff=lfs merge=lfs -text
|
fstdt_quotes.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# FSTDT Quotes dataset loading script
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
import datasets
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class FSTDTQuotes(datasets.GeneratorBasedBuilder):
    """Dataset builder for quotes submitted to FSTDT.

    Loads three JSON-lines files (``train.json``, ``validation.json``,
    ``test.json``) shipped alongside this script, one JSON object per line.
    """

    def _info(self):
        """Return dataset metadata describing the schema of one quote record."""
        return datasets.DatasetInfo(
            description='Quotes submitted to FSTDT',
            features=datasets.Features({
                'id': datasets.Value('string'),
                'submitter': datasets.Value('string'),
                # FIX: the original was missing the comma after this entry,
                # which made the dict literal a syntax error.
                'timestamp': datasets.Value('string'),
                'name': datasets.Value('string'),
                'src_url': datasets.Value('string'),
                'tags': datasets.Sequence(datasets.Value('string')),
                'quote': datasets.Value('string')
            })
        )

    def _split_generators(self, dl_manager):
        """Resolve the split files and declare the train/validation/test splits.

        ``dl_manager.download_and_extract`` maps the relative file names to
        local paths, which are forwarded to ``_generate_examples``.
        """
        data_files = dl_manager.download_and_extract({
            'train': 'train.json',
            'validation': 'validation.json',
            'test': 'test.json'
        })
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={'filepath': data_files['train']}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={'filepath': data_files['validation']}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'filepath': data_files['test']}
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, record)`` pairs from a JSON-lines file.

        Each line of *filepath* is expected to be one JSON object whose keys
        match the features declared in ``_info``.
        """
        # Explicit encoding: quote text is arbitrary user content, so don't
        # rely on the platform-default codec.
        with open(filepath, encoding='utf-8') as f:
            for key, row in enumerate(f):
                yield key, json.loads(row)
test.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:49483643a175ce020279d9ef266dd8ac4555229c17ffdd75aadf87d16d5b0057
|
| 3 |
+
size 11323720
|
train.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b27696b691dab5397b9f1f498d7db54eddf46e5eb3a62b20a50adcb480942041
|
| 3 |
+
size 86980160
|
validation.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:73bd3d38a7357fb917ac61efda342bb56a6df4c4d00b06fbf86fd5e35c2362ba
|
| 3 |
+
size 10978950
|