Upload AraSum.py with huggingface_hub
Browse files
AraSum.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import datasets
|
| 4 |
+
from glob import glob
|
| 5 |
+
import zipfile
|
| 6 |
+
|
| 7 |
+
class AraSum(datasets.GeneratorBasedBuilder):
    """Loading script for AraSum, an Arabic abstractive-summarization corpus
    distributed as tab-separated files with columns (index, summary, article)."""

    def _info(self):
        """Return dataset metadata: three string features per example."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    'index': datasets.Value('string'),
                    'summary': datasets.Value('string'),
                    'article': datasets.Value('string'),
                }
            )
        )

    def extract_all(self, dir):
        """Recursively extract every nested ``.zip`` found under *dir*, in place.

        NOTE(review): not invoked anywhere in this script — presumably kept for
        parity with sibling loaders; confirm before removing.
        """
        zip_files = glob(dir + '/**/**.zip', recursive=True)
        for file in zip_files:
            with zipfile.ZipFile(file) as item:
                # Extract next to the archive. os.path.dirname is portable,
                # unlike joining on '/' (which breaks on Windows paths).
                item.extractall(os.path.dirname(file))

    def get_all_files(self, dir):
        """Return every file under *dir* bearing one of the recognized extensions."""
        files = []
        valid_file_ext = ['txt', 'csv', 'tsv', 'xlsx', 'xls', 'xml', 'json',
                          'jsonl', 'html', 'wav', 'mp3', 'jpg', 'png']
        for ext in valid_file_ext:
            files += glob(f"{dir}/**/**.{ext}", recursive=True)
        return files

    def _split_generators(self, dl_manager):
        """Download and extract the corpus archive; expose it as one TRAIN split."""
        url = ['https://raw.githubusercontent.com/ppke-nlpg/AraSum/main/AbsArSumCorpus_csv_v1.zip']
        downloaded_files = dl_manager.download_and_extract(url)
        files = self.get_all_files(downloaded_files[0])
        return [datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={'filepaths': {'inputs': files}},
        )]

    def _generate_examples(self, filepaths):
        """Yield ``(id, example)`` pairs from every tab-separated input file.

        Files that do not parse into exactly three columns are skipped whole;
        malformed rows inside a file are silently dropped.
        """
        _id = 0
        for i, filepath in enumerate(filepaths['inputs']):
            # BUG FIX: `error_bad_lines=False` was deprecated in pandas 1.3 and
            # removed in pandas 2.0; `on_bad_lines='skip'` is the supported
            # equivalent. A literal '\t' separator (instead of the regex r'\t')
            # also lets pandas use the fast C engine rather than the python one.
            df = pd.read_csv(filepath, sep='\t', skiprows=0,
                             header=0, on_bad_lines='skip')
            if len(df.columns) != 3:
                continue
            df.columns = ['index', 'summary', 'article']
            for _, record in df.iterrows():
                yield str(_id), {'index': record['index'],
                                 'summary': record['summary'],
                                 'article': record['article']}
                _id += 1