parquet-converter committed on
Commit
b16d43b
·
1 Parent(s): 116aa8a

Update parquet files

Browse files
.gitattributes DELETED
@@ -1,29 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- train_data.csv filter=lfs diff=lfs merge=lfs -text
29
- validation_data.csv filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
train_data.csv → Summarization Part Data/summarization_optimization-train.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:63aa4cabc7c14d9c42586f06b12b095beb3469ab2bd7154abd97d3f6bf09d9e2
3
- size 178145961
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:470db01da13bab3c917bca061f057a2c1b34aa857641f81b80294b8df18c1887
3
+ size 105965117
validation_data.csv → Summarization Part Data/summarization_optimization-validation.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b8f3ebe76e1f6f25406a0c951bede30fe1ce738ad83a239b2a08256cba54a964
3
- size 44623623
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7dadde2c99d2f5b82f8b3345392e9988317602ae415b3d8c3b6b1f6989f51f4
3
+ size 26535081
summarization_optimization.py DELETED
@@ -1,73 +0,0 @@
1
- import json
2
- import pandas as pd
3
- import datasets
4
- import csv
5
- from datasets.tasks import Summarization
6
-
7
- logger = datasets.logging.get_logger(__name__)
8
-
9
-
10
- _DESCRIPTION = """\
11
- Aihub Document summarization data
12
- """
13
- _URL = "https://huggingface.co/datasets/metamong1/summarization_optimization/resolve/main/"
14
- _URLS = {
15
- "train_data": _URL + "train_data.csv",
16
- "validation_data": _URL + "validation_data.csv",
17
- }
18
-
19
- class SummarizationOptimization(datasets.GeneratorBasedBuilder):
20
-
21
- BUILDER_CONFIGS = [
22
- datasets.BuilderConfig(
23
- name="Summarization Part Data",
24
- version=datasets.Version("1.0.0", ""),
25
- description="Text Summarization & Generation Title for optimization",
26
- ),
27
- ]
28
-
29
- def _info(self):
30
- return datasets.DatasetInfo(
31
- description=_DESCRIPTION,
32
- features=datasets.Features(
33
- {
34
- "doc_id": datasets.Value("string"),
35
- "title": datasets.Value("string"),
36
- "text": datasets.Value("string"),
37
- "doc_type": datasets.Value("string"),
38
- "file": datasets.Value("string"),
39
- }
40
- ),
41
- # No default supervised_keys (as we have to pass both question
42
- # and context as input).
43
- supervised_keys=None,
44
- homepage="https://huggingface.co/datasets/metamong1/summarization_optimization",
45
- )
46
-
47
- def _split_generators(self, dl_manager):
48
- downloaded_files = dl_manager.download_and_extract(_URLS)
49
-
50
- return [
51
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
52
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
53
- ]
54
-
55
- def _generate_examples(self, filepath):
56
- """This function returns the examples in the raw (text) form."""
57
- logger.info("generating examples from = %s", filepath)
58
- with open(filepath, newline='', encoding="utf-8") as csvfile:
59
- reader = csv.reader(csvfile, delimiter=",")
60
- feature_name = next(reader)
61
-
62
- idx = 0
63
- for row in reader:
64
- features = {
65
- "doc_id" : row[1],
66
- "title" : row[2],
67
- "text" : row[3],
68
- "doc_type" : row[4],
69
- "file" : row[5],
70
- }
71
-
72
- yield idx, features
73
- idx += 1