Convert dataset to Parquet
#2
by
babotrojka
- opened
- README.md +29 -0
- flickr_annotations_30k.csv → TEST/test-00000-of-00009.parquet +2 -2
- flickr30k-images.zip → TEST/test-00001-of-00009.parquet +2 -2
- TEST/test-00002-of-00009.parquet +3 -0
- TEST/test-00003-of-00009.parquet +3 -0
- TEST/test-00004-of-00009.parquet +3 -0
- TEST/test-00005-of-00009.parquet +3 -0
- TEST/test-00006-of-00009.parquet +3 -0
- TEST/test-00007-of-00009.parquet +3 -0
- TEST/test-00008-of-00009.parquet +3 -0
- flickr30k.py +0 -71
README.md
CHANGED
|
@@ -1,3 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# Flickr30k
|
| 2 |
|
| 3 |
Original paper: [From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions](https://aclanthology.org/Q14-1006)
|
|
|
|
| 1 |
+
---
|
| 2 |
+
dataset_info:
|
| 3 |
+
config_name: TEST
|
| 4 |
+
features:
|
| 5 |
+
- name: image
|
| 6 |
+
dtype: image
|
| 7 |
+
- name: caption
|
| 8 |
+
list: string
|
| 9 |
+
- name: sentids
|
| 10 |
+
list: string
|
| 11 |
+
- name: split
|
| 12 |
+
dtype: string
|
| 13 |
+
- name: img_id
|
| 14 |
+
dtype: string
|
| 15 |
+
- name: filename
|
| 16 |
+
dtype: string
|
| 17 |
+
splits:
|
| 18 |
+
- name: test
|
| 19 |
+
num_bytes: 4325959287.86
|
| 20 |
+
num_examples: 31014
|
| 21 |
+
download_size: 4305504040
|
| 22 |
+
dataset_size: 4325959287.86
|
| 23 |
+
configs:
|
| 24 |
+
- config_name: TEST
|
| 25 |
+
data_files:
|
| 26 |
+
- split: test
|
| 27 |
+
path: TEST/test-*
|
| 28 |
+
default: true
|
| 29 |
+
---
|
| 30 |
# Flickr30k
|
| 31 |
|
| 32 |
Original paper: [From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions](https://aclanthology.org/Q14-1006)
|
flickr_annotations_30k.csv → TEST/test-00000-of-00009.parquet
RENAMED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:684e8f49c9282a6b79218d4ace88f53e1917e967ff49d8dbbafd8463766e72ed
|
| 3 |
+
size 459231637
|
flickr30k-images.zip → TEST/test-00001-of-00009.parquet
RENAMED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:
|
| 3 |
-
size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ac73948ad3d95b16c5a884e80b23ef10237f6d47b48a6e839eab5048f6ef683c
|
| 3 |
+
size 463201812
|
TEST/test-00002-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36a2fca272995bed6a3ffc83326b8a969433f376fb59f020c6239e8c90f53f95
|
| 3 |
+
size 473895143
|
TEST/test-00003-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:86076b8f41c24b0962c7052bdd793d911314c217deef0426f66f901a3aaa512c
|
| 3 |
+
size 460539773
|
TEST/test-00004-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0310c6d4d8a884f4abe8d9f4c7352138c60c1dc5b580ec8be2500261f77a60bc
|
| 3 |
+
size 478918140
|
TEST/test-00005-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6a49913ff5a8a0946178b5d839b574a5f55b8051e3c90e54db755088bed375d3
|
| 3 |
+
size 488993869
|
TEST/test-00006-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cff920b6501b74bb0c68dc893061f077b271bd89449856fbfc4de860eb1dd9cd
|
| 3 |
+
size 517475509
|
TEST/test-00007-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e2eb9a8ef09da318975729402fcae6a6e5446dd6deaec02e071ae0818a30603
|
| 3 |
+
size 497211494
|
TEST/test-00008-of-00009.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a063af15401cbdccb8d726a1a6e3c4f10aa8d3fa370f5afd23b4a32778ff7ca7
|
| 3 |
+
size 466036663
|
flickr30k.py
DELETED
|
@@ -1,71 +0,0 @@
|
|
| 1 |
-
# coding=utf-8
|
| 2 |
-
# Copyright 2022 the HuggingFace Datasets Authors.
|
| 3 |
-
#
|
| 4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
-
# you may not use this file except in compliance with the License.
|
| 6 |
-
# You may obtain a copy of the License at
|
| 7 |
-
#
|
| 8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
-
#
|
| 10 |
-
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
-
# See the License for the specific language governing permissions and
|
| 14 |
-
# limitations under the License.
|
| 15 |
-
|
| 16 |
-
import os
|
| 17 |
-
import pandas as pd
|
| 18 |
-
import datasets
|
| 19 |
-
import json
|
| 20 |
-
from huggingface_hub import hf_hub_url
|
| 21 |
-
|
| 22 |
-
# CSV in the hub repo holding the caption annotations (one row per image entry).
_INPUT_CSV = "flickr_annotations_30k.csv"
# Basename (without ".zip") of the archive containing the image files.
_INPUT_IMAGES = "flickr30k-images"
# Hub dataset repository this loader downloads its files from.
_REPO_ID = "nlphuji/flickr30k"
# CSV columns stored as JSON-encoded lists; decoded with json.loads at load time.
_JSON_KEYS = ['raw', 'sentids']
| 26 |
-
|
| 27 |
-
class Dataset(datasets.GeneratorBasedBuilder):
    """Loader for the Flickr30k captions dataset.

    Downloads the annotation CSV and the zipped image directory from the
    hub repo (`_REPO_ID`) and yields one example per annotation row, all
    collected under a single TEST split.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="TEST", version=VERSION, description="test"),
    ]

    def _info(self):
        """Declare the per-example schema exposed to consumers."""
        feature_spec = {
            "image": datasets.Image(),
            "caption": [datasets.Value("string")],
            "sentids": [datasets.Value("string")],
            "split": datasets.Value("string"),
            "img_id": datasets.Value("string"),
            "filename": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            features=datasets.Features(feature_spec),
            task_templates=[],
        )

    def _split_generators(self, dl_manager):
        """Download the CSV and the image archive; return the single TEST split."""
        sources = {
            "examples_csv": hf_hub_url(repo_id=_REPO_ID, repo_type='dataset', filename=_INPUT_CSV),
            "images_dir": hf_hub_url(repo_id=_REPO_ID, repo_type='dataset', filename=f"{_INPUT_IMAGES}.zip"),
        }
        downloaded = dl_manager.download_and_extract(sources)
        # gen_kwargs feeds the downloaded paths straight into _generate_examples.
        return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=downloaded)]

    def _generate_examples(self, examples_csv, images_dir):
        """Yield (row_index, example_dict) pairs built from the annotation CSV."""
        frame = pd.read_csv(examples_csv)
        # These columns hold JSON-encoded lists in the CSV; decode them first.
        for column in _JSON_KEYS:
            frame[column] = frame[column].apply(json.loads)

        for idx, row in frame.iterrows():
            record = row.to_dict()
            # Point "image" at the extracted file so the Image feature can load it.
            record['image'] = os.path.join(images_dir, _INPUT_IMAGES, record['filename'])
            # The schema names the caption list "caption"; the CSV calls it "raw".
            record['caption'] = record.pop('raw')
            yield idx, record
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|