import json
import os
from tqdm import tqdm
from datasets import Dataset, load_dataset, Image
import pandas as pd
def load_jsonl(file_path):
    """Read a JSONL file and return its records as a list of dicts.

    Malformed lines are reported to stdout and skipped, so one bad line
    does not abort the whole load.
    """
    records = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw in fh:
            stripped = raw.strip()
            try:
                records.append(json.loads(stripped))
            except json.JSONDecodeError as err:
                print(f"Error decoding JSON on line: {stripped} - {err}")
    return records
def main():
    """Build per-split HuggingFace datasets for Flickr30k.

    Reads the pipe-delimited caption annotations (results.csv), groups the
    crowd captions by image, attaches localized-narrative captions and the
    train/val/test split from the narratives JSONL files, then writes one
    Arrow dataset per split under ./flickr30k/datasets/data/<split>.
    """
    dsets = ["train", "val", "test"]
    workdir = "./flickr30k"

    # Load annotations; one row per caption.
    # NOTE: the header in results.csv carries leading spaces on the last two
    # column names, hence the " comment_number"/" comment" keys below.
    annot_fn = os.path.join(workdir, "results.csv")
    df = pd.read_csv(annot_fn, delimiter="|")  # read_csv already returns a DataFrame; no re-wrap needed

    # Group the per-caption rows into one record per image.
    datadict = {}
    for _, row in df.iterrows():
        idx = row["image_name"].replace(".jpg", "")
        if idx not in datadict:
            datadict[idx] = {
                "image_name": row["image_name"],
                "image": os.path.join(workdir, "flickr30k_images", row["image_name"]),
                "sentids": [],
                "split": None,  # filled in from the narratives files below
                "caption": [],
                "narratives": []
            }
        datadict[idx]["sentids"].append(row[" comment_number"])
        datadict[idx]["caption"].append(row[" comment"])

    # Align to narratives splits: an image's split is taken from whichever
    # narratives file mentions it; images mentioned in none keep split=None
    # and are filtered out of every dataset below.
    for split in dsets:
        narr = load_jsonl(os.path.join(workdir, "narratives", f"flickr30k_{split}_captions.jsonl"))
        for item in narr:
            idx = item["image_id"]
            # assumes every narratives image_id also appears in results.csv;
            # a missing id would raise KeyError here — TODO confirm
            datadict[idx]["split"] = split
            datadict[idx]["narratives"].append(item["caption"])

    # Materialize and save one dataset per split.
    for split in dsets:
        split_df = pd.DataFrame.from_dict(datadict, orient="index")
        split_df = split_df[split_df["split"] == split]
        ds = Dataset.from_pandas(split_df)
        ds = ds.remove_columns(["__index_level_0__", "split"])
        ds = ds.cast_column("image", Image())
        ds.save_to_disk(os.path.join(workdir, "datasets", "data", split), max_shard_size="400MB")
def test_dataset():
    """Smoke-check the exported datasets: load them from disk and print one sample."""
    dataset_dir = "./flickr30k/datasets"
    ds = load_dataset(dataset_dir)
    print(ds["train"][0])
if __name__ == "__main__":
    # Toggle between building the datasets (main) and verifying the export
    # (test_dataset); main() is currently disabled.
    # main()
    test_dataset()