| | import json |
| | import os |
| | from tqdm import tqdm |
| |
|
| | from datasets import Dataset, load_dataset, Image |
| | import pandas as pd |
| |
|
| |
|
def load_jsonl(file_path):
    """Load a JSONL file and return its records as a list of dicts.

    Blank lines are skipped (these are common as trailing newlines in
    JSONL files); lines that fail to parse are reported to stdout and
    skipped so a single bad record does not abort the whole load.

    Args:
        file_path: Path to a UTF-8 encoded JSON-lines file.

    Returns:
        list: One parsed JSON object per valid non-blank line, in file order.
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            stripped = line.strip()
            # Skip blank lines instead of reporting them as decode errors.
            if not stripped:
                continue
            try:
                data.append(json.loads(stripped))
            except json.JSONDecodeError as e:
                print(f"Error decoding JSON on line: {stripped} - {e}")
    return data
| |
|
| |
|
def main():
    """Build per-split HuggingFace datasets for Flickr30k.

    Reads the pipe-delimited caption CSV at ./flickr30k/results.csv,
    aggregates the five captions per image, merges in narrative captions
    and split assignments from ./flickr30k/narratives/flickr30k_<split>_captions.jsonl,
    and saves one Arrow dataset per split under ./flickr30k/datasets/data/<split>.
    """
    dsets = ["train", "val", "test"]
    workdir = "./flickr30k"

    # --- Aggregate per-image captions from the pipe-delimited CSV. ---
    annot_fn = os.path.join(workdir, "results.csv")
    # read_csv already returns a DataFrame; no extra pd.DataFrame(...) wrap needed.
    df = pd.read_csv(annot_fn, delimiter="|")

    datadict = {}
    for _, row in df.iterrows():
        idx = row["image_name"].replace(".jpg", "")
        if idx not in datadict:
            datadict[idx] = {
                "image_name": row["image_name"],
                "image": os.path.join(workdir, "flickr30k_images", row["image_name"]),
                "sentids": [],
                "split": None,
                "caption": [],
                "narratives": [],
            }
        # NOTE: the CSV header has a leading space in these two column names.
        datadict[idx]["sentids"].append(row[" comment_number"])
        datadict[idx]["caption"].append(row[" comment"])

    # --- Attach split assignment and narrative captions from the JSONL files. ---
    for split in dsets:
        narr = load_jsonl(os.path.join(workdir, "narratives", f"flickr30k_{split}_captions.jsonl"))
        for item in narr:
            idx = item["image_id"]
            if idx not in datadict:
                # A narrative can reference an image missing from the CSV;
                # report and skip instead of crashing with a KeyError.
                print(f"Warning: image_id {idx!r} from {split} narratives not in CSV annotations; skipped.")
                continue
            datadict[idx]["split"] = split
            datadict[idx]["narratives"].append(item["caption"])

    # --- Save one dataset per split. ---
    # Building the full frame is split-independent, so do it once, not per split.
    full_df = pd.DataFrame.from_dict(datadict, orient="index")
    for split in dsets:
        split_df = full_df[full_df["split"] == split]
        ds = Dataset.from_pandas(split_df)
        # Drop the pandas index column and the now-redundant split marker.
        ds = ds.remove_columns(["__index_level_0__", "split"])
        ds = ds.cast_column("image", Image())
        ds.save_to_disk(os.path.join(workdir, "datasets", "data", split), max_shard_size="400MB")
| |
|
| |
|
def test_dataset():
    """Smoke-check the saved datasets: load them and print the first train example."""
    dataset = load_dataset("./flickr30k/datasets")
    first_example = dataset["train"][0]
    print(first_example)
| | |
| |
|
if __name__ == "__main__":
    # NOTE(review): this entry point runs test_dataset() (loads the already-saved
    # dataset and prints one example) rather than main() (which builds it).
    # Presumably the build step was run previously — confirm this toggle is intended.
    test_dataset()
| |
|