# Hugging Face `datasets` loading script for the Causal3D benchmark.
import datasets
import pandas as pd
import os
from pathlib import Path
from tqdm import tqdm
# BibTeX entry for the Causal3D paper; surfaced through DatasetInfo.citation.
_CITATION = """\
@article{liu2025causal3d,
title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
journal={arXiv preprint arXiv:2503.04852},
year={2025}
}
"""
# Short human-readable summary; surfaced through DatasetInfo.description.
_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""
# Canonical dataset page on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
# SPDX-style license identifier for the released data.
_LICENSE = "CC-BY-4.0"
class Causal3dDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Causal3D benchmark.

    Each config corresponds to one scene (hypothetical, real-world, or
    multi-view). A scene is a directory of images, optionally accompanied by
    a CSV file whose rows hold the causal variables; the CSV's ``imgs``
    column links rows to image files.
    """

    DEFAULT_CONFIG_NAME = "Real_Water_flow"

    # One config per scene name; grouped by category for readability.
    _SCENE_NAMES = [
        # hypothetical_scenes
        "Hypothetical_V2_linear",
        "Hypothetical_V2_nonlinear",
        "Hypothetical_V3_fully_connected_linear",
        "Hypothetical_V3_v_structure_linear",
        "Hypothetical_V3_v_structure_nonlinear",
        "Hypothetical_V4_linear",
        "Hypothetical_V4_v_structure_nonlinear",
        "Hypothetical_V4_v_structure_linear",
        "Hypothetical_V5_linear",
        "Hypothetical_V5_v_structure_linear",
        "Hypothetical_V5_v_structure_nonlinear",
        # real_scenes
        "Real_Parabola",
        "Real_Magnet",
        "Real_Spring",
        "Real_Water_flow",
        "Real_Seesaw",
        "Real_Reflection",
        "Real_Pendulum",
        "Real_Convex_len",
        # multi_view_scenes
        "MV_Pendulum",
        "MV_H3_v_structure_linear",
        "MV_H2_linear",
        "MV_H2_nonlinear",
        "MV_H4_fully_connected_linear",
        "MV_H4_v_structure_linear",
        "MV_H4_v_structure_nonlinear",
        "MV_H5_fully_connected_linear",
        "MV_H5_v_structure_linear",
        "MV_H5_v_structure_nonlinear",
    ]

    # Generated from _SCENE_NAMES instead of 30 hand-written lines; the
    # descriptions match the originals exactly (MV_Pendulum had a bespoke one).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=scene,
            version=datasets.Version("1.0.0"),
            description=(
                "Multi_View_Real_Pendulum scene"
                if scene == "MV_Pendulum"
                else f"{scene} scene"
            ),
        )
        for scene in _SCENE_NAMES
    ]

    def _info(self):
        """Return the static metadata and feature schema for every config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # raw CSV row as JSON; could become structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Locate the scene data (local folder first, else download the zip).

        Config names are ``<category>_<scene>``, e.g. ``Real_Parabola`` maps
        to the local directory ``Real/Parabola``.
        """
        print(">>>>>>>>>>>>>>>>>>>>>>> Starting to load dataset <<<<<<<<<<<<<<<<<<<<<<<")
        category, scene = self.config.name.split("_", 1)
        local_scene_dir = os.path.join(category, scene)
        if os.path.exists(local_scene_dir):
            data_dir = local_scene_dir
            print(f"Using local folder: {data_dir}")
        else:
            zip_name = f"{self.config.name}.zip"
            data_dir = dl_manager.download_and_extract(zip_name)
            print(f"Downloaded and extracted: {zip_name}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs for one scene.

        Indexes every image under ``data_dir``; if a CSV is present, each CSV
        row is matched to an image through its ``imgs`` entry and yielded with
        the row serialized as JSON metadata. Without a CSV, bare images are
        yielded in sorted order.
        """
        root = Path(data_dir)

        # Map relative path -> absolute path for every image. Skip macOS
        # AppleDouble files ("._*"), consistent with the CSV scan below.
        image_files = {}
        for ext in ("*.png", "*.jpg", "*.jpeg"):
            for img_path in root.rglob(ext):
                if img_path.name.startswith("._"):
                    continue
                image_files[str(img_path.relative_to(data_dir))] = str(img_path)

        csv_files = [f for f in root.rglob("*.csv") if not f.name.startswith("._")]
        df = pd.read_csv(csv_files[0]) if csv_files else None

        if df is None:
            # BUG FIX: the original iterated `enumerate(image_files)` and then
            # indexed the dict with the integer position, raising KeyError.
            # Iterate the (sorted) keys instead for a deterministic order.
            for idx, relative in enumerate(sorted(image_files)):
                full_path = image_files[relative]
                yield idx, {
                    "image": full_path,
                    "file_name": Path(full_path).stem,
                    "metadata": None,
                }
            return

        images = df["imgs"].tolist() if "imgs" in df.columns else []
        for idx, row in tqdm(df.iterrows(), total=len(df)):
            fname = row["imgs"] if "imgs" in row else str(idx)
            record_img_path = None
            try:
                if images:
                    # Basename without extension of the CSV entry, matched by
                    # substring against the indexed relative paths (tolerates
                    # differing directory prefixes in the CSV).
                    image_name = str(images[idx]).split("/")[-1].split(".")[0]
                    record_img_path = next(
                        (key for key in image_files if image_name in key), None
                    )
            except Exception as e:
                # BUG FIX: the original `break` aborted the whole split on the
                # first bad row; skip only the offending row instead.
                print(f"Error: {e} in row {idx}, skipping row")
                continue
            # Sanity check (was a confusing conditional-expression condition).
            if record_img_path is not None and not os.path.exists(image_files[record_img_path]):
                raise FileNotFoundError(f"Image file not found: {image_files[record_img_path]}")
            yield idx, {
                "image": image_files[record_img_path] if record_img_path else None,
                "file_name": fname,
                "metadata": row.to_json(),
            }