DsL commited on
Commit
6b911a1
·
1 Parent(s): 73e7854
Files changed (1) hide show
  1. Causal3D_Dataset.py +203 -0
Causal3D_Dataset.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import datasets
import pandas as pd
import os
from pathlib import Path
from tqdm import tqdm

# BibTeX entry for the Causal3D paper (arXiv:2503.04852); surfaced via DatasetInfo.citation.
_CITATION = """\
@article{liu2025causal3d,
title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
journal={arXiv preprint arXiv:2503.04852},
year={2025}
}
"""

# Human-readable summary shown on the dataset card / DatasetInfo.description.
_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""

# Canonical homepage of the dataset repository and its license identifier.
_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
_LICENSE = "CC-BY-4.0"
24
class Causal3dDataset(datasets.GeneratorBasedBuilder):
    """Loader for the Causal3D benchmark: scene images plus optional CSV metadata.

    Each ``BuilderConfig`` names one scene. Config names follow the
    ``<Category>_<Scene>`` pattern (e.g. ``Real_Parabola``), which
    ``_split_generators`` splits on the first underscore to locate either a
    local ``<Category>/<Scene>`` folder or a ``<config_name>.zip`` archive.
    """

    DEFAULT_CONFIG_NAME = "Real_Water_flow"
    BUILDER_CONFIGS = [
        # hypothetical_scenes
        datasets.BuilderConfig(name="Hypothetical_V2_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V2_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetical_V2_nonlinear scene"),
        datasets.BuilderConfig(name="Hypothetical_V3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V3_fully_connected_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V3_v_structure_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V3_v_structure_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V3_v_structure_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetical_V3_v_structure_nonlinear scene"),
        datasets.BuilderConfig(name="Hypothetical_V4_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V4_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V4_v_structure_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetical_V4_v_structure_nonlinear scene"),
        datasets.BuilderConfig(name="Hypothetical_V4_v_structure_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V4_v_structure_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V5_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V5_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V5_v_structure_linear", version=datasets.Version("1.0.0"), description="Hypothetical_V5_v_structure_linear scene"),
        datasets.BuilderConfig(name="Hypothetical_V5_v_structure_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetical_V5_v_structure_nonlinear scene"),

        # real_scenes
        datasets.BuilderConfig(name="Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
        datasets.BuilderConfig(name="Real_Magnet", version=datasets.Version("1.0.0"), description="Real_Magnet scene"),
        datasets.BuilderConfig(name="Real_Spring", version=datasets.Version("1.0.0"), description="Real_Spring scene"),
        datasets.BuilderConfig(name="Real_Water_flow", version=datasets.Version("1.0.0"), description="Real_Water_flow scene"),
        datasets.BuilderConfig(name="Real_Seesaw", version=datasets.Version("1.0.0"), description="Real_Seesaw scene"),
        datasets.BuilderConfig(name="Real_Reflection", version=datasets.Version("1.0.0"), description="Real_Reflection scene"),
        datasets.BuilderConfig(name="Real_Pendulum", version=datasets.Version("1.0.0"), description="Real_Pendulum scene"),
        datasets.BuilderConfig(name="Real_Convex_len", version=datasets.Version("1.0.0"), description="Real_Convex_len scene"),
    ]

    def _info(self):
        """Return the static dataset metadata and feature schema.

        Every example exposes the image, its recorded file name, and the raw
        CSV row serialized as a JSON string (or None when no CSV exists).
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # optionally replace with structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the scene directory (local folder or downloaded zip) and emit one TRAIN split.

        The config name is split on the FIRST underscore only, so e.g.
        ``Real_Water_flow`` -> category ``Real``, scene ``Water_flow``.
        """
        print(">>>>>>>>>>>>>>>>>>>>>>> Starting to load dataset <<<<<<<<<<<<<<<<<<<<<<<")
        category, scene = self.config.name.split("_", 1)  # 🚩 Real_Parabola -> ["Real", "Parabola"]

        local_scene_dir = os.path.join(category, scene)

        if os.path.exists(local_scene_dir):
            # Prefer an already-present local checkout over re-downloading.
            data_dir = local_scene_dir
            print(f"Using local folder: {data_dir}")
        else:
            zip_name = f"{self.config.name}.zip"
            archive_path = dl_manager.download_and_extract(zip_name)
            data_dir = archive_path
            print(f"Downloaded and extracted: {zip_name}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs for every image under *data_dir*.

        If a CSV is present its rows drive the iteration and each row is
        matched to an image file by the basename recorded in the ``imgs``
        column; otherwise every discovered image is emitted with no metadata.
        """
        print(f"Generating examples from: {data_dir}")

        # Map relative path -> absolute path for every image under data_dir.
        image_files = {}
        for ext in ("*.png", "*.jpg", "*.jpeg"):
            for img_path in Path(data_dir).rglob(ext):
                image_files[str(img_path.relative_to(data_dir))] = str(img_path)

        # Use the first real CSV; skip macOS "._" resource-fork artifacts.
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        df = pd.read_csv(csv_files[0]) if csv_files else None

        if df is None:
            # No metadata available: emit each image on its own.
            # BUGFIX: the original iterated enumerate(dict) and then indexed the
            # dict with the integer position, which always raised KeyError.
            for idx, abs_path in enumerate(tqdm(image_files.values())):
                yield idx, {
                    "image": abs_path,
                    "file_name": Path(abs_path).stem,
                    "metadata": None,
                }
            return

        images = df["imgs"].tolist() if "imgs" in df.columns else []

        for idx, row in tqdm(df.iterrows(), total=len(df)):
            fname = row["imgs"] if "imgs" in row else str(idx)
            # Basename (no directory, no extension) recorded in the CSV row.
            image_name = images[idx].split("/")[-1].split(".")[0] if images else ""
            # BUGFIX: only search when a name exists — an empty substring would
            # match the first key and attach the wrong image to every row.
            record_img_path = (
                next((key for key in image_files if image_name in key), None)
                if image_name
                else None
            )
            yield idx, {
                "image": image_files[record_img_path] if record_img_path else None,
                "file_name": fname,
                "metadata": row.to_json(),
            }
121
+
122
+
123
+ # def _generate_examples(self, data_dir):
124
+ # def color(text, code):
125
+ # return f"\033[{code}m{text}\033[0m"
126
+ # print("load data from {}".format(data_dir))
127
+ # try:
128
+ # image_files = {}
129
+ # for ext in ("*.png", "*.jpg", "*.jpeg"):
130
+ # for img_path in Path(data_dir).rglob(ext):
131
+ # relative_path = str(img_path.relative_to(data_dir))
132
+ # image_files[relative_path] = str(img_path)
133
+ # parts = [i.split('/')[0] for i in list(image_files.keys())]
134
+ # parts = set(parts)
135
+ # if "part_000" not in parts:
136
+ # parts= ['']
137
+
138
+
139
+ # except Exception as e:
140
+ # print(color(f"Error loading images: {e}", "31")) # Red
141
+ # return
142
+
143
+ # # Find the .csv file
144
+ # csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
145
+ # if not csv_files:
146
+ # # print(f"\033[33m[SKIP] No CSV found in {data_dir}, skipping this config.\033[0m")
147
+ # pass
148
+ # # print(f"\033[33m[INFO] Found CSV: {csv_files}\033[0m")
149
+ # csv_path = csv_files[0] if csv_files else None
150
+ # df = pd.read_csv(csv_path) if csv_path else None
151
+ # image_col_exists = True
152
+ # if df is not None and "imgs" not in df.columns:
153
+ # image_col_exists = False
154
+
155
+ # images = df["imgs"].tolist() if image_col_exists and df is not None else []
156
+ # images = [i.split('/')[-1].split('.')[0] for i in images if i.endswith(('.png', '.jpg', '.jpeg'))]
157
+
158
+ # try:
159
+ # # Match CSV rows with image paths
160
+ # if df is None:
161
+ # for i, j in tqdm(image_files.items(), desc="Processing images", unit="image"):
162
+ # yield i, {
163
+ # "image": j,
164
+ # "file_name": i,
165
+ # "metadata": None,
166
+ # }
167
+
168
+ # else:
169
+ # for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
170
+ # fname = row["imgs"]
171
+ # raw_record_img_path = row["imgs"] #images[idx] if images else "" #row["image"]
172
+ # record_img_name = raw_record_img_path.split('/')[-1]
173
+ # render_img_path = record_img_name
174
+
175
+
176
+ # # for part in parts:
177
+ # # if part == '':
178
+ # # record_img_path = record_img_name
179
+ # # else:
180
+ # # record_img_path = "/".join([part, record_img_name.strip()])
181
+ # # if "Water_flow_scene_render" in data_dir:
182
+ # # record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0]))+".png"])
183
+ # # if record_img_path in image_files:
184
+ # # # print(color(f"record_img_path: { image_files[record_img_path]}", "34")) # Blue
185
+ # # yield idx, {
186
+ # # "image": image_files[record_img_path],
187
+ # # "file_name": fname,
188
+ # # "metadata": row.to_json(),
189
+ # # }
190
+ # # break
191
+
192
+ # # else:
193
+ # # yield idx, {
194
+ # # # "image": "",
195
+ # # "file_name": fname,
196
+ # # "metadata": row.to_json(),
197
+ # # }
198
+ # # break
199
+
200
+
201
+ # except Exception as e:
202
+ # print(color(f"Error processing CSV rows: {e}", "31"))
203
+