# HuggingFace dataset builder for the LPBF (laser powder bed fusion)
# additive manufacturing dataset.
import os
import torch
from datasets import DatasetInfo, GeneratorBasedBuilder, BuilderConfig, Split, SplitGenerator
class LPBFConfig(BuilderConfig):
    """Builder configuration for the LPBF dataset.

    Adds no options beyond the base ``BuilderConfig``; it exists as an
    extension point for future dataset variants.
    """

    def __init__(self, **kwargs):
        # Forward everything (name, version, description, ...) unchanged.
        super().__init__(**kwargs)
class LPBFDataset(GeneratorBasedBuilder):
    """HF dataset builder for the LPBF additive manufacturing dataset.

    Each example wraps one serialized PyTorch object loaded from a ``.pt``
    file — presumably a PyG graph, per the comments below; there is no fixed
    feature schema.
    """

    VERSION = "1.0.0"
    BUILDER_CONFIGS = [
        LPBFConfig(
            name="default",
            version=VERSION,
            description="Laser powder bed fusion additive manufacturing dataset",
        )
    ]

    # No fixed schema because we return a PyG graph object
    def _info(self):
        """Return dataset metadata.

        ``features`` is left as ``None`` because each example carries an
        arbitrary deserialized object rather than a fixed tabular schema.
        """
        return DatasetInfo(
            description="Laser powder bed fusion additive manufacturing dataset",
            features=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and point each split at its directory.

        After extraction, the dataset structure should look like::

            LPBF/train/*.pt
            LPBF/test/*.pt
        """
        data_dir = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/vedantpuri/LPBF_FLARE/resolve/main/LPBF.tar.gz"
        )
        # The tar contains a top-level "LPBF/" subdirectory, so the split
        # directories live one level below the extraction root.
        split_train = os.path.join(data_dir, "LPBF", "train")
        split_test = os.path.join(data_dir, "LPBF", "test")
        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"data_dir": split_train}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"data_dir": split_test}),
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(key, example)`` pairs, one per ``.pt`` file in *data_dir*.

        Files are sorted by name so example keys are deterministic across runs.
        """
        files = sorted(f for f in os.listdir(data_dir) if f.endswith(".pt"))
        for idx, fname in enumerate(files):
            path = os.path.join(data_dir, fname)
            # weights_only=False is required: PyG graph objects are arbitrary
            # pickled Python objects, and PyTorch >= 2.6 defaults to
            # weights_only=True, which would refuse to deserialize them.
            # SECURITY NOTE: this unpickles data from a remotely downloaded
            # archive — torch.load with weights_only=False can execute
            # arbitrary code; only use with trusted sources.
            obj = torch.load(path, map_location="cpu", weights_only=False)
            # Return a dict so HF can wrap it as an example
            yield idx, {"graph": obj}
|