# frozen_lake/frozen_lake.py
# Provenance (from the Hugging Face Hub page this file was copied from):
#   author: NathanGavenski
#   commit: 62e87dc (verified) — "Add back the argument and passing the data
#   to _generate_examples"
#   size: 2.22 kB
import json
import tarfile
from os import listdir, makedirs
from os.path import basename, isfile, join, splitext

import datasets
from datasets import DatasetInfo, GeneratorBasedBuilder, Split, SplitGenerator
_CITATION = ""
_DESCRIPTION = "Expert Dataset for the Frozen Lake Custom Environment. See paper for more details."
_HOMEPAGE = "https://huggingface.co/datasets/Ptisni/frozen_lake"
_LICENSE = ""
_REPO = "https://huggingface.co/datasets/Ptisni/frozen_lake"
class ImageSet(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"obs": datasets.Value("string"),
"actions": datasets.Value("int32"),
"rewards": datasets.Value("float32"),
"episode_starts": datasets.Value("bool"),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
info_path = dl_manager.download_and_extract(f"{_REPO}/resolve/main/fl_dataset.tar.gz")
image_path = dl_manager.download_and_extract(f"{_REPO}/resolve/main/images.tar.gz")
return [
SplitGenerator(
name=Split.TRAIN,
gen_kwargs={
"images_paths": f"{image_path}/images",
"infos": f"{info_path}/dataset/FrozenLakeExpert/teacher.jsonl"
}
)
]
def _generate_examples(self, images_paths, infos):
images = [join(images_paths, f) for f in listdir(images_paths) if isfile(join(images_paths, f))]
images_dict = {}
for image in images:
images_dict[image.split("/")[-1].split(".")[0]] = image
with open(infos, encoding="utf-8") as data:
for idx, line in enumerate(data):
record = json.loads(line)
index = record["obs"].split(".")[0]
yield idx, {
"obs": images_dict[index],
"actions": record["actions"],
"rewards": record["rewards"],
"episode_starts": record["episode_starts"],
}