"""Hugging Face `datasets` loading script for an electricity pricing dataset.

The train/validation CSV splits are hosted under the
`arthuractivemodeling/sandbox-data` GitHub repository (see `_URL` below).
"""

import datasets
import logging

import pandas as pd

from typing import List

logger = logging.getLogger(__name__)

_CITATION = "TBD"

_DESCRIPTION = "TBD"

_HOMEPAGE = "TBD"

_LICENSE = "TBD"

_URL = "https://raw.githubusercontent.com/arthuractivemodeling/sandbox-data/main/electricity/data/"

_URLS = {
    "train": _URL + "electricity_train.csv",
    "val": _URL + "electricity_val.csv",
}


class Electricity(datasets.GeneratorBasedBuilder):
    """Builder for the electricity train/validation CSV splits."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="TBD")
    ]

    DEFAULT_CONFIG_NAME = "default"

    # Input columns and the ground-truth column.
    FEATURES = ["date", "day", "period", "nswprice", "nswdemand", "vicprice", "vicdemand", "transfer"]
    GROUND_TRUTH = ["price_increase"]

    # Column groups by dtype, used to build the feature schema in `_info`.
    FLOAT_INPUTS = ["date", "period", "nswprice", "nswdemand", "vicprice", "vicdemand", "transfer"]
    INT_INPUTS = ["day"]

    def _info(self):
        features = datasets.Features(
            {
                **{f: datasets.Value("float64") for f in self.FLOAT_INPUTS},
                **{f: datasets.Value("int64") for f in self.INT_INPUTS},
                **{f: datasets.Value("int64") for f in self.GROUND_TRUTH},
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download (and cache) both CSVs once, then pass the local paths on to
        # `_generate_examples` instead of re-fetching from `_URL` there.
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
        ]

    def _generate_examples(self, filepath):
        # `filepath` is the local cached path returned by the download manager.
        data = pd.read_csv(filepath, index_col=0)
        for row_id, row in data.iterrows():
            yield row_id, dict(row)
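

# A minimal smoke test, not part of the builder itself: this sketch assumes
# the script is saved as a standalone file (e.g. `electricity.py`) and that
# the remote CSVs under `_URL` are reachable. Loading a local script path this
# way relies on script-based dataset support in the `datasets` library.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__)
    print(ds)
    print(ds["train"][0])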