# geolife/geolife.py
# Author: Julia Moska
# Last change: "changed trajectories_ids" (commit bf759a3)
import datasets
import pyarrow as pa
import pyarrow.parquet as pq
_URLS = {"geolife": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/geolife.parquet"}
DESCRIPTION = "This GPS trajectory dataset was collected in (Microsoft Research Asia) Geolife project by 182 users in a period of \
over five years (from April 2007 to August 2012). A GPS trajectory of this dataset is represented by sequence of time-stamped points \
each of which contains the information of altitude, longitude, latitude. This dataset contains 17,784 trajectories, ~25M Points \
with a total distance of 1,292,951 kilometers and a total duration of 50,176 hours. \
These trajectories were recorded by different GPS loggers and GPS phones, and have a variety of sampling rates. \
91.5 percent of the trajectories are logged in a dense representation, e.g. every 1~5 seconds or every 5~10 meters per point.\
latitude: Latitude in decimal degrees.\
longitude: Longitude in decimal degrees.\
altitude: Altitude in feet (-777 if not valid).\
time: Date and time as a string.\
mode: Way of transportation e.g. walk, taxi.\
trajectory_id: ID of trajectory that Point belongs to.\
user_id: ID of user that reported Point. \
crs: WGS 84"
CITATION = "[1] Yu Zheng, Lizhu Zhang, Xing Xie, Wei-Ying Ma. Mining interesting locations and travel sequences from GPS trajectories. In \
Proceedings of International conference on World Wild Web (WWW 2009), Madrid Spain. ACM Press: 791-800.\n\
[2] Yu Zheng, Quannan Li, Yukun Chen, Xing Xie, Wei-Ying Ma. Understanding Mobility Based on GPS Data. In Proceedings of \
ACM conference on Ubiquitous Computing (UbiComp 2008), Seoul, Korea. ACM Press: 312-321.\n\
[3] Yu Zheng, Xing Xie, Wei-Ying Ma, GeoLife: A Collaborative Social Networking Service among User, \
location and trajectory. Invited paper, in IEEE Data Engineering Bulletin. 33, 2, 2010, pp. 32-40."
class GeolifeDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Geolife dataset.

    Extends ``datasets.BuilderConfig`` with the download location(s) of the
    parquet file(s) backing this configuration.
    """

    def __init__(self, data_url, **kwargs):
        """Create a Geolife builder configuration.

        Args:
            data_url: Mapping of split name (e.g. ``"train"``, ``"test"``)
                to the URL of the parquet file for that split.
            **kwargs: Keyword arguments forwarded to
                ``datasets.BuilderConfig`` (``name``, ``description``, ...).
        """
        # Modern zero-argument super() — equivalent to the dated
        # super(GeolifeDatasetConfig, self) form.
        super().__init__(**kwargs)
        self.data_url = data_url
class GeolifeDataset(datasets.ArrowBasedBuilder):
    """Arrow-based builder for the Geolife GPS trajectory dataset.

    Three configurations are exposed:
      * ``all`` — the full raw trajectory set (train split only),
      * ``TTE`` — official train/test split for Travel Time Estimation,
      * ``HMP`` — official train/test split for Human Mobility Prediction
        (the default configuration).
    """

    BUILDER_CONFIG_CLASS = GeolifeDatasetConfig
    DEFAULT_CONFIG_NAME = "HMP"
    BUILDER_CONFIGS = [
        GeolifeDatasetConfig(
            name="all",
            description="GPS trajectories that were collected in (Microsoft Research Asia) Geolife project by 182 users",
            data_url={"train": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/raw/raw.parquet"},
        ),
        GeolifeDatasetConfig(
            name="TTE",
            description="Official train-test split of Geolife dataset for Travel Time Estimation task. GPS trajectories that were collected in (Microsoft Research Asia) Geolife project by 182 users",
            data_url={"train": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/tte/train.parquet",
                      "test": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/tte/test.parquet"},
        ),
        GeolifeDatasetConfig(
            name="HMP",
            description="Official train-test split of Geolife dataset for Human Mobility Prediction task. GPS trajectories that were collected in (Microsoft Research Asia) Geolife project by 182 users",
            # NOTE(review): HMP config points at the "hmc" directory —
            # presumably intentional on the hosting side; verify upstream.
            data_url={"train": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/hmc/train.parquet",
                      "test": "https://huggingface.co/datasets/kraina/geolife/resolve/main/data/hmc/test.parquet"},
        ),
    ]

    def _info(self):
        """Return dataset metadata (description, citation, declared features)."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=DESCRIPTION,
            homepage="http://research.microsoft.com/en-us/people/yuzheng/default.aspx",
            citation=CITATION,
            # This defines the different columns of the dataset and their types.
            # Only trajectory_id is declared; the remaining parquet columns are
            # kept here commented out as a reference of the full schema.
            features=datasets.Features(
                {
                    # "latitude": datasets.Value(dtype="float64"),
                    # "longitude": datasets.Value(dtype="float64"),
                    # "altitude": datasets.Value(dtype="float64"),
                    # "time": datasets.Value(dtype="string"),
                    # "timestamp": datasets.Value(dtype="int64"),
                    # "mode": datasets.Value(dtype="string"),
                    "trajectory_id": datasets.Value(dtype="string"),
                    # "user_id": datasets.Value(dtype="string"),
                    # These are the features of your dataset like images, labels ...
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        """Download the config's parquet file(s) and declare the splits.

        The "all" config has only a train split; TTE and HMP additionally
        provide an official test split.
        """
        downloaded_files = dl_manager.download(self.config.data_url)
        if self.config.name == "all":
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files["train"]})
            ]
        else:
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files["train"]}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files["test"]})
            ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_tables(self, filepath):
        """Yield ``(key, pyarrow.Table)`` pairs, one per parquet record batch.

        Args:
            filepath: Local path of the downloaded parquet file.
        """
        # Fix: the original opened `f` but then re-opened the path via
        # pq.ParquetFile(source=filepath), leaving the handle unused; pass
        # the already-open file object instead so the `with` block actually
        # scopes the file's lifetime.
        with open(filepath, mode="rb") as f:
            parquet_file = pq.ParquetFile(source=f)
            for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                # Round-trip through pandas to normalize the batch index
                # before rebuilding an Arrow table.
                df = record_batch.to_pandas()
                df.reset_index(drop=True, inplace=True)
                pa_table = pa.Table.from_pandas(df)
                yield f"{batch_idx}", pa_table