Repcak2000 committed on
Commit
412792d
·
1 Parent(s): 26f8116

Add train test split, benchmark split

Browse files
airbnb_multicity.py CHANGED
@@ -4,26 +4,43 @@ import pyarrow.parquet as pq
4
 
5
 
6
  DESCRIPTION = "The dataset contains Airbnb data from 80 capitals and major cities all around the world."
7
- DATA_URL="https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet"
 
 
 
8
 
9
  class AirbnbDatasetConfig(datasets.BuilderConfig):
10
  """BuilderConfig """
11
 
12
- def __init__(self, **kwargs):
13
  """BuilderConfig.
14
  Args:
15
  **kwargs: keyword arguments forwarded to super.
16
  """
17
  super(AirbnbDatasetConfig, self).__init__(**kwargs)
 
18
 
19
 
20
  class AirbnbDataset(datasets.ArrowBasedBuilder):
21
  BUILDER_CONFIG_CLASS = AirbnbDatasetConfig
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
  def _info(self):
24
  return datasets.DatasetInfo(
25
  # This is the description that will appear on the datasets page.
26
- description=DESCRIPTION,
27
  homepage="https://insideairbnb.com/",
28
  citation="",
29
  # This defines the different columns of the dataset and their types
@@ -53,15 +70,18 @@ class AirbnbDataset(datasets.ArrowBasedBuilder):
53
 
54
 
55
  def _split_generators(self, dl_manager: datasets.download.DownloadManager):
56
- downloaded_files = dl_manager.download(DATA_URL)
57
- return [
58
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files})
59
- ]
60
 
 
 
 
 
61
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
62
  def _generate_tables(self, filepath):
63
  with open(filepath, mode="rb") as f:
64
  parquet_file = pq.ParquetFile(source=filepath)
65
  for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
66
- pa_table = pa.Table.from_batches([record_batch])
 
 
67
  yield f"{batch_idx}", pa_table
 
4
 
5
 
6
  DESCRIPTION = "The dataset contains Airbnb data from 80 capitals and major cities all around the world."
7
+ # DATA_URL="https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet"
8
+
9
+ DATA_DIRS = ["benchmark", "all"]
10
+
11
 
12
class AirbnbDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Airbnb multicity dataset.

    Extends ``datasets.BuilderConfig`` with the per-split download URLs
    used by ``_split_generators``.
    """

    def __init__(self, data_url=None, **kwargs):
        """BuilderConfig.

        Args:
            data_url: mapping of split name ("train" / "test") to the URL of
                the parquet file for that split. Defaults to ``None`` so the
                config can still be constructed without URLs (backward
                compatible with the previous required-positional form).
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``description``).
        """
        super().__init__(**kwargs)
        self.data_url = data_url
22
 
23
 
24
class AirbnbDataset(datasets.ArrowBasedBuilder):
    """Arrow-based builder for the Airbnb multicity dataset.

    Exposes one config per entry in ``DATA_DIRS``: "benchmark" (the official
    train/test benchmark subset) and "all" (every city).
    """

    BUILDER_CONFIG_CLASS = AirbnbDatasetConfig
    DEFAULT_CONFIG_NAME = "benchmark"

    # One config per data directory; each points at its own train/test
    # parquet files on the Hub.
    # Fix: "Datatset" typo in the user-facing description corrected to
    # "Dataset".
    BUILDER_CONFIGS = [
        AirbnbDatasetConfig(
            name=dir_name,
            description=(
                "This is the official train test split for Airbnb Dataset "
                "in h3 resolution = 8. Benchmark cities are: Paris, London, "
                "Rome, Melbourne, New York City, Amsterdam." + DESCRIPTION
            ),
            data_url={
                "train": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/{dir_name}/airbnb_train.parquet",
                "test": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/{dir_name}/airbnb_test.parquet"
            }
        )
        for dir_name in DATA_DIRS
    ]
39
 
40
  def _info(self):
41
  return datasets.DatasetInfo(
42
  # This is the description that will appear on the datasets page.
43
+ description=self.config.description,
44
  homepage="https://insideairbnb.com/",
45
  citation="",
46
  # This defines the different columns of the dataset and their types
 
70
 
71
 
72
  def _split_generators(self, dl_manager: datasets.download.DownloadManager):
73
+ downloaded_files = dl_manager.download(self.config.data_url)
 
 
 
74
 
75
+ return [
76
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files["train"]}),
77
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files["test"]})
78
+ ]
79
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
80
  def _generate_tables(self, filepath):
81
  with open(filepath, mode="rb") as f:
82
  parquet_file = pq.ParquetFile(source=filepath)
83
  for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
84
+ df = record_batch.to_pandas()
85
+ df.reset_index(drop=True, inplace=True)
86
+ pa_table = pa.Table.from_pandas(df)
87
  yield f"{batch_idx}", pa_table
data/all/airbnb_test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:321df503893d2d07d49a0d776f1cfe1b0cfc6df664d6909bee89bad4972f54d2
3
+ size 49758512
data/all/airbnb_train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41f9754acce3d42c509294ccddca707be57dc74d5e3eeda99974012c8c649e4d
3
+ size 199355005
data/benchmark/airbnb_test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62e71598833914a3a19967b49d89d1995fdd4cb93e51744ea9fabcf2538eacda
3
+ size 10729670
data/benchmark/airbnb_train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61c480b8839a307c711eaebe6b4c612cf7fb57376fddee0d714795664f2bf8dd
3
+ size 39544306