marscr84 committed on
Commit
2384fae
·
1 Parent(s): c1f789a

Use compressed CSV files and Git LFS for large files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ *.csv.gz filter=lfs diff=lfs merge=lfs -text
61
+ data/flat/train/flat-training.csv.gz filter=lfs diff=lfs merge=lfs -text
62
+ data/sequential/train/sequential-training.csv.gz filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -35,15 +35,15 @@ Train a generative model that generalizes well, using any open-source tools (Syn
35
 
36
  ## Dataset Description
37
 
38
- This dataset consists of two CSV files used in the MOSTLY AI Prize competition:
39
 
40
  ### Flat Data
41
- - File: `flat-training.csv.gz` (7.4MB)
42
  - 100,000 records
43
  - 80 data columns: 60 numeric, 20 categorical
44
 
45
  ### Sequential Data
46
- - File: `sequential-training.csv.gz` (1.3MB)
47
  - 20,000 groups
48
  - Each group contains 5-10 records
49
  - 11 data columns: 7 numeric, 3 categorical + 1 group ID
@@ -56,12 +56,68 @@ The files are compressed using gzip. You can load them directly using pandas:
56
  import pandas as pd
57
 
58
  # Load flat data
59
- flat_df = pd.read_csv('data/flat-training.csv.gz', compression='gzip')
60
 
61
  # Load sequential data
62
- sequential_df = pd.read_csv('data/sequential-training.csv.gz', compression='gzip')
63
  ```
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  ### Column Description
66
 
67
  Note: Detailed column descriptions are intentionally not provided as part of the competition challenge. The task is to generate synthetic data that preserves the statistical properties of the original data without needing to understand the semantic meaning of each column.
@@ -84,14 +140,12 @@ Your synthetic data generation approach should generalize well to this unseen da
84
 
85
  ## Usage with Hugging Face Datasets
86
 
87
- The dataset can be loaded using the Hugging Face Datasets library:
88
 
89
  ```python
90
  from datasets import load_dataset
91
 
92
- # Load the flat dataset (default)
93
- flat_dataset = load_dataset("mostlyaiprize")
94
- # or explicitly specify the flat config
95
  flat_dataset = load_dataset("mostlyaiprize", "flat")
96
 
97
  # Load the sequential dataset
@@ -100,6 +154,8 @@ sequential_dataset = load_dataset("mostlyaiprize", "sequential")
100
  # Access the data
101
  flat_data = flat_dataset["train"]
102
  sequential_data = sequential_dataset["train"]
 
 
103
  ```
104
 
105
  ## Dataset Schema
@@ -109,103 +165,103 @@ The schema for each dataset is as follows:
109
  ### Flat Dataset Schema (80 columns)
110
  ```python
111
  {
112
- "dog": Value("int64"),
113
- "cat": Value("string"),
114
- "rabbit": Value("string"),
115
- "deer": Value("float32"),
116
- "panda": Value("int64"),
117
- "koala": Value("string"),
118
- "otter": Value("string"),
119
- "hedgehog": Value("float32"),
120
- "squirrel": Value("int64"),
121
- "dolphin": Value("int64"),
122
- "penguin": Value("float32"),
123
- "turtle": Value("float32"),
124
- "elephant": Value("string"),
125
- "giraffe": Value("int64"),
126
- "lamb": Value("string"),
127
- "goat": Value("string"),
128
- "cow": Value("string"),
129
- "horse": Value("string"),
130
- "donkey": Value("string"),
131
- "pony": Value("int64"),
132
- "llama": Value("string"),
133
- "mouse": Value("string"),
134
- "hamster": Value("string"),
135
- "guinea": Value("int64"),
136
- "duck": Value("string"),
137
- "chicken": Value("float32"),
138
- "sparrow": Value("int64"),
139
- "parrot": Value("int64"),
140
- "finch": Value("int64"),
141
- "canary": Value("int64"),
142
- "bee": Value("float32"),
143
- "butterfly": Value("string"),
144
- "ladybug": Value("int64"),
145
- "snail": Value("float32"),
146
- "frog": Value("int64"),
147
- "cricket": Value("int64"),
148
- "tamarin": Value("string"),
149
- "wallaby": Value("string"),
150
- "wombat": Value("int64"),
151
- "zebra": Value("int64"),
152
- "flamingo": Value("float32"),
153
- "peacock": Value("int64"),
154
- "bat": Value("int64"),
155
- "fox": Value("int64"),
156
- "beaver": Value("int64"),
157
- "monkey": Value("int64"),
158
- "seal": Value("int64"),
159
- "robin": Value("int64"),
160
- "loon": Value("string"),
161
- "swan": Value("int64"),
162
- "goldfish": Value("int64"),
163
- "minnow": Value("string"),
164
- "mole": Value("float32"),
165
- "shrew": Value("int64"),
166
- "puffin": Value("float32"),
167
- "owl": Value("int64"),
168
- "bunny": Value("int64"),
169
- "bear": Value("int64"),
170
- "chipmunk": Value("int64"),
171
- "cub": Value("string"),
172
- "acorn": Value("float32"),
173
- "leaf": Value("string"),
174
- "cloud": Value("float32"),
175
- "rainbow": Value("int64"),
176
- "puddle": Value("string"),
177
- "berry": Value("float32"),
178
- "apple": Value("int64"),
179
- "honey": Value("int64"),
180
- "pumpkin": Value("string"),
181
- "teddy": Value("string"),
182
- "blanket": Value("string"),
183
- "button": Value("string"),
184
- "whistle": Value("float32"),
185
- "marble": Value("int64"),
186
- "wagon": Value("string"),
187
- "storybook": Value("string"),
188
- "candle": Value("float32"),
189
- "clover": Value("float32"),
190
- "bubble": Value("int64"),
191
- "cookie": Value("string")
192
  }
193
  ```
194
 
195
  ### Sequential Dataset Schema (11 columns)
196
  ```python
197
  {
198
- "group_id": Value("string"),
199
- "alice": Value("string"),
200
- "david": Value("float32"),
201
- "emily": Value("string"),
202
- "jacob": Value("string"),
203
- "james": Value("float32"),
204
- "john": Value("string"),
205
- "mike": Value("int64"),
206
- "lucas": Value("float32"),
207
- "mary": Value("float32"),
208
- "sarah": Value("float32")
209
  }
210
  ```
211
 
 
35
 
36
  ## Dataset Description
37
 
38
+ This dataset consists of two compressed CSV files used in the MOSTLY AI Prize competition:
39
 
40
  ### Flat Data
41
+ - File: `data/flat/train/flat-training.csv.gz` (7.4MB)
42
  - 100,000 records
43
  - 80 data columns: 60 numeric, 20 categorical
44
 
45
  ### Sequential Data
46
+ - File: `data/sequential/train/sequential-training.csv.gz` (1.3MB)
47
  - 20,000 groups
48
  - Each group contains 5-10 records
49
  - 11 data columns: 7 numeric, 3 categorical + 1 group ID
 
56
  import pandas as pd
57
 
58
  # Load flat data
59
+ flat_df = pd.read_csv('data/flat/train/flat-training.csv.gz', compression='gzip')
60
 
61
  # Load sequential data
62
+ sequential_df = pd.read_csv('data/sequential/train/sequential-training.csv.gz', compression='gzip')
63
  ```
64
 
65
+ ### Dataset Visualizations
66
+
67
+ #### Flat Dataset Visualizations
68
+
69
+ Here's a preview of some data distributions in the flat dataset:
70
+
71
+ <div class="flex flex-col space-y-4">
72
+ <div class="flex flex-row space-x-4">
73
+ <div class="w-1/2">
74
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/flat_dog_hist.png" alt="Distribution of 'dog' values" />
75
+ <p class="text-center">Distribution of 'dog' values</p>
76
+ </div>
77
+ <div class="w-1/2">
78
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/flat_deer_hist.png" alt="Distribution of 'deer' values" />
79
+ <p class="text-center">Distribution of 'deer' values</p>
80
+ </div>
81
+ </div>
82
+ <div class="flex flex-row space-x-4">
83
+ <div class="w-1/2">
84
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/flat_cat_bar.png" alt="Count of 'cat' categories" />
85
+ <p class="text-center">Count of 'cat' categories</p>
86
+ </div>
87
+ <div class="w-1/2">
88
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/flat_correlation.png" alt="Correlation heatmap" />
89
+ <p class="text-center">Correlation heatmap</p>
90
+ </div>
91
+ </div>
92
+ </div>
93
+
94
+ #### Sequential Dataset Visualizations
95
+
96
+ Here's a preview of some data distributions in the sequential dataset:
97
+
98
+ <div class="flex flex-col space-y-4">
99
+ <div class="flex flex-row space-x-4">
100
+ <div class="w-1/2">
101
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/seq_mike_hist.png" alt="Distribution of 'mike' values" />
102
+ <p class="text-center">Distribution of 'mike' values</p>
103
+ </div>
104
+ <div class="w-1/2">
105
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/seq_david_hist.png" alt="Distribution of 'david' values" />
106
+ <p class="text-center">Distribution of 'david' values</p>
107
+ </div>
108
+ </div>
109
+ <div class="flex flex-row space-x-4">
110
+ <div class="w-1/2">
111
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/seq_alice_bar.png" alt="Count of 'alice' categories" />
112
+ <p class="text-center">Count of 'alice' categories</p>
113
+ </div>
114
+ <div class="w-1/2">
115
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/seq_correlation.png" alt="Correlation heatmap" />
116
+ <p class="text-center">Correlation heatmap</p>
117
+ </div>
118
+ </div>
119
+ </div>
120
+
121
  ### Column Description
122
 
123
  Note: Detailed column descriptions are intentionally not provided as part of the competition challenge. The task is to generate synthetic data that preserves the statistical properties of the original data without needing to understand the semantic meaning of each column.
 
140
 
141
  ## Usage with Hugging Face Datasets
142
 
143
+ The dataset can be loaded using the Hugging Face Datasets library directly from the compressed CSV files:
144
 
145
  ```python
146
  from datasets import load_dataset
147
 
148
+ # Load the flat dataset
 
 
149
  flat_dataset = load_dataset("mostlyaiprize", "flat")
150
 
151
  # Load the sequential dataset
 
154
  # Access the data
155
  flat_data = flat_dataset["train"]
156
  sequential_data = sequential_dataset["train"]
157
+
158
+ # Note: Hugging Face Datasets will automatically handle the gzip compression
159
  ```
160
 
161
  ## Dataset Schema
 
165
  ### Flat Dataset Schema (80 columns)
166
  ```python
167
  {
168
+ "dog": "int64",
169
+ "cat": "string",
170
+ "rabbit": "string",
171
+ "deer": "float32",
172
+ "panda": "int64",
173
+ "koala": "string",
174
+ "otter": "string",
175
+ "hedgehog": "float32",
176
+ "squirrel": "int64",
177
+ "dolphin": "int64",
178
+ "penguin": "float32",
179
+ "turtle": "float32",
180
+ "elephant": "string",
181
+ "giraffe": "int64",
182
+ "lamb": "string",
183
+ "goat": "string",
184
+ "cow": "string",
185
+ "horse": "string",
186
+ "donkey": "string",
187
+ "pony": "int64",
188
+ "llama": "string",
189
+ "mouse": "string",
190
+ "hamster": "string",
191
+ "guinea": "int64",
192
+ "duck": "string",
193
+ "chicken": "float32",
194
+ "sparrow": "int64",
195
+ "parrot": "int64",
196
+ "finch": "int64",
197
+ "canary": "int64",
198
+ "bee": "float32",
199
+ "butterfly": "string",
200
+ "ladybug": "int64",
201
+ "snail": "float32",
202
+ "frog": "int64",
203
+ "cricket": "int64",
204
+ "tamarin": "string",
205
+ "wallaby": "string",
206
+ "wombat": "int64",
207
+ "zebra": "int64",
208
+ "flamingo": "float32",
209
+ "peacock": "int64",
210
+ "bat": "int64",
211
+ "fox": "int64",
212
+ "beaver": "int64",
213
+ "monkey": "int64",
214
+ "seal": "int64",
215
+ "robin": "int64",
216
+ "loon": "string",
217
+ "swan": "int64",
218
+ "goldfish": "int64",
219
+ "minnow": "string",
220
+ "mole": "float32",
221
+ "shrew": "int64",
222
+ "puffin": "float32",
223
+ "owl": "int64",
224
+ "bunny": "int64",
225
+ "bear": "int64",
226
+ "chipmunk": "int64",
227
+ "cub": "string",
228
+ "acorn": "float32",
229
+ "leaf": "string",
230
+ "cloud": "float32",
231
+ "rainbow": "int64",
232
+ "puddle": "string",
233
+ "berry": "float32",
234
+ "apple": "int64",
235
+ "honey": "int64",
236
+ "pumpkin": "string",
237
+ "teddy": "string",
238
+ "blanket": "string",
239
+ "button": "string",
240
+ "whistle": "float32",
241
+ "marble": "int64",
242
+ "wagon": "string",
243
+ "storybook": "string",
244
+ "candle": "float32",
245
+ "clover": "float32",
246
+ "bubble": "int64",
247
+ "cookie": "string"
248
  }
249
  ```
250
 
251
  ### Sequential Dataset Schema (11 columns)
252
  ```python
253
  {
254
+ "group_id": "string",
255
+ "alice": "string",
256
+ "david": "float32",
257
+ "emily": "string",
258
+ "jacob": "string",
259
+ "james": "float32",
260
+ "john": "string",
261
+ "mike": "int64",
262
+ "lucas": "float32",
263
+ "mary": "float32",
264
+ "sarah": "float32"
265
  }
266
  ```
267
 
data/flat/train/flat-training.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d1a8f9b8b4e7d211269f37a95283e96c77145165a09bc892a9b178c9f1f8060
3
+ size 7737713
data/sequential/train/sequential-training.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e6c61106dd5a706ed88f646c52bcd010abd105bf617ba46e7de3579cc1bb28e
3
+ size 1374441
dataset_infos.json CHANGED
@@ -1,24 +1,91 @@
1
  {
2
  "flat": {
3
- "description": "This dataset contains the data used in the MOSTLY AI Prize competition.\nThe competition focuses on synthetic data generation and evaluation.\nIt contains two datasets:\n- flat-training.csv.gz: A flat (non-sequential) dataset\n- sequential-training.csv.gz: A sequential dataset",
4
- "citation": "@dataset{mostlyaiprize,\n author = {MOSTLY AI},\n title = {MOSTLY AI Prize Dataset},\n year = {2023},\n url = {https://www.mostlyaiprize.com/},\n}\n",
5
  "homepage": "https://www.mostlyaiprize.com/",
6
  "license": "Apache License 2.0",
7
  "features": {
8
- "dog": {"_type": "Value", "dtype": "int64"},
9
- "cat": {"_type": "Value", "dtype": "string"},
10
- "rabbit": {"_type": "Value", "dtype": "string"},
11
- "deer": {"_type": "Value", "dtype": "float32"},
12
- "panda": {"_type": "Value", "dtype": "int64"},
13
- "koala": {"_type": "Value", "dtype": "string"},
14
- "otter": {"_type": "Value", "dtype": "string"},
15
- "hedgehog": {"_type": "Value", "dtype": "float32"},
16
- "squirrel": {"_type": "Value", "dtype": "int64"},
17
- "dolphin": {"_type": "Value", "dtype": "int64"},
18
- "penguin": {"_type": "Value", "dtype": "int64"},
19
- "turtle": {"_type": "Value", "dtype": "float32"}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  },
21
- "builder_name": "mostlyaiprize",
22
  "config_name": "flat",
23
  "version": {
24
  "version_str": "1.0.0",
@@ -31,30 +98,30 @@
31
  "train": {
32
  "name": "train",
33
  "num_bytes": 7864320,
34
- "num_examples": null,
35
- "dataset_name": "mostlyaiprize"
 
36
  }
37
  }
38
  },
39
  "sequential": {
40
- "description": "This dataset contains the data used in the MOSTLY AI Prize competition.\nThe competition focuses on synthetic data generation and evaluation.\nIt contains two datasets:\n- flat-training.csv.gz: A flat (non-sequential) dataset\n- sequential-training.csv.gz: A sequential dataset",
41
- "citation": "@dataset{mostlyaiprize,\n author = {MOSTLY AI},\n title = {MOSTLY AI Prize Dataset},\n year = {2023},\n url = {https://www.mostlyaiprize.com/},\n}\n",
42
  "homepage": "https://www.mostlyaiprize.com/",
43
  "license": "Apache License 2.0",
44
  "features": {
45
- "group_id": {"_type": "Value", "dtype": "string"},
46
- "alice": {"_type": "Value", "dtype": "string"},
47
- "david": {"_type": "Value", "dtype": "float32"},
48
- "emily": {"_type": "Value", "dtype": "string"},
49
- "jacob": {"_type": "Value", "dtype": "string"},
50
- "james": {"_type": "Value", "dtype": "float32"},
51
- "john": {"_type": "Value", "dtype": "string"},
52
- "mike": {"_type": "Value", "dtype": "int64"},
53
- "lucas": {"_type": "Value", "dtype": "float32"},
54
- "mary": {"_type": "Value", "dtype": "float32"},
55
- "sarah": {"_type": "Value", "dtype": "float32"}
56
  },
57
- "builder_name": "mostlyaiprize",
58
  "config_name": "sequential",
59
  "version": {
60
  "version_str": "1.0.0",
@@ -67,8 +134,9 @@
67
  "train": {
68
  "name": "train",
69
  "num_bytes": 1363149,
70
- "num_examples": null,
71
- "dataset_name": "mostlyaiprize"
 
72
  }
73
  }
74
  }
 
1
  {
2
  "flat": {
3
+ "description": "This dataset contains the data used in the MOSTLY AI Prize competition.\nThe competition focuses on synthetic data generation and evaluation.\nIt contains two datasets:\n- flat-training.csv.gz: A flat (non-sequential) dataset with 100,000 records and 80 columns\n- sequential-training.csv.gz: A sequential dataset with 20,000 groups and 11 columns",
4
+ "citation": "@dataset{mostlyaiprize,\n author = {MOSTLY AI},\n title = {MOSTLY AI Prize Dataset},\n year = {2025},\n url = {https://www.mostlyaiprize.com/},\n}\n",
5
  "homepage": "https://www.mostlyaiprize.com/",
6
  "license": "Apache License 2.0",
7
  "features": {
8
+ "dog": {"dtype": "int64"},
9
+ "cat": {"dtype": "string"},
10
+ "rabbit": {"dtype": "string"},
11
+ "deer": {"dtype": "float32"},
12
+ "panda": {"dtype": "int64"},
13
+ "koala": {"dtype": "string"},
14
+ "otter": {"dtype": "string"},
15
+ "hedgehog": {"dtype": "float32"},
16
+ "squirrel": {"dtype": "int64"},
17
+ "dolphin": {"dtype": "int64"},
18
+ "penguin": {"dtype": "float32"},
19
+ "turtle": {"dtype": "float32"},
20
+ "elephant": {"dtype": "string"},
21
+ "giraffe": {"dtype": "int64"},
22
+ "lamb": {"dtype": "string"},
23
+ "goat": {"dtype": "string"},
24
+ "cow": {"dtype": "string"},
25
+ "horse": {"dtype": "string"},
26
+ "donkey": {"dtype": "string"},
27
+ "pony": {"dtype": "int64"},
28
+ "llama": {"dtype": "string"},
29
+ "mouse": {"dtype": "string"},
30
+ "hamster": {"dtype": "string"},
31
+ "guinea": {"dtype": "int64"},
32
+ "duck": {"dtype": "string"},
33
+ "chicken": {"dtype": "float32"},
34
+ "sparrow": {"dtype": "int64"},
35
+ "parrot": {"dtype": "int64"},
36
+ "finch": {"dtype": "int64"},
37
+ "canary": {"dtype": "int64"},
38
+ "bee": {"dtype": "float32"},
39
+ "butterfly": {"dtype": "string"},
40
+ "ladybug": {"dtype": "int64"},
41
+ "snail": {"dtype": "float32"},
42
+ "frog": {"dtype": "int64"},
43
+ "cricket": {"dtype": "int64"},
44
+ "tamarin": {"dtype": "string"},
45
+ "wallaby": {"dtype": "string"},
46
+ "wombat": {"dtype": "int64"},
47
+ "zebra": {"dtype": "int64"},
48
+ "flamingo": {"dtype": "float32"},
49
+ "peacock": {"dtype": "int64"},
50
+ "bat": {"dtype": "int64"},
51
+ "fox": {"dtype": "int64"},
52
+ "beaver": {"dtype": "int64"},
53
+ "monkey": {"dtype": "int64"},
54
+ "seal": {"dtype": "int64"},
55
+ "robin": {"dtype": "int64"},
56
+ "loon": {"dtype": "string"},
57
+ "swan": {"dtype": "int64"},
58
+ "goldfish": {"dtype": "int64"},
59
+ "minnow": {"dtype": "string"},
60
+ "mole": {"dtype": "float32"},
61
+ "shrew": {"dtype": "int64"},
62
+ "puffin": {"dtype": "float32"},
63
+ "owl": {"dtype": "int64"},
64
+ "bunny": {"dtype": "int64"},
65
+ "bear": {"dtype": "int64"},
66
+ "chipmunk": {"dtype": "int64"},
67
+ "cub": {"dtype": "string"},
68
+ "acorn": {"dtype": "float32"},
69
+ "leaf": {"dtype": "string"},
70
+ "cloud": {"dtype": "float32"},
71
+ "rainbow": {"dtype": "int64"},
72
+ "puddle": {"dtype": "string"},
73
+ "berry": {"dtype": "float32"},
74
+ "apple": {"dtype": "int64"},
75
+ "honey": {"dtype": "int64"},
76
+ "pumpkin": {"dtype": "string"},
77
+ "teddy": {"dtype": "string"},
78
+ "blanket": {"dtype": "string"},
79
+ "button": {"dtype": "string"},
80
+ "whistle": {"dtype": "float32"},
81
+ "marble": {"dtype": "int64"},
82
+ "wagon": {"dtype": "string"},
83
+ "storybook": {"dtype": "string"},
84
+ "candle": {"dtype": "float32"},
85
+ "clover": {"dtype": "float32"},
86
+ "bubble": {"dtype": "int64"},
87
+ "cookie": {"dtype": "string"}
88
  },
 
89
  "config_name": "flat",
90
  "version": {
91
  "version_str": "1.0.0",
 
98
  "train": {
99
  "name": "train",
100
  "num_bytes": 7864320,
101
+ "num_examples": 100000,
102
+ "dataset_name": "mostlyaiprize",
103
+ "file_format": "gz"
104
  }
105
  }
106
  },
107
  "sequential": {
108
+ "description": "This dataset contains the data used in the MOSTLY AI Prize competition.\nThe competition focuses on synthetic data generation and evaluation.\nIt contains two datasets:\n- flat-training.csv.gz: A flat (non-sequential) dataset with 100,000 records and 80 columns\n- sequential-training.csv.gz: A sequential dataset with 20,000 groups and 11 columns",
109
+ "citation": "@dataset{mostlyaiprize,\n author = {MOSTLY AI},\n title = {MOSTLY AI Prize Dataset},\n year = {2025},\n url = {https://www.mostlyaiprize.com/},\n}\n",
110
  "homepage": "https://www.mostlyaiprize.com/",
111
  "license": "Apache License 2.0",
112
  "features": {
113
+ "group_id": {"dtype": "string"},
114
+ "alice": {"dtype": "string"},
115
+ "david": {"dtype": "float32"},
116
+ "emily": {"dtype": "string"},
117
+ "jacob": {"dtype": "string"},
118
+ "james": {"dtype": "float32"},
119
+ "john": {"dtype": "string"},
120
+ "mike": {"dtype": "int64"},
121
+ "lucas": {"dtype": "float32"},
122
+ "mary": {"dtype": "float32"},
123
+ "sarah": {"dtype": "float32"}
124
  },
 
125
  "config_name": "sequential",
126
  "version": {
127
  "version_str": "1.0.0",
 
134
  "train": {
135
  "name": "train",
136
  "num_bytes": 1363149,
137
+ "num_examples": 140000,
138
+ "dataset_name": "mostlyaiprize",
139
+ "file_format": "gz"
140
  }
141
  }
142
  }
mostlyaiprize.py DELETED
@@ -1,259 +0,0 @@
1
- import os
2
- import pandas as pd
3
- import datasets
4
- from datasets import Dataset, DatasetDict, Features, Value
5
-
6
- _CITATION = """
7
- @dataset{mostlyaiprize,
8
- author = {MOSTLY AI},
9
- title = {MOSTLY AI Prize Dataset},
10
- year = {2025},
11
- url = {https://www.mostlyaiprize.com/},
12
- }
13
- """
14
-
15
- _DESCRIPTION = """
16
- This dataset contains the data used in the MOSTLY AI Prize competition.
17
- The competition focuses on synthetic data generation and evaluation.
18
- It contains two datasets:
19
- - flat-training.csv.gz: A flat (non-sequential) dataset with 100,000 records and 80 columns (60 numeric, 20 categorical)
20
- - sequential-training.csv.gz: A sequential dataset with 20,000 groups and 11 columns
21
- """
22
-
23
- _HOMEPAGE = "https://www.mostlyaiprize.com/"
24
- _LICENSE = "Apache License 2.0"
25
-
26
- # Define the features for each dataset
27
- _FLAT_FEATURES = {
28
- "dog": Value("int64"),
29
- "cat": Value("string"),
30
- "rabbit": Value("string"),
31
- "deer": Value("float32"),
32
- "panda": Value("int64"),
33
- "koala": Value("string"),
34
- "otter": Value("string"),
35
- "hedgehog": Value("float32"),
36
- "squirrel": Value("int64"),
37
- "dolphin": Value("int64"),
38
- "penguin": Value("float32"),
39
- "turtle": Value("float32"),
40
- "elephant": Value("string"),
41
- "giraffe": Value("int64"),
42
- "lamb": Value("string"),
43
- "goat": Value("string"),
44
- "cow": Value("string"),
45
- "horse": Value("string"),
46
- "donkey": Value("string"),
47
- "pony": Value("int64"),
48
- "llama": Value("string"),
49
- "mouse": Value("string"),
50
- "hamster": Value("string"),
51
- "guinea": Value("int64"),
52
- "duck": Value("string"),
53
- "chicken": Value("float32"),
54
- "sparrow": Value("int64"),
55
- "parrot": Value("int64"),
56
- "finch": Value("int64"),
57
- "canary": Value("int64"),
58
- "bee": Value("float32"),
59
- "butterfly": Value("string"),
60
- "ladybug": Value("int64"),
61
- "snail": Value("float32"),
62
- "frog": Value("int64"),
63
- "cricket": Value("int64"),
64
- "tamarin": Value("string"),
65
- "wallaby": Value("string"),
66
- "wombat": Value("int64"),
67
- "zebra": Value("int64"),
68
- "flamingo": Value("float32"),
69
- "peacock": Value("int64"),
70
- "bat": Value("int64"),
71
- "fox": Value("int64"),
72
- "beaver": Value("int64"),
73
- "monkey": Value("int64"),
74
- "seal": Value("int64"),
75
- "robin": Value("int64"),
76
- "loon": Value("string"),
77
- "swan": Value("int64"),
78
- "goldfish": Value("int64"),
79
- "minnow": Value("string"),
80
- "mole": Value("float32"),
81
- "shrew": Value("int64"),
82
- "puffin": Value("float32"),
83
- "owl": Value("int64"),
84
- "bunny": Value("int64"),
85
- "bear": Value("int64"),
86
- "chipmunk": Value("int64"),
87
- "cub": Value("string"),
88
- "acorn": Value("float32"),
89
- "leaf": Value("string"),
90
- "cloud": Value("float32"),
91
- "rainbow": Value("int64"),
92
- "puddle": Value("string"),
93
- "berry": Value("float32"),
94
- "apple": Value("int64"),
95
- "honey": Value("int64"),
96
- "pumpkin": Value("string"),
97
- "teddy": Value("string"),
98
- "blanket": Value("string"),
99
- "button": Value("string"),
100
- "whistle": Value("float32"),
101
- "marble": Value("int64"),
102
- "wagon": Value("string"),
103
- "storybook": Value("string"),
104
- "candle": Value("float32"),
105
- "clover": Value("float32"),
106
- "bubble": Value("int64"),
107
- "cookie": Value("string")
108
- }
109
-
110
- _SEQUENTIAL_FEATURES = {
111
- "group_id": Value("string"),
112
- "alice": Value("string"),
113
- "david": Value("float32"),
114
- "emily": Value("string"),
115
- "jacob": Value("string"),
116
- "james": Value("float32"),
117
- "john": Value("string"),
118
- "mike": Value("int64"),
119
- "lucas": Value("float32"),
120
- "mary": Value("float32"),
121
- "sarah": Value("float32")
122
- }
123
-
124
- class MostlyAIPrizeConfig(datasets.BuilderConfig):
125
- """BuilderConfig for MOSTLY AI Prize dataset."""
126
-
127
- def __init__(self, features, data_file, **kwargs):
128
- """BuilderConfig for MOSTLY AI Prize.
129
- Args:
130
- features: Features of the dataset
131
- data_file: The data file to load
132
- **kwargs: keyword arguments forwarded to super.
133
- """
134
- super(MostlyAIPrizeConfig, self).__init__(**kwargs)
135
- self.features = features
136
- self.data_file = data_file
137
-
138
- class MostlyAIPrize(datasets.GeneratorBasedBuilder):
139
- """MOSTLY AI Prize dataset for synthetic data generation competition."""
140
-
141
- VERSION = datasets.Version("1.0.0")
142
-
143
- BUILDER_CONFIGS = [
144
- MostlyAIPrizeConfig(
145
- name="flat",
146
- description="Flat dataset with 100,000 records and 80 columns (60 numeric, 20 categorical)",
147
- features=_FLAT_FEATURES,
148
- data_file="flat-training.csv.gz",
149
- ),
150
- MostlyAIPrizeConfig(
151
- name="sequential",
152
- description="Sequential dataset with 20,000 groups and 11 columns",
153
- features=_SEQUENTIAL_FEATURES,
154
- data_file="sequential-training.csv.gz",
155
- ),
156
- ]
157
-
158
- DEFAULT_CONFIG_NAME = "flat"
159
-
160
- def _info(self):
161
- return datasets.DatasetInfo(
162
- description=_DESCRIPTION,
163
- features=Features(self.config.features),
164
- supervised_keys=None,
165
- homepage=_HOMEPAGE,
166
- license=_LICENSE,
167
- citation=_CITATION,
168
- )
169
-
170
- def _split_generators(self, dl_manager):
171
- data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
172
- data_file = os.path.join(data_dir, self.config.data_file)
173
-
174
- return [
175
- datasets.SplitGenerator(
176
- name=datasets.Split.TRAIN,
177
- gen_kwargs={
178
- "filepath": data_file,
179
- },
180
- ),
181
- ]
182
-
183
- def _generate_examples(self, filepath):
184
- """Generate examples from the dataset file."""
185
- df = pd.read_csv(filepath, compression="gzip")
186
-
187
- for idx, row in df.iterrows():
188
- yield idx, {col: row[col] for col in self.config.features}
189
-
190
- # Add a method to provide dataset visualization information
191
- @classmethod
192
- def get_visualization_config(cls, config_name="flat"):
193
- """Return configuration for dataset visualization on Hugging Face.
194
-
195
- This helps enhance the dataset preview with more than just a flat table.
196
- """
197
- if config_name == "flat":
198
- return {
199
- "type": "table-and-charts",
200
- "charts": [
201
- {
202
- "type": "histogram",
203
- "column": "dog",
204
- "title": "Distribution of 'dog' values"
205
- },
206
- {
207
- "type": "histogram",
208
- "column": "deer",
209
- "title": "Distribution of 'deer' values"
210
- },
211
- {
212
- "type": "histogram",
213
- "column": "chicken",
214
- "title": "Distribution of 'chicken' values"
215
- },
216
- {
217
- "type": "bar",
218
- "column": "cat",
219
- "title": "Count of 'cat' categories"
220
- },
221
- {
222
- "type": "bar",
223
- "column": "koala",
224
- "title": "Count of 'koala' categories"
225
- }
226
- ]
227
- }
228
- elif config_name == "sequential":
229
- return {
230
- "type": "table-and-charts",
231
- "charts": [
232
- {
233
- "type": "histogram",
234
- "column": "mike",
235
- "title": "Distribution of 'mike' values"
236
- },
237
- {
238
- "type": "histogram",
239
- "column": "david",
240
- "title": "Distribution of 'david' values"
241
- },
242
- {
243
- "type": "histogram",
244
- "column": "james",
245
- "title": "Distribution of 'james' values"
246
- },
247
- {
248
- "type": "bar",
249
- "column": "alice",
250
- "title": "Count of 'alice' categories"
251
- },
252
- {
253
- "type": "bar",
254
- "column": "john",
255
- "title": "Count of 'john' categories"
256
- }
257
- ]
258
- }
259
- return {"type": "table"}