cestwc commited on
Commit
6c75fac
·
verified ·
1 Parent(s): 1d79be0

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +106 -0
README.md CHANGED
@@ -170,3 +170,109 @@ configs:
170
  - split: test
171
  path: data/test-*
172
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
  - split: test
171
  path: data/test-*
172
  ---
173
+
174
+ # Dataset Card for Census Income (Adult)
175
+
176
+ <!-- Provide a quick summary of the dataset. -->
177
+
178
+ This dataset is a precise version of [Adult](https://archive.ics.uci.edu/dataset/2/adult) or [Census Income](https://archive.ics.uci.edu/dataset/20/census+income). The UCI repository lists this dataset under two separate links, but we checked and confirmed that the two are identical.
179
+
180
+
181
+ We used the following Python script to create this Hugging Face dataset.
182
+ ```python
183
+ import pandas as pd
184
+ from datasets import Dataset, DatasetDict, Features, Value, ClassLabel
185
+
186
+ # URLs
187
+ url1 = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
188
+ url2 = "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
189
+
190
+ # Column names
191
+ columns = [
192
+ "age", "workclass", "fnlwgt", "education", "education-num", "marital-status",
193
+ "occupation", "relationship", "race", "sex", "capital-gain", "capital-loss",
194
+ "hours-per-week", "native-country", "income"
195
+ ]
196
+
197
+
198
+ # Load datasets
199
+ df_train = pd.read_csv(url1, names=columns, skipinitialspace=True)
200
+ df_test = pd.read_csv(url2, names=columns, skipinitialspace=True, skiprows=1)
201
+
202
+ # Convert continuous columns to float
203
+ continuous_columns = ["age", "fnlwgt", "education-num", "capital-gain", "capital-loss", "hours-per-week"]
204
+ for col in continuous_columns:
205
+ df_train[col] = pd.to_numeric(df_train[col], errors='coerce')
206
+ df_test[col] = pd.to_numeric(df_test[col], errors='coerce')
207
+
208
+ df_test['income'] = df_test['income'].str.rstrip('.') # Critical: labels in the test split carry a trailing period (e.g. '>50K.'); strip it so they match the train labels.
209
+
210
+ # Define categorical columns
211
+ categorical_columns = [
212
+ "workclass", "education", "marital-status", "occupation", "relationship",
213
+ "race", "sex", "native-country", "income"
214
+ ]
215
+
216
+ # Dictionary to store category mappings
217
+ category_mappings = {}
218
+
219
+ for col in categorical_columns:
220
+ # Convert train column to category and extract categories
221
+ df_train[col] = df_train[col].astype("category")
222
+ category_mappings[col] = df_train[col].cat.categories.to_list() # Store category order
223
+
224
+ # Apply the same category mapping to test
225
+ df_test[col] = pd.Categorical(df_test[col], categories=category_mappings[col])
226
+
227
+ # Convert to integer codes
228
+ df_train[col] = df_train[col].cat.codes
229
+ df_test[col] = df_test[col].cat.codes
230
+
231
+ # Define Hugging Face dataset schema
232
+ hf_features = Features({
233
+ "age": Value("int64"),
234
+ "workclass": ClassLabel(names=category_mappings["workclass"]),
235
+ "fnlwgt": Value("int64"),
236
+ "education": ClassLabel(names=category_mappings["education"]),
237
+ "education-num": Value("int64"),
238
+ "marital-status": ClassLabel(names=category_mappings["marital-status"]),
239
+ "occupation": ClassLabel(names=category_mappings["occupation"]),
240
+ "relationship": ClassLabel(names=category_mappings["relationship"]),
241
+ "race": ClassLabel(names=category_mappings["race"]),
242
+ "sex": ClassLabel(names=category_mappings["sex"]),
243
+ "capital-gain": Value("int64"),
244
+ "capital-loss": Value("int64"),
245
+ "hours-per-week": Value("int64"),
246
+ "native-country": ClassLabel(names=category_mappings["native-country"]),
247
+ "income": ClassLabel(names=category_mappings["income"])
248
+ })
249
+
250
+ # Convert pandas DataFrame to Hugging Face Dataset
251
+ hf_train = Dataset.from_pandas(df_train, features=hf_features)
252
+ hf_test = Dataset.from_pandas(df_test, features=hf_features)
253
+
254
+ # Create a dataset dictionary
255
+ hf_dataset = DatasetDict({
256
+ "train": hf_train,
257
+ "test": hf_test
258
+ })
259
+
260
+ # Print dataset structure
261
+ print(hf_dataset)
262
+ ```
263
+
264
+ The printed output should look like:
265
+
266
+ ```
267
+ DatasetDict({
268
+ train: Dataset({
269
+ features: ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'],
270
+ num_rows: 32561
271
+ })
272
+ test: Dataset({
273
+ features: ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'],
274
+ num_rows: 16281
275
+ })
276
+ })
277
+ ```
278
+