mstz committed on
Commit
983788b
·
1 Parent(s): 9cb6a00

updated to datasets 4.*

Browse files
Files changed (3) hide show
  1. README.md +10 -9
  2. haberman.py +0 -72
  3. haberman.data → survival/train.csv +0 -0
README.md CHANGED
@@ -1,19 +1,20 @@
1
  ---
2
- language:
3
- - en
 
 
 
 
 
 
 
 
4
  tags:
5
- - haberman
6
  - tabular_classification
7
  - binary_classification
8
  - multiclass_classification
9
- pretty_name: Haberman
10
- size_categories:
11
- - n<1K
12
  task_categories:
13
  - tabular-classification
14
- configs:
15
- - survival
16
- license: cc
17
  ---
18
  # Haberman
19
  The [Haberman dataset](https://archive.ics.uci.edu/ml/datasets/Haberman) from the [UCI ML repository](https://archive.ics.uci.edu/ml/datasets).
 
1
  ---
2
+ configs:
3
+ - config_name: survival
4
+ data_files:
5
+ - path: survival/train.csv
6
+ split: train
7
+ default: true
8
+ language: en
9
+ license: cc
10
+ pretty_name: Haberman
11
+ size_categories: 1M<n<10M
12
  tags:
 
13
  - tabular_classification
14
  - binary_classification
15
  - multiclass_classification
 
 
 
16
  task_categories:
17
  - tabular-classification
 
 
 
18
  ---
19
  # Haberman
20
  The [Haberman dataset](https://archive.ics.uci.edu/ml/datasets/Haberman) from the [UCI ML repository](https://archive.ics.uci.edu/ml/datasets).
haberman.py DELETED
@@ -1,72 +0,0 @@
1
- """Haberman"""
2
-
3
- from typing import List
4
-
5
- import datasets
6
-
7
- import pandas
8
-
9
-
10
- VERSION = datasets.Version("1.0.0")
11
-
12
- DESCRIPTION = "Haberman dataset from the UCI ML repository."
13
- _HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Haberman"
14
- _URLS = ("https://archive.ics.uci.edu/ml/datasets/Haberman")
15
- _CITATION = """
16
- @misc{misc_haberman's_survival_43,
17
- author = {Haberman,S.},
18
- title = {{Haberman's Survival}},
19
- year = {1999},
20
- howpublished = {UCI Machine Learning Repository},
21
- note = {{DOI}: \\url{10.24432/C5XK51}}
22
- }"""
23
-
24
- # Dataset info
25
- urls_per_split = {
26
- "train": "https://huggingface.co/datasets/mstz/haberman/raw/main/haberman.data"
27
- }
28
- features_types_per_config = {
29
- "survival": {
30
- "age": datasets.Value("int32"),
31
- "year_of_operation": datasets.Value("int32"),
32
- "number_of_axillary_nodes": datasets.Value("int32"),
33
- "has_survived_5_years": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
34
- }
35
- }
36
- features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
37
-
38
-
39
- class HabermanConfig(datasets.BuilderConfig):
40
- def __init__(self, **kwargs):
41
- super(HabermanConfig, self).__init__(version=VERSION, **kwargs)
42
- self.features = features_per_config[kwargs["name"]]
43
-
44
-
45
- class Haberman(datasets.GeneratorBasedBuilder):
46
- # dataset versions
47
- DEFAULT_CONFIG = "survival"
48
- BUILDER_CONFIGS = [
49
- HabermanConfig(name="survival",
50
- description="Haberman for binary classification.")
51
- ]
52
-
53
- def _info(self):
54
- info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
55
- features=features_per_config[self.config.name])
56
-
57
- return info
58
-
59
- def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
60
- downloads = dl_manager.download_and_extract(urls_per_split)
61
-
62
- return [
63
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
64
- ]
65
-
66
- def _generate_examples(self, filepath: str):
67
- data = pandas.read_csv(filepath)
68
-
69
- for row_id, row in data.iterrows():
70
- data_row = dict(row)
71
-
72
- yield row_id, data_row
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
haberman.data → survival/train.csv RENAMED
File without changes