sameer505 committed on
Commit
49ba146
·
1 Parent(s): eaa1b1b

notebook and _info

Browse files
milling_processes_LUH__testing_propuses.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """this is loading script for milling processes files"""
2
+
3
+ import csv
4
+ import json
5
+ import os
6
+
7
+ import datasets
8
+
9
+ # TODO: Add BibTeX citation
10
+ # Find for instance the citation on arxiv or on the dataset repo/website
11
+ _CITATION = """\
12
+ @InProceedings{huggingface:dataset,
13
+ title = {Multivariate time series data of milling processes with varying tool wear and machine tools},
14
+ author={Tobias Stiehl},
15
+ year={2023}
16
+ }
17
+ """
18
+
19
+ _DESCRIPTION = """\
20
+ """
21
+
22
+ _HOMEPAGE = "https://data.mendeley.com/datasets/zpxs87bjt8/3"
23
+
24
+ _LICENSE = "CC BY 4.0"
25
+
26
+ # TODO: Add link to the official dataset URLs here
27
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
28
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
29
+ _URLS = {
30
+ "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
31
+ "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
32
+ }
33
+
34
+
35
class MillingProcessesLUH(datasets.GeneratorBasedBuilder):
    """Builder for the LUH milling-processes dataset: multivariate time
    series of milling runs with varying tool wear and machine tools.

    Each example combines per-run scalar metadata (machine, run, tool, wear,
    cumulated tool contact time) with variable-length float32 sensor channels
    (axis positions, control deviations, torques, forces, timestamps).

    NOTE(review): parts of this class are still HuggingFace template code
    (placeholder _URLS, empty BUILDER_CONFIGS, template fields in
    _generate_examples) — confirm against the real data before release.
    """

    # BUG FIX: ``datasets.Version`` requires a "major.minor.patch" string;
    # the original ``Version("3")`` raised ValueError while the class body
    # was being evaluated, so the script could not even be imported.
    VERSION = datasets.Version("3.0.0")

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    # NOTE(review): empty while DEFAULT_CONFIG_NAME and _URLS still reference
    # "first_domain"/"second_domain" — loading fails with a KeyError in
    # _split_generators until real configs are declared here.
    BUILDER_CONFIGS = [

    ]

    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.

    def _info(self):
        """Return the ``datasets.DatasetInfo`` describing the feature schema.

        Scalars are stored as float32 (source files hold float64); every
        sensor channel is a variable-length sequence of float32 samples
        (``object`` columns in the source files).
        """
        features = datasets.Features(
            {
                # per-run scalar metadata (float64 in source, down-cast here)
                "cumulated_tool_contact_time": datasets.features.Value("float32"),
                "machine": datasets.features.Value("float32"),
                "run": datasets.features.Value("float32"),
                "tool": datasets.features.Value("float32"),
                "wear": datasets.features.Value("float32"),
                # variable-length sensor channels
                "position_control_deviation_axis_x": datasets.Sequence(datasets.Value("float32")),
                "position_control_deviation_axis_y": datasets.Sequence(datasets.Value("float32")),
                "time_machine": datasets.Sequence(datasets.Value("float32")),
                "tool_position_x": datasets.Sequence(datasets.Value("float32")),
                "tool_position_y": datasets.Sequence(datasets.Value("float32")),
                "tool_position_z": datasets.Sequence(datasets.Value("float32")),
                "torque_axis_x": datasets.Sequence(datasets.Value("float32")),
                "torque_axis_y": datasets.Sequence(datasets.Value("float32")),
                "torque_axis_z": datasets.Sequence(datasets.Value("float32")),
                "torque_spindle": datasets.Sequence(datasets.Value("float32")),
                "force_sensor_x": datasets.Sequence(datasets.Value("float32")),
                "force_sensor_y": datasets.Sequence(datasets.Value("float32")),
                "force_sensor_z": datasets.Sequence(datasets.Value("float32")),
                "time_sensor": datasets.Sequence(datasets.Value("float32")),
            }
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive for the active config and declare
        train/validation/test splits.

        ``dl_manager`` accepts any nested list/dict of URLs and returns the
        same structure with local extracted paths substituted in.

        NOTE(review): relies on the placeholder ``_URLS`` entries and on
        ``self.config.name``; with ``BUILDER_CONFIGS`` empty this raises
        KeyError until real configs/URLs are filled in.
        """
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.jsonl"),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                    "split": "test"
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` tuples from one JSON-lines split file.

        The ``key`` is for legacy reasons (tfds) and only needs to be unique
        per example.

        NOTE(review): this is still the HuggingFace template body — the
        yielded fields ("sentence", "option1", ...) do not match the milling
        features declared in ``_info`` and would fail schema casting; rewrite
        once the real record layout is known.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "first_domain":
                    # Yields examples as (key, example) tuples
                    yield key, {
                        "sentence": data["sentence"],
                        "option1": data["option1"],
                        "answer": "" if split == "test" else data["answer"],
                    }
                else:
                    yield key, {
                        "sentence": data["sentence"],
                        "option2": data["option2"],
                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
                    }
notebook.ipynb ADDED
File without changes