"""TODO: Add a description here."""


import os
import pathlib as Path
import pandas as pd
import datasets


_CITATION = """\
Wißbrock, P. (2024). Lenze Gearmotor Degradation Dataset (Lenze-GD) (1.0) [Data set]. Lenze SE.
"""

_DESCRIPTION = """\
A run-to-failure experiment for geared motors is introduced. A geared motor is installed in healthy condition and operated until it fails. Throughout the experiment, a data acquisition system is active to monitor the signals of all degradation states. In order to complete the experiment in limited time, the geared motors nominal torque is exceeded. The experiment is conducted three times in total and each with multiple operation states during measurement.
"""

_HOMEPAGE = "https://zenodo.org/records/11162448"

_LICENSE = ""

class LenzeDataset(datasets.GeneratorBasedBuilder):
	"""Run-to-failure degradation dataset for Lenze geared motors (Lenze-GD).

	One builder config is created per sub-directory of ``data/``; each
	directory is expected to contain ``Meta_Data.pickle`` (per-measurement
	metadata) and ``Signal_Data.pickle`` (raw channel signals ``Ch1``-``Ch8``).
	"""

	VERSION = datasets.Version("1.0.0")

	# Discover one configuration per experiment folder.  Only directories are
	# considered (a stray file such as a README in ``data/`` would otherwise
	# yield a config that can never be loaded), and the listing is sorted so
	# the config order is deterministic across machines and runs.
	# NOTE(review): this runs at import time and assumes the current working
	# directory contains ``data/`` -- confirm this matches how the script is
	# invoked by the `datasets` library.
	BUILDER_CONFIGS = []
	for folder in sorted(os.listdir("data")):
		if os.path.isdir(os.path.join("data", folder)):
			BUILDER_CONFIGS.append(
				datasets.BuilderConfig(name=folder, version=VERSION)
			)

	def _info(self):
		"""Return the :class:`datasets.DatasetInfo` describing one example.

		Each example pairs scalar metadata (shaft frequencies, label, name,
		timestamp, sample rate) with eight variable-length signal channels.
		"""
		features = datasets.Features(
			{
				"Frequency_Shaft_1": datasets.Value("float64"),
				"Frequency_Shaft_2": datasets.Value("float64"),
				"Frequency_Shaft_3": datasets.Value("float64"),
				"Label": datasets.Value("string"),
				"name": datasets.Value("string"),
				"timestamp": datasets.Value("string"),
				"sr": datasets.Value("float64"),
				"Ch1": datasets.Sequence(datasets.Value("float32")),
				"Ch2": datasets.Sequence(datasets.Value("float32")),
				"Ch3": datasets.Sequence(datasets.Value("float32")),
				"Ch4": datasets.Sequence(datasets.Value("float32")),
				"Ch5": datasets.Sequence(datasets.Value("float32")),
				"Ch6": datasets.Sequence(datasets.Value("float32")),
				"Ch7": datasets.Sequence(datasets.Value("float32")),
				"Ch8": datasets.Sequence(datasets.Value("float32")),
			}
		)
		return datasets.DatasetInfo(
			description=_DESCRIPTION,
			features=features,
			homepage=_HOMEPAGE,
			license=_LICENSE,
			citation=_CITATION,
		)

	def _split_generators(self, dl_manager):
		"""Define train/validation/test splits as row ranges of the data.

		The data is read locally from ``data/`` rather than downloaded, so
		``dl_manager`` is unused.  Splits are fixed positional row ranges:
		rows [0, 3) -> train, [3, 4) -> validation, [4, 5) -> test.
		NOTE(review): this assumes every config has at least 5 rows after
		concatenating meta and signal tables -- confirm against the data.
		"""
		data_dir = "data"
		return [
			datasets.SplitGenerator(
				name=datasets.Split.TRAIN,
				gen_kwargs={
					"data_dir": data_dir,
					"id_start": 0,
					"id_end": 3,
				},
			),
			datasets.SplitGenerator(
				name=datasets.Split.VALIDATION,
				gen_kwargs={
					"data_dir": data_dir,
					"id_start": 3,
					"id_end": 4,
				},
			),
			datasets.SplitGenerator(
				name=datasets.Split.TEST,
				gen_kwargs={
					"data_dir": data_dir,
					"id_start": 4,
					"id_end": 5,
				},
			),
		]

	def _generate_examples(self, data_dir, id_start, id_end):
		"""Yield ``(key, example)`` pairs for rows ``id_start:id_end``.

		Reads the two pickle files of the selected config, concatenates
		them column-wise (one row per measurement) and emits each row of
		the requested slice as a plain dict.
		"""
		# ``pathlib`` is imported at module level under the alias ``Path``,
		# hence the ``Path.Path`` spelling here.
		config_dir = Path.Path(data_dir) / self.config.name
		meta_path = config_dir / "Meta_Data.pickle"
		signal_path = config_dir / "Signal_Data.pickle"

		# SECURITY NOTE(review): unpickling executes arbitrary code -- these
		# files must come from a trusted source (here: the dataset release).
		meta_df = pd.read_pickle(meta_path)
		signal_df = pd.read_pickle(signal_path)

		# axis=1 pairs each measurement's metadata columns with its signal
		# columns; the integer slice selects the split's rows.
		df = pd.concat([meta_df, signal_df], axis=1)[id_start:id_end]

		for index, row in df.iterrows():
			yield index, row.to_dict()