created a functional loading script
Browse files
Lenze_dataset.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Hugging Face `datasets` loading script for the Lenze Gearmotor Degradation dataset (Lenze-GD)."""


import os
import pathlib as Path  # NOTE(review): unusual alias — the loader below calls `Path.Path(...)`; consider `from pathlib import Path`
import pandas as pd
import datasets


# Citation for the underlying Zenodo dataset.
_CITATION = """\
Wißbrock, P. (2024). Lenze Gearmotor Degradation Dataset (Lenze-GD) (1.0) [Data set]. Lenze SE.
"""

# Short dataset card description shown by `datasets`.
_DESCRIPTION = """\
A run-to-failure experiment for geared motors is introduced. A geared motor is installed in healthy condition and operated until it fails. Throughout the experiment, a data acquisition system is active to monitor the signals of all degradation states. In order to complete the experiment in limited time, the geared motors nominal torque is exceeded. The experiment is conducted three times in total and each with multiple operation states during measurement.
"""

_HOMEPAGE = "https://zenodo.org/records/11162448"

# NOTE(review): license string left empty — confirm the license on the Zenodo record.
_LICENSE = ""

# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
# _URLS = {
#     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
#     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
# }
# Dataset name matches the script name with CamelCase instead of snake_case.
class LenzeDataset(datasets.GeneratorBasedBuilder):
    """Builder for the Lenze Gearmotor Degradation (Lenze-GD) measurements.

    Each sub-folder of ``data/`` (one per measurement configuration, e.g.
    ``B045_I_ccw_withLoad``) is exposed as its own builder configuration.
    A configuration's rows come from two aligned pandas pickles in that
    folder: ``Meta_Data.pickle`` (labels/metadata) and
    ``Signal_Data.pickle`` (the eight measurement channels Ch1..Ch8).
    """

    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per measurement folder under ``data/``.
    # - sorted(): os.listdir order is file-system dependent, so sort for a
    #   deterministic config list.
    # - isdir(): skip stray non-folder entries (e.g. a README dropped into
    #   ``data/``).
    # A plain loop (not a comprehension) is used on purpose: class-level
    # names such as VERSION are not visible inside a comprehension's scope.
    BUILDER_CONFIGS = []
    for _folder in sorted(os.listdir("data")):
        if os.path.isdir(os.path.join("data", _folder)):
            BUILDER_CONFIGS.append(datasets.BuilderConfig(name=_folder, version=VERSION))
    del _folder  # keep the class namespace clean

    def _info(self):
        """Return the DatasetInfo (schema, citation, homepage) for every config."""
        features = datasets.Features(
            {
                "Frequency_Shaft_1": datasets.Value("float64"),
                "Frequency_Shaft_2": datasets.Value("float64"),
                "Frequency_Shaft_3": datasets.Value("float64"),
                "Label": datasets.Value("string"),
                "name": datasets.Value("string"),
                "timestamp": datasets.Value("string"),
                "sr": datasets.Value("float64"),  # sampling rate of the channel sequences
                "Ch1": datasets.Sequence(datasets.Value("float32")),
                "Ch2": datasets.Sequence(datasets.Value("float32")),
                "Ch3": datasets.Sequence(datasets.Value("float32")),
                "Ch4": datasets.Sequence(datasets.Value("float32")),
                "Ch5": datasets.Sequence(datasets.Value("float32")),
                "Ch6": datasets.Sequence(datasets.Value("float32")),
                "Ch7": datasets.Sequence(datasets.Value("float32")),
                "Ch8": datasets.Sequence(datasets.Value("float32")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Define the train/validation/test splits.

        The data ships with the repository (nothing is downloaded), so
        ``dl_manager`` is unused. Splits are fixed positional row ranges of
        the concatenated meta+signal frame: rows 0-2 train, row 3 validation,
        row 4 test. NOTE(review): these bounds assume exactly 5 rows per
        configuration — confirm against the pickles.
        """
        data_dir = "data"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"data_dir": data_dir, "id_start": 0, "id_end": 3},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "id_start": 3, "id_end": 4},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "id_start": 4, "id_end": 5},
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, data_dir, id_start, id_end):
        """Yield ``(key, example)`` pairs for rows ``id_start``..``id_end`` (positional).

        The `key` is for legacy reasons (tfds) and is not important in itself,
        but must be unique for each example; the DataFrame index is used here.
        """
        config_dir = os.path.join(data_dir, self.config.name)
        meta_df = pd.read_pickle(os.path.join(config_dir, "Meta_Data.pickle"))
        signal_df = pd.read_pickle(os.path.join(config_dir, "Signal_Data.pickle"))

        # The two frames are aligned row-for-row; concatenate columns, then
        # take an explicitly *positional* slice. `.iloc` is used instead of
        # plain `df[a:b]` because the pickles carry an integer index that
        # starts at 1, where bare integer slicing is easy to misread.
        df = pd.concat([meta_df, signal_df], axis=1).iloc[id_start:id_end]

        for index, row in df.iterrows():
            yield index, row.to_dict()
data/B045_I_ccw_withLoad/Meta_Data.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2f4f57ed28647644712c78f595ef060ac6f0ecb099c6bfa2613aed4fb02b9b28
|
| 3 |
+
size 1425
|
data/B045_I_ccw_withLoad/Signal_Data.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bcf4ccac554d11514329205d09592c7f715af6cefcf49ec65d46f26eca8297df
|
| 3 |
+
size 10487962
|
data/H045_I_ccw_withoutLoad/Meta_Data.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c8c177a1b0b684e84eb09fe60ca38828dbafd7cd189c03bb74ebd9f87242eb21
|
| 3 |
+
size 1425
|
data/H045_I_ccw_withoutLoad/Signal_Data.pickle
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b00f3087aac9b6bd6f2872ae08c8b225949a4cbbda1d2af8de7c22f7d195416a
|
| 3 |
+
size 10487962
|
notebook.ipynb
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 27,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [],
|
| 8 |
+
"source": [
|
| 9 |
+
"import pandas as pd\n",
|
| 10 |
+
"import os \n",
|
| 11 |
+
"import pathlib as Path"
|
| 12 |
+
]
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"cell_type": "code",
|
| 16 |
+
"execution_count": null,
|
| 17 |
+
"metadata": {},
|
| 18 |
+
"outputs": [
|
| 19 |
+
{
|
| 20 |
+
"data": {
|
| 21 |
+
"text/html": [
|
| 22 |
+
"<div>\n",
|
| 23 |
+
"<style scoped>\n",
|
| 24 |
+
" .dataframe tbody tr th:only-of-type {\n",
|
| 25 |
+
" vertical-align: middle;\n",
|
| 26 |
+
" }\n",
|
| 27 |
+
"\n",
|
| 28 |
+
" .dataframe tbody tr th {\n",
|
| 29 |
+
" vertical-align: top;\n",
|
| 30 |
+
" }\n",
|
| 31 |
+
"\n",
|
| 32 |
+
" .dataframe thead th {\n",
|
| 33 |
+
" text-align: right;\n",
|
| 34 |
+
" }\n",
|
| 35 |
+
"</style>\n",
|
| 36 |
+
"<table border=\"1\" class=\"dataframe\">\n",
|
| 37 |
+
" <thead>\n",
|
| 38 |
+
" <tr style=\"text-align: right;\">\n",
|
| 39 |
+
" <th></th>\n",
|
| 40 |
+
" <th>Ch1</th>\n",
|
| 41 |
+
" <th>Ch2</th>\n",
|
| 42 |
+
" <th>Ch3</th>\n",
|
| 43 |
+
" <th>Ch4</th>\n",
|
| 44 |
+
" <th>Ch5</th>\n",
|
| 45 |
+
" <th>Ch6</th>\n",
|
| 46 |
+
" <th>Ch7</th>\n",
|
| 47 |
+
" <th>Ch8</th>\n",
|
| 48 |
+
" </tr>\n",
|
| 49 |
+
" </thead>\n",
|
| 50 |
+
" <tbody>\n",
|
| 51 |
+
" <tr>\n",
|
| 52 |
+
" <th>1</th>\n",
|
| 53 |
+
" <td>[-111321426.0, -111654495.0, -112009801.0, -11...</td>\n",
|
| 54 |
+
" <td>[103018081.0, 103132762.0, 103205819.0, 103230...</td>\n",
|
| 55 |
+
" <td>[151607336.0, 151918362.0, 152218710.0, 152309...</td>\n",
|
| 56 |
+
" <td>[2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317....</td>\n",
|
| 57 |
+
" <td>[351167548.0, 351167548.0, 351167548.0, 351167...</td>\n",
|
| 58 |
+
" <td>[-135673176.0, -138442132.0, -140947490.0, -14...</td>\n",
|
| 59 |
+
" <td>[9631514.0, 15436964.0, 21242286.0, 27047610.0...</td>\n",
|
| 60 |
+
" <td>[126522230.0, 123491662.0, 120329330.0, 116640...</td>\n",
|
| 61 |
+
" </tr>\n",
|
| 62 |
+
" <tr>\n",
|
| 63 |
+
" <th>2</th>\n",
|
| 64 |
+
" <td>[-113038606.0, -113189450.0, -113324950.0, -11...</td>\n",
|
| 65 |
+
" <td>[103214543.0, 102753899.0, 102391051.0, 102286...</td>\n",
|
| 66 |
+
" <td>[152960076.0, 152767040.0, 152628058.0, 152641...</td>\n",
|
| 67 |
+
" <td>[2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2317....</td>\n",
|
| 68 |
+
" <td>[351167548.0, 351167548.0, 353956162.0, 353956...</td>\n",
|
| 69 |
+
" <td>[132660080.0, 129627276.0, 126462612.0, 123034...</td>\n",
|
| 70 |
+
" <td>[-132467908.0, -134974806.0, -137349636.0, -13...</td>\n",
|
| 71 |
+
" <td>[294276.0, 5828356.0, 11494072.0, 17291554.0, ...</td>\n",
|
| 72 |
+
" </tr>\n",
|
| 73 |
+
" <tr>\n",
|
| 74 |
+
" <th>3</th>\n",
|
| 75 |
+
" <td>[-113094566.0, -112948690.0, -112728272.0, -11...</td>\n",
|
| 76 |
+
" <td>[102569207.0, 102932501.0, 103309646.0, 103446...</td>\n",
|
| 77 |
+
" <td>[152578336.0, 152710768.0, 152799860.0, 152793...</td>\n",
|
| 78 |
+
" <td>[2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317....</td>\n",
|
| 79 |
+
" <td>[353956162.0, 353956162.0, 351167548.0, 351167...</td>\n",
|
| 80 |
+
" <td>[-116289860.0, -120377424.0, -124333256.0, -12...</td>\n",
|
| 81 |
+
" <td>[-27047738.0, -20714648.0, -14381556.0, -83123...</td>\n",
|
| 82 |
+
" <td>[143914672.0, 141806578.0, 139303064.0, 136536...</td>\n",
|
| 83 |
+
" </tr>\n",
|
| 84 |
+
" <tr>\n",
|
| 85 |
+
" <th>4</th>\n",
|
| 86 |
+
" <td>[-111870924.0, -112523505.0, -113160018.0, -11...</td>\n",
|
| 87 |
+
" <td>[102843637.0, 102631651.0, 102524970.0, 102459...</td>\n",
|
| 88 |
+
" <td>[151882852.0, 152209684.0, 152596548.0, 152794...</td>\n",
|
| 89 |
+
" <td>[2315.0, 2315.0, 2315.0, 2315.0, 2315.0, 2315....</td>\n",
|
| 90 |
+
" <td>[351167548.0, 351167548.0, 351167548.0, 353956...</td>\n",
|
| 91 |
+
" <td>[142813126.0, 144922902.0, 146900818.0, 148615...</td>\n",
|
| 92 |
+
" <td>[-26124144.0, -31401826.0, -36811324.0, -42352...</td>\n",
|
| 93 |
+
" <td>[-116446684.0, -113152588.0, -109726728.0, -10...</td>\n",
|
| 94 |
+
" </tr>\n",
|
| 95 |
+
" <tr>\n",
|
| 96 |
+
" <th>5</th>\n",
|
| 97 |
+
" <td>[-113644003.0, -113838059.0, -114077127.0, -11...</td>\n",
|
| 98 |
+
" <td>[102545197.0, 102503508.0, 102137408.0, 102081...</td>\n",
|
| 99 |
+
" <td>[152958508.0, 153071616.0, 153006696.0, 153096...</td>\n",
|
| 100 |
+
" <td>[2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2316....</td>\n",
|
| 101 |
+
" <td>[351167548.0, 351167548.0, 353956162.0, 353956...</td>\n",
|
| 102 |
+
" <td>[-147935998.0, -149386470.0, -150441358.0, -15...</td>\n",
|
| 103 |
+
" <td>[40109746.0, 45783126.0, 51060680.0, 56602246....</td>\n",
|
| 104 |
+
" <td>[107943786.0, 103859108.0, 99642792.0, 9516282...</td>\n",
|
| 105 |
+
" </tr>\n",
|
| 106 |
+
" </tbody>\n",
|
| 107 |
+
"</table>\n",
|
| 108 |
+
"</div>"
|
| 109 |
+
],
|
| 110 |
+
"text/plain": [
|
| 111 |
+
" Ch1 \\\n",
|
| 112 |
+
"1 [-111321426.0, -111654495.0, -112009801.0, -11... \n",
|
| 113 |
+
"2 [-113038606.0, -113189450.0, -113324950.0, -11... \n",
|
| 114 |
+
"3 [-113094566.0, -112948690.0, -112728272.0, -11... \n",
|
| 115 |
+
"4 [-111870924.0, -112523505.0, -113160018.0, -11... \n",
|
| 116 |
+
"5 [-113644003.0, -113838059.0, -114077127.0, -11... \n",
|
| 117 |
+
"\n",
|
| 118 |
+
" Ch2 \\\n",
|
| 119 |
+
"1 [103018081.0, 103132762.0, 103205819.0, 103230... \n",
|
| 120 |
+
"2 [103214543.0, 102753899.0, 102391051.0, 102286... \n",
|
| 121 |
+
"3 [102569207.0, 102932501.0, 103309646.0, 103446... \n",
|
| 122 |
+
"4 [102843637.0, 102631651.0, 102524970.0, 102459... \n",
|
| 123 |
+
"5 [102545197.0, 102503508.0, 102137408.0, 102081... \n",
|
| 124 |
+
"\n",
|
| 125 |
+
" Ch3 \\\n",
|
| 126 |
+
"1 [151607336.0, 151918362.0, 152218710.0, 152309... \n",
|
| 127 |
+
"2 [152960076.0, 152767040.0, 152628058.0, 152641... \n",
|
| 128 |
+
"3 [152578336.0, 152710768.0, 152799860.0, 152793... \n",
|
| 129 |
+
"4 [151882852.0, 152209684.0, 152596548.0, 152794... \n",
|
| 130 |
+
"5 [152958508.0, 153071616.0, 153006696.0, 153096... \n",
|
| 131 |
+
"\n",
|
| 132 |
+
" Ch4 \\\n",
|
| 133 |
+
"1 [2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317.... \n",
|
| 134 |
+
"2 [2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2317.... \n",
|
| 135 |
+
"3 [2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317.... \n",
|
| 136 |
+
"4 [2315.0, 2315.0, 2315.0, 2315.0, 2315.0, 2315.... \n",
|
| 137 |
+
"5 [2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2316.... \n",
|
| 138 |
+
"\n",
|
| 139 |
+
" Ch5 \\\n",
|
| 140 |
+
"1 [351167548.0, 351167548.0, 351167548.0, 351167... \n",
|
| 141 |
+
"2 [351167548.0, 351167548.0, 353956162.0, 353956... \n",
|
| 142 |
+
"3 [353956162.0, 353956162.0, 351167548.0, 351167... \n",
|
| 143 |
+
"4 [351167548.0, 351167548.0, 351167548.0, 353956... \n",
|
| 144 |
+
"5 [351167548.0, 351167548.0, 353956162.0, 353956... \n",
|
| 145 |
+
"\n",
|
| 146 |
+
" Ch6 \\\n",
|
| 147 |
+
"1 [-135673176.0, -138442132.0, -140947490.0, -14... \n",
|
| 148 |
+
"2 [132660080.0, 129627276.0, 126462612.0, 123034... \n",
|
| 149 |
+
"3 [-116289860.0, -120377424.0, -124333256.0, -12... \n",
|
| 150 |
+
"4 [142813126.0, 144922902.0, 146900818.0, 148615... \n",
|
| 151 |
+
"5 [-147935998.0, -149386470.0, -150441358.0, -15... \n",
|
| 152 |
+
"\n",
|
| 153 |
+
" Ch7 \\\n",
|
| 154 |
+
"1 [9631514.0, 15436964.0, 21242286.0, 27047610.0... \n",
|
| 155 |
+
"2 [-132467908.0, -134974806.0, -137349636.0, -13... \n",
|
| 156 |
+
"3 [-27047738.0, -20714648.0, -14381556.0, -83123... \n",
|
| 157 |
+
"4 [-26124144.0, -31401826.0, -36811324.0, -42352... \n",
|
| 158 |
+
"5 [40109746.0, 45783126.0, 51060680.0, 56602246.... \n",
|
| 159 |
+
"\n",
|
| 160 |
+
" Ch8 \n",
|
| 161 |
+
"1 [126522230.0, 123491662.0, 120329330.0, 116640... \n",
|
| 162 |
+
"2 [294276.0, 5828356.0, 11494072.0, 17291554.0, ... \n",
|
| 163 |
+
"3 [143914672.0, 141806578.0, 139303064.0, 136536... \n",
|
| 164 |
+
"4 [-116446684.0, -113152588.0, -109726728.0, -10... \n",
|
| 165 |
+
"5 [107943786.0, 103859108.0, 99642792.0, 9516282... "
|
| 166 |
+
]
|
| 167 |
+
},
|
| 168 |
+
"execution_count": 33,
|
| 169 |
+
"metadata": {},
|
| 170 |
+
"output_type": "execute_result"
|
| 171 |
+
}
|
| 172 |
+
],
|
| 173 |
+
"source": [
|
| 174 |
+
"folders = os.listdir(\"data\")\n",
|
| 175 |
+
"data_path = Path.Path(\"data\")\n",
|
| 176 |
+
"path = data_path / folders[0]\n",
|
| 177 |
+
"\n",
|
| 178 |
+
"meta_df = pd.read_pickle(path / \"Meta_Data.pickle\") \n",
|
| 179 |
+
"signal_df = pd.read_pickle(path / \"Signal_Data.pickle\")"
|
| 180 |
+
]
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"cell_type": "code",
|
| 184 |
+
"execution_count": null,
|
| 185 |
+
"metadata": {},
|
| 186 |
+
"outputs": [],
|
| 187 |
+
"source": [
|
| 188 |
+
"Ch_Mapper = {\"Ch1\": \"Direct Current\",\n",
|
| 189 |
+
" \"Ch2\": \"Quadrature Current\",\n",
|
| 190 |
+
" \"Ch3\": \"Effective Current\",\n",
|
| 191 |
+
" \"Ch4\": \"Effective Voltage\",\n",
|
| 192 |
+
" \"Ch5\": \"Quadrature Voltage\",\n",
|
| 193 |
+
" \"Ch6\": \"Phase Current U\",\n",
|
| 194 |
+
" \"Ch7\": \"Phase Current V\",\n",
|
| 195 |
+
" \"Ch8\": \"Phase Current W\",}"
|
| 196 |
+
]
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"cell_type": "markdown",
|
| 200 |
+
"metadata": {},
|
| 201 |
+
"source": [
|
| 202 |
+
Creating a small sample dataset from the full Lenze-GD pickles
|
| 203 |
+
]
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"cell_type": "code",
|
| 207 |
+
"execution_count": null,
|
| 208 |
+
"metadata": {},
|
| 209 |
+
"outputs": [
|
| 210 |
+
{
|
| 211 |
+
"name": "stdout",
|
| 212 |
+
"output_type": "stream",
|
| 213 |
+
"text": [
|
| 214 |
+
"c:\\Arbeit\\bei Jonas\\Aufgaben Okt Nov\\Lenze_dataset\\Lenze_dataset\n"
|
| 215 |
+
]
|
| 216 |
+
}
|
| 217 |
+
],
|
| 218 |
+
"source": [
|
| 219 |
+
"# file_path = r\"C:\\Arbeit\\bei Jonas\\Aufgaben Okt Nov\\Lenze_dataset\\Lenze-GD\\B045_I_ccw_withLoad\"\n",
|
| 220 |
+
"# signal_df = pd.read_pickle(file_path+ r'\\Signal_Data.pickle')\n",
|
| 221 |
+
"# meta_df = pd.read_pickle(file_path + r'\\Meta_Data.pickle')\n",
|
| 222 |
+
"\n",
|
| 223 |
+
"# file_path = r\"C:\\Arbeit\\bei Jonas\\Aufgaben Okt Nov\\Lenze_dataset\\Lenze-GD\\B045_I_ccw_withLoad\"\n",
|
| 224 |
+
"# signal_df = pd.read_pickle(file_path+ r'\\Signal_Data.pickle')\n",
|
| 225 |
+
"# meta_df = pd.read_pickle(file_path + r'\\Meta_Data.pickle')\n",
|
| 226 |
+
"\n",
|
| 227 |
+
"# import os\n",
|
| 228 |
+
"# mini_signal_df = signal_df[0:5]\n",
|
| 229 |
+
"# mini_meta_df = meta_df[0:5]\n",
|
| 230 |
+
"\n",
|
| 231 |
+
"# mini_signal_df.to_pickle(\"data/B045_I_ccw_withLoad/Signal_Data.pickle\")\n",
|
| 232 |
+
"# mini_meta_df.to_pickle(\"data/B045_I_ccw_withLoad/Meta_Data.pickle\")\n",
|
| 233 |
+
"\n",
|
| 234 |
+
"# file_path = r\"C:\\Arbeit\\bei Jonas\\Aufgaben Okt Nov\\Lenze_dataset\\Lenze-GD\\H045_I_ccw_withoutLoad\"\n",
|
| 235 |
+
"# signal_df = pd.read_pickle(file_path+ r'\\Signal_Data.pickle')\n",
|
| 236 |
+
"# meta_df = pd.read_pickle(file_path + r'\\Meta_Data.pickle')\n",
|
| 237 |
+
"\n",
|
| 238 |
+
"# import os\n",
|
| 239 |
+
"# mini_signal_df = signal_df[0:5]\n",
|
| 240 |
+
"# mini_meta_df = meta_df[0:5]\n",
|
| 241 |
+
"\n",
|
| 242 |
+
"# mini_signal_df.to_pickle( \"data/H045_I_ccw_withoutLoad/Signal_Data.pickle\")\n",
|
| 243 |
+
"# mini_meta_df.to_pickle(\"data/H045_I_ccw_withoutLoad/Meta_Data.pickle\")"
|
| 244 |
+
]
|
| 245 |
+
}
|
| 246 |
+
],
|
| 247 |
+
"metadata": {
|
| 248 |
+
"kernelspec": {
|
| 249 |
+
"display_name": "base",
|
| 250 |
+
"language": "python",
|
| 251 |
+
"name": "python3"
|
| 252 |
+
},
|
| 253 |
+
"language_info": {
|
| 254 |
+
"codemirror_mode": {
|
| 255 |
+
"name": "ipython",
|
| 256 |
+
"version": 3
|
| 257 |
+
},
|
| 258 |
+
"file_extension": ".py",
|
| 259 |
+
"mimetype": "text/x-python",
|
| 260 |
+
"name": "python",
|
| 261 |
+
"nbconvert_exporter": "python",
|
| 262 |
+
"pygments_lexer": "ipython3",
|
| 263 |
+
"version": "3.12.2"
|
| 264 |
+
}
|
| 265 |
+
},
|
| 266 |
+
"nbformat": 4,
|
| 267 |
+
"nbformat_minor": 2
|
| 268 |
+
}
|