sameer505 committed on
Commit
aef20de
·
1 Parent(s): ae06a35

readme and cleaning code

Browse files
Files changed (4) hide show
  1. Lenze_dataset.py +0 -29
  2. README.md +10 -0
  3. data/quick_start_notebook.ipynb +0 -0
  4. notebook.ipynb +9 -164
Lenze_dataset.py CHANGED
@@ -19,43 +19,16 @@ _HOMEPAGE = "https://zenodo.org/records/11162448"
19
 
20
  _LICENSE = ""
21
 
22
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
23
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
24
- # _URLS = {
25
- # "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
26
- # "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
27
- # }
28
-
29
-
30
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
31
  class LenzeDataset(datasets.GeneratorBasedBuilder):
32
 
33
  VERSION = datasets.Version("1.0.0")
34
 
35
- # This is an example of a dataset with multiple configurations.
36
- # If you don't want/need to define several sub-sets in your dataset,
37
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
38
-
39
- # If you need to make complex sub-parts in the datasets with configurable options
40
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
41
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
42
-
43
- # You will be able to load one or the other configurations in the following list with
44
- # data = datasets.load_dataset('my_dataset', 'first_domain')
45
- # data = datasets.load_dataset('my_dataset', 'second_domain')
46
  folders = os.listdir("data")
47
  BUILDER_CONFIGS = []
48
  for folder in folders:
49
  BUILDER_CONFIGS.append(datasets.BuilderConfig(name=folder, version=VERSION))
50
 
51
-
52
-
53
- # BUILDER_CONFIGS = [
54
- # datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
55
- # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
56
- # ]
57
 
58
- # DEFAULT_CONFIG_NAME = folders[0] # It's not mandatory to have a default configuration. Just use one if it make sense.
59
 
60
  def _info(self):
61
  features = datasets.Features(
@@ -121,8 +94,6 @@ class LenzeDataset(datasets.GeneratorBasedBuilder):
121
 
122
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
123
  def _generate_examples(self, data_dir, id_start, id_end):
124
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
125
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
126
 
127
  data_path = Path.Path(data_dir)
128
  meta_path = data_path / self.config.name / "Meta_Data.pickle"
 
19
 
20
  _LICENSE = ""
21
 
 
 
 
 
 
 
 
 
 
22
  class LenzeDataset(datasets.GeneratorBasedBuilder):
23
 
24
  VERSION = datasets.Version("1.0.0")
25
 
 
 
 
 
 
 
 
 
 
 
 
26
  folders = os.listdir("data")
27
  BUILDER_CONFIGS = []
28
  for folder in folders:
29
  BUILDER_CONFIGS.append(datasets.BuilderConfig(name=folder, version=VERSION))
30
 
 
 
 
 
 
 
31
 
 
32
 
33
  def _info(self):
34
  features = datasets.Features(
 
94
 
95
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
96
  def _generate_examples(self, data_dir, id_start, id_end):
 
 
97
 
98
  data_path = Path.Path(data_dir)
99
  meta_path = data_path / self.config.name / "Meta_Data.pickle"
README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # overview
2
+ - the purpose of this repository is to load a certain dataset
3
+ - the dataset can be found in https://zenodo.org/records/11162448
4
+ - this repository contains a small sample of data from the original dataset
5
+
6
+ # how to load dataset
7
+ - download the dataset from https://zenodo.org/records/11162448. the folder data should contain this data
8
+ - define train, test and validation splits by redefining *id_start* and *id_end* in the loading script (Lenze_dataset.py)
9
+ - you should define a config_name when calling the function *load_dataset*. the config_name is the name of a folder like H045_I_ccw_withoutLoad
10
+ example: *dataset = load_dataset("Lenze_dataset.py","H045_I_ccw_withoutLoad",trust_remote_code=True)*
data/quick_start_notebook.ipynb ADDED
File without changes
notebook.ipynb CHANGED
@@ -2,181 +2,25 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 27,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
  "import os \n",
11
- "import pathlib as Path"
 
12
  ]
13
  },
14
  {
15
  "cell_type": "code",
16
  "execution_count": null,
17
  "metadata": {},
18
- "outputs": [
19
- {
20
- "data": {
21
- "text/html": [
22
- "<div>\n",
23
- "<style scoped>\n",
24
- " .dataframe tbody tr th:only-of-type {\n",
25
- " vertical-align: middle;\n",
26
- " }\n",
27
- "\n",
28
- " .dataframe tbody tr th {\n",
29
- " vertical-align: top;\n",
30
- " }\n",
31
- "\n",
32
- " .dataframe thead th {\n",
33
- " text-align: right;\n",
34
- " }\n",
35
- "</style>\n",
36
- "<table border=\"1\" class=\"dataframe\">\n",
37
- " <thead>\n",
38
- " <tr style=\"text-align: right;\">\n",
39
- " <th></th>\n",
40
- " <th>Ch1</th>\n",
41
- " <th>Ch2</th>\n",
42
- " <th>Ch3</th>\n",
43
- " <th>Ch4</th>\n",
44
- " <th>Ch5</th>\n",
45
- " <th>Ch6</th>\n",
46
- " <th>Ch7</th>\n",
47
- " <th>Ch8</th>\n",
48
- " </tr>\n",
49
- " </thead>\n",
50
- " <tbody>\n",
51
- " <tr>\n",
52
- " <th>1</th>\n",
53
- " <td>[-111321426.0, -111654495.0, -112009801.0, -11...</td>\n",
54
- " <td>[103018081.0, 103132762.0, 103205819.0, 103230...</td>\n",
55
- " <td>[151607336.0, 151918362.0, 152218710.0, 152309...</td>\n",
56
- " <td>[2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317....</td>\n",
57
- " <td>[351167548.0, 351167548.0, 351167548.0, 351167...</td>\n",
58
- " <td>[-135673176.0, -138442132.0, -140947490.0, -14...</td>\n",
59
- " <td>[9631514.0, 15436964.0, 21242286.0, 27047610.0...</td>\n",
60
- " <td>[126522230.0, 123491662.0, 120329330.0, 116640...</td>\n",
61
- " </tr>\n",
62
- " <tr>\n",
63
- " <th>2</th>\n",
64
- " <td>[-113038606.0, -113189450.0, -113324950.0, -11...</td>\n",
65
- " <td>[103214543.0, 102753899.0, 102391051.0, 102286...</td>\n",
66
- " <td>[152960076.0, 152767040.0, 152628058.0, 152641...</td>\n",
67
- " <td>[2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2317....</td>\n",
68
- " <td>[351167548.0, 351167548.0, 353956162.0, 353956...</td>\n",
69
- " <td>[132660080.0, 129627276.0, 126462612.0, 123034...</td>\n",
70
- " <td>[-132467908.0, -134974806.0, -137349636.0, -13...</td>\n",
71
- " <td>[294276.0, 5828356.0, 11494072.0, 17291554.0, ...</td>\n",
72
- " </tr>\n",
73
- " <tr>\n",
74
- " <th>3</th>\n",
75
- " <td>[-113094566.0, -112948690.0, -112728272.0, -11...</td>\n",
76
- " <td>[102569207.0, 102932501.0, 103309646.0, 103446...</td>\n",
77
- " <td>[152578336.0, 152710768.0, 152799860.0, 152793...</td>\n",
78
- " <td>[2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317....</td>\n",
79
- " <td>[353956162.0, 353956162.0, 351167548.0, 351167...</td>\n",
80
- " <td>[-116289860.0, -120377424.0, -124333256.0, -12...</td>\n",
81
- " <td>[-27047738.0, -20714648.0, -14381556.0, -83123...</td>\n",
82
- " <td>[143914672.0, 141806578.0, 139303064.0, 136536...</td>\n",
83
- " </tr>\n",
84
- " <tr>\n",
85
- " <th>4</th>\n",
86
- " <td>[-111870924.0, -112523505.0, -113160018.0, -11...</td>\n",
87
- " <td>[102843637.0, 102631651.0, 102524970.0, 102459...</td>\n",
88
- " <td>[151882852.0, 152209684.0, 152596548.0, 152794...</td>\n",
89
- " <td>[2315.0, 2315.0, 2315.0, 2315.0, 2315.0, 2315....</td>\n",
90
- " <td>[351167548.0, 351167548.0, 351167548.0, 353956...</td>\n",
91
- " <td>[142813126.0, 144922902.0, 146900818.0, 148615...</td>\n",
92
- " <td>[-26124144.0, -31401826.0, -36811324.0, -42352...</td>\n",
93
- " <td>[-116446684.0, -113152588.0, -109726728.0, -10...</td>\n",
94
- " </tr>\n",
95
- " <tr>\n",
96
- " <th>5</th>\n",
97
- " <td>[-113644003.0, -113838059.0, -114077127.0, -11...</td>\n",
98
- " <td>[102545197.0, 102503508.0, 102137408.0, 102081...</td>\n",
99
- " <td>[152958508.0, 153071616.0, 153006696.0, 153096...</td>\n",
100
- " <td>[2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2316....</td>\n",
101
- " <td>[351167548.0, 351167548.0, 353956162.0, 353956...</td>\n",
102
- " <td>[-147935998.0, -149386470.0, -150441358.0, -15...</td>\n",
103
- " <td>[40109746.0, 45783126.0, 51060680.0, 56602246....</td>\n",
104
- " <td>[107943786.0, 103859108.0, 99642792.0, 9516282...</td>\n",
105
- " </tr>\n",
106
- " </tbody>\n",
107
- "</table>\n",
108
- "</div>"
109
- ],
110
- "text/plain": [
111
- " Ch1 \\\n",
112
- "1 [-111321426.0, -111654495.0, -112009801.0, -11... \n",
113
- "2 [-113038606.0, -113189450.0, -113324950.0, -11... \n",
114
- "3 [-113094566.0, -112948690.0, -112728272.0, -11... \n",
115
- "4 [-111870924.0, -112523505.0, -113160018.0, -11... \n",
116
- "5 [-113644003.0, -113838059.0, -114077127.0, -11... \n",
117
- "\n",
118
- " Ch2 \\\n",
119
- "1 [103018081.0, 103132762.0, 103205819.0, 103230... \n",
120
- "2 [103214543.0, 102753899.0, 102391051.0, 102286... \n",
121
- "3 [102569207.0, 102932501.0, 103309646.0, 103446... \n",
122
- "4 [102843637.0, 102631651.0, 102524970.0, 102459... \n",
123
- "5 [102545197.0, 102503508.0, 102137408.0, 102081... \n",
124
- "\n",
125
- " Ch3 \\\n",
126
- "1 [151607336.0, 151918362.0, 152218710.0, 152309... \n",
127
- "2 [152960076.0, 152767040.0, 152628058.0, 152641... \n",
128
- "3 [152578336.0, 152710768.0, 152799860.0, 152793... \n",
129
- "4 [151882852.0, 152209684.0, 152596548.0, 152794... \n",
130
- "5 [152958508.0, 153071616.0, 153006696.0, 153096... \n",
131
- "\n",
132
- " Ch4 \\\n",
133
- "1 [2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317.... \n",
134
- "2 [2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2317.... \n",
135
- "3 [2317.0, 2317.0, 2317.0, 2317.0, 2317.0, 2317.... \n",
136
- "4 [2315.0, 2315.0, 2315.0, 2315.0, 2315.0, 2315.... \n",
137
- "5 [2316.0, 2316.0, 2316.0, 2316.0, 2316.0, 2316.... \n",
138
- "\n",
139
- " Ch5 \\\n",
140
- "1 [351167548.0, 351167548.0, 351167548.0, 351167... \n",
141
- "2 [351167548.0, 351167548.0, 353956162.0, 353956... \n",
142
- "3 [353956162.0, 353956162.0, 351167548.0, 351167... \n",
143
- "4 [351167548.0, 351167548.0, 351167548.0, 353956... \n",
144
- "5 [351167548.0, 351167548.0, 353956162.0, 353956... \n",
145
- "\n",
146
- " Ch6 \\\n",
147
- "1 [-135673176.0, -138442132.0, -140947490.0, -14... \n",
148
- "2 [132660080.0, 129627276.0, 126462612.0, 123034... \n",
149
- "3 [-116289860.0, -120377424.0, -124333256.0, -12... \n",
150
- "4 [142813126.0, 144922902.0, 146900818.0, 148615... \n",
151
- "5 [-147935998.0, -149386470.0, -150441358.0, -15... \n",
152
- "\n",
153
- " Ch7 \\\n",
154
- "1 [9631514.0, 15436964.0, 21242286.0, 27047610.0... \n",
155
- "2 [-132467908.0, -134974806.0, -137349636.0, -13... \n",
156
- "3 [-27047738.0, -20714648.0, -14381556.0, -83123... \n",
157
- "4 [-26124144.0, -31401826.0, -36811324.0, -42352... \n",
158
- "5 [40109746.0, 45783126.0, 51060680.0, 56602246.... \n",
159
- "\n",
160
- " Ch8 \n",
161
- "1 [126522230.0, 123491662.0, 120329330.0, 116640... \n",
162
- "2 [294276.0, 5828356.0, 11494072.0, 17291554.0, ... \n",
163
- "3 [143914672.0, 141806578.0, 139303064.0, 136536... \n",
164
- "4 [-116446684.0, -113152588.0, -109726728.0, -10... \n",
165
- "5 [107943786.0, 103859108.0, 99642792.0, 9516282... "
166
- ]
167
- },
168
- "execution_count": 33,
169
- "metadata": {},
170
- "output_type": "execute_result"
171
- }
172
- ],
173
  "source": [
174
- "folders = os.listdir(\"data\")\n",
175
- "data_path = Path.Path(\"data\")\n",
176
- "path = data_path / folders[0]\n",
177
- "\n",
178
- "meta_df = pd.read_pickle(path / \"Meta_Data.pickle\") \n",
179
- "signal_df = pd.read_pickle(path / \"Signal_Data.pickle\")"
180
  ]
181
  },
182
  {
@@ -185,6 +29,7 @@
185
  "metadata": {},
186
  "outputs": [],
187
  "source": [
 
188
  "Ch_Mapper = {\"Ch1\": \"Direct Current\",\n",
189
  " \"Ch2\": \"Quadrature Current\",\n",
190
  " \"Ch3\": \"Effective Current\",\n",
@@ -199,7 +44,7 @@
199
  "cell_type": "markdown",
200
  "metadata": {},
201
  "source": [
202
- "creating a small dataset"
203
  ]
204
  },
205
  {
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 52,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
  "import pandas as pd\n",
10
  "import os \n",
11
+ "import pathlib as Path\n",
12
+ "from datasets import load_dataset"
13
  ]
14
  },
15
  {
16
  "cell_type": "code",
17
  "execution_count": null,
18
  "metadata": {},
19
+ "outputs": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  "source": [
21
+ "# TODO make sure to add config_name (like H045_I_ccw_withoutLoad), which is the name of the folder in data folder\n",
22
+ "dataset = load_dataset(\"Lenze_dataset.py\",\"H045_I_ccw_withoutLoad\",trust_remote_code=True)\n",
23
+ "dataset"
 
 
 
24
  ]
25
  },
26
  {
 
29
  "metadata": {},
30
  "outputs": [],
31
  "source": [
32
+ "# mapping for channels defined in the dataset\n",
33
  "Ch_Mapper = {\"Ch1\": \"Direct Current\",\n",
34
  " \"Ch2\": \"Quadrature Current\",\n",
35
  " \"Ch3\": \"Effective Current\",\n",
 
44
  "cell_type": "markdown",
45
  "metadata": {},
46
  "source": [
47
+ "creating a small dataset out of the original dataset"
48
  ]
49
  },
50
  {