Spaces:
Build error
Build error
Upload 29 files
Browse files- 01_combining_dataframes_pipeline.py +20 -0
- 02_feature_extraction_pipeline.py +27 -0
- 03_training_model_pipeline.py +21 -0
- 04_analyzing_data_pipeline.py +24 -0
- 05_complete_trainmodel_pipeline.py +30 -0
- LICENSE +21 -0
- RawAccelerometerData.csv +0 -0
- Selfreports.csv +331 -0
- _config.py +57 -0
- pipeline_classes/__init__.py +10 -0
- pipeline_classes/__pycache__/__init__.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/classify_movementdata.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/create_combineddataframe.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/extract_features.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/import_data.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/lowpassfilter.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/pcahandler.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/scale_xyzdata.cpython-313.pyc +0 -0
- pipeline_classes/__pycache__/train_model.cpython-313.pyc +0 -0
- pipeline_classes/classify_movementdata.py +35 -0
- pipeline_classes/create_combineddataframe.py +83 -0
- pipeline_classes/extract_features.py +272 -0
- pipeline_classes/import_data.py +51 -0
- pipeline_classes/lowpassfilter.py +55 -0
- pipeline_classes/pcahandler.py +24 -0
- pipeline_classes/scale_xyzdata.py +28 -0
- pipeline_classes/train_model.py +179 -0
- requirements.txt +8 -0
- single_participant_positive_high.csv +0 -0
01_combining_dataframes_pipeline.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Merge the self-report and accelerometer dataframes into a single CSV.

Two-step sklearn Pipeline: import the two raw data sources, then combine
them over the configured time window with the configured label columns.
"""
from sklearn.pipeline import Pipeline
from pipeline_classes import ImportData, CreateCombinedDataFrame
from _config import config
import time

# Step 1 loads both raw sources; step 2 merges them on the configured
# time window and attaches the label columns.
_steps = [
    (
        'import_data',
        # input path to self-reports data
        ImportData(use_accel=True, use_reports=True, use_combined=False, use_features=False),
    ),
    (
        'create_combined_dataframe',
        CreateCombinedDataFrame(
            time_window=config["time_window"],
            label_columns=config["label_columns"],
        ),
    ),
]
combining_dataframes_pipeline = Pipeline(_steps)

# Record wall-clock time so slow merges are easy to spot.
start_time = time.time()

# Run the pipeline; the import step reads its own inputs, so no X is passed.
output_df = combining_dataframes_pipeline.fit_transform(None)

end_time = time.time()
_minutes, _seconds = divmod(end_time - start_time, 60)
print(f"Time taken: {int(_minutes)} minutes and {_seconds:.2f} seconds")
|
02_feature_extraction_pipeline.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Extract features from the combined dataframe and export them as a CSV.

Pipeline stages: import the combined data, low-pass filter the signal,
scale the XYZ axes, then window the data and compute the feature set.
"""
from sklearn.pipeline import Pipeline
from pipeline_classes import ImportData, LowPassFilter, ScaleXYZData, ExtractFeatures
from _config import config
import time

_steps = [
    (
        'import_data',
        # input path to combined data
        ImportData(use_accel=False, use_reports=False, use_combined=True, use_features=False),
    ),
    (
        'low_pass_filter',
        LowPassFilter(
            cutoff_frequency=config["cutoff_frequency"],
            sampling_rate=config["data_frequency"],
            order=config["order"],
        ),
    ),
    ('scale_xyz_data', ScaleXYZData(scaler_type=config["scaler_type"])),
    (
        'extract_features',
        ExtractFeatures(
            window_length=config["window_length"],
            window_step_size=config["window_step_size"],
            data_frequency=config["data_frequency"],
            selected_domains=config["selected_domains"],
            include_magnitude=config["include_magnitude"],
            label_columns=config["label_columns"],
        ),
    ),
]
feature_extraction_pipeline = Pipeline(_steps)

# Record wall-clock time for the whole feature-extraction run.
start_time = time.time()

# Run the pipeline; the import step reads its own inputs, so no X is passed.
output_df = feature_extraction_pipeline.fit_transform(None)

end_time = time.time()
_minutes, _seconds = divmod(end_time - start_time, 60)
print(f"Time taken: {int(_minutes)} minutes and {_seconds:.2f} seconds")
|
03_training_model_pipeline.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Train a model on the feature dataframe.

Pipeline stages: import the precomputed features, optionally reduce them
with PCA, then train the model. The trained model is exported to a pickle
file and general information to a json file.
"""
from sklearn.pipeline import Pipeline
from pipeline_classes import ImportData, PCAHandler, TrainModel
from _config import config
import time

_steps = [
    (
        'import_data',
        ImportData(use_accel=False, use_reports=False, use_combined=False, use_features=True),
    ),
    (
        'pca_handler',
        PCAHandler(apply_pca=config["apply_pca"], variance=config["pca_variance"]),
    ),
    ('train_model', TrainModel(config=config)),
]
training_model_pipeline = Pipeline(_steps)

# Record wall-clock time for the full training run.
start_time = time.time()

# Run the pipeline; returns the model artifacts and a report.
output_df = training_model_pipeline.fit_transform(None)

end_time = time.time()
_minutes, _seconds = divmod(end_time - start_time, 60)
print(f"Time taken: {int(_minutes)} minutes and {_seconds:.2f} seconds")
|
04_analyzing_data_pipeline.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Classify accelerometer data that has not been labeled yet.

Pipeline stages: import raw accelerometer data, low-pass filter, scale the
XYZ axes, extract windowed features (no label columns — the data is
unlabeled), then classify the movement and export the result as a CSV.
"""
from sklearn.pipeline import Pipeline
from pipeline_classes import ImportData, LowPassFilter, ScaleXYZData, ExtractFeatures, ClassifyMovementData
from _config import config
import time

_steps = [
    (
        'import_data',
        # input path to accelerometer data
        ImportData(use_accel=True, use_reports=False, use_combined=False, use_features=False),
    ),
    (
        'low_pass_filter',
        LowPassFilter(
            cutoff_frequency=config["cutoff_frequency"],
            sampling_rate=config["data_frequency"],
            order=config["order"],
        ),
    ),
    ('scale_xyz_data', ScaleXYZData(scaler_type=config["scaler_type"])),
    (
        'extract_features',
        ExtractFeatures(
            window_length=config['window_length'],
            window_step_size=config["window_step_size"],
            data_frequency=config["data_frequency"],
            selected_domains=config['selected_domains'],
            include_magnitude=config['include_magnitude'],
        ),
    ),
    ('classify_movement_data', ClassifyMovementData()),
]
analyzing_data_pipeline = Pipeline(_steps)

# Record wall-clock time for the full classification run.
start_time = time.time()

# Run the pipeline; returns the classified dataframe.
output_df = analyzing_data_pipeline.fit_transform(None)

end_time = time.time()
_minutes, _seconds = divmod(end_time - start_time, 60)
print(f"Time taken: {int(_minutes)} minutes and {_seconds:.2f} seconds")
|
05_complete_trainmodel_pipeline.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""End-to-end training pipeline: raw data in, trained model out.

Chains every stage: import both raw sources, combine them, low-pass
filter, scale, extract features, optionally apply PCA, then train the
model. The model is exported to a pickle file and general information to
a json file.
"""
from pipeline_classes import ImportData, LowPassFilter, ScaleXYZData, ExtractFeatures, CreateCombinedDataFrame, TrainModel, PCAHandler
from _config import config
from sklearn.pipeline import Pipeline
import time

_steps = [
    (
        'import_data',
        ImportData(use_accel=True, use_reports=True, use_combined=False, use_features=False),
    ),
    (
        'create_combined_dataframe',
        CreateCombinedDataFrame(
            time_window=config["time_window"],
            label_columns=config["label_columns"],
        ),
    ),
    (
        'low_pass_filter',
        LowPassFilter(
            cutoff_frequency=config["cutoff_frequency"],
            sampling_rate=config["data_frequency"],
            order=config["order"],
        ),
    ),
    ('scale_xyz_data', ScaleXYZData(scaler_type=config["scaler_type"])),
    (
        'extract_features',
        ExtractFeatures(
            window_length=config["window_length"],
            window_step_size=config["window_step_size"],
            data_frequency=config["data_frequency"],
            selected_domains=config["selected_domains"],
            include_magnitude=config["include_magnitude"],
            label_columns=config["label_columns"],
        ),
    ),
    (
        'pca_handler',
        PCAHandler(apply_pca=config["apply_pca"], variance=config["pca_variance"]),
    ),
    ('train_model', TrainModel(config=config)),
]
complete_training_model_pipeline = Pipeline(_steps)

# Record wall-clock time for the complete end-to-end run.
start_time = time.time()

# Run the pipeline; returns the model artifacts and a report.
output_df = complete_training_model_pipeline.fit_transform(None)

end_time = time.time()
_minutes, _seconds = divmod(end_time - start_time, 60)
print(f"Time taken: {int(_minutes)} minutes and {_seconds:.2f} seconds")
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2024 mininato
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RawAccelerometerData.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Selfreports.csv
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
id,participantId,timeOfNotification,timeOfEngagement,timeOfContext,timeOfValence,timeOfArousal,valence,arousal,context
|
| 2 |
+
1,1,459163,464533,497941,481778,491757,NEUTRAL,LOW,VIEW_BOOTH
|
| 3 |
+
2,1,1116342,1123641,1147370,1137105,1142685,NEUTRAL,MEDIUM,CONVERSATION
|
| 4 |
+
5,1,3075708,3082136,3115554,3105558,3109606,NEUTRAL,LOW,WALKING
|
| 5 |
+
6,1,3562201,3582250,3603151,3593212,3597877,NEUTRAL,LOW,VIEW_BOOTH
|
| 6 |
+
9,2,364259,373046,397051,380931,386833,NEUTRAL,MEDIUM,WALKING
|
| 7 |
+
10,2,970402,983704,1000405,992294,996226,NEUTRAL,MEDIUM,CONVERSATION
|
| 8 |
+
11,2,1610269,1626103,1644209,1634845,1639029,NEUTRAL,MEDIUM,WALKING
|
| 9 |
+
15,3,418630,438014,474684,463238,467987,NEGATIVE,LOW,CONVERSATION
|
| 10 |
+
16,3,1011885,1031630,1048935,1039714,1043750,NEGATIVE,LOW,CONVERSATION
|
| 11 |
+
17,3,1764167,1768115,1794167,1777001,1790185,NEUTRAL,HIGH,CONVERSATION
|
| 12 |
+
20,3,3518972,3522724,3555492,3541222,3551571,NEUTRAL,HIGH,CONVERSATION
|
| 13 |
+
22,3,4852881,4929930,4974627,4963038,4970708,NEGATIVE,HIGH,CONVERSATION
|
| 14 |
+
23,3,5685790,5689597,5717615,5708431,5713871,NEGATIVE,HIGH,CONVERSATION
|
| 15 |
+
24,3,6169580,6191219,6207613,6200060,6203908,NEUTRAL,LOW,CONVERSATION
|
| 16 |
+
26,3,7485712,7503454,7530876,7517871,7527148,NEGATIVE,LOW,CONVERSATION
|
| 17 |
+
27,4,398322,418695,445787,432832,438441,NEUTRAL,MEDIUM,OTHER
|
| 18 |
+
28,4,1207854,1213214,1234830,1225417,1231059,POSITIVE,HIGH,CONVERSATION
|
| 19 |
+
29,4,1632001,1635069,1674502,1663489,1667863,NEGATIVE,LOW,CONVERSATION
|
| 20 |
+
33,4,4078936,4082602,4110423,4099608,4106566,POSITIVE,MEDIUM,CONVERSATION
|
| 21 |
+
34,4,4984332,4987509,5023409,5015376,5019515,NEUTRAL,LOW,CONVERSATION
|
| 22 |
+
36,4,6258034,6306827,6350439,6341905,6346349,POSITIVE,LOW,CONVERSATION
|
| 23 |
+
38,4,7537831,7560818,7584662,7576033,7579959,NEUTRAL,LOW,CONVERSATION
|
| 24 |
+
42,5,1033450,1037216,1061281,1047854,1053884,NEUTRAL,MEDIUM,OTHER
|
| 25 |
+
46,5,3810481,3814183,3832718,3824055,3828757,POSITIVE,MEDIUM,CONVERSATION
|
| 26 |
+
49,6,996095,1003869,1040125,1024551,1031703,POSITIVE,MEDIUM,OTHER
|
| 27 |
+
50,6,1984793,1987621,2006622,1996185,2001620,POSITIVE,MEDIUM,CONVERSATION
|
| 28 |
+
51,6,2223253,2241585,2264887,2252721,2259434,POSITIVE,LOW,WALKING
|
| 29 |
+
52,6,3257255,3285071,3302306,3294247,3298276,POSITIVE,MEDIUM,WALKING
|
| 30 |
+
53,6,3482852,3486170,3503682,3494330,3499793,POSITIVE,MEDIUM,WALKING
|
| 31 |
+
54,7,438176,457822,483704,468378,479593,NEUTRAL,MEDIUM,WALKING
|
| 32 |
+
56,7,1641094,1656884,1677728,1669702,1673799,POSITIVE,MEDIUM,CONVERSATION
|
| 33 |
+
58,7,2946746,2955282,2980581,2966335,2975176,NEUTRAL,LOW,VIEW_BOOTH
|
| 34 |
+
59,7,3743733,3748179,3766580,3758612,3762573,NEUTRAL,MEDIUM,WALKING
|
| 35 |
+
62,8,389047,420625,463986,436690,459735,POSITIVE,MEDIUM,CONVERSATION
|
| 36 |
+
63,8,1004645,1008855,1040595,1023803,1032378,POSITIVE,MEDIUM,OTHER
|
| 37 |
+
64,8,1919221,1931695,1992970,1969430,1988013,NEUTRAL,MEDIUM,CONVERSATION
|
| 38 |
+
65,8,2304132,2321452,2351306,2331563,2341606,POSITIVE,MEDIUM,OTHER
|
| 39 |
+
66,8,3121983,3131925,3486662,3157033,3482671,POSITIVE,MEDIUM,WALKING
|
| 40 |
+
67,8,3516191,3519846,3541834,3529143,3536563,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 41 |
+
68,9,440526,445315,465909,455321,459339,POSITIVE,HIGH,VIEW_BOOTH
|
| 42 |
+
69,9,1054085,1057909,1080160,1070152,1075098,POSITIVE,HIGH,CONVERSATION
|
| 43 |
+
71,9,2254651,2271819,3737241,2283233,2287237,NEUTRAL,HIGH,VIEW_BOOTH
|
| 44 |
+
74,10,396440,414965,433816,423861,429852,NEUTRAL,LOW,CONVERSATION
|
| 45 |
+
75,10,973814,991385,1015126,1005754,1010567,NEUTRAL,LOW,OTHER
|
| 46 |
+
76,10,1714211,1718463,1744852,1732967,1739303,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 47 |
+
77,10,2278818,2288071,2316326,2302447,2310891,NEUTRAL,MEDIUM,WALKING
|
| 48 |
+
79,10,3485331,3502674,3522581,3514834,3518716,POSITIVE,HIGH,VIEW_BOOTH
|
| 49 |
+
80,10,4139053,4143444,4183361,4172053,4178035,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 50 |
+
81,10,4728105,4731602,4748899,4740655,4744889,NEUTRAL,LOW,VIEW_BOOTH
|
| 51 |
+
82,10,5417150,5432962,5451095,5442474,5446660,NEGATIVE,LOW,OTHER
|
| 52 |
+
83,10,5942145,5951917,5968416,5960758,5964570,POSITIVE,MEDIUM,OTHER
|
| 53 |
+
84,10,6725895,6737449,6753352,6745452,6749503,NEUTRAL,MEDIUM,OTHER
|
| 54 |
+
85,10,7191467,7195461,7211190,7203423,7207195,NEGATIVE,LOW,OTHER
|
| 55 |
+
86,10,7952233,7968095,7984532,7976096,7980614,NEUTRAL,MEDIUM,OTHER
|
| 56 |
+
87,10,8492220,8499887,8522413,8514738,8518572,NEUTRAL,MEDIUM,OTHER
|
| 57 |
+
88,10,9170759,9187921,9212255,9203547,9207313,NEUTRAL,MEDIUM,WALKING
|
| 58 |
+
89,11,360955,389975,412833,398929,403465,POSITIVE,LOW,OTHER
|
| 59 |
+
90,11,972477,977166,997502,985754,989832,NEUTRAL,LOW,OTHER
|
| 60 |
+
91,11,1623997,1630973,1647457,1639451,1643935,NEUTRAL,MEDIUM,OTHER
|
| 61 |
+
92,11,2275129,2280181,2296938,2288590,2292975,NEUTRAL,LOW,OTHER
|
| 62 |
+
93,11,2911933,2918540,2934638,2926380,2930641,NEUTRAL,LOW,OTHER
|
| 63 |
+
94,11,3538928,3544360,3560998,3553040,3557121,NEUTRAL,MEDIUM,OTHER
|
| 64 |
+
95,12,471215,528562,557396,544878,551091,NEGATIVE,LOW,OTHER
|
| 65 |
+
96,12,1225258,1280326,1299757,1292099,1296126,POSITIVE,MEDIUM,OTHER
|
| 66 |
+
97,12,1836039,1840018,1861866,1850346,1857708,POSITIVE,LOW,OTHER
|
| 67 |
+
98,12,2486363,2492365,2512306,2503087,2507925,NEUTRAL,LOW,OTHER
|
| 68 |
+
99,12,3137287,3145520,3164396,3153542,3160267,POSITIVE,LOW,OTHER
|
| 69 |
+
100,12,3789210,3796685,3816312,3807352,3811878,NEGATIVE,LOW,OTHER
|
| 70 |
+
101,12,4440308,4510695,4535084,4524621,4531188,NEGATIVE,HIGH,WALKING
|
| 71 |
+
106,13,2224206,2240747,2256354,2248791,2252390,NEUTRAL,HIGH,WALKING
|
| 72 |
+
111,14,403464,420660,440265,429027,431958,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 73 |
+
112,14,969390,979347,998577,989891,992501,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 74 |
+
113,14,1682520,1698696,1712380,1706198,1709353,NEUTRAL,HIGH,OTHER
|
| 75 |
+
114,14,2210289,2218612,2236363,2231282,2233720,NEUTRAL,LOW,OTHER
|
| 76 |
+
115,14,2899497,2904801,2918423,2911879,2916203,NEUTRAL,MEDIUM,OTHER
|
| 77 |
+
116,14,3484157,3488140,3496411,3492407,3494872,NEUTRAL,MEDIUM,WALKING
|
| 78 |
+
117,14,4221518,4225648,4241502,4237724,4239472,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 79 |
+
118,14,4746426,4755810,4792302,4784322,4787508,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 80 |
+
119,14,5422344,5460655,5468614,5464395,5466205,POSITIVE,HIGH,OTHER
|
| 81 |
+
120,14,6037674,6058817,6066762,6062339,6064123,POSITIVE,HIGH,OTHER
|
| 82 |
+
121,14,6638455,6647724,6655649,6651931,6653654,POSITIVE,MEDIUM,OTHER
|
| 83 |
+
122,15,437376,441581,472237,454875,467599,NEUTRAL,MEDIUM,CONVERSATION
|
| 84 |
+
123,15,992469,996800,1027268,1012815,1023064,NEUTRAL,MEDIUM,WALKING
|
| 85 |
+
128,15,4195442,4199293,4216706,4208418,4212390,POSITIVE,HIGH,OTHER
|
| 86 |
+
129,15,4833093,4853849,4869948,4862464,4866272,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 87 |
+
130,15,5704140,5709092,5726903,5717912,5722864,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 88 |
+
131,15,6065767,6069742,6092620,6082637,6088334,NEUTRAL,LOW,VIEW_BOOTH
|
| 89 |
+
132,15,7212321,7217115,7235556,7227281,7231793,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 90 |
+
133,15,7322566,7326529,7370005,7336440,7340684,NEUTRAL,MEDIUM,OTHER
|
| 91 |
+
135,15,8649125,8656903,8675123,8665598,8670989,NEUTRAL,MEDIUM,OTHER
|
| 92 |
+
137,16,1032124,1044697,1087131,1058687,1070082,NEUTRAL,HIGH,VIEW_BOOTH
|
| 93 |
+
138,16,1656017,1660069,1686740,1674587,1678965,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 94 |
+
139,16,2346461,2366522,2392805,2383298,2388594,POSITIVE,LOW,VIEW_BOOTH
|
| 95 |
+
140,16,2864762,2875565,2901365,2889232,2895220,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 96 |
+
141,16,3580123,3587169,3607438,3597425,3602877,POSITIVE,LOW,VIEW_BOOTH
|
| 97 |
+
143,17,1177480,1187827,1207407,1197959,1202464,POSITIVE,MEDIUM,CONVERSATION
|
| 98 |
+
145,17,2378390,2395909,2414759,2405054,2409389,POSITIVE,HIGH,OTHER
|
| 99 |
+
148,17,4127579,4132459,4154750,4144095,4149272,NEUTRAL,HIGH,OTHER
|
| 100 |
+
150,17,5372478,5379120,5398635,5389985,5394418,POSITIVE,MEDIUM,OTHER
|
| 101 |
+
157,18,3190342,3215151,3242119,3226979,3235162,POSITIVE,MEDIUM,CONVERSATION
|
| 102 |
+
161,19,1294141,1415217,1431226,1424046,1426928,POSITIVE,MEDIUM,CONVERSATION
|
| 103 |
+
162,19,1955322,1961892,1972531,1966683,1969333,POSITIVE,HIGH,CONVERSATION
|
| 104 |
+
163,19,2601157,2629622,2651502,2641481,2646733,POSITIVE,MEDIUM,OTHER
|
| 105 |
+
164,19,3164532,3169232,3185048,3175434,3178806,NEUTRAL,MEDIUM,CONVERSATION
|
| 106 |
+
165,19,3921753,3925215,3939677,3931322,3934806,NEUTRAL,HIGH,WALKING
|
| 107 |
+
166,20,373771,386930,419180,397258,410188,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 108 |
+
170,20,3165605,3170422,3202474,3188909,3196662,NEUTRAL,MEDIUM,OTHER
|
| 109 |
+
171,20,3805263,3810431,3830273,3820936,3826839,NEUTRAL,MEDIUM,OTHER
|
| 110 |
+
173,20,6758600,6763765,6784057,6775562,6780228,NEGATIVE,MEDIUM,OTHER
|
| 111 |
+
174,21,385802,393372,410816,401501,405852,POSITIVE,HIGH,CONVERSATION
|
| 112 |
+
177,21,2532744,2539297,2561349,2550342,2554795,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 113 |
+
178,21,3172740,3178901,3198546,3190166,3194151,NEUTRAL,MEDIUM,OTHER
|
| 114 |
+
179,21,3827443,3834605,3853997,3844661,3849645,NEUTRAL,LOW,OTHER
|
| 115 |
+
180,21,4479785,4486212,4504725,4495182,4499348,NEUTRAL,LOW,OTHER
|
| 116 |
+
181,21,5131181,5139047,5159896,5146523,5150753,POSITIVE,MEDIUM,CONVERSATION
|
| 117 |
+
182,21,5711830,5714978,5737651,5729696,5733514,NEUTRAL,HIGH,CONVERSATION
|
| 118 |
+
183,21,6683983,6692843,6710683,6702965,6706728,POSITIVE,HIGH,OTHER
|
| 119 |
+
184,21,7019082,7026023,7048869,7035489,7045423,NEUTRAL,MEDIUM,CONVERSATION
|
| 120 |
+
185,22,428119,434457,454721,442558,446783,NEUTRAL,MEDIUM,OTHER
|
| 121 |
+
186,22,991572,1000928,1016396,1008673,1012737,NEUTRAL,LOW,OTHER
|
| 122 |
+
187,22,1669957,1673695,1689053,1680980,1684893,NEUTRAL,LOW,OTHER
|
| 123 |
+
189,22,3049656,3054862,3071589,3063222,3067288,NEUTRAL,MEDIUM,OTHER
|
| 124 |
+
190,22,3477857,3490396,3505673,3498249,3502057,NEUTRAL,LOW,OTHER
|
| 125 |
+
191,22,4315873,4320309,4335554,4327698,4331441,NEGATIVE,MEDIUM,OTHER
|
| 126 |
+
192,22,4722764,4730027,4746527,4738793,4742585,NEGATIVE,LOW,OTHER
|
| 127 |
+
193,22,5548660,5563012,5578725,5570634,5574928,NEUTRAL,MEDIUM,OTHER
|
| 128 |
+
194,22,5941373,5947167,5962518,5954889,5958740,NEUTRAL,MEDIUM,OTHER
|
| 129 |
+
195,22,6851905,6856347,6875883,6867202,6871749,NEGATIVE,LOW,OTHER
|
| 130 |
+
196,22,7186700,7191185,7206608,7198813,7202627,NEUTRAL,MEDIUM,OTHER
|
| 131 |
+
198,23,1047926,1055012,1102461,1071688,1083843,NEUTRAL,LOW,CONVERSATION
|
| 132 |
+
199,23,1772464,1792462,1835713,1804059,1832868,NEUTRAL,MEDIUM,CONVERSATION
|
| 133 |
+
203,23,4547430,4586395,4599023,4591266,4595541,POSITIVE,HIGH,CONVERSATION
|
| 134 |
+
204,23,4974672,4979399,4997970,4988056,4995167,POSITIVE,HIGH,WALKING
|
| 135 |
+
205,23,5817273,5821397,5836072,5829665,5834083,NEUTRAL,HIGH,CONVERSATION
|
| 136 |
+
207,23,7031913,7049611,7084672,7071116,7074149,POSITIVE,HIGH,CONVERSATION
|
| 137 |
+
208,23,7550340,7597681,7607995,7603163,7606027,NEUTRAL,MEDIUM,CONVERSATION
|
| 138 |
+
209,23,8409647,8415493,8437908,8424950,8427100,POSITIVE,MEDIUM,WALKING
|
| 139 |
+
210,24,375235,379813,393686,386003,389556,NEUTRAL,MEDIUM,WALKING
|
| 140 |
+
211,24,972962,976795,991889,984019,986248,POSITIVE,MEDIUM,OTHER
|
| 141 |
+
212,24,1591410,1612868,1622383,1618687,1620474,POSITIVE,MEDIUM,OTHER
|
| 142 |
+
213,24,2200900,2208166,2219359,2213865,2217156,POSITIVE,MEDIUM,OTHER
|
| 143 |
+
214,24,2857829,2860854,2869621,2865236,2866957,NEUTRAL,MEDIUM,CONVERSATION
|
| 144 |
+
215,24,3457354,3473420,3483402,3479330,3481058,NEUTRAL,MEDIUM,OTHER
|
| 145 |
+
216,24,4071613,4085051,4094890,4090372,4093367,POSITIVE,LOW,VIEW_BOOTH
|
| 146 |
+
217,24,4767647,4771718,4782830,4777874,4780207,NEUTRAL,LOW,CONVERSATION
|
| 147 |
+
218,24,5385676,5432938,5443650,5439714,5441897,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 148 |
+
220,24,6628946,6632140,6643607,6639370,6641237,POSITIVE,LOW,CONVERSATION
|
| 149 |
+
221,24,7474159,7489732,7496711,7493660,7494998,POSITIVE,HIGH,CONVERSATION
|
| 150 |
+
222,24,8131082,8135229,8142779,8139652,8141347,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 151 |
+
223,24,8687266,8867182,8892807,8889186,8890950,NEUTRAL,LOW,VIEW_BOOTH
|
| 152 |
+
224,25,374839,415756,436542,427045,432265,POSITIVE,HIGH,WALKING
|
| 153 |
+
225,25,1158058,1162323,1171044,1166086,1168793,NEUTRAL,HIGH,CONVERSATION
|
| 154 |
+
226,25,1783694,1790040,1800330,1795452,1797988,POSITIVE,MEDIUM,WALKING
|
| 155 |
+
227,25,2456368,2466635,2473758,2470022,2471922,POSITIVE,MEDIUM,OTHER
|
| 156 |
+
228,25,3016822,3024054,3031747,3027717,3029548,POSITIVE,MEDIUM,OTHER
|
| 157 |
+
229,25,3705334,3720473,3728658,3724873,3726649,NEUTRAL,MEDIUM,OTHER
|
| 158 |
+
230,25,4236064,4261939,4268459,4265131,4266954,NEUTRAL,MEDIUM,CONVERSATION
|
| 159 |
+
231,25,5139002,5142634,5152593,5148598,5150577,NEGATIVE,MEDIUM,VIEW_BOOTH
|
| 160 |
+
232,25,5439118,5516441,5522658,5519443,5521127,POSITIVE,HIGH,CONVERSATION
|
| 161 |
+
233,25,6438209,6457298,6465349,6460240,6463427,NEGATIVE,LOW,CONVERSATION
|
| 162 |
+
234,25,6739006,6803282,6812301,6806965,6808351,NEGATIVE,LOW,OTHER
|
| 163 |
+
237,26,1004099,1011612,1028117,1020592,1023931,POSITIVE,MEDIUM,WALKING
|
| 164 |
+
239,26,2232180,2250890,2260873,2255161,2257449,POSITIVE,MEDIUM,OTHER
|
| 165 |
+
241,26,3438317,3442612,3465619,3447877,3454150,NEUTRAL,MEDIUM,OTHER
|
| 166 |
+
245,26,5960595,5964255,5977394,5973014,5975368,POSITIVE,HIGH,CONVERSATION
|
| 167 |
+
246,27,536701,543426,572213,559651,565921,POSITIVE,MEDIUM,OTHER
|
| 168 |
+
247,27,1189638,1194293,1211103,1203143,1205334,NEUTRAL,MEDIUM,OTHER
|
| 169 |
+
248,27,1974812,1979228,1995853,1991659,1993797,NEUTRAL,MEDIUM,OTHER
|
| 170 |
+
249,27,2467550,2492439,2508884,2501964,2507068,NEUTRAL,LOW,OTHER
|
| 171 |
+
250,27,3196398,3200121,3210968,3204262,3208914,NEUTRAL,LOW,OTHER
|
| 172 |
+
251,27,3677141,3682487,3698420,3694521,3696439,POSITIVE,MEDIUM,WALKING
|
| 173 |
+
252,27,4737742,4743456,4765589,4759698,4763171,NEUTRAL,LOW,VIEW_BOOTH
|
| 174 |
+
253,27,4892215,4935051,4944043,4939719,4942123,NEUTRAL,MEDIUM,CONVERSATION
|
| 175 |
+
255,27,6492416,6496291,6511948,6505695,6509294,NEUTRAL,LOW,WALKING
|
| 176 |
+
257,27,7847871,7908945,7920353,7916658,7918473,NEGATIVE,LOW,CONVERSATION
|
| 177 |
+
260,28,476606,482399,495293,489895,492119,POSITIVE,MEDIUM,CONVERSATION
|
| 178 |
+
261,28,1001075,1006696,1022210,1013181,1018261,NEGATIVE,HIGH,OTHER
|
| 179 |
+
262,28,1799545,1805471,1814420,1810339,1812250,NEGATIVE,MEDIUM,WALKING
|
| 180 |
+
263,28,2334532,2339044,2350019,2343825,2345735,NEUTRAL,MEDIUM,CONVERSATION
|
| 181 |
+
264,28,3047103,3057355,3064855,3060992,3062786,NEUTRAL,MEDIUM,CONVERSATION
|
| 182 |
+
265,28,3643104,3660732,3669348,3665989,3667746,POSITIVE,HIGH,CONVERSATION
|
| 183 |
+
267,28,5114105,5121522,5131232,5126904,5129271,NEUTRAL,MEDIUM,WALKING
|
| 184 |
+
268,28,5572967,5590179,5596978,5593618,5595145,NEUTRAL,MEDIUM,WALKING
|
| 185 |
+
270,28,6996539,7028566,7037503,7032028,7034129,POSITIVE,HIGH,VIEW_BOOTH
|
| 186 |
+
273,29,458619,466826,485566,475687,478805,POSITIVE,MEDIUM,WALKING
|
| 187 |
+
274,29,1018280,1022240,1045166,1040684,1042630,POSITIVE,HIGH,CONVERSATION
|
| 188 |
+
275,29,1896030,1900823,1911410,1906091,1909245,NEUTRAL,HIGH,VIEW_BOOTH
|
| 189 |
+
277,29,3168088,3180806,3191350,3185135,3188772,NEGATIVE,LOW,VIEW_BOOTH
|
| 190 |
+
278,29,3799169,3963759,3979611,3970320,3974500,POSITIVE,MEDIUM,CONVERSATION
|
| 191 |
+
279,29,4595223,4599365,4610211,4604164,4607831,NEUTRAL,MEDIUM,CONVERSATION
|
| 192 |
+
280,29,5225253,5227892,5236898,5232521,5234335,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 193 |
+
282,29,6444878,6447815,6699311,6452365,6455099,POSITIVE,MEDIUM,OTHER
|
| 194 |
+
283,30,447369,466334,486285,476298,481761,NEUTRAL,LOW,WALKING
|
| 195 |
+
284,30,1014547,1018455,1030259,1023458,1026962,POSITIVE,HIGH,OTHER
|
| 196 |
+
285,30,1813364,1816159,1830888,1824864,1827496,NEUTRAL,LOW,CONVERSATION
|
| 197 |
+
286,30,2225313,2229694,2236292,2233304,2234671,POSITIVE,HIGH,CONVERSATION
|
| 198 |
+
287,30,3123141,3127754,3135059,3131141,3133218,NEUTRAL,LOW,CONVERSATION
|
| 199 |
+
288,30,3469211,3472891,3480588,3476766,3478485,POSITIVE,HIGH,CONVERSATION
|
| 200 |
+
289,30,4447626,4450357,4458639,4455220,4456848,POSITIVE,MEDIUM,CONVERSATION
|
| 201 |
+
290,30,4706883,4712173,4719855,4715992,4717904,POSITIVE,MEDIUM,CONVERSATION
|
| 202 |
+
291,30,5668802,5673738,5686133,5680486,5682460,POSITIVE,MEDIUM,CONVERSATION
|
| 203 |
+
292,30,5909192,5912558,5920920,5916664,5918603,NEUTRAL,MEDIUM,OTHER
|
| 204 |
+
293,30,6894924,6899530,6908985,6903811,6905595,NEGATIVE,MEDIUM,VIEW_BOOTH
|
| 205 |
+
294,30,7206549,7210380,7218136,7214044,7216234,POSITIVE,MEDIUM,CONVERSATION
|
| 206 |
+
295,30,8175968,8188976,8197301,8194089,8195626,NEUTRAL,MEDIUM,CONVERSATION
|
| 207 |
+
296,31,450055,456852,470741,464492,467490,POSITIVE,MEDIUM,WALKING
|
| 208 |
+
297,31,1027972,1031999,1041707,1036536,1039626,NEUTRAL,MEDIUM,CONVERSATION
|
| 209 |
+
298,31,1864189,1870609,1880504,1875238,1878668,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 210 |
+
299,31,2524865,2594119,2601479,2598005,2599792,POSITIVE,HIGH,VIEW_BOOTH
|
| 211 |
+
300,31,3095696,3108949,3118228,3113028,3115879,NEUTRAL,MEDIUM,CONVERSATION
|
| 212 |
+
301,31,3957981,3972387,3979397,3976233,3977939,POSITIVE,HIGH,VIEW_BOOTH
|
| 213 |
+
302,31,4585810,4627485,4634986,4630965,4632966,NEUTRAL,MEDIUM,WALKING
|
| 214 |
+
303,31,5232846,5241562,5257078,5245032,5255390,POSITIVE,HIGH,VIEW_BOOTH
|
| 215 |
+
304,31,5806903,5852567,5861047,5856299,5859070,NEUTRAL,MEDIUM,CONVERSATION
|
| 216 |
+
305,31,6473876,6491700,6499561,6495728,6497253,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 217 |
+
306,32,415875,465231,490166,482402,485624,NEUTRAL,MEDIUM,WALKING
|
| 218 |
+
307,32,1019863,1025362,1045056,1030184,1035107,POSITIVE,HIGH,OTHER
|
| 219 |
+
308,32,1815296,1817977,1833558,1823311,1829782,NEUTRAL,MEDIUM,CONVERSATION
|
| 220 |
+
309,32,2303442,2333455,2345887,2340387,2343551,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 221 |
+
310,32,3480594,4767318,4775586,4772166,4773762,NEUTRAL,MEDIUM,CONVERSATION
|
| 222 |
+
313,32,4930815,4940270,4949457,4944587,4946735,POSITIVE,HIGH,WALKING
|
| 223 |
+
314,32,5970857,5978016,5991748,5986972,5989422,NEGATIVE,MEDIUM,VIEW_BOOTH
|
| 224 |
+
315,32,6164148,6171884,6180697,6176540,6178445,NEUTRAL,MEDIUM,WALKING
|
| 225 |
+
317,32,7406065,7412599,7423023,7418435,7420389,NEUTRAL,MEDIUM,OTHER
|
| 226 |
+
319,33,413389,508962,521373,514056,516476,POSITIVE,MEDIUM,WALKING
|
| 227 |
+
320,33,1064795,1073304,1088584,1083644,1085745,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 228 |
+
321,33,1702340,1735244,1744899,1739908,1742719,POSITIVE,HIGH,VIEW_BOOTH
|
| 229 |
+
322,33,2354336,2426868,2435258,2430689,2433579,POSITIVE,HIGH,VIEW_BOOTH
|
| 230 |
+
323,33,2925085,2945096,2952927,2948622,2951113,POSITIVE,HIGH,WALKING
|
| 231 |
+
324,33,3568527,3713623,3721992,3717477,3719842,POSITIVE,HIGH,VIEW_BOOTH
|
| 232 |
+
328,33,6078250,6175510,6185420,6179357,6182878,POSITIVE,HIGH,VIEW_BOOTH
|
| 233 |
+
330,33,7428337,7546845,7554072,7550332,7552007,POSITIVE,HIGH,CONVERSATION
|
| 234 |
+
331,33,8042241,8096825,8103922,8100257,8102092,POSITIVE,HIGH,CONVERSATION
|
| 235 |
+
332,33,8755173,8778412,8786296,8781974,8783745,POSITIVE,HIGH,OTHER
|
| 236 |
+
333,34,447403,599386,611664,605102,607625,POSITIVE,HIGH,CONVERSATION
|
| 237 |
+
334,34,990543,1007641,1018160,1013479,1016629,NEUTRAL,MEDIUM,WALKING
|
| 238 |
+
335,34,1754661,1759063,1769011,1764134,1765991,NEUTRAL,MEDIUM,CONVERSATION
|
| 239 |
+
336,34,2376149,2380973,2393972,2386984,2391550,NEUTRAL,HIGH,WALKING
|
| 240 |
+
338,34,3691582,3695655,3709449,3703057,3705195,NEUTRAL,LOW,OTHER
|
| 241 |
+
340,35,469206,487847,516695,500694,503836,POSITIVE,MEDIUM,OTHER
|
| 242 |
+
341,35,1060038,1065202,1082202,1074460,1077200,NEUTRAL,MEDIUM,WALKING
|
| 243 |
+
342,35,1724995,1729403,1750704,1742459,1745993,NEUTRAL,MEDIUM,CONVERSATION
|
| 244 |
+
344,35,2937105,2942275,2956687,2949676,2951460,NEUTRAL,MEDIUM,CONVERSATION
|
| 245 |
+
346,35,4209228,4212499,4221547,4216521,4218310,NEUTRAL,MEDIUM,CONVERSATION
|
| 246 |
+
347,35,4971865,4979279,4989400,4984380,4986488,NEUTRAL,MEDIUM,CONVERSATION
|
| 247 |
+
348,35,5518036,5521107,5531601,5525542,5527340,NEUTRAL,MEDIUM,WALKING
|
| 248 |
+
349,35,6228270,6244592,6253785,6250388,6251858,NEUTRAL,MEDIUM,CONVERSATION
|
| 249 |
+
350,35,6798570,6806229,6814386,6810912,6812601,NEUTRAL,MEDIUM,WALKING
|
| 250 |
+
352,35,8075559,8093519,8100809,8096905,8098683,NEUTRAL,MEDIUM,CONVERSATION
|
| 251 |
+
353,35,8769411,8772863,8782066,8777050,8778714,NEUTRAL,MEDIUM,OTHER
|
| 252 |
+
354,36,447096,462169,473327,468716,470877,POSITIVE,HIGH,WALKING
|
| 253 |
+
355,36,1013861,1019472,1028712,1023114,1025523,POSITIVE,HIGH,OTHER
|
| 254 |
+
356,36,1807896,1813955,1821801,1817652,1819757,NEUTRAL,MEDIUM,WALKING
|
| 255 |
+
357,36,2220363,2224940,2232741,2228898,2230450,POSITIVE,HIGH,VIEW_BOOTH
|
| 256 |
+
358,36,3101101,3104983,3111671,3108253,3110076,POSITIVE,HIGH,VIEW_BOOTH
|
| 257 |
+
359,36,3512204,3528567,3535243,3532199,3533668,POSITIVE,HIGH,VIEW_BOOTH
|
| 258 |
+
360,36,4416861,4433129,4439381,4436250,4437921,POSITIVE,HIGH,VIEW_BOOTH
|
| 259 |
+
362,36,5645858,5664499,5670490,5667661,5669057,POSITIVE,HIGH,VIEW_BOOTH
|
| 260 |
+
363,36,6164371,6169127,6175286,6172464,6173816,NEUTRAL,MEDIUM,WALKING
|
| 261 |
+
365,36,7421739,7425039,7434355,7430565,7432070,POSITIVE,HIGH,WALKING
|
| 262 |
+
366,36,8227815,8230769,8237425,8233706,8235518,NEUTRAL,HIGH,WALKING
|
| 263 |
+
367,37,403144,408364,427858,420480,424330,POSITIVE,HIGH,WALKING
|
| 264 |
+
368,37,1012406,1165451,1184402,1170892,1172800,POSITIVE,HIGH,OTHER
|
| 265 |
+
369,37,1637832,1655297,1667069,1662090,1664478,NEUTRAL,MEDIUM,OTHER
|
| 266 |
+
370,37,2247122,2258451,2272945,2269396,2271412,NEGATIVE,LOW,OTHER
|
| 267 |
+
371,37,3055252,3058800,3068584,3064767,3066722,NEUTRAL,MEDIUM,OTHER
|
| 268 |
+
372,37,3460162,3475885,3491098,3481140,3483810,NEGATIVE,LOW,OTHER
|
| 269 |
+
374,37,4692798,4695962,4740135,4705077,4724049,POSITIVE,HIGH,CONVERSATION
|
| 270 |
+
375,37,5596301,5613971,5641687,5621133,5639784,NEUTRAL,LOW,WALKING
|
| 271 |
+
377,37,6892400,6898018,6920070,6903743,6912592,POSITIVE,MEDIUM,CONVERSATION
|
| 272 |
+
378,37,7490362,7493636,7517280,7511366,7513696,POSITIVE,MEDIUM,CONVERSATION
|
| 273 |
+
379,37,8142726,8146314,8155828,8150876,8152986,NEUTRAL,LOW,VIEW_BOOTH
|
| 274 |
+
380,37,8721545,8861944,8917474,8913390,8915337,POSITIVE,HIGH,CONVERSATION
|
| 275 |
+
381,38,379560,428257,453470,437070,447170,POSITIVE,MEDIUM,WALKING
|
| 276 |
+
386,38,3439806,3463018,3473817,3468198,3470588,NEUTRAL,MEDIUM,CONVERSATION
|
| 277 |
+
388,38,4669425,4697990,4708408,4701833,4704267,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 278 |
+
389,38,5470509,5482379,5496805,5492934,5494724,POSITIVE,LOW,OTHER
|
| 279 |
+
390,38,5984006,6025754,6039144,6035022,6037323,POSITIVE,MEDIUM,WALKING
|
| 280 |
+
391,38,6720527,6757685,6768349,6764563,6766228,NEUTRAL,MEDIUM,OTHER
|
| 281 |
+
392,38,7322979,7328006,7334304,7331241,7332799,NEUTRAL,MEDIUM,WALKING
|
| 282 |
+
393,39,411121,416854,439176,431119,434850,NEUTRAL,MEDIUM,WALKING
|
| 283 |
+
394,39,1018886,1031963,1047671,1037559,1042847,POSITIVE,HIGH,CONVERSATION
|
| 284 |
+
395,39,1682708,1692246,1701295,1697346,1699068,POSITIVE,HIGH,CONVERSATION
|
| 285 |
+
396,39,2226000,2233289,2248330,2239088,2240988,NEUTRAL,MEDIUM,OTHER
|
| 286 |
+
397,39,2898988,2904659,2913307,2908750,2910443,POSITIVE,HIGH,OTHER
|
| 287 |
+
398,39,3442347,3461449,3482733,3473144,3475355,NEUTRAL,LOW,OTHER
|
| 288 |
+
399,39,4156358,4185857,4197449,4193803,4195452,NEUTRAL,MEDIUM,CONVERSATION
|
| 289 |
+
400,39,4686398,4702686,4717298,4710614,4712251,NEUTRAL,MEDIUM,OTHER
|
| 290 |
+
401,39,5419179,5440077,5448412,5444012,5446784,POSITIVE,HIGH,CONVERSATION
|
| 291 |
+
402,39,5971099,5975971,5983904,5979615,5982259,POSITIVE,MEDIUM,CONVERSATION
|
| 292 |
+
403,39,6686024,6694008,6702015,6698583,6700281,NEUTRAL,MEDIUM,CONVERSATION
|
| 293 |
+
404,39,7249240,7253659,7266689,7262878,7264509,NEUTRAL,MEDIUM,CONVERSATION
|
| 294 |
+
405,40,385066,418772,438171,426505,432094,POSITIVE,MEDIUM,CONVERSATION
|
| 295 |
+
406,40,1046847,1188385,1200383,1192883,1195740,POSITIVE,MEDIUM,CONVERSATION
|
| 296 |
+
409,40,3252870,3259774,3272231,3266838,3270230,POSITIVE,MEDIUM,CONVERSATION
|
| 297 |
+
410,40,3625362,3628713,3642191,3637443,3639543,NEUTRAL,MEDIUM,CONVERSATION
|
| 298 |
+
411,40,4498835,4531239,4542755,4537150,4539153,POSITIVE,HIGH,WALKING
|
| 299 |
+
412,40,4837473,4892195,4907013,4897288,4904155,POSITIVE,HIGH,VIEW_BOOTH
|
| 300 |
+
413,40,6240001,6288875,6297303,6292953,6295268,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 301 |
+
414,40,6240028,6271951,6285074,6278257,6281971,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 302 |
+
416,40,7502843,7510644,7521914,7517482,7519338,POSITIVE,LOW,OTHER
|
| 303 |
+
419,41,415228,538718,569297,551726,556277,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 304 |
+
420,41,979467,983049,994709,989715,992779,POSITIVE,HIGH,CONVERSATION
|
| 305 |
+
421,41,1675554,1782475,1806678,1794142,1802146,NEUTRAL,MEDIUM,CONVERSATION
|
| 306 |
+
422,41,2187331,2192311,2201057,2197583,2199180,POSITIVE,HIGH,CONVERSATION
|
| 307 |
+
423,41,2968943,2972945,3053893,3016814,3041099,POSITIVE,LOW,CONVERSATION
|
| 308 |
+
424,41,3452560,3470742,3480266,3475005,3477156,POSITIVE,MEDIUM,OTHER
|
| 309 |
+
425,41,4175648,4180301,4194774,4186448,4191673,POSITIVE,LOW,CONVERSATION
|
| 310 |
+
426,41,4832504,5004090,5014444,5009923,5011618,NEUTRAL,LOW,VIEW_BOOTH
|
| 311 |
+
427,41,5384250,5466995,5474403,5471036,5472840,POSITIVE,MEDIUM,CONVERSATION
|
| 312 |
+
428,41,6096933,6126741,6144612,6137039,6142446,POSITIVE,MEDIUM,CONVERSATION
|
| 313 |
+
429,42,384228,399210,412073,405380,407533,NEUTRAL,MEDIUM,WALKING
|
| 314 |
+
432,42,2207313,2212717,2230043,2222234,2223985,NEGATIVE,MEDIUM,WALKING
|
| 315 |
+
434,42,3412215,3415935,3423645,3419517,3421448,POSITIVE,MEDIUM,CONVERSATION
|
| 316 |
+
440,43,405159,413338,430651,419832,423368,POSITIVE,MEDIUM,WALKING
|
| 317 |
+
443,43,2438678,2471892,2483633,2478961,2481228,POSITIVE,MEDIUM,CONVERSATION
|
| 318 |
+
445,43,3875682,3883277,3902504,3888155,3891527,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 319 |
+
446,43,4355656,4381506,4391650,4385945,4389476,POSITIVE,HIGH,CONVERSATION
|
| 320 |
+
447,43,5145408,5151287,5166824,5159387,5163519,NEUTRAL,MEDIUM,CONVERSATION
|
| 321 |
+
448,43,5805735,5812887,5821945,5816274,5820423,POSITIVE,MEDIUM,CONVERSATION
|
| 322 |
+
449,43,6383233,6411042,6419546,6415165,6417611,NEUTRAL,MEDIUM,CONVERSATION
|
| 323 |
+
451,43,7624508,7629824,7647099,7636938,7640844,NEUTRAL,LOW,WALKING
|
| 324 |
+
452,43,8400302,8414184,8422698,8418167,8420180,POSITIVE,MEDIUM,WALKING
|
| 325 |
+
453,44,455877,459825,473866,466570,469765,POSITIVE,HIGH,CONVERSATION
|
| 326 |
+
455,44,1856736,1862210,1875706,1869148,1871332,NEGATIVE,MEDIUM,WALKING
|
| 327 |
+
456,44,2480575,2592920,2602657,2596802,2598906,POSITIVE,MEDIUM,CONVERSATION
|
| 328 |
+
457,44,3149594,3163102,3179345,3174258,3176756,POSITIVE,MEDIUM,VIEW_BOOTH
|
| 329 |
+
460,44,4929682,4937300,4951621,4947538,4949290,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 330 |
+
461,44,5666736,5670460,5688609,5676481,5678315,NEUTRAL,MEDIUM,VIEW_BOOTH
|
| 331 |
+
462,44,6163589,6182378,6209246,6186723,6201830,POSITIVE,HIGH,VIEW_BOOTH
|
_config.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Configuration file for the pipeline

config = {
    # --- Input/output paths ---
    "accel_path": "Path or Name of File: ",                              # raw accelerometer data
    "reports_path": "Path or Name of SelfReports File",                  # self-report data
    "combined_data_path": "Path or Name of File of Combined Data File",  # merged reports + accel data
    "features_data_path": "Path or Name of File of Features Data File",  # extracted feature table
    "model_path": "Path or Name of Trained Model File",                  # persisted trained model

    # --- Labels ---
    "label_columns": ["valence", "arousal"],  # emotion labels used in the study
    "target_label": "arousal",                # the single label the model predicts

    # --- Combining self-reports with accelerometer data ---
    "time_window": 2,  # minutes before and after each self-report

    # --- Feature extraction ---
    "window_length": 60,        # sliding-window length in seconds
    "window_step_size": 20,     # window hop in seconds (10%-50% of window_length)
    "data_frequency": 25,       # sampling rate in Hz
    "selected_domains": None,   # None = every domain; or any of 'time_domain', 'spatial', 'frequency', 'statistical', 'wavelet' (order irrelevant)
    "include_magnitude": True,  # also compute magnitude-based features

    # --- Low-pass filter ---
    "cutoff_frequency": 10,  # cut-off frequency of the low-pass filter
    "order": 4,              # order of the filter

    # --- Scaling ---
    "scaler_type": "standard",  # possible scalers: 'standard' or 'minmax'

    # --- PCA ---
    "apply_pca": False,    # whether to apply PCA
    "pca_variance": 0.95,  # PCA variance threshold

    # --- Model training ---
    "classifier": "xgboost",  # default classifier ('xgboost', 'svm', 'randomforest')

    # --- Hyperparameter tuning ---
    "n_splits": 5,   # cross-validation folds
    "n_iter": 30,    # tuning iterations
    "n_jobs": -1,    # parallel jobs (-1 = all cores)
    "n_points": 1,   # points sampled per step of the hyperparameter search

    # Custom search space; set to {None} to use the default inside the TrainModel class.
    "param_space": {
        "learning_rate": (0.05, 0.2),
        "n_estimators": (200, 800),
        "max_depth": (4, 8),
        "min_child_weight": (1, 5),
        "subsample": (0.6, 0.9),
        "colsample_bytree": (0.6, 0.9),
        "gamma": (0, 5),
        "reg_alpha": (0, 5),
        "reg_lambda": (0, 5)
    },
}
|
pipeline_classes/__init__.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Description: This file is used to import all the classes in the pipeline_classes folder.
|
| 2 |
+
|
| 3 |
+
from .import_data import ImportData
|
| 4 |
+
from .create_combineddataframe import CreateCombinedDataFrame
|
| 5 |
+
from .scale_xyzdata import ScaleXYZData
|
| 6 |
+
from .extract_features import ExtractFeatures
|
| 7 |
+
from .pcahandler import PCAHandler
|
| 8 |
+
from .train_model import TrainModel
|
| 9 |
+
from .classify_movementdata import ClassifyMovementData
|
| 10 |
+
from .lowpassfilter import LowPassFilter
|
pipeline_classes/__pycache__/__init__.cpython-313.pyc
ADDED
|
Binary file (622 Bytes). View file
|
|
|
pipeline_classes/__pycache__/classify_movementdata.cpython-313.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|
pipeline_classes/__pycache__/create_combineddataframe.cpython-313.pyc
ADDED
|
Binary file (4.57 kB). View file
|
|
|
pipeline_classes/__pycache__/extract_features.cpython-313.pyc
ADDED
|
Binary file (18.3 kB). View file
|
|
|
pipeline_classes/__pycache__/import_data.cpython-313.pyc
ADDED
|
Binary file (2.77 kB). View file
|
|
|
pipeline_classes/__pycache__/lowpassfilter.cpython-313.pyc
ADDED
|
Binary file (2.75 kB). View file
|
|
|
pipeline_classes/__pycache__/pcahandler.cpython-313.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
pipeline_classes/__pycache__/scale_xyzdata.cpython-313.pyc
ADDED
|
Binary file (1.82 kB). View file
|
|
|
pipeline_classes/__pycache__/train_model.cpython-313.pyc
ADDED
|
Binary file (8.5 kB). View file
|
|
|
pipeline_classes/classify_movementdata.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
import joblib
|
| 5 |
+
from _config import config
|
| 6 |
+
|
| 7 |
+
# This class is used to classify the movement data using a pre-trained model
class ClassifyMovementData(BaseEstimator, TransformerMixin):
    """Label pre-extracted movement features with a trained emotion model.

    The model is loaded lazily from ``model_path`` on the first call to
    ``transform``; predictions are prepended as a ``predicted_emotion``
    column and the labeled table is exported to CSV.
    """

    def __init__(self, model_path=None):
        # Fall back to the path configured in _config.py when none is given.
        self.model_path = model_path if model_path else config.get("model_path")
        self.model = None  # loaded on first use in transform()

    def fit(self, X, y=None):
        # Nothing to learn: classification uses an already-trained model.
        return self

    def transform(self, X):
        """Predict an emotion for every feature row in ``X``.

        Returns a copy of ``X`` with ``predicted_emotion`` as its first
        column; also writes the labeled table to a CSV file named after the
        configured window length.
        """
        if self.model is None:
            self.model = joblib.load(self.model_path)  # Load the pre-trained model
            print(f"Model loaded from {self.model_path}")

        # Assuming `X` is a DataFrame of pre-extracted features.
        predictions = self.model.predict(X)

        # BUG FIX: work on a copy so the caller's DataFrame is not mutated in
        # place (scikit-learn transformers should leave their input intact).
        X = X.copy()

        # Adding predictions to the DataFrame as the first column
        X.insert(0, 'predicted_emotion', predictions)

        print("Data classified successfully.")

        # Export the labeled DataFrame to CSV
        window_length_str = str(config["window_length"])
        output_file = f"classified_movement_data_window_{window_length_str}.csv"
        X.to_csv(output_file, index=False)
        print(f"Classified movement data exported successfully to {output_file}.")

        return X
|
pipeline_classes/create_combineddataframe.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from _config import config
|
| 5 |
+
|
| 6 |
+
class CreateCombinedDataFrame(BaseEstimator, TransformerMixin):
    """Join self-reports with the accelerometer samples recorded around them.

    For every valid self-report, all accelerometer rows of the same
    participant within ``time_window`` minutes of the notification are
    flattened into one output row per sample, together with the report's
    emotion labels.  The combined table is returned and exported to CSV.
    """

    def __init__(self, time_window, label_columns=None):
        self.time_window = time_window  # minutes before/after each report
        # Emotion label columns to carry over; None is treated as "no labels"
        # so the filtering loop below cannot crash on the default.
        self.label_columns = label_columns if label_columns is not None else []

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        df_reports, df_accel = X

        print(f"PreprocesssingCombined initialized with label_columns: {self.label_columns}")
        # Keep only engaged reports that carry a value for every chosen label.
        valid_conditions = (df_reports['timeOfEngagement'] != 0)
        for label in self.label_columns:
            valid_conditions &= (df_reports[label] != "NONE")

        df_reports = df_reports[valid_conditions].copy()

        # Align the timestamp column names and convert epoch-ms to datetimes.
        df_accel.rename(columns={'timestamp': 'timeOfNotification'}, inplace=True)
        df_accel["timeOfNotification"] = pd.to_datetime(df_accel["timeOfNotification"], unit="ms")
        df_reports["timeOfNotification"] = pd.to_datetime(df_reports["timeOfNotification"], unit="ms")

        print(f"ExtractAccelData initialized with time_window: {self.time_window}")
        # BUG FIX: the original re-unpacked ``df_reports, df_accel = X`` here,
        # which silently discarded the filtering and datetime conversion above
        # and broke the timestamp comparisons in _extract_accel_data.
        df_reports['accel_data'] = df_reports.apply(lambda row: self._extract_accel_data(row, df_accel), axis=1)

        print(f"Combining called with label_columns: {self.label_columns}")
        combined_data = []

        # One output row per accelerometer sample of each self-report.
        for _, row in df_reports.iterrows():
            accel_data = row['accel_data']
            for _, accel_row in accel_data.iterrows():
                combined_row = {
                    'participantId': row['participantId'],  # Participant ID
                    'selfreport_time': row['timeOfNotification'],  # Self-report time
                    'accel_time': accel_row['timeOfNotification'],  # Accelerometer data time
                    'x': accel_row['x'],  # x-axis accelerometer data
                    'y': accel_row['y'],  # y-axis accelerometer data
                    'z': accel_row['z']  # z-axis accelerometer data
                }

                # Dynamically add emotion labels to the combined row
                for label in self.label_columns:
                    combined_row[label] = row[label]

                combined_data.append(combined_row)

        combined_df = pd.DataFrame(combined_data)

        # Create groupid column (unique identifier based on participantId and selfreport_time)
        combined_df['groupid'] = combined_df.groupby(['participantId', 'selfreport_time']).ngroup() + 1
        col = combined_df.pop("groupid")  # Move groupid to the first column
        combined_df.insert(0, col.name, col)

        # Export the combined dataframe to CSV
        time_window_str = str(self.time_window)
        label_columns_str = "_".join(self.label_columns)
        file_name = f"combined_data_timewindow_{time_window_str}min_labels_{label_columns_str}.csv"
        combined_df.to_csv(file_name, index=False)
        print(f"Combined dataframe exported successfully to {file_name}.")

        return combined_df

    def _extract_accel_data(self, row, accel_data):
        """Return this participant's accelerometer rows that fall inside the
        +/- ``time_window`` minute window around the report's notification."""
        time_delta = pd.Timedelta(minutes=self.time_window)
        start_time = pd.to_datetime(row['timeOfNotification']) - time_delta
        end_time = pd.to_datetime(row['timeOfNotification']) + time_delta
        participant_id = row['participantId']
        mask = (
            (accel_data['participantId'] == participant_id) &  # same participant only
            (accel_data['timeOfNotification'] >= start_time) &  # inside window start
            (accel_data['timeOfNotification'] <= end_time)  # inside window end
        )
        return accel_data[mask]
|
pipeline_classes/extract_features.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from scipy.fftpack import fft
|
| 5 |
+
from scipy.signal import welch
|
| 6 |
+
import pywt
|
| 7 |
+
from _config import config
|
| 8 |
+
|
| 9 |
+
class ExtractFeatures(BaseEstimator, TransformerMixin):
|
| 10 |
+
def __init__(self, window_length, window_step_size, data_frequency, selected_domains=None, include_magnitude=False, label_columns=None):
    """Configure the sliding-window feature extractor.

    window_length / window_step_size are in seconds, data_frequency in Hz.
    ``selected_domains`` of None means "compute every feature domain".
    """
    self.window_length = window_length          # window size in seconds
    self.window_step_size = window_step_size    # hop between windows in seconds
    self.data_frequency = data_frequency        # sampling rate in Hz
    self.selected_domains = selected_domains    # None -> all domains
    self.include_magnitude = include_magnitude  # also derive |(x, y, z)| features
    self.label_columns = label_columns          # emotion labels to propagate per window
|
| 17 |
+
|
| 18 |
+
def fit(self, X, y=None):
    """No fitting required; feature extraction is stateless."""
    return self
|
| 20 |
+
|
| 21 |
+
def transform(self, X):
    """Slide windows over the accelerometer signal and extract features.

    When a ``groupid`` column is present, windows are computed per group so
    no window spans two different self-reports; the group id and its emotion
    labels are attached to every window's feature row.  The resulting table
    is returned and written to a CSV whose name encodes the window settings.
    """
    features_list = []
    # BUG FIX: tolerate label_columns=None (the constructor default) instead
    # of crashing when iterating it below.
    labels = self.label_columns if self.label_columns else []

    if 'groupid' in X.columns:  # Check for groupid column
        for groupid in X['groupid'].unique():  # Iterate over unique group IDs
            temp = X[X['groupid'] == groupid]  # Rows belonging to this group
            temp_ex = temp[['accel_time', 'x', 'y', 'z']].copy()  # Keep only the necessary columns
            windows = self._window_data(temp_ex[['x', 'y', 'z']])  # Create windows of data

            for window in windows:
                features = self._extract_features_from_window(window)  # Extract features from each window
                features['groupid'] = groupid  # Add groupid to the features

                # Dynamically add emotion labels to the features
                for label in labels:
                    features[label] = temp[label].iloc[0]

                features_list.append(pd.DataFrame([features]))

    else:  # No groupid: treat the whole frame as one continuous signal
        windows = self._window_data(X[['x', 'y', 'z']])
        for window in windows:
            features = self._extract_features_from_window(window)
            features_list.append(pd.DataFrame([features]))

    # BUG FIX: input shorter than one window yields no features;
    # pd.concat([]) would raise, so return an empty frame instead.
    if not features_list:
        print("No complete window could be formed; returning empty DataFrame.")
        return pd.DataFrame()

    all_features = pd.concat(features_list, ignore_index=True)

    # Export features to CSV; the file name documents the extraction settings.
    window_length_str = str(self.window_length)
    window_step_size_str = str(self.window_step_size)
    if self.selected_domains is None:  # All features calculated if domains are not selected
        domain_str = "all_features"
    else:
        domain_str = "_".join(self.selected_domains)
    file_name = f"features_window_{window_length_str}_step_{window_step_size_str}_{domain_str}.csv"
    all_features.to_csv(file_name, index=False)

    print("All features extracted successfully.")
    return all_features
|
| 60 |
+
|
| 61 |
+
# Time Domain Features
|
| 62 |
+
def _calculate_magnitude(self, window):
    """Return the per-sample Euclidean norm of the (x, y, z) columns."""
    squared_sum = window[:, 0] ** 2 + window[:, 1] ** 2 + window[:, 2] ** 2
    return np.sqrt(squared_sum)
|
| 64 |
+
|
| 65 |
+
def _window_data(self, data):
    """Cut ``data`` into fixed-length, possibly overlapping windows.

    Window and step sizes are given in seconds and converted to sample
    counts via the sampling frequency (e.g. 60 s * 25 Hz = 1500 samples);
    a trailing remainder shorter than one full window is dropped.
    """
    samples_per_window = int(self.window_length * self.data_frequency)
    samples_per_step = int(self.window_step_size * self.data_frequency)
    last_start = len(data) - samples_per_window
    windows = [
        data[start:start + samples_per_window]
        for start in range(0, last_start + 1, samples_per_step)
    ]
    return np.array(windows)
|
| 70 |
+
|
| 71 |
+
def _extract_features_from_window(self, window):
    """Compute the features of every requested domain for one window.

    ``selected_domains`` of None means "all domains"; otherwise only the
    listed domains are computed.  Per-domain dicts are merged in a fixed
    order so downstream column order stays stable.
    """
    extractors = (
        ('time_domain', self._extract_time_domain_features),
        ('spatial', self._extract_spatial_features),
        ('frequency', self._extract_frequency_domain_features),
        ('statistical', self._extract_statistical_features),
        ('wavelet', self._extract_wavelet_features),
    )
    all_features = {}
    for domain, extractor in extractors:
        if self.selected_domains is None or domain in self.selected_domains:
            all_features.update(extractor(window))
    return all_features
|
| 90 |
+
|
| 91 |
+
def _extract_time_domain_features(self, window):
    """Classic time-domain statistics per axis, plus the signal magnitude
    area, and optionally the same statistics on the magnitude signal."""
    axes = {'x': window[:, 0], 'y': window[:, 1], 'z': window[:, 2]}

    # (name, function) pairs in the exact order the original columns used.
    stat_fns = (
        ('mean', np.mean),
        ('std', np.std),
        ('variance', np.var),
        ('rms', lambda s: np.sqrt(np.mean(s ** 2))),           # root mean square
        ('max', np.max),
        ('min', np.min),
        ('peak_to_peak', np.ptp),
        ('skewness', lambda s: pd.Series(s).skew()),
        ('kurtosis', lambda s: pd.Series(s).kurt()),
        ('zero_crossing_rate', lambda s: np.sum(np.diff(np.sign(s)) != 0)),
    )

    features = {}
    for stat_name, fn in stat_fns:
        for axis_name, signal in axes.items():
            features[f'{stat_name}_{axis_name}'] = fn(signal)

    # Signal Magnitude Area: summed absolute acceleration over all three axes.
    features['sma'] = sum(np.sum(np.abs(signal)) for signal in axes.values())

    # Additional features for Magnitude (xyz combined into one vector).
    if self.include_magnitude:
        magnitude = self._calculate_magnitude(window)
        for stat_name, fn in stat_fns:
            features[f'{stat_name}_magnitude'] = fn(magnitude)

    return features
|
| 143 |
+
|
| 144 |
+
# Spatial Features
|
| 145 |
+
def _extract_spatial_features(self, window):
    """Orientation- and correlation-based features of one window."""
    x, y, z = window[:, 0], window[:, 1], window[:, 2]
    features = {}

    # Mean Euclidean norm (magnitude) of the acceleration vector.
    features['euclidean_norm'] = np.mean(self._calculate_magnitude(window))

    # Device tilt: pitch/roll angles in degrees, averaged over the window.
    rad_to_deg = 180 / np.pi
    pitch = np.arctan2(y, np.sqrt(x ** 2 + z ** 2)) * rad_to_deg
    roll = np.arctan2(x, np.sqrt(y ** 2 + z ** 2)) * rad_to_deg
    features['mean_pitch'] = np.mean(pitch)
    features['mean_roll'] = np.mean(roll)

    # Pairwise Pearson correlation between the axes.
    features['correlation_xy'] = np.corrcoef(x, y)[0, 1]
    features['correlation_xz'] = np.corrcoef(x, z)[0, 1]
    features['correlation_yz'] = np.corrcoef(y, z)[0, 1]

    return features
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# Frequency Domain Features
def _extract_frequency_domain_features(self, window):
    """Extract per-axis frequency-domain features from one accelerometer window.

    For each of x/y/z (and optionally the magnitude signal, when
    self.include_magnitude is set) this computes: dominant frequency,
    spectral entropy, mean power spectral density, spectral energy,
    10%-90% energy bandwidth and spectral centroid.

    Parameters:
    - window: array of shape (n_samples, 3) with x, y, z columns.

    Returns:
    - dict mapping feature names to scalar values.
    """
    n = len(window)
    # One-sided frequency bins for an n-point FFT sampled at self.data_frequency Hz.
    freq_values = np.fft.fftfreq(n, d=1/self.data_frequency)[:n // 2]
    fft_values = fft(window, axis=0)
    # Keep only the positive-frequency half of the spectrum.
    fft_magnitude = np.abs(fft_values)[:n // 2]

    features = {}

    # Spectral Entropy
    def spectral_entropy(signal):
        # Shannon entropy of the normalized power spectrum; the small
        # epsilon guards against log(0) for empty bins.
        psd = np.square(signal)
        psd_norm = psd / np.sum(psd)
        return -np.sum(psd_norm * np.log(psd_norm + 1e-10))

    for i, axis in enumerate(['x', 'y', 'z']):
        # Dominant Frequency: bin with the largest FFT magnitude.
        dominant_frequency = freq_values[np.argmax(fft_magnitude[:, i])]
        features[f'dominant_frequency_{axis}'] = dominant_frequency

        # Spectral Entropy
        entropy = spectral_entropy(fft_magnitude[:, i])
        features[f'spectral_entropy_{axis}'] = entropy

        # Power Spectral Density (PSD) and Energy
        # (nperseg=n means Welch degenerates to a single-segment periodogram).
        f, psd_values = welch(window[:, i], fs=self.data_frequency, nperseg=n)
        features[f'psd_mean_{axis}'] = np.mean(psd_values)
        features[f'energy_{axis}'] = np.sum(psd_values**2)

        # Bandwidth (frequency range containing significant portion of the energy)
        # measured between the 10% and 90% points of the cumulative PSD.
        cumulative_energy = np.cumsum(psd_values)
        total_energy = cumulative_energy[-1]
        low_cutoff_idx = np.argmax(cumulative_energy > 0.1 * total_energy)
        high_cutoff_idx = np.argmax(cumulative_energy > 0.9 * total_energy)
        bandwidth = f[high_cutoff_idx] - f[low_cutoff_idx]
        features[f'bandwidth_{axis}'] = bandwidth

        # Spectral Centroid (Center of mass of the spectrum)
        spectral_centroid = np.sum(f * psd_values) / np.sum(psd_values)
        features[f'spectral_centroid_{axis}'] = spectral_centroid

    if self.include_magnitude:
        # Magnitude-based Frequency Domain Features (same feature set as per-axis).
        magnitude = self._calculate_magnitude(window)
        fft_magnitude_mag = np.abs(fft(magnitude))[:n // 2]

        # Dominant Frequency for Magnitude
        features['dominant_frequency_magnitude'] = freq_values[np.argmax(fft_magnitude_mag)]

        # Spectral Entropy for Magnitude
        features['spectral_entropy_magnitude'] = spectral_entropy(fft_magnitude_mag)

        # Power Spectral Density and Energy for Magnitude
        # (note: this reassigns f, the Welch frequency grid, for the block below)
        f, psd_values_mag = welch(magnitude, fs=self.data_frequency, nperseg=n)
        features['psd_mean_magnitude'] = np.mean(psd_values_mag)
        features['energy_magnitude'] = np.sum(psd_values_mag**2)

        # Bandwidth for Magnitude
        cumulative_energy_mag = np.cumsum(psd_values_mag)
        total_energy_mag = cumulative_energy_mag[-1]
        low_cutoff_idx_mag = np.argmax(cumulative_energy_mag > 0.1 * total_energy_mag)
        high_cutoff_idx_mag = np.argmax(cumulative_energy_mag > 0.9 * total_energy_mag)
        bandwidth_mag = f[high_cutoff_idx_mag] - f[low_cutoff_idx_mag]
        features['bandwidth_magnitude'] = bandwidth_mag

        # Spectral Centroid for Magnitude
        features['spectral_centroid_magnitude'] = np.sum(f * psd_values_mag) / np.sum(psd_values_mag)

    # print(f"Frequency domain features extracted successfully.")
    return features
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def _extract_statistical_features(self, window):
|
| 241 |
+
features = {
|
| 242 |
+
'25th_percentile_x': np.percentile(window[:, 0], 25),
|
| 243 |
+
'25th_percentile_y': np.percentile(window[:, 1], 25),
|
| 244 |
+
'25th_percentile_z': np.percentile(window[:, 2], 25),
|
| 245 |
+
'75th_percentile_x': np.percentile(window[:, 0], 75),
|
| 246 |
+
'75th_percentile_y': np.percentile(window[:, 1], 75),
|
| 247 |
+
'75th_percentile_z': np.percentile(window[:, 2], 75),
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
if self.include_magnitude:
|
| 251 |
+
magnitude = self._calculate_magnitude(window)
|
| 252 |
+
features['25th_percentile_magnitude'] = np.percentile(magnitude, 25)
|
| 253 |
+
features['75th_percentile_magnitude'] = np.percentile(magnitude, 75)
|
| 254 |
+
|
| 255 |
+
# print(f"Statistical features extracted successfully.")
|
| 256 |
+
return features
|
| 257 |
+
|
| 258 |
+
def _extract_wavelet_features(self, window, wavelet='db1'):
    """Compute level-3 wavelet approximation-band energies per axis.

    Parameters:
    - window: array of shape (n_samples, 3) with x, y, z columns.
    - wavelet: PyWavelets wavelet name (default 'db1', i.e. Haar).

    Returns:
    - dict of approximation-coefficient energies, plus the magnitude
      energy when self.include_magnitude is set.
    """
    # Multilevel DWT; index 0 of the result holds the approximation
    # (low-frequency) coefficients.
    approx = pywt.wavedec(window, wavelet, axis=0, level=3)[0]

    features = {}
    for col, axis in enumerate(('x', 'y', 'z')):
        # Energy of the approximation band for this axis.
        features[f'wavelet_energy_approx_{axis}'] = np.sum(approx[:, col] ** 2)

    if self.include_magnitude:
        magnitude = self._calculate_magnitude(window)
        approx_mag = pywt.wavedec(magnitude, wavelet, level=3)[0]
        features['wavelet_energy_approx_magnitude'] = np.sum(approx_mag ** 2)

    # print(f"Wavelet features extracted successfully.")
    return features
|
pipeline_classes/import_data.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from _config import config
|
| 5 |
+
|
| 6 |
+
class ImportData(BaseEstimator, TransformerMixin):
    """Pipeline entry transformer that loads the configured CSV files.

    Depending on the constructor flags, transform() returns either the
    precomputed features table, the combined table, the raw
    (reports, accelerometer) pair, or the raw accelerometer frame alone.
    """

    def __init__(self, use_accel=True, use_reports=True, use_combined=False, use_features=False):
        self.use_accel = use_accel
        self.use_reports = use_reports
        self.use_combined = use_combined
        self.use_features = use_features
        # Resolve each file path from the global config only when that
        # source is enabled; disabled sources get None.
        self.accel_path = config["accel_path"] if use_accel else None
        self.reports_path = config["reports_path"] if use_reports else None
        self.combined_data_path = config["combined_data_path"] if use_combined else None
        self.features_data_path = config["features_data_path"] if use_features else None

    def fit(self, X, y=None):
        # Stateless loader: nothing to learn from the data.
        return self

    def transform(self, X):
        # Highest-priority source: the precomputed feature table.
        if self.features_data_path:
            features_df = pd.read_csv(self.features_data_path)
            print('Features dataframe imported successfully.')
            return features_df

        # Next: the already-combined reports + accelerometer table.
        if self.combined_data_path:
            combined_df = pd.read_csv(self.combined_data_path)
            print('Combined dataframe imported successfully.')
            return combined_df

        # Fall back to the raw inputs; accelerometer data is mandatory here.
        if not self.accel_path:
            raise ValueError("accel_path needs to be provided if combined_data_path and features_data_path are not given.")

        df_accel = pd.read_csv(self.accel_path).copy()

        if self.reports_path:
            # Self-reports were requested as well: return both frames.
            df_reports = pd.read_csv(self.reports_path).copy()
            print('Raw data (accelerometer and self-reports) imported successfully.')
            return df_reports, df_accel

        print('Raw accelerometer data imported successfully.')
        return df_accel
|
pipeline_classes/lowpassfilter.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pandas as pd
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from scipy.signal import butter, filtfilt
|
| 5 |
+
|
| 6 |
+
class LowPassFilter(BaseEstimator, TransformerMixin):
    """Sklearn-compatible transformer applying a zero-phase Butterworth
    low-pass filter to the 'x', 'y' and 'z' accelerometer columns."""

    def __init__(self, cutoff_frequency=5, sampling_rate=25, order=4):
        """
        Initialize the LowPassFilter class.

        The defaults below match what the original docstring promised but
        the signature previously did not provide.

        Parameters:
        - cutoff_frequency: The cutoff frequency for the low-pass filter (default: 5 Hz).
        - sampling_rate: The sampling rate of the accelerometer data (default: 25 Hz).
        - order: The order of the filter (default: 4).
        """
        self.cutoff_frequency = cutoff_frequency
        self.sampling_rate = sampling_rate
        self.order = order

    def _butter_lowpass_filter(self, data):
        """
        Apply a Butterworth low-pass filter to the data.

        Parameters:
        - data: A NumPy array containing the accelerometer data to be filtered.

        Returns:
        - A filtered NumPy array.
        """
        # butter() expects the cutoff normalized to the Nyquist frequency.
        nyquist = 0.5 * self.sampling_rate
        normalized_cutoff = self.cutoff_frequency / nyquist
        b, a = butter(self.order, normalized_cutoff, btype='low', analog=False)
        # filtfilt runs the filter forward and backward for zero phase shift.
        filtered_data = filtfilt(b, a, data, axis=0)
        return filtered_data

    def fit(self, X, y=None):
        # Stateless transformer: nothing to fit.
        return self

    def transform(self, X):
        """
        Apply the low-pass filter to the accelerometer data.

        Parameters:
        - X: A DataFrame with 'x', 'y', and 'z' columns representing the accelerometer data.

        Returns:
        - The DataFrame with filtered 'x', 'y', and 'z' columns (modified in place).

        Raises:
        - ValueError: if any of the 'x', 'y', 'z' columns is missing.
        """
        if 'x' in X.columns and 'y' in X.columns and 'z' in X.columns:
            X[['x', 'y', 'z']] = self._butter_lowpass_filter(X[['x', 'y', 'z']].values)
            print("Low-pass filter applied successfully.")
        else:
            raise ValueError("The input DataFrame must contain 'x', 'y', and 'z' columns.")

        return X
|
pipeline_classes/pcahandler.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from sklearn.decomposition import PCA
|
| 5 |
+
from _config import config
|
| 6 |
+
|
| 7 |
+
class PCAHandler(BaseEstimator, TransformerMixin):
    """Optionally reduce feature dimensionality with PCA.

    When apply_pca is False the transformer is a pure pass-through;
    otherwise a PCA retaining `variance` of the explained variance is
    fitted in fit() and applied in transform().
    """

    def __init__(self, apply_pca=False, variance=0.95):
        self.apply_pca = apply_pca
        # Fraction of explained variance to retain when PCA is enabled.
        self.variance = variance
        self.pca = None

    def fit(self, X, y=None):
        if self.apply_pca:
            # An n_components value in (0, 1) tells sklearn to keep enough
            # components to explain that fraction of the variance.
            self.pca = PCA(n_components=self.variance)
            self.pca.fit(X)
        return self

    def transform(self, X):
        if not (self.apply_pca and self.pca):
            # PCA disabled (or never fitted): pass the data through untouched.
            return X
        # Preserve the original row index on the reduced matrix.
        return pd.DataFrame(self.pca.transform(X), index=X.index)
|
pipeline_classes/scale_xyzdata.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from sklearn.preprocessing import StandardScaler, MinMaxScaler
|
| 5 |
+
from _config import config
|
| 6 |
+
|
| 7 |
+
class ScaleXYZData(BaseEstimator, TransformerMixin):
    """Scale the 'x', 'y', 'z' columns of a DataFrame.

    scaler_type selects the strategy: 'standard' (z-score), 'minmax'
    (rescale to [0, 1]) or 'none' (pass-through).
    """

    def __init__(self, scaler_type='standard'):
        self.scaler_type = scaler_type

    def fit(self, X, y=None):
        # The scaler itself is fitted inside transform(); nothing to do here.
        return self

    def transform(self, X):
        """
        Scale the accelerometer columns in place and return the DataFrame.

        Parameters:
        - X: DataFrame containing 'x', 'y' and 'z' columns.

        Raises:
        - ValueError: if scaler_type is not one of 'standard', 'minmax', 'none'.
        """
        columns_to_scale = ['x', 'y', 'z']
        if self.scaler_type == 'standard':  # Scale the columns using StandardScaler or MinMaxScaler
            scaler = StandardScaler()
        elif self.scaler_type == 'minmax':
            scaler = MinMaxScaler()
        elif self.scaler_type == 'none':
            return X  # Return the DataFrame without scaling
        else:
            # Fixed message: the original omitted the accepted 'none' option.
            raise ValueError("Invalid scaler_type. Expected 'standard', 'minmax' or 'none'.")
        # NOTE(review): the scaler is fit on the data passed to transform(),
        # so each call scales relative to its own input — confirm this
        # train/test behavior is intended before reusing in inference.
        scaled_columns = scaler.fit_transform(X[columns_to_scale])
        scaled_df = pd.DataFrame(scaled_columns, columns=columns_to_scale, index=X.index)
        X[columns_to_scale] = scaled_df
        print("Data scaled successfully.")
        return X
|
pipeline_classes/train_model.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import numpy as np
|
| 3 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
| 4 |
+
from sklearn.model_selection import StratifiedGroupKFold
|
| 5 |
+
from skopt import BayesSearchCV
|
| 6 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 7 |
+
from sklearn.svm import SVC
|
| 8 |
+
from xgboost import XGBClassifier
|
| 9 |
+
import joblib
|
| 10 |
+
from skopt.space import Real, Integer, Categorical
|
| 11 |
+
from sklearn.metrics import classification_report, accuracy_score
|
| 12 |
+
import json
|
| 13 |
+
from sklearn.preprocessing import LabelEncoder
|
| 14 |
+
from _config import config
|
| 15 |
+
|
| 16 |
+
class TrainModel(BaseEstimator, TransformerMixin):
    """Train, tune and persist a classifier for the configured target label.

    fit() encodes the target, runs BayesSearchCV with StratifiedGroupKFold,
    and writes the best model (.pkl), a classification report (.json) and
    model metadata (.json) to the working directory as side effects.
    transform() is a pass-through so the class can sit in a Pipeline.
    """

    def __init__(self, config):
        # Full configuration dict driving training (classifier choice,
        # CV settings, label columns, optional parameter space).
        self.config = config
        self.target = config.get("target_label", None)  # User-defined target label in config
        # Encoder mapping the raw target labels to contiguous integers.
        self.label_encoder = LabelEncoder()
        self.selected_domains = self.config.get("selected_domains", "All domains")  # Default to all domains if None

        if not self.target:
            raise ValueError("No target label specified in the config. Please set 'target_label'.")

    def get_default_param_space(self, classifier):
        """ Returns the default hyperparameter space for a given classifier. """
        if classifier == 'xgboost':
            # NOTE(review): the plain tuples below are handed to skopt as-is
            # (interpreted as uniform ranges), while other dimensions use
            # explicit Real/Integer wrappers — confirm the mix is intended.
            return {
                'learning_rate': Real(0.01, 0.3, prior='log-uniform'),
                'n_estimators': Integer(100, 1000),
                'max_depth': Integer(3, 10),
                'min_child_weight': (1, 10),
                'subsample': (0.5, 1.0),
                'colsample_bytree': (0.5, 1.0),
                'gamma': (0, 10),
                'reg_alpha': (0, 10),
                'reg_lambda': (0, 10),
            }
        elif classifier == 'svm':
            return {
                'C': Real(0.1, 10, prior='log-uniform'),
                'kernel': Categorical(['linear', 'rbf'])
            }
        elif classifier == 'randomforest':
            return {
                'n_estimators': Integer(100, 1000),
                'max_depth': Integer(3, 10)
            }
        else:
            raise ValueError(f"Unsupported classifier type: {classifier}")

    def fit(self, X, y=None):
        """Tune and train the configured classifier on X.

        X must contain the target column, a 'groupid' column, and every
        column listed in config['label_columns']; all of these are removed
        from the feature matrix before training. The passed-in y is ignored
        (the target is taken from X itself). Note this mutates X in place
        by popping columns.
        """
        # Ensure the target column exists in the dataset
        if self.target not in X.columns:
            raise ValueError(f"Target label '{self.target}' not found in the dataset.")

        # Fit the label encoder on the target column
        print(f"Encoding the target labels for '{self.target}'...")
        self.label_encoder.fit(X[self.target])

        # Print the mapping between original labels and encoded labels
        original_labels = list(self.label_encoder.classes_)
        encoded_labels = list(range(len(original_labels)))
        label_mapping = dict(zip(encoded_labels, original_labels))
        print(f"Label encoding complete. Mapping: {label_mapping}")

        # Transform the target column and add it as 'encoded_target'
        X['encoded_target'] = self.label_encoder.transform(X[self.target])

        # Value counts for the encoded target
        value_counts = X['encoded_target'].value_counts().to_dict()
        print(f"Value counts for encoded target: {value_counts}")

        # Pop unnecessary columns (groupid, emotion labels not being used, etc.)
        # 'groupid' is kept aside so CV never splits one group across folds.
        groups = X.pop('groupid')
        # Pop the label columns which aren't used
        for label in self.config["label_columns"]:
            X.pop(label)

        # Pop the encoded target as Y
        y = X.pop('encoded_target')

        # Store the feature names for later use
        feature_names = X.columns.tolist()

        # Choose classifier
        classifier = self.config['classifier']
        if classifier == 'xgboost':
            model = XGBClassifier(objective='multi:softmax', random_state=42)
        elif classifier == 'svm':
            model = SVC(probability=True)
        elif classifier == 'randomforest':
            model = RandomForestClassifier(random_state=42)
        else:
            raise ValueError(f"Unsupported classifier type: {classifier}")

        print(f"Training the model using {classifier}...")

        # Use user-defined param_space if provided, otherwise use default
        print(f"Classifier: {classifier}")
        default_param_space = self.get_default_param_space(classifier)
        param_space = self.config.get("param_space") or default_param_space

        # Hyperparameter tuning using Bayesian optimization
        # Grouped, stratified folds: balanced classes without group leakage.
        sgkf = StratifiedGroupKFold(n_splits=self.config['n_splits'])
        print(f"Parameter space being used: {param_space}")
        if param_space is None:
            raise ValueError("Parameter space cannot be None. Please check the classifier configuration.")

        opt = BayesSearchCV(
            estimator=model,
            search_spaces=param_space,
            cv=sgkf,
            n_iter=self.config['n_iter'],
            n_jobs=self.config['n_jobs'],
            n_points=self.config['n_points'],
            verbose=1,
            scoring='accuracy'
        )

        print("Hyperparameter tuning in progress...")
        print(X.describe(),X.columns)
        print(f"stop")

        # Fit the model using the encoded target
        opt.fit(X, y, groups=groups)
        self.best_model = opt.best_estimator_
        print(f"Best parameters found: {opt.best_params_}")

        # Print classification metrics
        # NOTE(review): these metrics are computed on the training data
        # itself, so they overestimate generalization performance.
        y_pred = self.best_model.predict(X)
        accuracy = accuracy_score(y, y_pred)
        report = classification_report(y, y_pred, target_names=self.label_encoder.classes_, output_dict=True)

        # Save classification report
        classification_report_json = report
        with open(f'classification_report_{self.target}.json', 'w') as f:
            json.dump(classification_report_json, f, indent=4)

        print(f"Accuracy: {accuracy}")
        print(f"Classification Report:\n{report}")

        # Save the best model with the target label in the file name
        model_name = f"{classifier}_best_model_{self.target}.pkl"
        joblib.dump(self.best_model, model_name)
        print("Model saved successfully.")

        # Save model metadata
        model_metadata = {
            "best_params": opt.best_params_,
            "accuracy": accuracy,
            "classification_report": classification_report_json,
            "label_mapping": label_mapping,
            "model_name": model_name,
            "value_counts": value_counts,
            "selected_domains": self.selected_domains,
            "include_magnitude": self.config.get("include_magnitude", True)
        }

        if hasattr(self.best_model, "feature_importances_"):
            feature_importances = self.best_model.feature_importances_
            # Convert feature importances to native Python floats
            # (numpy floats are not JSON-serializable by default).
            feature_importance_dict = {feature: float(importance) for feature, importance in zip(feature_names, feature_importances)}
            model_metadata["feature_importances"] = feature_importance_dict
            print("Feature Importances:")
            for feature, importance in feature_importance_dict.items():
                print(f"{feature}: {importance:.4f}")

        # Save metadata with the target name in the file name
        metadata_file = f"{classifier}_model_metadata_{self.target}.json"
        with open(metadata_file, "w") as f:
            json.dump(model_metadata, f, indent=4)
        print(f"Model metadata saved to {metadata_file}.")

        return self

    def transform(self, X):
        return X  # Placeholder for transform step (not needed for training)
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pandas>=1.3.0
|
| 2 |
+
numpy>=1.21.0
|
| 3 |
+
scikit-learn>=1.0.0
|
| 4 |
+
scikit-optimize>=0.9.0
|
| 5 |
+
xgboost>=1.5.0
|
| 6 |
+
joblib>=1.0.0
|
| 7 |
+
PyWavelets>=1.2.0
|
| 8 |
+
scipy>=1.7.0
|
single_participant_positive_high.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|