| markdown | code | output | license | path | repo_name |
|---|---|---|---|---|---|
Initialization
---
This notebook updates the repository structure by adding an inference directory to the data folder:

```
/lookout-equipment-demo
|
+-- data/
|   |
|   +-- inference/
|   |   |
|   |   |-- input/
|   |   |
|   |   \-- output/
|   |
|   +-- labelled-data/
|   |   \-- labels.csv
|   |
|   \-- training-data/
|       \-- expander/
|           |-- subsystem-01
|           |   \-- subsystem-01.csv
|           |
|           |-- subsystem-02
|           |   \-- subsystem-02.csv
|           |
|           |-- ...
|           |
|           \-- subsystem-24
|               \-- subsystem-24.csv
|
+-- dataset/
|   |-- labels.csv
|   |-- tags_description.csv
|   |-- timeranges.txt
|   \-- timeseries.zip
|
+-- notebooks/
|   |-- 1_data_preparation.ipynb
|   |-- 2_dataset_creation.ipynb
|   |-- 3_model_training.ipynb
|   |-- 4_model_evaluation.ipynb
|   \-- 5_inference_scheduling.ipynb   <<< THIS NOTEBOOK <<<
|
+-- utils/
    |-- lookout_equipment_utils.py
    \-- lookoutequipment.json
```

Imports | %%sh
pip -q install --upgrade pip
pip -q install --upgrade awscli boto3 sagemaker
aws configure add-model --service-model file://../utils/lookoutequipment.json --service-name lookoutequipment
from IPython.core.display import HTML
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
import boto3
import datetime
import os
import pandas as pd
import pprint
import pyarrow as pa
import pyarrow.parquet as pq
import sagemaker
import s3fs
import sys
import time
import uuid
import warnings
# Helper functions to manage Lookout for Equipment API calls
sys.path.append('../utils')
import lookout_equipment_utils as lookout | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Parameters | warnings.filterwarnings('ignore')
DATA = os.path.join('..', 'data')
RAW_DATA = os.path.join('..', 'dataset')
INFER_DATA = os.path.join(DATA, 'inference')
os.makedirs(os.path.join(INFER_DATA, 'input'), exist_ok=True)
os.makedirs(os.path.join(INFER_DATA, 'output'), exist_ok=True)
ROLE_ARN = sagemaker.get_execution_role()
REGION_NAME = boto3.session.Session().region_name | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Create an inference scheduler
---
If you navigate to the model details section of the console, you can see that no inference is scheduled yet. Let's create a new inference schedule for this model. Some parameters are mandatory, while the others offer extra configuration flexibility.

Mandatory parameters

* Set `DATA_UPLOAD_FREQUENCY` to the frequency at which data is uploaded for inference. Allowed values are `PT5M`, `PT10M`, `PT15M`, `PT30M` and `PT1H`.
  * This is both the frequency at which the inference scheduler runs and the frequency at which data is uploaded to the source bucket.
  * **Note**: ***the upload frequency must be compatible with the sampling rate selected at training time.*** *For example, if the model was trained with 30-minute resampling, 5 minutes is not possible; you must choose PT30M or PT1H as the inference parameter.* (A small sanity-check sketch follows the next cell.)
* Set `INFERENCE_DATA_SOURCE_BUCKET` to the S3 bucket of the inference data.
* Set `INFERENCE_DATA_SOURCE_PREFIX` to the S3 prefix of the inference data.
* Set `INFERENCE_DATA_OUTPUT_BUCKET` to the S3 bucket where you want the inference results.
* Set `INFERENCE_DATA_OUTPUT_PREFIX` to the S3 prefix where you want the inference results.
* Set `ROLE_ARN_FOR_INFERENCE` to the role used to **read** the data to run inference on and to **write** the inference output. | # The name of the inference scheduler you want to create
INFERENCE_SCHEDULER_NAME = 'lookout-demo-model-v1-scheduler'
# The name of the model this inference scheduler is created for
MODEL_NAME_FOR_CREATING_INFERENCE_SCHEDULER = 'lookout-demo-model-v1'
# Mandatory input parameters
# BUCKET and PREFIX are assumed to be defined in an earlier configuration cell of this notebook
INFERENCE_DATA_SOURCE_BUCKET = BUCKET
INFERENCE_DATA_SOURCE_PREFIX = f'{PREFIX}/input/'
INFERENCE_DATA_OUTPUT_BUCKET = BUCKET
INFERENCE_DATA_OUTPUT_PREFIX = f'{PREFIX}/output/'
ROLE_ARN_FOR_INFERENCE = ROLE_ARN
DATA_UPLOAD_FREQUENCY = 'PT5M' | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
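The allowed `DATA_UPLOAD_FREQUENCY` values are ISO-8601 durations. As the sanity check promised above, a small sketch follows; the helper name `frequency_to_minutes` is hypothetical and not part of the original notebook:

```python
def frequency_to_minutes(iso_duration):
    # Only the duration values the scheduler accepts are handled here
    mapping = {'PT5M': 5, 'PT10M': 10, 'PT15M': 15, 'PT30M': 30, 'PT1H': 60}
    return mapping[iso_duration]

# PT5M was chosen above, matching the demo model's 5-minute sampling rate
assert frequency_to_minutes(DATA_UPLOAD_FREQUENCY) == 5
```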
Optional parameters

* Set `DATA_DELAY_OFFSET_IN_MINUTES` to the number of minutes by which you expect the data upload to be delayed; in other words, a buffer on the data upload time.
* Set `INPUT_TIMEZONE_OFFSET`. Allowed values are +00:00, +00:30, +01:00, ... +11:30, +12:00, -00:00, -00:30, -01:00, ... -11:30, -12:00.
* Set `TIMESTAMP_FORMAT`. Allowed values are `EPOCH`, `yyyy-MM-dd-HH-mm-ss` or `yyyyMMddHHmmss`. This is the timestamp format appended as a suffix to the input data file names. Lookout for Equipment uses it to identify which files to run inference on (so there is no need to remove older files to let the scheduler find the files to run on).
* Set `COMPONENT_TIMESTAMP_DELIMITER`. Allowed values are `-`, `_` or ` `. This is the delimiter separating the component from the timestamp in the input file names. | DATA_DELAY_OFFSET_IN_MINUTES = None
INPUT_TIMEZONE_OFFSET = '+00:00'
COMPONENT_TIMESTAMP_DELIMITER = '_'
TIMESTAMP_FORMAT = 'yyyyMMddHHmmss' | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Create the inference scheduler

The CreateInferenceScheduler API **creates** the scheduler **and** starts it: this means it immediately starts incurring costs. You can, however, stop and restart an existing scheduler at will (see the last part of this notebook). A rough sketch of the underlying API call follows the next cell. | scheduler = lookout.LookoutEquipmentScheduler(
scheduler_name=INFERENCE_SCHEDULER_NAME,
model_name=MODEL_NAME_FOR_CREATING_INFERENCE_SCHEDULER,
region_name=REGION_NAME
)
scheduler_params = {
'input_bucket': INFERENCE_DATA_SOURCE_BUCKET,
'input_prefix': INFERENCE_DATA_SOURCE_PREFIX,
'output_bucket': INFERENCE_DATA_OUTPUT_BUCKET,
'output_prefix': INFERENCE_DATA_OUTPUT_PREFIX,
'role_arn': ROLE_ARN_FOR_INFERENCE,
'upload_frequency': DATA_UPLOAD_FREQUENCY,
'delay_offset': DATA_DELAY_OFFSET_IN_MINUTES,
'timezone_offset': INPUT_TIMEZONE_OFFSET,
'component_delimiter': COMPONENT_TIMESTAMP_DELIMITER,
'timestamp_format': TIMESTAMP_FORMAT
}
scheduler.set_parameters(**scheduler_params) | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
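Under the hood, the helper's `create()` call (used later in this notebook) must issue a CreateInferenceScheduler request. The sketch below shows roughly what such a request looks like through boto3; the parameter names follow the public Lookout for Equipment API, but the exact request the helper builds may differ, so treat this as illustrative only:

```python
import boto3

lookout_client = boto3.client('lookoutequipment', region_name=REGION_NAME)
response = lookout_client.create_inference_scheduler(
    ModelName=MODEL_NAME_FOR_CREATING_INFERENCE_SCHEDULER,
    InferenceSchedulerName=INFERENCE_SCHEDULER_NAME,
    DataUploadFrequency=DATA_UPLOAD_FREQUENCY,
    DataInputConfiguration={
        'S3InputConfiguration': {
            'Bucket': INFERENCE_DATA_SOURCE_BUCKET,
            'Prefix': INFERENCE_DATA_SOURCE_PREFIX,
        }
    },
    DataOutputConfiguration={
        'S3OutputConfiguration': {
            'Bucket': INFERENCE_DATA_OUTPUT_BUCKET,
            'Prefix': INFERENCE_DATA_OUTPUT_PREFIX,
        }
    },
    RoleArn=ROLE_ARN_FOR_INFERENCE,
)
```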
Prepare the inference data
---
Let's prepare and send some data to the S3 input location the scheduler monitors. | # Load the entire original signal
all_tags_fname = os.path.join(DATA, 'training-data', 'expander.parquet')
table = pq.read_table(all_tags_fname)
all_tags_df = table.to_pandas()
del table
all_tags_df.head() | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Let's load the tag descriptions. This dataset comes with a tag description file containing:

* `Tag`: the tag name as recorded by the customer in their historian system (for example, the [Honeywell process history database](https://www.honeywellprocess.com/en-US/explore/products/advanced-applications/uniformance/Pages/uniformance-phd.aspx))
* `UOM`: the unit of measure of the recorded signal
* `Subsystem`: the ID of the asset part this sensor is attached to

From here we can collect the list of components (i.e. subsystems): | tags_description_fname = os.path.join(RAW_DATA, 'tags_description.csv')
tags_description_df = pd.read_csv(tags_description_fname)
components = tags_description_df['Subsystem'].unique()
tags_description_df.head() | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
To build the sample inference dataset, we extract the last few minutes of the validation period from the original time series. | # Number of sequences to extract
num_sequences = 3
# Scheduling frequency in minutes: this value **must** match
# the resampling rate used to train the model.
frequency = 5
# Loop over each sequence:
start = all_tags_df.index.max() + datetime.timedelta(minutes=-frequency * (num_sequences) + 1)
for i in range(num_sequences):
end = start + datetime.timedelta(minutes=+frequency - 1)
    # Round the current time down to the previous 5-minute mark
tm = datetime.datetime.now()
tm = tm - datetime.timedelta(
minutes=tm.minute % frequency,
seconds=tm.second,
microseconds=tm.microsecond
)
tm = tm + datetime.timedelta(minutes=+frequency * (i))
    tm = tm - datetime.timedelta(hours=9) # Adjust for KST (UTC+9)
current_timestamp = (tm).strftime(format='%Y%m%d%H%M%S')
    # For each sequence, loop over all the components:
print(f'Extracting data from {start} to {end}:')
new_index = None
for component in components:
        # Extract the dataframe for this component and this particular time range
signals = list(tags_description_df.loc[(tags_description_df['Subsystem'] == component), 'Tag'])
signals_df = all_tags_df.loc[start:end, signals]
        # We need to reset the index to match the time
        # at which the scheduler will run the inference
if new_index is None:
new_index = pd.date_range(
start=tm,
periods=signals_df.shape[0],
freq='1min'
)
signals_df.index = new_index
signals_df.index.name = 'Timestamp'
signals_df = signals_df.reset_index()
signals_df['Timestamp'] = signals_df['Timestamp'].dt.strftime('%Y-%m-%dT%H:%M:%S.%f')
        # Export this file in CSV format
component_fname = os.path.join(INFER_DATA, 'input', f'{component}_{current_timestamp}.csv')
signals_df.to_csv(component_fname, index=None)
start = start + datetime.timedelta(minutes=+frequency)
# Upload the whole folder to the S3 input location
INFERENCE_INPUT = os.path.join(INFER_DATA, 'input')
!aws s3 cp --recursive --quiet $INFERENCE_INPUT s3://$BUCKET/$PREFIX/input
# Now that the data is ready, create the scheduler by running:
create_scheduler_response = scheduler.create() | Extracting data from 2015-11-30 23:45:00 to 2015-11-30 23:49:00:
Extracting data from 2015-11-30 23:50:00 to 2015-11-30 23:54:00:
Extracting data from 2015-11-30 23:55:00 to 2015-11-30 23:59:00:
===== Polling Inference Scheduler Status =====
Scheduler Status: PENDING
Scheduler Status: RUNNING
===== End of Polling Inference Scheduler Status =====
| MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
The scheduler is running and the inference history is currently empty.

Get inference results
---

List inference executions

**Once the scheduler runs inference for the first time, it takes roughly 5-15 minutes.** When the wait is over, we can use the ListInferenceExecutions API on the current inference scheduler. The only mandatory input parameter is the scheduler name.

You can also choose a time period for which to query inference executions. If you don't specify one, all executions for this inference scheduler are listed. To specify a time range, do the following:

```python
START_TIME_FOR_INFERENCE_EXECUTIONS = datetime.datetime(2010,1,3,0,0,0)
END_TIME_FOR_INFERENCE_EXECUTIONS = datetime.datetime(2010,1,5,0,0,0)
```

That is, executions from `2010-01-03 00:00:00` to `2010-01-05 00:00:00` are listed.

You can also choose to query executions in a particular status. Allowed statuses are `IN_PROGRESS`, `SUCCESS` and `FAILED`. | START_TIME_FOR_INFERENCE_EXECUTIONS = None
END_TIME_FOR_INFERENCE_EXECUTIONS = None
EXECUTION_STATUS = None
execution_summaries = []
while len(execution_summaries) == 0:
execution_summaries = scheduler.list_inference_executions(
start_time=START_TIME_FOR_INFERENCE_EXECUTIONS,
end_time=END_TIME_FOR_INFERENCE_EXECUTIONS,
execution_status=EXECUTION_STATUS
)
if len(execution_summaries) == 0:
print('WAITING FOR THE FIRST INFERENCE EXECUTION')
time.sleep(60)
else:
print('FIRST INFERENCE EXECUTED\n')
break
# execution_summaries | WAITING FOR THE FIRST INFERENCE EXECUTION
WAITING FOR THE FIRST INFERENCE EXECUTION
FIRST INFERENCE EXECUTED
| MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
We configured the scheduler to run every 5 minutes. After a few minutes, in the console, we can look at the execution history fed by the first runs. 

When the scheduler wakes up, for example at `datetime.datetime(2021, 1, 27, 9, 15)`, it looks for **a single** CSV file in the input location. A file whose name contains a matching timestamp must exist, i.e.:

* subsystem-01_2021012709**10**00.csv will be found and ingested
* subsystem-01_2021012709**15**00.csv will **not** be ingested (it will be collected at the next inference execution)

(A small sketch double-checking this naming convention follows the next cell.)

Once the `subsystem-01_20210127091000.csv` file is used, the next inference execution looks for time rows that fall between DataStartTime and DataEndTime. If it doesn't find any such rows, it raises an internal exception.

Get the actual prediction results

When an inference is successful, a CSV file is stored at the output location of the bucket. Each inference creates a new folder containing a single `results.csv` file. Let's read that file and display its content. | results_df = scheduler.get_predictions()
results_df.to_csv(os.path.join(INFER_DATA, 'output', 'results.csv'))
results_df.head() | _____no_output_____ | MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
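As a sanity check of the file-naming convention described above, the hypothetical helper below (not part of the original notebook) computes the file name the scheduler looks for at a given wake-up time, assuming the `yyyyMMddHHmmss` timestamp format and `_` delimiter configured earlier:

```python
import datetime

def expected_input_filename(component, wakeup_time, frequency=5):
    # The scheduler ingests the file stamped with the *previous*
    # `frequency`-minute boundary, not the wake-up time itself.
    stamped = wakeup_time - datetime.timedelta(minutes=frequency)
    stamped = stamped - datetime.timedelta(
        minutes=stamped.minute % frequency,
        seconds=stamped.second,
        microseconds=stamped.microsecond
    )
    return f"{component}_{stamped.strftime('%Y%m%d%H%M%S')}.csv"

# A scheduler waking up at 2021-01-27 09:15 ingests the 09:10 file:
print(expected_input_filename('subsystem-01', datetime.datetime(2021, 1, 27, 9, 15)))
# subsystem-01_20210127091000.csv
```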
Inference scheduler operations
---

Stop the inference scheduler

**Be frugal**: a running scheduler is the main cost driver of Amazon Lookout for Equipment. Use the following API to stop the currently running inference scheduler; this halts the periodic inference executions. | scheduler.stop() | ===== Polling Inference Scheduler Status =====
Scheduler Status: STOPPING
Scheduler Status: STOPPED
===== End of Polling Inference Scheduler Status =====
| MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Start the inference scheduler

You can restart a `STOPPED` inference scheduler using the following API. | scheduler.start() | ===== Polling Inference Scheduler Status =====
Scheduler Status: PENDING
Scheduler Status: RUNNING
===== End of Polling Inference Scheduler Status =====
| MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
Delete the inference scheduler

You can delete a **stopped** scheduler you have no further use for. A model can only have one scheduler attached to it. | scheduler.stop()
scheduler.delete() | ===== Polling Inference Scheduler Status =====
Scheduler Status: STOPPING
Scheduler Status: STOPPED
===== End of Polling Inference Scheduler Status =====
| MIT-0 | notebooks/5_inference_scheduling.ipynb | youngmki/lookout-for-equipment-demo |
RGBD integration

Open3D implements a scalable RGBD image integration algorithm. The algorithm is based on the technique presented in [\[Curless1996\]](../reference.html#curless1996) and [\[Newcombe2011\]](../reference.html#newcombe2011). In order to support large scenes, we use a hierarchical hashing structure introduced in [Integrater in ElasticReconstruction](https://github.com/qianyizh/ElasticReconstruction/tree/master/Integrate).

Read trajectory from .log file

This tutorial uses the function `read_trajectory` to read a camera trajectory from a [.log file](http://redwood-data.org/indoor/fileformat.html). A sample `.log` file is as follows.

```
# examples/test_data/RGBD/odometry.log
0   0   1
1   0   0   2
0   1   0   2
0   0   1   -0.3
0   0   0   1
1   1   2
0.999988 3.08668e-005 0.0049181 1.99962
-8.84184e-005 0.999932 0.0117022 1.97704
-0.0049174 -0.0117024 0.999919 -0.300486
0 0 0 1
``` | # imports needed by the cells below (the notebook's import cell is not part of this excerpt)
import numpy as np
import open3d as o3d

class CameraPose:
def __init__(self, meta, mat):
self.metadata = meta
self.pose = mat
def __str__(self):
return 'Metadata : ' + ' '.join(map(str, self.metadata)) + '\n' + \
"Pose : " + "\n" + np.array_str(self.pose)
def read_trajectory(filename):
traj = []
with open(filename, 'r') as f:
metastr = f.readline()
while metastr:
metadata = list(map(int, metastr.split()))
mat = np.zeros(shape=(4, 4))
for i in range(4):
matstr = f.readline()
                mat[i, :] = np.array(matstr.split(), dtype=float)
traj.append(CameraPose(metadata, mat))
metastr = f.readline()
return traj
camera_poses = read_trajectory("../../test_data/RGBD/odometry.log") | _____no_output_____ | MIT | examples/python/pipelines/rgbd_integration.ipynb | aaronlhe/Open3D |
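As a quick check of the parsing, the first camera pose can be printed through the `__str__` method defined above:

```python
print(camera_poses[0])
```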
TSDF volume integration

Open3D provides two types of TSDF volumes: `UniformTSDFVolume` and `ScalableTSDFVolume`. The latter is recommended since it uses a hierarchical structure and thus supports larger scenes.

`ScalableTSDFVolume` has several parameters. `voxel_length = 4.0 / 512.0` means a single voxel size for TSDF volume is $\frac{4.0\mathrm{m}}{512.0} = 7.8125\mathrm{mm}$. Lowering this value makes a high-resolution TSDF volume, but the integration result can be susceptible to depth noise. `sdf_trunc = 0.04` specifies the truncation value for the signed distance function (SDF). When `color_type = TSDFVolumeColorType.RGB8`, 8 bit RGB color is also integrated as part of the TSDF volume. Float type intensity can be integrated with `color_type = TSDFVolumeColorType.Gray32` and `convert_rgb_to_intensity = True`. The color integration is inspired by [PCL](http://pointclouds.org/). | volume = o3d.pipelines.integration.ScalableTSDFVolume(
voxel_length=4.0 / 512.0,
sdf_trunc=0.04,
color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
for i in range(len(camera_poses)):
print("Integrate {:d}-th image into the volume.".format(i))
color = o3d.io.read_image("../../test_data/RGBD/color/{:05d}.jpg".format(i))
depth = o3d.io.read_image("../../test_data/RGBD/depth/{:05d}.png".format(i))
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False)
volume.integrate(
rgbd,
o3d.camera.PinholeCameraIntrinsic(
o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault),
np.linalg.inv(camera_poses[i].pose)) | _____no_output_____ | MIT | examples/python/pipelines/rgbd_integration.ipynb | aaronlhe/Open3D |
Extract a mesh

Mesh extraction uses the marching cubes algorithm [\[LorensenAndCline1987\]](../reference.html#lorensenandcline1987). | print("Extract a triangle mesh from the volume and visualize it.")
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh],
front=[0.5297, -0.1873, -0.8272],
lookat=[2.0712, 2.0312, 1.7251],
up=[-0.0558, -0.9809, 0.1864],
zoom=0.47) | _____no_output_____ | MIT | examples/python/pipelines/rgbd_integration.ipynb | aaronlhe/Open3D |
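A point cloud can also be extracted from the same volume via its `extract_point_cloud` method; a minimal sketch:

```python
pcd = volume.extract_point_cloud()
o3d.visualization.draw_geometries([pcd])
```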
Apache Arrow

1 Compare performance of csv, Parquet and Arrow | import pyarrow.parquet as pq
import pyarrow as pa
import pandas as pd
import numpy as np
import os
import psutil | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.1 Load and prepare data | ## Read Palmer Station Penguin dataset from GitHub
df = pd.read_csv("https://raw.githubusercontent.com/allisonhorst/"
"palmerpenguins/47a3476d2147080e7ceccef4cf70105c808f2cbf/"
"data-raw/penguins_raw.csv")
# Increase dataset to 1m rows and reset index
df = df.sample(1_000_000, replace=True).reset_index(drop=True)
# Update sample number (0 to 999'999)
df["Sample Number"] = df.index
# Add some random variation to numeric columns
df[["Culmen Length (mm)", "Culmen Depth (mm)",
"Flipper Length (mm)", "Body Mass (g)"]] = df[["Culmen Length (mm)", "Culmen Depth (mm)",
"Flipper Length (mm)", "Body Mass (g)"]] \
+ np.random.rand(df.shape[0], 4)
# Create dataframe where missing numeric values are filled with zero
df_nonan = df.copy()
df_nonan[["Culmen Length (mm)", "Culmen Depth (mm)",
"Flipper Length (mm)", "Body Mass (g)"]] = df[["Culmen Length (mm)", "Culmen Depth (mm)",
"Flipper Length (mm)", "Body Mass (g)"]].fillna(0) | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.2 Write to disk | # Write to csv
df.to_csv("penguin-dataset.csv")
# Write to parquet
df.to_parquet("penguin-dataset.parquet")
# context = pa.default_serialization_context()  # deprecated in newer pyarrow releases and unused below
# Write to Arrow
# Convert from pandas to Arrow
table = pa.Table.from_pandas(df)
# Write out to file
writer = pa.RecordBatchFileWriter('penguin-dataset.arrow', table.schema)
writer.write(table)
writer.close()
#with pa.OSFile('penguin-dataset.arrow', 'wb') as sink:
#with pa.RecordBatchFileWriter(sink, table.schema,write_legacy_format=True) as writer:
#writer.write_table(table)
# Convert from no-NaN pandas to Arrow
table_nonan = pa.Table.from_pandas(df_nonan)
# Write out to file
writer = pa.RecordBatchFileWriter('penguin-dataset-nonan.arrow', table.schema)
writer.write(table_nonan)
writer.close()
#with pa.OSFile('penguin-dataset-nonan.arrow', 'wb') as sink:
#with pa.RecordBatchFileWriter(sink, table_nonan.schema,write_legacy_format=True) as writer:
#writer.write_table(table_nonan) | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
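The `RecordBatchFileWriter` calls above still work; recent pyarrow releases also expose the same writer through the `pyarrow.ipc` convenience API. A minimal equivalent sketch, assuming a reasonably recent pyarrow:

```python
with pa.OSFile('penguin-dataset.arrow', 'wb') as sink:
    with pa.ipc.new_file(sink, table.schema) as writer:
        writer.write_table(table)
```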
1.3 Reading time - calculate average of numeric column

1.3.1 Read csv and calculate mean | %%timeit
pd.read_csv("penguin-dataset.csv")["Flipper Length (mm)"].mean() | 3.4 s ยฑ 105 ms per loop (mean ยฑ std. dev. of 7 runs, 1 loop each)
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.3.2 Read parquet and calculate mean | %%timeit
pd.read_parquet("penguin-dataset.parquet", columns=["Flipper Length (mm)"]).mean() | /Users/ravishankarnair/anaconda3/envs/py36/lib/python3.6/site-packages/pyarrow/pandas_compat.py:708: FutureWarning: .labels was deprecated in version 0.24.0. Use .codes instead.
labels = getattr(columns, 'labels', None) or [
/Users/ravishankarnair/anaconda3/envs/py36/lib/python3.6/site-packages/pyarrow/pandas_compat.py:735: FutureWarning: the 'labels' keyword is deprecated, use 'codes' instead
return pd.MultiIndex(levels=new_levels, labels=labels, names=columns.names)
/Users/ravishankarnair/anaconda3/envs/py36/lib/python3.6/site-packages/pyarrow/pandas_compat.py:752: FutureWarning: .labels was deprecated in version 0.24.0. Use .codes instead.
labels, = index.labels
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.3.3 Read Arrow using file API | %%timeit
with pa.OSFile('penguin-dataset.arrow', 'rb') as source:
table = pa.ipc.open_file(source).read_all().column("Flipper Length (mm)")
result = table.to_pandas().mean() | 133 ms ยฑ 2.73 ms per loop (mean ยฑ std. dev. of 7 runs, 10 loops each)
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.3.4 Read Arrow with memory-mapped API with missing values | %%timeit
source = pa.memory_map('penguin-dataset.arrow', 'r')
table = pa.ipc.RecordBatchFileReader(source).read_all().column("Flipper Length (mm)")
result = table.to_pandas().mean() | 6.19 ms ยฑ 82.5 ยตs per loop (mean ยฑ std. dev. of 7 runs, 100 loops each)
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.3.5 Read Arrow with memory-mapped API without missing values (zero-copy) | %%timeit
source = pa.memory_map('penguin-dataset-nonan.arrow', 'r')
table = pa.ipc.RecordBatchFileReader(source).read_all().column("Flipper Length (mm)")
result = table.to_pandas().mean() | 4.04 ms ยฑ 80.4 ยตs per loop (mean ยฑ std. dev. of 7 runs, 100 loops each)
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4 Memory consumption - read column | # Measure initial memory consumption
memory_init = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4.1 Read csv | col_csv = pd.read_csv("penguin-dataset.csv")["Flipper Length (mm)"]
memory_post_csv = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4.2 Read parquet | col_parquet = pd.read_parquet("penguin-dataset.parquet", columns=["Flipper Length (mm)"])
memory_post_parquet = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4.3 Read Arrow using file API | with pa.OSFile('penguin-dataset.arrow', 'rb') as source:
col_arrow_file = pa.ipc.open_file(source).read_all().column("Flipper Length (mm)").to_pandas()
memory_post_arrowos = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4.4 Read Arrow with memory-mapped API with missing values | source = pa.memory_map('penguin-dataset.arrow', 'r')
table_mmap = pa.ipc.RecordBatchFileReader(source).read_all().column("Flipper Length (mm)")
col_arrow_mapped = table_mmap.to_pandas()
memory_post_arrowmmap = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
1.4.5 Read Arrow with memory-mapped API without missing values (zero-copy) | source = pa.memory_map('penguin-dataset-nonan.arrow', 'r')
table_mmap_zc = pa.ipc.RecordBatchFileReader(source).read_all().column("Flipper Length (mm)")
col_arrow_mapped_zc = table_mmap_zc.to_pandas()
memory_post_arrowmmap_zc = psutil.Process(os.getpid()).memory_info().rss >> 20 | _____no_output_____ | Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
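The small footprint of the memory-mapped reads can also be cross-checked against Arrow's own allocator; a quick sketch that complements (not replaces) the `psutil` numbers below:

```python
# A zero-copy, memory-mapped read should leave Arrow's own heap counter
# (almost) unchanged.
print(f"Arrow-allocated memory: {pa.total_allocated_bytes() >> 20} MB")
```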
1.4.6 Display memory consupmtion | # Print memory consumption
print(f"csv: {memory_post_csv - memory_init}\n"
f"Parquet: {memory_post_parquet - memory_post_csv}\n"
f"Arrow file API: {memory_post_arrowos - memory_post_parquet}\n"
f"Arrow memory-mapped API with NaNs: {memory_post_arrowmmap - memory_post_arrowos}\n"
f"Arrow memory-mapped API (zero-copy): {memory_post_arrowmmap_zc - memory_post_arrowmmap}\n") | csv: 223
Parquet: -4
Arrow file API: -8
Arrow memory-mapped API with NaNs: 8
Arrow memory-mapped API (zero-copy): 0
| Apache-2.0 | arrow_performance_comparison_notebook.ipynb | passionbytes/arrowexp |
Controlling access to attributes

* The following blocks are one possible implementation of vectors of `double`s.
* Here, the member variable `name` is in the `protected:` part.
* Member methods and subclass members can access this variable, but it cannot be accessed from outside the class.
* We call this **encapsulation**; instead of directly reading or writing the variable, we would use mutator or reader **methods**.
* This is done to modularize software components, up to the level of integrated circuit chips.

``` C++
// Begin vector_double.h
// NOTE: the original include directives lost their header names in extraction;
// the names below are reconstructed from usage.
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

// This directive would activate method call logging
#ifndef LOG
#define LOG
#endif

// This directive would activate bracket [] operator logging
// Added this just because the examples call [] operator frequently
#ifndef LOGBRACKET
// #define LOGBRACKET
#endif

// This is to prevent declaring vector class twice
// If declared twice, C/C++ compilers would show an error message
#ifndef VECTOR_DOUBLE
#define VECTOR_DOUBLE

class RowVector {
        // automatic allocation
        // https://stackoverflow.com/questions/8553464/vector-as-a-class-member
        std::vector<double> columns;
    protected:
        // To distinguish vectors from each other
        std::string name;
    public:
        // Default constructor
        RowVector();
        // Destructor
        ~ RowVector();
        // Default arguments
        // If the function could not find the argument in the call, it uses the default value.
        RowVector(const uint32_t n, const double *values=NULL, std::string new_name="None");
        // Whenever possible, it is advisable to use the `const` keyword
        // Protects data from being overwritten and may optimize further
        RowVector(const uint32_t n, std::string new_name="None");
        // Copy constructor must use a reference.
        // What would happen otherwise?
        RowVector(const RowVector & other);
        // Two versions of [] operators
        // This one is for normal vectors. Allows changing values
        double & operator [] (const uint32_t i);
        // This one is for constant vectors. Protects the values from overwriting
        double operator [] (const uint32_t i) const;
        const std::string get_name() const;
        RowVector operator + (const RowVector & other);
        RowVector operator * (const double a);
        const double operator * (const RowVector & other);
        void show();
        void resize(std::size_t new_size);
        std::size_t size() const noexcept;
        RowVector & operator += (const RowVector & other);
        RowVector & operator *= (const double a);
};
#endif
// End vector_double.h
```

``` C++
// Begin vector_double.cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include "vector_double.h"

RowVector::RowVector(){
// This may look involving but sometimes helps to see how the program works.
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector()" << '\n';
#endif
    name = "None";
}

RowVector::~ RowVector(){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "~ RowVector()" << '\n';
#endif
}

RowVector::RowVector(const uint32_t n, const double *values, std::string new_name){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector(" << n << ", " << values << ", " << new_name << ")\n";
#endif
    columns.resize(n);
    // If initial values available, copy
    if (values){
        for (uint32_t i = 0; columns.size() > i; ++i){
            columns[i] = values[i];
        }
    }
    // If no initial values, set all values zero
    else{
        for (uint32_t i = 0; columns.size() > i; ++i){
            columns[i] = 0.0;
        }
    }
    name = new_name;
}

// Instead of implementing another constructor, calling an existing one
// c++ 11 or later
RowVector::RowVector(const uint32_t n, std::string new_name) : RowVector(n, NULL, new_name){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector(" << n << ", " << new_name << ")\n";
#endif
}

RowVector::RowVector(const RowVector & other){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector(" << & other << ")\n";
#endif
    // https://codereview.stackexchange.com/questions/149669/c-operator-overloading-for-matrix-operations-follow-up
    // http://www.cplusplus.com/reference/vector/vector/resize/
    columns.resize(other.columns.size());
    // element loop
    for(uint32_t i=0; columns.size() > i; ++i){
        columns[i] = other.columns[i];
    }
    // Copy name of the other one
    name = other.name;
    // Then append
    name.append("2");
}

double & RowVector::operator [] (const uint32_t i){
#ifdef LOGBRACKET
    std::cout << '[' << &columns << ']' << "double & RowVector::operator [] (" << i << ")\n";
#endif
    // Return reference; otherwise, unable to assign
    return columns[i];
}

double RowVector::operator [] (const uint32_t i) const {
#ifdef LOGBRACKET
    std::cout << '[' << &columns << ']' << "double RowVector::operator [] (" << i << ") const\n";
#endif
    return columns[i];
}

const std::string RowVector::get_name() const{
#ifdef LOG
    std::cout << '[' << &columns << ']' << "const std::string RowVector::get_name()\n";
#endif
    // Return constant; to prevent change
    return name;
}

RowVector RowVector::operator + (const RowVector & other){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector RowVector::operator + (" << & other << ")\n";
#endif
    // Check size
    assert(columns.size() == other.columns.size());
    // Make a new vector to return
    RowVector temp(other);
    // Element loop
    for (uint32_t i=0; columns.size() > i; ++i){
        temp[i] += columns[i];
    }
    // Returning a temporary image
    return temp;
}

RowVector RowVector::operator * (const double a){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector RowVector::operator * (" << a << ")\n";
#endif
    // Make a new vector to return
    RowVector temp(*this);
    // Element loop in `for each` style
    // c++ 11 or later
    for (auto & element : temp.columns){
        element *= a;
    }
    // Returning a temporary image
    return temp;
}

const double RowVector::operator * (const RowVector & other){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "const double RowVector::operator * (" << & other << ")\n";
#endif
    // Check size
    assert(columns.size() == other.columns.size());
    double dot_product = 0.0;
    // Element loop
    for (uint32_t i = 0; columns.size() > i; ++i){
        dot_product += columns[i] * other.columns[i];
    }
    // Returning a temporary image
    return dot_product;
}

void RowVector::show(){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "void RowVector::show()\n";
#endif
    for (uint32_t i=0; columns.size() > i; ++i){
        std::cout << name << '[' << i << "] = " << columns[i] << '\n';
    }
}

void RowVector::resize(std::size_t new_size){
#ifdef LOG
    std::cout << '[' << &columns << ']' << "void RowVector::resize(" << new_size << ")\n";
#endif
    columns.resize(new_size);
}

std::size_t RowVector::size() const noexcept{
#ifdef LOG
    std::cout << '[' << &columns << ']' << "std::size_t RowVector::size() const noexcept\n";
#endif
    return columns.size();
}

RowVector & RowVector::operator += (const RowVector & other) {
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector & RowVector::operator += (" << & other << ")\n";
#endif
    // https://stackoverflow.com/questions/4581961/c-how-to-overload-operator
    for (uint32_t i=0; size()>i; ++i){
        columns[i] += other[i];
    }
    return *this;
}

RowVector & RowVector::operator *= (const double a) {
#ifdef LOG
    std::cout << '[' << &columns << ']' << "RowVector & RowVector::operator *= (" << a << ")\n";
#endif
    // https://stackoverflow.com/questions/4581961/c-how-to-overload-operator
    for (uint32_t i=0; size()>i; ++i){
        columns[i] *= a;
    }
    return *this;
}
// End vector_double.cpp
// Build command : g++ -Wall -g -std=c++14 vector_double.cpp -fsyntax-only
```

``` C++
// Begin cpp_vector_double_practice.cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include "vector_double.h"

int32_t main(int32_t argn, char *argv[]){
    double s[] = {1.0, 2.0};

    std::cout << "RowVector row (2u, s, \"row\");\n";
    RowVector row (2u, s, "row");
    row.show();

    std::cout << "RowVector another_row (row);\n";
    RowVector another_row (row);
    row.show();
    another_row.show();

    std::cout << "another_row[1] += 0.5;\n";
    another_row[1] += 0.5;
    row.show();
    another_row.show();

    std::cout << "RowVector row_plus_another(row + another_row);\n";
    RowVector row_plus_another(row + another_row);
    row.show();
    another_row.show();
    row_plus_another.show();

    std::cout << "RowVector zeros(3);\n";
    RowVector zeros(3u, "zeros");
    row.show();
    another_row.show();
    row_plus_another.show();
    zeros.show();

    double t[] = {2.0, -1.0};
    RowVector ortho (2u, t, "ortho");

    double dot = row * ortho;
    std::cout << "double dot = row * ortho;\n";
    std::cout << "dot = " << dot << '\n';

    std::cout << "dot = row * row;\n";
    dot = row * row;
    std::cout << "dot = " << dot << '\n';
}
// End cpp_vector_double_practice.cpp
// Build command : g++ -Wall -g -std=c++14 cpp_vector_double_practice.cpp vector_double.cpp -o cpp_vector_double_practice
```

* Meanwhile, the following code blocks depict a possible implementation in Python. | import collections
class Vector(collections.UserList):
def __add__(self, other):
# check size
assert len(self) == len(other), f"Lengths are different ({len(self)} == {len(other)})"
# trying list comprehension
return Vector([a + b for a, b in zip(self, other)])
def __radd__(self, other):
        # Right-hand add: called for `other + self` when the left operand defers to Vector (e.g. sum())
return self.__add__(other)
def __mul__(self, other):
        # Dispatch on the operand type: scalar -> elementwise scaling, Vector -> dot product
if isinstance(other, (int, float, complex)):
result = Vector([a * other for a in self])
elif isinstance(other, Vector):
assert len(self) == len(other), f"Lengths are different ({len(self)} == {len(other)})"
result = sum(a * b for a, b in zip(self, other))
return result
    def __rmul__(self, other):
        # scalar * Vector: delegate to __mul__
        return self.__mul__(other)
def __str__(self):
        # .join() concatenates the per-element strings with newline separators
return '\n'.join(f"{hex(id(self))}[{i}] = {self[i]}" for i in range(len(self)))
def __len__(self):
return len(self.data)
print("a = Vector([1, 2])")
a = Vector([1, 2])
print(a)
print("b = Vector(a)")
b = Vector(a)
print(a)
print(b)
print("b[1] += (-0.5)")
b[1] += (-0.5)
print(a)
print(b)
print("c = a + b")
c = a + b
print(a)
print(b)
print(c)
print("ortho = Vector([2, -1])")
ortho = Vector([2, -1])
print(a)
print(b)
print(c)
print(ortho)
print("dot = a * ortho")
dot = a * ortho
print(f"a * ortho = {dot}")
print("dot = a * a")
dot = a * a
print(f"a * a = {dot}")
| _____no_output_____ | BSD-3-Clause | 02.ipynb | 2018pycpp/18pycpp-04 |
Matrix class example

In C++

* The following code blocks present a possible implementation of a matrix class in C++.
* Please note that to build these files, the `vector_double.h` and `vector_double.cpp` files are necessary.

```C++
// Begin matrix_double.h
// NOTE: the original include directives lost their header names in extraction;
// the names below are reconstructed from usage.
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include "vector_double.h"

#ifndef MATRIX_DOUBLE
#define MATRIX_DOUBLE

class Matrix{
        std::vector<RowVector> rows;
    protected:
        std::string name;
    public:
        Matrix();
        ~ Matrix();
        Matrix(const uint32_t m, const uint32_t n, const double *values, std::string new_name="None");
        Matrix(const uint32_t m, const uint32_t n, std::string new_name="None");
        Matrix(const Matrix & other, std::string new_name="");
        Matrix(const RowVector & other, std::string new_name="");
        RowVector & operator [] (const uint32_t i);
        const RowVector operator [] (const uint32_t i) const;
        const std::string get_name() const;
        Matrix operator + (const Matrix & other);
        Matrix operator * (const double a);
        RowVector operator * (const RowVector &v);
        Matrix operator * (const Matrix & other);
        void show();
        Matrix transpose();
        const size_t get_height() const;
        const size_t get_width() const;
};
#endif
// End matrix_double.h
```

``` C++
// Begin matrix_double.cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include "vector_double.h"
#include "matrix_double.h"

Matrix::Matrix(){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix()" << '\n';
#endif
    name = "None";
}

Matrix::~ Matrix(){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "~ Matrix()" << '\n';
#endif
}

Matrix::Matrix(const uint32_t m, const uint32_t n, const double *values, std::string new_name){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix(" << m << ", "<< n << ", " << values << ", " << new_name << ")\n";
#endif
    name = new_name;
    rows.resize(m);
    // If initial values available, copy
    if (values){
        // row loop
        for (uint32_t i = 0; m > i; ++i){
            rows[i].resize(n);
            // column loop
            for (uint32_t j = 0; n > j; ++j){
                rows[i][j] = *(values + i * n + j);
            }
        }
    }
    // If no initial values, set all values zero
    else{
        // row loop
        for (uint32_t i = 0; m > i; ++i){
            rows[i].resize(n);
            // column loop
            for (uint32_t j = 0; n > j; ++j){
                rows[i][j] = 0.0;
            }
        }
    }
}

// Instead of implementing another constructor, calling an existing one
// c++ 11 or later
Matrix::Matrix(const uint32_t m, const uint32_t n, std::string new_name) : Matrix(m, n, NULL, new_name){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix(" << m << ", " << n << ", " << new_name << ")\n";
#endif
}

Matrix::Matrix(const Matrix & other, std::string new_name){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix(" << & other << ")\n";
#endif
    // https://codereview.stackexchange.com/questions/149669/c-operator-overloading-for-matrix-operations-follow-up
    // http://www.cplusplus.com/reference/vector/vector/resize/
    rows.resize(other.rows.size());
    // row loop
    for(uint32_t i=0; rows.size() > i; ++i){
        rows[i].resize(other.rows[i].size());
        // column loop
        for(uint32_t j=0; other.rows[i].size() > j; ++j){
            // Another possibility is as follows
            // rows[i][j] = other[i][j];
            // However, the line above would create a temporary row vector.
            // To avoid such a seemingly unnecessary temporary object,
            // for now we use the following line
            rows[i][j] = other.rows[i][j];
        }
    }
    if ("" != new_name){
        name = new_name;
    }
    else{
        // Copy name of the other one
        name = other.name;
        // Then append
        name.append("2");
    }
}

Matrix::Matrix(const RowVector & other, std::string new_name){
    // RowVector -> n x 1 matrix
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix(const RowVector &" << & other << ")\n";
#endif
    rows.resize(other.size());
    // row loop
    for(uint32_t i=0; rows.size() > i; ++i){
        rows[i].resize(1);
        rows[i][0] = other[i];
    }
    if ("" != new_name){
        name = new_name;
    }
    else{
        // Copy name of the other one
        name = other.get_name();
        // Then append
        name.append("2");
    }
}

RowVector & Matrix::operator [] (const uint32_t i){
#ifdef LOGBRACKET
    std::cout << '[' << &rows << ']' << "RowVector & Matrix::operator [] (" << i << ")\n";
#endif
    // Return reference; otherwise, unable to assign
    return rows[i];
}

const RowVector Matrix::operator [] (const uint32_t i) const {
#ifdef LOGBRACKET
    std::cout << '[' << &rows << ']' << "const RowVector Matrix::operator [] (" << i << ")\n";
#endif
    return rows[i];
}

const std::string Matrix::get_name() const{
#ifdef LOG
    std::cout << '[' << &rows << ']' << "const std::string Matrix::get_name()\n";
#endif
    // Return constant; to prevent change
    return name;
}

Matrix Matrix::operator + (const Matrix & other){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix Matrix::operator + ("<< & other <<")\n";
#endif
    // Check size
    assert(this->get_height() == other.get_height());
    assert(this->get_width() == other.get_width());
#ifdef LOG
    std::cout << "Matrix temp(other);\n";
#endif
    // Make a new matrix to return
    Matrix temp(other, get_name() + '+' + other.get_name());
#ifdef LOG
    std::cout << "Begin row loop\n";
#endif
    // Row loop
    for (uint32_t i=0; rows.size() > i; ++i){
        temp[i] += rows[i];
    }
#ifdef LOG
    std::cout << "End row loop\n";
#endif
    // Returning a temporary image
    return temp;
}

Matrix Matrix::operator * (const double a){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix Matrix::operator * (" << a << ")\n";
#endif
    // Make a new matrix to return
    // https://stackoverflow.com/questions/332111/how-do-i-convert-a-double-into-a-string-in-c
    Matrix temp(*this, std::to_string(a) + '*' + get_name());
    // Element loop in `for each` style
    // c++ 11 or later
    for (auto & element : temp.rows){
        element *= a;
    }
    // Returning a temporary image
    return temp;
}

RowVector Matrix::operator * (const RowVector &v){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix Matrix::operator * (" << &v << ")\n";
#endif
    // Make a new vector to return
    RowVector temp(rows.size(), NULL, name + '*' + v.get_name());
    for (uint32_t i=0; rows.size()>i; ++i){
        temp[i] = rows[i] * v;
    }
    // Returning a temporary image
    return temp;
}

Matrix Matrix::operator * (const Matrix & other){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix Matrix::operator * (" << &other << ")\n";
#endif
    // Check size
    assert(rows[0].size() == other.rows.size());
    Matrix temp(rows.size(), other[0].size(), name + '*' + other.name);
    // row loop
    for (uint32_t i = 0; rows.size() > i; ++i){
        // column loop
        for(uint32_t j = 0; other[0].size() > j; ++j){
            // dummy index loop
            for(uint32_t k = 0; rows[0].size() > k; ++k){
                temp[i][j] += rows[i][k] * other[k][j];
            }
        }
    }
    // Returning a temporary image
    return temp;
}

void Matrix::show(){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "void Matrix::show()\n";
#endif
    // row loop
    for (uint32_t i=0; rows.size() > i; ++i){
        // column loop
        for (uint32_t j=0; rows[i].size() > j; ++j){
            std::cout << get_name() << '['<< i << "][" << j << "]= " << rows[i][j] << '\n';
        }
    }
}

Matrix Matrix::transpose(){
#ifdef LOG
    std::cout << '[' << &rows << ']' << "Matrix Matrix::transpose()\n";
#endif
    Matrix temp(rows[0].size(), rows.size(), name+"T");
    // row loop
    for(uint32_t i=0; temp.rows.size() > i; ++i){
        // column loop
        for(uint32_t j=0; temp.rows[i].size() > j; ++j){
            temp[i][j] = rows[j][i];
        }
    }
    return temp;
}

const size_t Matrix::get_height() const{
    return rows.size();
}

const size_t Matrix::get_width() const{
    return rows[0].size();
}
// End matrix_double.cpp
// Build command : g++ -Wall -g -std=c++14 matrix_double.cpp -fsyntax-only
```

``` C++
// Begin cpp_matrix_double_practice.cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>
#include <vector>

#include "matrix_double.h"

int32_t main(int32_t argn, char *argv[]){
    double s[] = {1.0, 0.0, 0.0, 1.0};
    std::cout << "Matrix id (2u, 2u, s, \"identity\");\n";
    Matrix identity (2u, 2u, s, "id");
    identity.show();

    double r[] = {+cos(M_PI/6.0), sin(M_PI/6.0), -sin(M_PI/6.0), cos(M_PI/6.0)};
    std::cout << "Matrix rotation (2u, 2u, r, \"rot\");\n";
    Matrix rotation (2u, 2u, r, "rot");
    identity.show();
    rotation.show();

    std::cout << "Matrix sum(identity + rotation);\n";
    Matrix sum(identity + rotation);
    identity.show();
    rotation.show();
    sum.show();
    // Check sum operation result
    for (uint32_t i=0; 2u > i; ++i){
        for (uint32_t j=0; 2u > j; ++j){
            assert(sum[i][j] == (identity[i][j] + rotation[i][j]));
        }
    }

    std::cout << "Matrix twice(identity * 2.0);\n";
    Matrix twice(identity * 2.0);
    // Check scalar multiplication result
    assert(twice[0][0] == 2.0);
    assert(twice[0][1] == 0.0);
    assert(twice[1][0] == 0.0);
    assert(twice[1][1] == 2.0);

    std::cout << "Matrix new_axis(twice * rotation);\n";
    Matrix new_axis(twice * rotation);
    // Check matrix multiplication result
    for (uint32_t i=0; 2u > i; ++i){
        for (uint32_t j=0; 2u > j; ++j){
            assert(new_axis[i][j] == (2.0 * rotation[i][j]));
        }
    }

    Matrix ninety_degrees(rotation * rotation * rotation);
    // Check matrix multiplication result
    assert(abs(ninety_degrees[0][0] - ( 0.0)) < 1e-12);
    assert(abs(ninety_degrees[0][1] - ( 1.0)) < 1e-12);
    assert(abs(ninety_degrees[1][0] - (-1.0)) < 1e-12);
    assert(abs(ninety_degrees[1][1] - ( 0.0)) < 1e-12);

    // State Space Representation: A x + B u
    double xi_d[] = {1.0, 0.0};
    double ones_d[] = {1.0, 1.0};
    Matrix xi(2, 1, xi_d, "xi");
    Matrix B(2, 1, ones_d, "B");
    double u = 0.75;
    Matrix xj;
    // xj = A xi + B u
    xj = rotation * xi + B * u;
    xj.show();
    assert(abs(xj[0][0] - ( 0.75 + cos(M_PI/6.0))) < 1e-12);
    assert(abs(xj[1][0] - ( 0.75 - sin(M_PI/6.0))) < 1e-12);
}
// End cpp_matrix_double_practice.cpp
// Build command : g++ -Wall -g -std=c++14 cpp_matrix_double_practice.cpp vector_double.cpp matrix_double.cpp -o cpp_matrix_double_practice
```

* The build command above lists all the necessary files.

In Python

* The following code blocks are a possible implementation of a matrix in Python.
* As in the C++ example, it builds on the prior `Vector` class. | import collections
import copy
class Matrix(collections.UserList):
def __init__(self, m=None, n=None, values=None):
if m is None:
self.m = self.n = 0
self.data = []
elif values is not None:
self.m = int(m) # number of rows
self.n = int(n) # number of columns
# Again utilizing Vector class and list comprehension
self.data = [Vector(values[(i * n):((i+1) * n)]) for i in range(m)]
elif n is None:
if isinstance(m, Matrix):
# copy constructor
self.m = m.m
self.n = m.n
# To avoid referencing rows of m matrix
self.data = copy.deepcopy(m.data)
elif isinstance(m, Vector):
# Vector to n x 1 Matrix
self.data = [Vector([value]) for value in m]
self.m = len(self.data)
self.n = 1
elif isinstance(m, int) and isinstance(n, int) and values is None:
# zeros
self.m = m
self.n = n
self.data = [Vector([0.0] * n) for i in range(m)]
else:
raise NotImplementedError
def __add__(self, other):
assert isinstance(other, Matrix)
result = Matrix()
for self_row, other_row in zip(self, other):
result.append(self_row + other_row)
return result
def __mul__(self, other):
if isinstance(other, (int, float, complex)):
result = Matrix()
for row in self:
result.append(row * other)
elif isinstance(other, Matrix):
assert self.n == other.m, f"Matrix sizes ({self.m}, {self.n}) x ({other.m}, {other.n}) not compatible"
result = Matrix(self.m, other.n)
for i in range(self.m):
for j in range(other.n):
for k in range(self.n):
result[i][j] += self[i][k] * other[k][j]
elif isinstance(other, Vector):
assert self.n == len(other), f"Matrix sizes ({self.m}, {self.n}) x ({len(other)}, 1) not compatible"
result = Vector([row * other for row in self])
else:
raise NotImplementedError
return result
def __str__(self):
row_text = []
for i, row in enumerate(self):
for j, value in enumerate(row):
row_text.append(f"{hex(id(self))}[{i}][{j}] = {self[i][j]}")
return '\n'.join(row_text)
    def transpose(self):
        result = Matrix()
        # zip(*self.data) iterates over columns; rebuild each as a row Vector
        result.data = [Vector(col) for col in zip(*self.data)]
        result.m = self.n
        result.n = self.m
        return result
matA = Matrix(2, 2, list(range(4)))
print(matA)
matB = Matrix(matA)
matB[0][0] = matA[0][0] + 7
print(matA)
print(matB)
assert matA[0][0] != matB[0][0], "Please use deep copy"
vecC = Vector([1, 0])
print("matC = Matrix(vecC)")
matC = Matrix(vecC)
print(matA)
print(matB)
print(matC)
print("matD = Matrix(2, 2)")
matD = Matrix(2, 2)
print(matA)
print(matB)
print(matC)
print(matD)
for i in range(matD.m):
for j in range(matD.n):
assert 0 == matD[i][j]
print("matE = matA + matA")
matE = matA + matA
print(matA)
print(matB)
print(matC)
print(matD)
print(matE)
for i in range(matE.m):
for j in range(matE.n):
assert matE[i][j] == 2 * matA[i][j]
print("matF = matA * matA")
matF = matA * matA
print(matA)
print(matB)
print(matC)
print(matD)
print(matE)
print(matF)
print("matG = matA * vecC")
vecG = matA * vecC
print(matA)
print(matB)
print(matC)
print(matD)
print(matE)
print(matF)
print(vecG)
assert len(vecG) == matA.m
for i in range(matA.m):
assert vecG[i] == matA[i][0]
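# Quick check of transpose(): entry (j, i) must mirror entry (i, j)
print("matH = matA.transpose()")
matH = matA.transpose()
for i in range(matA.m):
    for j in range(matA.n):
        assert matH[j][i] == matA[i][j]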
| _____no_output_____ | BSD-3-Clause | 02.ipynb | 2018pycpp/18pycpp-04 |
Overview

- based on nb023
- uses the results of nb034

Const | NB = '035'
isSmallSet = False
if isSmallSet:
LENGTH = 7000
else:
LENGTH = 500_000
PATH_TRAIN = './../data/input/train_clean.csv'
PATH_TEST = './../data/input/test_clean.csv'
PATH_SMPLE_SUB = './../data/input/sample_submission.csv'
DIR_OUTPUT = './../data/output/'
cp = ['#f8b195', '#f67280', '#c06c84', '#6c5b7b', '#355c7d']
sr = 10*10**3 # 10 kHz | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
Import everything I need :) | import warnings
warnings.filterwarnings('ignore')
import time
import gc
import random
import os
import itertools
import multiprocessing
import numpy as np
from scipy import signal
# from pykalman import KalmanFilter
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from fastprogress import progress_bar
from lightgbm import LGBMRegressor
from sklearn.model_selection import KFold, train_test_split, StratifiedKFold, GroupKFold
from sklearn.metrics import f1_score, mean_absolute_error, confusion_matrix
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
# from sklearn.svm import SVR
from sklearn.linear_model import Lasso
# from dtreeviz.trees import dtreeviz
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import Callback, LearningRateScheduler
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras import losses, models, optimizers
# import tensorflow_addons as tfa | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
My function | def f1_macro(true, pred):
return f1_score(true, pred, average='macro')
def get_df_batch(df, batch):
idxs = df['batch'] == batch
    assert any(idxs), 'No such batch'
return df[idxs]
def get_signal_mv_mean(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).mean().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_std(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).std().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_min(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).min().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
def get_signal_mv_max(df, n=3001):
signal_mv = np.zeros(len(df))
for bt in df['batch'].unique():
idxs = df['batch'] == bt
_signal_mv = df['signal'][idxs].rolling(n, center=True).max().interpolate('spline', order=5, limit_direction='both').values
signal_mv[idxs] = _signal_mv
return signal_mv
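# The four rolling helpers above differ only in the aggregation applied;
# a single generic version (hypothetical, not used below) could replace them:
def get_signal_mv_agg(df, agg='mean', n=3001):
    signal_mv = np.zeros(len(df))
    for bt in df['batch'].unique():
        idxs = df['batch'] == bt
        roll = df['signal'][idxs].rolling(n, center=True).agg(agg)
        signal_mv[idxs] = roll.interpolate('spline', order=5, limit_direction='both').values
    return signal_mv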
def group_feat_train(_train):
train = _train.copy()
# group init
train['group'] = int(0)
# group 1
idxs = (train['batch'] == 3) | (train['batch'] == 7)
train['group'][idxs] = int(1)
# group 2
idxs = (train['batch'] == 5) | (train['batch'] == 8)
train['group'][idxs] = int(2)
# group 3
idxs = (train['batch'] == 2) | (train['batch'] == 6)
train['group'][idxs] = int(3)
# group 4
idxs = (train['batch'] == 4) | (train['batch'] == 9)
train['group'][idxs] = int(4)
return train[['group']]
def group_feat_test(_test):
test = _test.copy()
# group init
test['group'] = int(0)
x_idx = np.arange(len(test))
# group 1
idxs = (100000<=x_idx) & (x_idx<200000)
test['group'][idxs] = int(1)
idxs = (900000<=x_idx) & (x_idx<=1000000)
test['group'][idxs] = int(1)
# group 2
idxs = (200000<=x_idx) & (x_idx<300000)
test['group'][idxs] = int(2)
idxs = (600000<=x_idx) & (x_idx<700000)
test['group'][idxs] = int(2)
# group 3
idxs = (400000<=x_idx) & (x_idx<500000)
test['group'][idxs] = int(3)
# group 4
idxs = (500000<=x_idx) & (x_idx<600000)
test['group'][idxs] = int(4)
idxs = (700000<=x_idx) & (x_idx<800000)
test['group'][idxs] = int(4)
return test[['group']]
class permutation_importance():
def __init__(self, model, metric):
self.is_computed = False
self.n_feat = 0
self.base_score = 0
self.model = model
self.metric = metric
self.df_result = []
def compute(self, X_valid, y_valid):
self.n_feat = len(X_valid.columns)
if self.metric == 'auc':
y_valid_score = self.model.predict_proba(X_valid)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_valid_score)
self.base_score = auc(fpr, tpr)
else:
pred = np.round(self.model.predict(X_valid)).astype('int8')
self.base_score = self.metric(y_valid, pred)
self.df_result = pd.DataFrame({'feat': X_valid.columns,
'score': np.zeros(self.n_feat),
'score_diff': np.zeros(self.n_feat)})
# predict
for i, col in enumerate(X_valid.columns):
df_perm = X_valid.copy()
np.random.seed(1)
df_perm[col] = np.random.permutation(df_perm[col])
y_valid_pred = self.model.predict(df_perm)
if self.metric == 'auc':
y_valid_score = self.model.predict_proba(df_perm)[:, 1]
fpr, tpr, thresholds = roc_curve(y_valid, y_valid_score)
score = auc(fpr, tpr)
else:
score = self.metric(y_valid, np.round(y_valid_pred).astype('int8'))
self.df_result['score'][self.df_result['feat']==col] = score
self.df_result['score_diff'][self.df_result['feat']==col] = self.base_score - score
self.is_computed = True
def get_negative_feature(self):
        assert self.is_computed != False, 'The compute() method has not been run yet'
idx = self.df_result['score_diff'] < 0
return self.df_result.loc[idx, 'feat'].values.tolist()
def get_positive_feature(self):
assert self.is_computed!=False, 'compute ใกใฝใใใๅฎ่กใใใฆใใพใใ'
idx = self.df_result['score_diff'] > 0
return self.df_result.loc[idx, 'feat'].values.tolist()
def show_permutation_importance(self, score_type='loss'):
'''score_type = 'loss' or 'accuracy' '''
        assert self.is_computed != False, 'The compute() method has not been run yet'
if score_type=='loss':
ascending = True
elif score_type=='accuracy':
ascending = False
else:
ascending = ''
plt.figure(figsize=(15, int(0.25*self.n_feat)))
sns.barplot(x="score_diff", y="feat", data=self.df_result.sort_values(by="score_diff", ascending=ascending))
plt.title('base_score - permutation_score')
def plot_corr(df, abs_=False, threshold=0.95):
if abs_==True:
corr = df.corr().abs()>threshold
vmin = 0
else:
corr = df.corr()
vmin = -1
# Plot
fig, ax = plt.subplots(figsize=(12, 10), dpi=100)
fig.patch.set_facecolor('white')
sns.heatmap(corr,
xticklabels=df.corr().columns,
yticklabels=df.corr().columns,
vmin=vmin,
vmax=1,
center=0,
annot=False)
# Decorations
ax.set_title('Correlation', fontsize=22)
def get_low_corr_column(df, threshold):
df_corr = df.corr()
df_corr = abs(df_corr)
columns = df_corr.columns
    # Set the diagonal values to 0
for i in range(0, len(columns)):
df_corr.iloc[i, i] = 0
while True:
columns = df_corr.columns
max_corr = 0.0
query_column = None
target_column = None
df_max_column_value = df_corr.max()
max_corr = df_max_column_value.max()
query_column = df_max_column_value.idxmax()
target_column = df_corr[query_column].idxmax()
if max_corr < threshold:
            # If nothing exceeds the threshold, we're done
break
else:
            # If something exceeds the threshold
delete_column = None
saved_column = None
            # Remove whichever feature has the larger total absolute correlation with the others
if sum(df_corr[query_column]) <= sum(df_corr[target_column]):
delete_column = target_column
saved_column = query_column
else:
delete_column = query_column
saved_column = target_column
            # Drop the feature to be removed from the correlation matrix (both row and column)
df_corr.drop([delete_column], axis=0, inplace=True)
df_corr.drop([delete_column], axis=1, inplace=True)
    return df_corr.columns  # list of columns with highly correlated features removed
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
if col!='open_channels':
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
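# Typical usage once the train/test dataframes are loaded (commented out
# here because the loading cells come later in the notebook):
# train = reduce_mem_usage(pd.read_csv(PATH_TRAIN))
# test = reduce_mem_usage(pd.read_csv(PATH_TEST))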
def train_lgbm(X, y, X_te, lgbm_params, random_state=5, n_fold=5, verbose=50, early_stopping_rounds=100, show_fig=True):
    # NOTE: N_FOLD and N_ESTIMATORS are module-level constants assumed to be
    # defined in a configuration cell elsewhere in this notebook
# using features
print(f'features({len(X.columns)}): \n{X.columns}') if not verbose==0 else None
# folds = KFold(n_splits=n_fold, shuffle=True, random_state=random_state)
folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=random_state)
scores = []
oof = np.zeros(len(X))
oof_round = np.zeros(len(X))
test_pred = np.zeros(len(X_te))
df_pi = pd.DataFrame(columns=['feat', 'score_diff'])
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y=y)):
if verbose==0:
pass
else:
print('\n------------------')
print(f'- Fold {fold_n + 1}/{N_FOLD} started at {time.ctime()}')
# prepare dataset
X_train, X_valid = X.iloc[train_idx], X.iloc[valid_idx]
y_train, y_valid = y[train_idx], y[valid_idx]
# train
model = LGBMRegressor(**lgbm_params, n_estimators=N_ESTIMATORS)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds)
# pred
y_valid_pred = model.predict(X_valid, num_iteration=model.best_iteration_)
y_valid_pred_round = np.round(y_valid_pred).astype('int8')
_test_pred = model.predict(X_te, num_iteration=model.best_iteration_)
if show_fig==False:
pass
else:
# permutation importance
pi = permutation_importance(model, f1_macro) # pass the model and the metric
pi.compute(X_valid, y_valid)
pi_result = pi.df_result
df_pi = pd.concat([df_pi, pi_result[['feat', 'score_diff']]])
# result
oof[valid_idx] = y_valid_pred
oof_round[valid_idx] = y_valid_pred_round
score = f1_score(y_valid, y_valid_pred_round, average='macro')
scores.append(score)
test_pred += _test_pred
if verbose==0:
pass
else:
print(f'---> f1-score(macro) valid: {f1_score(y_valid, y_valid_pred_round, average="macro"):.4f}')
print('')
print('====== finish ======')
print('score list:', scores)
print('CV mean score(f1_macro): {0:.4f}, std: {1:.4f}'.format(np.mean(scores), np.std(scores)))
print(f'oof score(f1_macro): {f1_score(y, oof_round, average="macro"):.4f}')
print('')
if show_fig==False:
pass
else:
# visualization
plt.figure(figsize=(5, 5))
plt.plot([0, 10], [0, 10], color='gray')
plt.scatter(y, oof, alpha=0.05, color=cp[1])
plt.xlabel('true')
plt.ylabel('pred')
plt.show()
# confusion_matrix
plot_confusion_matrix(y, oof_round, classes=np.arange(11))
# permutation importance
plt.figure(figsize=(15, int(0.25*len(X.columns))))
order = df_pi.groupby(["feat"]).mean()['score_diff'].reset_index().sort_values('score_diff', ascending=False)
sns.barplot(x="score_diff", y="feat", data=df_pi, order=order['feat'])
plt.title('base_score - permutation_score')
plt.show()
# submission
test_pred = test_pred / n_fold
test_pred_round = np.round(test_pred).astype('int8')
return test_pred_round, test_pred, oof_round, oof
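# Hedged usage sketch for train_lgbm. It assumes the globals referenced above
# (N_ESTIMATORS, cp, f1_macro, permutation_importance) are defined earlier in
# the notebook; the parameter values here are illustrative, not tuned:
#
# lgbm_params = {'objective': 'regression', 'learning_rate': 0.1,
#                'num_leaves': 63, 'metric': 'rmse'}
# test_pred_round, test_pred, oof_round, oof = train_lgbm(
#     X, y, X_te, lgbm_params, n_fold=5, verbose=100)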
def plot_confusion_matrix(truth, pred, classes, normalize=False, title=''):
cm = confusion_matrix(truth, pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix', size=15)
plt.colorbar(fraction=0.046, pad=0.04)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.grid(False)
plt.tight_layout()
def train_test_split_lgbm(X, y, X_te, lgbm_params, random_state=5, test_size=0.3, verbose=50, early_stopping_rounds=100, show_fig=True):
# using features
if verbose != 0: print(f'features({len(X.columns)}): \n{X.columns}')
# folds = KFold(n_splits=n_fold, shuffle=True, random_state=random_state)
# folds = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=random_state)
# prepare dataset
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=test_size, random_state=random_state)
# train
model = LGBMRegressor(**lgbm_params, n_estimators=N_ESTIMATORS)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
verbose=verbose,
early_stopping_rounds=early_stopping_rounds)
# pred
oof = model.predict(X_valid, num_iteration=model.best_iteration_)
oof_round = np.round(oof).astype('int8')
test_pred = model.predict(X_te, num_iteration=model.best_iteration_)
test_pred_round = np.round(test_pred).astype('int8')
print('====== finish ======')
print(f'oof score(f1_macro): {f1_score(y_valid, oof_round, average="macro"):.4f}')
print('')
if show_fig==False:
pass
else:
# visualization
plt.figure(figsize=(5, 5))
plt.plot([0, 10], [0, 10], color='gray')
plt.scatter(y_valid, oof, alpha=0.05, color=cp[1])
plt.xlabel('true')
plt.ylabel('pred')
plt.show()
# confusion_matrix
plot_confusion_matrix(y_valid, oof_round, classes=np.arange(11))
# permutation importance
pi = permutation_importance(model, f1_macro) # pass the model and the metric
pi.compute(X_valid, y_valid)
pi.show_permutation_importance(score_type='accuracy') # loss or accuracy
plt.show()
return test_pred_round, test_pred, oof_round, oof | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
ref: https://www.kaggle.com/martxelo/fe-and-ensemble-mlp-and-lgbm | def calc_gradients(s, n_grads=4):
'''
Calculate gradients for a pandas series. Returns the same number of samples
'''
grads = pd.DataFrame()
g = s.values
for i in range(n_grads):
g = np.gradient(g)
grads['grad_' + str(i+1)] = g
return grads
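# What calc_gradients returns, on a tiny made-up series (illustrative):
#
# import pandas as pd
# s = pd.Series([0., 1., 4., 9., 16.])
# calc_gradients(s, n_grads=2)
# # -> 'grad_1' (first np.gradient pass) and 'grad_2' (gradient of the
# # gradient), both the same length as s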
def calc_low_pass(s, n_filts=10):
'''
Applies low-pass filters to the signal: a causal (delayed) lfilter version and a zero-phase filtfilt version.
'''
wns = np.logspace(-2, -0.3, n_filts)
# wns = [0.3244]
low_pass = pd.DataFrame()
x = s.values
for wn in wns:
b, a = signal.butter(1, Wn=wn, btype='low')
zi = signal.lfilter_zi(b, a)
low_pass['lowpass_lf_' + str('%.4f' %wn)] = signal.lfilter(b, a, x, zi=zi*x[0])[0]
low_pass['lowpass_ff_' + str('%.4f' %wn)] = signal.filtfilt(b, a, x)
return low_pass
def calc_high_pass(s, n_filts=10):
'''
Applies high-pass filters to the signal: a causal (delayed) lfilter version and a zero-phase filtfilt version.
'''
wns = np.logspace(-2, -0.1, n_filts)
# wns = [0.0100, 0.0264, 0.0699, 0.3005, 0.4885, 0.7943]
high_pass = pd.DataFrame()
x = s.values
for wn in wns:
b, a = signal.butter(1, Wn=wn, btype='high')
zi = signal.lfilter_zi(b, a)
high_pass['highpass_lf_' + str('%.4f' %wn)] = signal.lfilter(b, a, x, zi=zi*x[0])[0]
high_pass['highpass_ff_' + str('%.4f' %wn)] = signal.filtfilt(b, a, x)
return high_pass
def calc_roll_stats(s, windows=[10, 50, 100, 500, 1000, 3000]):
'''
Calculates rolling stats like mean, std, min, max...
'''
roll_stats = pd.DataFrame()
for w in windows:
roll_stats['roll_mean_' + str(w)] = s.rolling(window=w, min_periods=1).mean().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_std_' + str(w)] = s.rolling(window=w, min_periods=1).std().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_min_' + str(w)] = s.rolling(window=w, min_periods=1).min().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_max_' + str(w)] = s.rolling(window=w, min_periods=1).max().interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_range_' + str(w)] = roll_stats['roll_max_' + str(w)] - roll_stats['roll_min_' + str(w)]
roll_stats['roll_q10_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.10).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q25_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.25).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q50_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.50).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q75_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.75).interpolate('spline', order=5, limit_direction='both')
roll_stats['roll_q90_' + str(w)] = s.rolling(window=w, min_periods=1).quantile(0.90).interpolate('spline', order=5, limit_direction='both')
# add zeros when na values (std)
# roll_stats = roll_stats.fillna(value=0)
return roll_stats
def calc_ewm(s, windows=[10, 50, 100, 500, 1000, 3000]):
'''
Calculates exponential weighted functions
'''
ewm = pd.DataFrame()
for w in windows:
ewm['ewm_mean_' + str(w)] = s.ewm(span=w, min_periods=1).mean()
ewm['ewm_std_' + str(w)] = s.ewm(span=w, min_periods=1).std()
# add zeros when na values (std)
ewm = ewm.fillna(value=0)
return ewm
def divide_and_add_features(s, signal_size=500000):
'''
Divide the signal into bags of "signal_size" samples.
Normalize the data by dividing it by 15.0.
'''
# normalize
s = s/15.0
ls = []
for i in progress_bar(range(int(s.shape[0]/signal_size))):
sig = s[i*signal_size:(i+1)*signal_size].copy().reset_index(drop=True)
sig_featured = add_features(sig)
ls.append(sig_featured)
return pd.concat(ls, axis=0) | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
ref: https://www.kaggle.com/nxrprime/single-model-lgbm-kalman-filter-ii | def Kalman1D(observations,damping=1):
# Returns the smoothed time series data
observation_covariance = damping
initial_value_guess = observations[0]
transition_matrix = 1
transition_covariance = 0.1
kf = KalmanFilter(
initial_state_mean=initial_value_guess,
initial_state_covariance=observation_covariance,
observation_covariance=observation_covariance,
transition_covariance=transition_covariance,
transition_matrices=transition_matrix
)
pred_state, state_cov = kf.smooth(observations)
return pred_state | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
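# A minimal sketch of Kalman1D on a noisy made-up signal (assumes pykalman is
# installed, since the KalmanFilter used above comes from pykalman):
#
# import numpy as np
# noisy = np.sin(np.linspace(0, 10, 500)) + np.random.randn(500) * 0.3
# smooth = Kalman1D(noisy, damping=1) # shape (500, 1): smoothed state means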
Preparation setting | sns.set() | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
load dataset | df_tr = pd.read_csv(PATH_TRAIN)
df_te = pd.read_csv(PATH_TEST) | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
Assign a batch number to each row so the data can be processed per batch | batch_list = []
for n in range(10):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_tr['batch'] = batch_list
batch_list = []
for n in range(4):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_te['batch'] = batch_list | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
smallset? | if isSmallSet:
print('small set mode')
# train
batchs = df_tr['batch'].values
dfs = []
for i_bt, bt in enumerate(df_tr['batch'].unique()):
idxs = batchs == bt
_df = df_tr[idxs][:LENGTH].copy()
dfs.append(_df)
df_tr = pd.concat(dfs).reset_index(drop=True)
# test
batchs = df_te['batch'].values
dfs = []
for i_bt, bt in enumerate(df_te['batch'].unique()):
idxs = batchs == bt
_df = df_te[idxs][:LENGTH].copy()
dfs.append(_df)
df_te = pd.concat(dfs).reset_index(drop=True) | _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
Train | # configurations and main hyperparameters
# EPOCHS = 180
EPOCHS = 180
NNBATCHSIZE = 16
GROUP_BATCH_SIZE = 4000
SEED = 321
LR = 0.0015
SPLITS = 6
def seed_everything(seed):
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
# tf.random.set_seed(seed)
# read data
def read_data():
train = pd.read_csv(PATH_TRAIN, dtype={'time': np.float32, 'signal': np.float32, 'open_channels':np.int32})
test = pd.read_csv(PATH_TEST, dtype={'time': np.float32, 'signal': np.float32})
sub = pd.read_csv(PATH_SMPLE_SUB, dtype={'time': np.float32})
# Y_train_proba = np.load('./../data/input/Y_train_proba.npy')
# Y_test_proba = np.load('./../data/input/Y_test_proba.npy')
probas = np.load('./../data/output_ignore/probas_nb034_RandomForestClassifier_cv_0.9383.npz')
Y_train_proba = probas['arr_0']
Y_test_proba = probas['arr_1']
for i in range(11):
train[f"proba_{i}"] = Y_train_proba[:, i]
test[f"proba_{i}"] = Y_test_proba[:, i]
return train, test, sub
# create batches of 4000 observations
def batching(df, batch_size):
df['group'] = df.groupby(df.index//batch_size, sort=False)['signal'].agg(['ngroup']).values
df['group'] = df['group'].astype(np.uint16)
return df
# normalize the data (standard scaler). We can also try other scalers for a better score!
def normalize(train, test):
train_input_mean = train.signal.mean()
train_input_sigma = train.signal.std()
train['signal'] = (train.signal - train_input_mean) / train_input_sigma
test['signal'] = (test.signal - train_input_mean) / train_input_sigma
return train, test
# get lead and lags features
def lag_with_pct_change(df, windows):
for window in windows:
df['signal_shift_pos_' + str(window)] = df.groupby('group')['signal'].shift(window).fillna(0)
df['signal_shift_neg_' + str(window)] = df.groupby('group')['signal'].shift(-1 * window).fillna(0)
return df
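# Sketch of what lag_with_pct_change adds, on a made-up two-group frame
# (despite its name, the function only creates lead/lag shift features):
#
# import numpy as np
# import pandas as pd
# demo = pd.DataFrame({'signal': np.arange(8, dtype=float),
#                      'group': [0, 0, 0, 0, 1, 1, 1, 1]})
# demo = lag_with_pct_change(demo, [1])
# # adds 'signal_shift_pos_1' (previous value within each group) and
# # 'signal_shift_neg_1' (next value within each group); NaNs become 0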
# main module to run feature engineering. Here you may want to try to add other features and check whether your score improves :).
def run_feat_engineering(df, batch_size):
# create batches
df = batching(df, batch_size = batch_size)
# create leads and lags (1, 2, 3 making them 6 features)
df = lag_with_pct_change(df, [1, 2, 3])
# create signal ** 2 (this is the new feature)
df['signal_2'] = df['signal'] ** 2
return df
# fillna with the mean and select features for training
def feature_selection(train, test):
features = [col for col in train.columns if col not in ['index', 'group', 'open_channels', 'time']]
train = train.replace([np.inf, -np.inf], np.nan)
test = test.replace([np.inf, -np.inf], np.nan)
for feature in features:
feature_mean = pd.concat([train[feature], test[feature]], axis = 0).mean()
train[feature] = train[feature].fillna(feature_mean)
test[feature] = test[feature].fillna(feature_mean)
return train, test, features
# model function (very important; you can try different architectures to get a better score. I believe the top of the public leaderboard uses a 1D Conv + RNN style)
def Classifier(shape_):
def cbr(x, out_layer, kernel, stride, dilation):
x = Conv1D(out_layer, kernel_size=kernel, dilation_rate=dilation, strides=stride, padding="same")(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
return x
def wave_block(x, filters, kernel_size, n):
dilation_rates = [2**i for i in range(n)]
x = Conv1D(filters = filters,
kernel_size = 1,
padding = 'same')(x)
res_x = x
for dilation_rate in dilation_rates:
tanh_out = Conv1D(filters = filters,
kernel_size = kernel_size,
padding = 'same',
activation = 'tanh',
dilation_rate = dilation_rate)(x)
sigm_out = Conv1D(filters = filters,
kernel_size = kernel_size,
padding = 'same',
activation = 'sigmoid',
dilation_rate = dilation_rate)(x)
x = Multiply()([tanh_out, sigm_out])
x = Conv1D(filters = filters,
kernel_size = 1,
padding = 'same')(x)
res_x = Add()([res_x, x])
return res_x
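# wave_block above is the WaveNet-style gated residual unit: a tanh branch is
# multiplied elementwise by a sigmoid "gate" at exponentially increasing
# dilation rates (1, 2, 4, ..., 2**(n-1)), and each gated output is projected
# by a 1x1 Conv1D and added back onto the residual path.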
inp = Input(shape = (shape_))
x = cbr(inp, 64, 7, 1, 1)
x = BatchNormalization()(x)
x = wave_block(x, 16, 3, 12)
x = BatchNormalization()(x)
x = wave_block(x, 32, 3, 8)
x = BatchNormalization()(x)
x = wave_block(x, 64, 3, 4)
x = BatchNormalization()(x)
x = wave_block(x, 128, 3, 1)
x = cbr(x, 32, 7, 1, 1)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
out = Dense(11, activation = 'softmax', name = 'out')(x)
model = models.Model(inputs = inp, outputs = out)
opt = Adam(lr = LR)
# opt = tfa.optimizers.SWA(opt)
# model.compile(loss = losses.CategoricalCrossentropy(), optimizer = opt, metrics = ['accuracy'])
model.compile(loss = categorical_crossentropy, optimizer = opt, metrics = ['accuracy'])
return model
# function that decreases the learning rate as epochs increase (I also changed this part of the code)
def lr_schedule(epoch):
if epoch < 30:
lr = LR
elif epoch < 40:
lr = LR / 3
elif epoch < 50:
lr = LR / 5
elif epoch < 60:
lr = LR / 7
elif epoch < 70:
lr = LR / 9
elif epoch < 80:
lr = LR / 11
elif epoch < 90:
lr = LR / 13
else:
lr = LR / 100
return lr
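# Quick sanity check of the step schedule above (values for LR = 0.0015):
#
# [lr_schedule(e) for e in (0, 30, 40, 50, 90)]
# # -> [0.0015, 0.0005, 0.0003, ~0.000214, 0.000015]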
# class to get the macro F1 score. This is not strictly necessary, but it is useful to check the F1 score at each epoch (be careful: with this callback in place, the early stopping callback will not work)
class MacroF1(Callback):
def __init__(self, model, inputs, targets):
self.model = model
self.inputs = inputs
self.targets = np.argmax(targets, axis = 2).reshape(-1)
def on_epoch_end(self, epoch, logs):
pred = np.argmax(self.model.predict(self.inputs), axis = 2).reshape(-1)
score = f1_score(self.targets, pred, average = 'macro')
print(f'F1 Macro Score: {score:.5f}')
# main function to perform GroupKFold cross-validation (we have 1000 vectors of 4000 rows plus the engineered feature columns); the folds are built from these groups below
def run_cv_model_by_batch(train, test, splits, batch_col, feats, sample_submission, nn_epochs, nn_batch_size):
seed_everything(SEED)
K.clear_session()
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
# gpu_options=tf.compat.v1.GPUOptions(
# visible_device_list='4', # specify GPU number
# allow_growth=True
# )
# )
# sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=config)
# tf.compat.v1.keras.backend.set_session(sess)
# tf.compat.v1 ---> tf (switched from the TensorFlow 2 API back to the TensorFlow 1 API)
config = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
# gpu_options=tf.GPUOptions(
# visible_device_list='4', # specify GPU number
# allow_growth=True
# )
)
sess = tf.Session(graph=tf.get_default_graph(), config=config)
tf.keras.backend.set_session(sess)
oof_ = np.zeros((len(train), 11)) # build the out-of-fold matrix with 11 columns; they represent our target classes (0 to 10)
preds_ = np.zeros((len(test), 11))
target = ['open_channels']
group = train['group']
kf = GroupKFold(n_splits=splits) # honor the splits argument (this was hard-coded to 5, which disagreed with the SPLITS used for averaging below)
splits = [x for x in kf.split(train, train[target], group)]
new_splits = []
for sp in splits:
new_split = []
new_split.append(np.unique(group[sp[0]]))
new_split.append(np.unique(group[sp[1]]))
new_split.append(sp[1])
new_splits.append(new_split)
# pivot the target column to give the net a multiclass classification structure (you can also leave it as one vector with a sparse categorical cross-entropy loss)
tr = pd.concat([pd.get_dummies(train.open_channels), train[['group']]], axis=1)
tr.columns = ['target_'+str(i) for i in range(11)] + ['group']
target_cols = ['target_'+str(i) for i in range(11)]
train_tr = np.array(list(tr.groupby('group').apply(lambda x: x[target_cols].values))).astype(np.float32)
train = np.array(list(train.groupby('group').apply(lambda x: x[feats].values)))
test = np.array(list(test.groupby('group').apply(lambda x: x[feats].values)))
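# After the three groupby/apply reshapes above, train and test are 3-D arrays
# of shape (n_groups, GROUP_BATCH_SIZE, n_features), and train_tr holds
# (n_groups, GROUP_BATCH_SIZE, 11) one-hot targets: the sequence format the
# WaveNet classifier consumes.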
for n_fold, (tr_idx, val_idx, val_orig_idx) in enumerate(new_splits[0:], start=0):
train_x, train_y = train[tr_idx], train_tr[tr_idx]
valid_x, valid_y = train[val_idx], train_tr[val_idx]
print(f'Our training dataset shape is {train_x.shape}')
print(f'Our validation dataset shape is {valid_x.shape}')
gc.collect()
shape_ = (None, train_x.shape[2]) # the input size is the number of features we are using (axis 2 of the 3-D array)
model = Classifier(shape_)
# using our lr_schedule function
cb_lr_schedule = LearningRateScheduler(lr_schedule)
model.fit(train_x,train_y,
epochs = nn_epochs,
callbacks = [cb_lr_schedule, MacroF1(model, valid_x, valid_y)], # adding custom evaluation metric for each epoch
batch_size = nn_batch_size,verbose = 2,
validation_data = (valid_x,valid_y))
preds_f = model.predict(valid_x)
f1_score_ = f1_score(np.argmax(valid_y, axis=2).reshape(-1), np.argmax(preds_f, axis=2).reshape(-1), average = 'macro') # need to get the class with the biggest probability
print(f'Training fold {n_fold + 1} completed. macro f1 score : {f1_score_ :1.5f}')
preds_f = preds_f.reshape(-1, preds_f.shape[-1])
oof_[val_orig_idx,:] += preds_f
te_preds = model.predict(test)
te_preds = te_preds.reshape(-1, te_preds.shape[-1])
preds_ += te_preds / SPLITS
# calculate the oof macro f1_score
f1_score_ = f1_score(np.argmax(train_tr, axis = 2).reshape(-1), np.argmax(oof_, axis = 1), average = 'macro') # axis 2 for the 3-dimensional array and axis 1 for the 2-dimensional array (extracting the best class)
print(f'Training completed. oof macro f1 score : {f1_score_:1.5f}')
save_path = f'{DIR_OUTPUT}submission_nb{NB}_cv_{f1_score_:.4f}.csv'
print(f'save path: {save_path}')
sample_submission['open_channels'] = np.argmax(preds_, axis = 1).astype(int)
sample_submission.to_csv(save_path, index=False, float_format='%.4f')
# save_path = f'{DIR_OUTPUT}oof_nb{NB}_cv_{f1_score_:.4f}.csv'
# sample_submission['open_channels'] = np.argmax(preds_, axis = 1).astype(int)
# sample_submission.to_csv(save_path, index=False, float_format='%.4f')
return oof_
%%time
# this function runs our entire program
def run_everything():
print(f'Reading Data Started...({time.ctime()})')
train, test, sample_submission = read_data()
train, test = normalize(train, test)
print(f'Reading and Normalizing Data Completed')
print(f'Creating Features({time.ctime()})')
print(f'Feature Engineering Started...')
train = run_feat_engineering(train, batch_size = GROUP_BATCH_SIZE)
test = run_feat_engineering(test, batch_size = GROUP_BATCH_SIZE)
train, test, features = feature_selection(train, test)
print(f'Feature Engineering Completed...')
print(f'Training Wavenet model with {SPLITS} folds of GroupKFold Started...({time.ctime()})')
oof_ = run_cv_model_by_batch(train, test, SPLITS, 'group', features, sample_submission, EPOCHS, NNBATCHSIZE)
print(f'Training completed...')
return oof_
oof_ = run_everything() | Reading Data Started...(Wed May 6 11:15:32 2020)
Reading and Normalizing Data Completed
Creating Features(Wed May 6 11:15:36 2020)
Feature Engineering Started...
Feature Engineering Completed...
Training Wavenet model with 6 folds of GroupKFold Started...(Wed May 6 11:15:46 2020)
Our training dataset shape is (1000, 4000, 19)
Our validation dataset shape is (250, 4000, 19)
Train on 1000 samples, validate on 250 samples
Epoch 1/180
F1 Macro Score: 0.65447
- 23s - loss: 0.6088 - acc: 0.8310 - val_loss: 0.8985 - val_acc: 0.8545
Epoch 2/180
F1 Macro Score: 0.86893
- 8s - loss: 0.1985 - acc: 0.9559 - val_loss: 0.4352 - val_acc: 0.9475
Epoch 3/180
F1 Macro Score: 0.93159
- 9s - loss: 0.1436 - acc: 0.9642 - val_loss: 0.2295 - val_acc: 0.9642
Epoch 4/180
F1 Macro Score: 0.93344
- 8s - loss: 0.1296 - acc: 0.9656 - val_loss: 0.1473 - val_acc: 0.9650
Epoch 5/180
F1 Macro Score: 0.93503
- 9s - loss: 0.1221 - acc: 0.9661 - val_loss: 0.1301 - val_acc: 0.9655
Epoch 6/180
F1 Macro Score: 0.93693
- 9s - loss: 0.1177 - acc: 0.9665 - val_loss: 0.1105 - val_acc: 0.9663
Epoch 7/180
F1 Macro Score: 0.93668
- 9s - loss: 0.1130 - acc: 0.9667 - val_loss: 0.1085 - val_acc: 0.9662
Epoch 8/180
F1 Macro Score: 0.93671
- 9s - loss: 0.1092 - acc: 0.9670 - val_loss: 0.1023 - val_acc: 0.9665
Epoch 9/180
F1 Macro Score: 0.93550
- 9s - loss: 0.1081 - acc: 0.9670 - val_loss: 0.1055 - val_acc: 0.9657
Epoch 10/180
F1 Macro Score: 0.93486
- 9s - loss: 0.1074 - acc: 0.9668 - val_loss: 0.1144 - val_acc: 0.9654
Epoch 11/180
F1 Macro Score: 0.93078
- 9s - loss: 0.1196 - acc: 0.9654 - val_loss: 0.1083 - val_acc: 0.9652
Epoch 12/180
F1 Macro Score: 0.93584
- 9s - loss: 0.1153 - acc: 0.9659 - val_loss: 0.1019 - val_acc: 0.9663
Epoch 13/180
F1 Macro Score: 0.93794
- 9s - loss: 0.1044 - acc: 0.9672 - val_loss: 0.0958 - val_acc: 0.9668
Epoch 14/180
F1 Macro Score: 0.93633
- 9s - loss: 0.1035 - acc: 0.9671 - val_loss: 0.0983 - val_acc: 0.9661
Epoch 15/180
F1 Macro Score: 0.93802
- 8s - loss: 0.1002 - acc: 0.9675 - val_loss: 0.0938 - val_acc: 0.9670
Epoch 16/180
F1 Macro Score: 0.93670
- 9s - loss: 0.1003 - acc: 0.9674 - val_loss: 0.0964 - val_acc: 0.9663
Epoch 17/180
F1 Macro Score: 0.93737
- 9s - loss: 0.0981 - acc: 0.9675 - val_loss: 0.0938 - val_acc: 0.9666
Epoch 18/180
F1 Macro Score: 0.93661
- 8s - loss: 0.0974 - acc: 0.9676 - val_loss: 0.0931 - val_acc: 0.9668
Epoch 19/180
F1 Macro Score: 0.93760
- 8s - loss: 0.0965 - acc: 0.9676 - val_loss: 0.0917 - val_acc: 0.9668
Epoch 20/180
F1 Macro Score: 0.93758
- 8s - loss: 0.0949 - acc: 0.9677 - val_loss: 0.0928 - val_acc: 0.9667
Epoch 21/180
F1 Macro Score: 0.93429
- 8s - loss: 0.0953 - acc: 0.9676 - val_loss: 0.0972 - val_acc: 0.9658
Epoch 22/180
F1 Macro Score: 0.93760
- 8s - loss: 0.0953 - acc: 0.9675 - val_loss: 0.0915 - val_acc: 0.9669
Epoch 23/180
F1 Macro Score: 0.93586
- 8s - loss: 0.0939 - acc: 0.9678 - val_loss: 0.0946 - val_acc: 0.9663
Epoch 24/180
F1 Macro Score: 0.93759
- 9s - loss: 0.0925 - acc: 0.9679 - val_loss: 0.0912 - val_acc: 0.9667
Epoch 25/180
F1 Macro Score: 0.93796
- 8s - loss: 0.0923 - acc: 0.9680 - val_loss: 0.0902 - val_acc: 0.9669
Epoch 26/180
F1 Macro Score: 0.93698
- 9s - loss: 0.0919 - acc: 0.9680 - val_loss: 0.0913 - val_acc: 0.9663
Epoch 27/180
F1 Macro Score: 0.93641
- 9s - loss: 0.0948 - acc: 0.9675 - val_loss: 0.0920 - val_acc: 0.9667
Epoch 28/180
F1 Macro Score: 0.93774
- 9s - loss: 0.0912 - acc: 0.9681 - val_loss: 0.0898 - val_acc: 0.9670
Epoch 29/180
F1 Macro Score: 0.93779
- 7s - loss: 0.0909 - acc: 0.9680 - val_loss: 0.0908 - val_acc: 0.9668
Epoch 30/180
F1 Macro Score: 0.93654
- 8s - loss: 0.0899 - acc: 0.9681 - val_loss: 0.0926 - val_acc: 0.9661
Epoch 31/180
F1 Macro Score: 0.93856
- 7s - loss: 0.0893 - acc: 0.9684 - val_loss: 0.0882 - val_acc: 0.9673
Epoch 32/180
F1 Macro Score: 0.93897
- 9s - loss: 0.0873 - acc: 0.9688 - val_loss: 0.0871 - val_acc: 0.9676
Epoch 33/180
F1 Macro Score: 0.93890
- 8s - loss: 0.0866 - acc: 0.9688 - val_loss: 0.0870 - val_acc: 0.9676
Epoch 34/180
F1 Macro Score: 0.93929
- 8s - loss: 0.0865 - acc: 0.9689 - val_loss: 0.0859 - val_acc: 0.9678
Epoch 35/180
F1 Macro Score: 0.93905
- 8s - loss: 0.0864 - acc: 0.9689 - val_loss: 0.0863 - val_acc: 0.9676
Epoch 36/180
F1 Macro Score: 0.93888
- 8s - loss: 0.0860 - acc: 0.9691 - val_loss: 0.0865 - val_acc: 0.9675
Epoch 37/180
F1 Macro Score: 0.93732
- 9s - loss: 0.0855 - acc: 0.9692 - val_loss: 0.0900 - val_acc: 0.9668
Epoch 38/180
F1 Macro Score: 0.93992
- 8s - loss: 0.0847 - acc: 0.9693 - val_loss: 0.0851 - val_acc: 0.9681
Epoch 39/180
F1 Macro Score: 0.93891
- 8s - loss: 0.0873 - acc: 0.9689 - val_loss: 0.0860 - val_acc: 0.9677
Epoch 40/180
F1 Macro Score: 0.93900
- 9s - loss: 0.0849 - acc: 0.9693 - val_loss: 0.0862 - val_acc: 0.9677
Epoch 41/180
F1 Macro Score: 0.94022
- 8s - loss: 0.0842 - acc: 0.9696 - val_loss: 0.0845 - val_acc: 0.9682
Epoch 42/180
F1 Macro Score: 0.94046
- 9s - loss: 0.0832 - acc: 0.9697 - val_loss: 0.0842 - val_acc: 0.9684
Epoch 43/180
F1 Macro Score: 0.93883
- 9s - loss: 0.0835 - acc: 0.9697 - val_loss: 0.0861 - val_acc: 0.9679
Epoch 44/180
F1 Macro Score: 0.94063
- 9s - loss: 0.0831 - acc: 0.9697 - val_loss: 0.0841 - val_acc: 0.9684
Epoch 45/180
F1 Macro Score: 0.93997
- 8s - loss: 0.0831 - acc: 0.9698 - val_loss: 0.0846 - val_acc: 0.9682
Epoch 46/180
F1 Macro Score: 0.93945
- 7s - loss: 0.0831 - acc: 0.9699 - val_loss: 0.0854 - val_acc: 0.9680
Epoch 47/180
F1 Macro Score: 0.94035
- 8s - loss: 0.0829 - acc: 0.9699 - val_loss: 0.0840 - val_acc: 0.9683
Epoch 48/180
F1 Macro Score: 0.94079
- 8s - loss: 0.0826 - acc: 0.9699 - val_loss: 0.0837 - val_acc: 0.9686
Epoch 49/180
F1 Macro Score: 0.93997
- 9s - loss: 0.0823 - acc: 0.9700 - val_loss: 0.0846 - val_acc: 0.9681
Epoch 50/180
F1 Macro Score: 0.94037
- 7s - loss: 0.0821 - acc: 0.9700 - val_loss: 0.0844 - val_acc: 0.9683
Epoch 51/180
F1 Macro Score: 0.94070
- 8s - loss: 0.0825 - acc: 0.9699 - val_loss: 0.0836 - val_acc: 0.9685
Epoch 52/180
F1 Macro Score: 0.94046
- 9s - loss: 0.0815 - acc: 0.9702 - val_loss: 0.0837 - val_acc: 0.9685
Epoch 53/180
F1 Macro Score: 0.94078
- 9s - loss: 0.0816 - acc: 0.9702 - val_loss: 0.0836 - val_acc: 0.9686
Epoch 54/180
F1 Macro Score: 0.93971
- 9s - loss: 0.0818 - acc: 0.9701 - val_loss: 0.0847 - val_acc: 0.9682
Epoch 55/180
F1 Macro Score: 0.94058
- 9s - loss: 0.0817 - acc: 0.9701 - val_loss: 0.0838 - val_acc: 0.9684
Epoch 56/180
F1 Macro Score: 0.94053
- 8s - loss: 0.0817 - acc: 0.9701 - val_loss: 0.0840 - val_acc: 0.9684
Epoch 57/180
F1 Macro Score: 0.94036
- 9s - loss: 0.0809 - acc: 0.9703 - val_loss: 0.0836 - val_acc: 0.9684
Epoch 58/180
F1 Macro Score: 0.93987
- 9s - loss: 0.0814 - acc: 0.9701 - val_loss: 0.0847 - val_acc: 0.9681
Epoch 59/180
F1 Macro Score: 0.94029
- 9s - loss: 0.0821 - acc: 0.9699 - val_loss: 0.0844 - val_acc: 0.9683
Epoch 60/180
F1 Macro Score: 0.94081
- 9s - loss: 0.0833 - acc: 0.9697 - val_loss: 0.0841 - val_acc: 0.9685
Epoch 61/180
F1 Macro Score: 0.94069
- 8s - loss: 0.0808 - acc: 0.9704 - val_loss: 0.0837 - val_acc: 0.9685
Epoch 62/180
F1 Macro Score: 0.94080
- 9s - loss: 0.0812 - acc: 0.9702 - val_loss: 0.0834 - val_acc: 0.9686
Epoch 63/180
F1 Macro Score: 0.94053
- 8s - loss: 0.0809 - acc: 0.9703 - val_loss: 0.0835 - val_acc: 0.9686
Epoch 64/180
F1 Macro Score: 0.94075
- 9s - loss: 0.0805 - acc: 0.9704 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 65/180
F1 Macro Score: 0.94064
- 8s - loss: 0.0806 - acc: 0.9704 - val_loss: 0.0837 - val_acc: 0.9685
Epoch 66/180
F1 Macro Score: 0.93868
- 9s - loss: 0.0797 - acc: 0.9705 - val_loss: 0.0847 - val_acc: 0.9681
Epoch 67/180
F1 Macro Score: 0.94083
- 8s - loss: 0.0802 - acc: 0.9704 - val_loss: 0.0835 - val_acc: 0.9686
Epoch 68/180
F1 Macro Score: 0.94019
- 9s - loss: 0.0799 - acc: 0.9705 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 69/180
F1 Macro Score: 0.94036
- 9s - loss: 0.0799 - acc: 0.9705 - val_loss: 0.0838 - val_acc: 0.9684
Epoch 70/180
F1 Macro Score: 0.94067
- 8s - loss: 0.0801 - acc: 0.9705 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 71/180
F1 Macro Score: 0.94032
- 9s - loss: 0.0800 - acc: 0.9705 - val_loss: 0.0836 - val_acc: 0.9684
Epoch 72/180
F1 Macro Score: 0.94037
- 9s - loss: 0.0798 - acc: 0.9706 - val_loss: 0.0836 - val_acc: 0.9684
Epoch 73/180
F1 Macro Score: 0.94060
- 8s - loss: 0.0798 - acc: 0.9706 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 74/180
F1 Macro Score: 0.94058
- 9s - loss: 0.0790 - acc: 0.9707 - val_loss: 0.0836 - val_acc: 0.9685
Epoch 75/180
F1 Macro Score: 0.94053
- 9s - loss: 0.0798 - acc: 0.9705 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 76/180
F1 Macro Score: 0.94075
- 9s - loss: 0.0791 - acc: 0.9707 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 77/180
F1 Macro Score: 0.94047
- 8s - loss: 0.0792 - acc: 0.9706 - val_loss: 0.0837 - val_acc: 0.9684
Epoch 78/180
F1 Macro Score: 0.94027
- 9s - loss: 0.0790 - acc: 0.9708 - val_loss: 0.0836 - val_acc: 0.9683
Epoch 79/180
F1 Macro Score: 0.94051
- 9s - loss: 0.0791 - acc: 0.9706 - val_loss: 0.0836 - val_acc: 0.9684
Epoch 80/180
F1 Macro Score: 0.94027
- 9s - loss: 0.0790 - acc: 0.9707 - val_loss: 0.0839 - val_acc: 0.9683
Epoch 81/180
F1 Macro Score: 0.94077
- 8s - loss: 0.0783 - acc: 0.9708 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 82/180
F1 Macro Score: 0.94056
- 9s - loss: 0.0792 - acc: 0.9707 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 83/180
F1 Macro Score: 0.94059
- 8s - loss: 0.0785 - acc: 0.9708 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 84/180
F1 Macro Score: 0.94060
- 9s - loss: 0.0788 - acc: 0.9708 - val_loss: 0.0837 - val_acc: 0.9684
Epoch 85/180
F1 Macro Score: 0.94033
- 9s - loss: 0.0785 - acc: 0.9708 - val_loss: 0.0839 - val_acc: 0.9683
Epoch 86/180
F1 Macro Score: 0.94054
- 8s - loss: 0.0787 - acc: 0.9708 - val_loss: 0.0836 - val_acc: 0.9684
Epoch 87/180
F1 Macro Score: 0.94048
- 8s - loss: 0.0789 - acc: 0.9707 - val_loss: 0.0839 - val_acc: 0.9682
Epoch 88/180
F1 Macro Score: 0.94024
- 9s - loss: 0.0784 - acc: 0.9709 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 89/180
F1 Macro Score: 0.94008
- 9s - loss: 0.0781 - acc: 0.9708 - val_loss: 0.0840 - val_acc: 0.9682
Epoch 90/180
F1 Macro Score: 0.94065
- 7s - loss: 0.0782 - acc: 0.9709 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 91/180
F1 Macro Score: 0.94085
- 8s - loss: 0.0779 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 92/180
F1 Macro Score: 0.94063
- 8s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 93/180
F1 Macro Score: 0.94070
- 8s - loss: 0.0775 - acc: 0.9712 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 94/180
F1 Macro Score: 0.94071
- 9s - loss: 0.0779 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 95/180
F1 Macro Score: 0.94068
- 9s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 96/180
F1 Macro Score: 0.94070
- 9s - loss: 0.0775 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9686
Epoch 97/180
F1 Macro Score: 0.94063
- 9s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 98/180
F1 Macro Score: 0.94082
- 9s - loss: 0.0778 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9686
Epoch 99/180
F1 Macro Score: 0.94078
- 8s - loss: 0.0772 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 100/180
F1 Macro Score: 0.94085
- 9s - loss: 0.0779 - acc: 0.9710 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 101/180
F1 Macro Score: 0.94062
- 8s - loss: 0.0777 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 102/180
F1 Macro Score: 0.94061
- 9s - loss: 0.0777 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 103/180
F1 Macro Score: 0.94059
- 8s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 104/180
F1 Macro Score: 0.94073
- 9s - loss: 0.0775 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 105/180
F1 Macro Score: 0.94066
- 9s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 106/180
F1 Macro Score: 0.94070
- 9s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0832 - val_acc: 0.9685
Epoch 107/180
F1 Macro Score: 0.94068
- 9s - loss: 0.0774 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 108/180
F1 Macro Score: 0.94058
- 9s - loss: 0.0774 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 109/180
F1 Macro Score: 0.94079
- 9s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 110/180
F1 Macro Score: 0.94069
- 9s - loss: 0.0775 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 111/180
F1 Macro Score: 0.94061
- 9s - loss: 0.0775 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 112/180
F1 Macro Score: 0.94073
- 9s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 113/180
F1 Macro Score: 0.94052
- 8s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 114/180
F1 Macro Score: 0.94066
- 9s - loss: 0.0774 - acc: 0.9711 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 115/180
F1 Macro Score: 0.94057
- 7s - loss: 0.0776 - acc: 0.9710 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 116/180
F1 Macro Score: 0.94074
- 7s - loss: 0.0773 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 117/180
F1 Macro Score: 0.94060
- 7s - loss: 0.0775 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 118/180
F1 Macro Score: 0.94070
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 119/180
F1 Macro Score: 0.94060
- 7s - loss: 0.0776 - acc: 0.9711 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 120/180
F1 Macro Score: 0.94052
- 7s - loss: 0.0769 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 121/180
F1 Macro Score: 0.94071
- 7s - loss: 0.0775 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 122/180
F1 Macro Score: 0.94047
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 123/180
F1 Macro Score: 0.94076
- 7s - loss: 0.0777 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 124/180
F1 Macro Score: 0.94069
- 7s - loss: 0.0774 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 125/180
F1 Macro Score: 0.94077
- 7s - loss: 0.0774 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 126/180
F1 Macro Score: 0.94069
- 7s - loss: 0.0775 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 127/180
F1 Macro Score: 0.94083
- 7s - loss: 0.0774 - acc: 0.9711 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 128/180
F1 Macro Score: 0.94051
- 7s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 129/180
F1 Macro Score: 0.94071
- 7s - loss: 0.0774 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 130/180
F1 Macro Score: 0.94071
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0833 - val_acc: 0.9684
Epoch 131/180
F1 Macro Score: 0.94064
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 132/180
F1 Macro Score: 0.94044
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 133/180
F1 Macro Score: 0.94075
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 134/180
F1 Macro Score: 0.94061
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 135/180
F1 Macro Score: 0.94037
- 7s - loss: 0.0775 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 136/180
F1 Macro Score: 0.94069
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 137/180
F1 Macro Score: 0.94017
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9683
Epoch 138/180
F1 Macro Score: 0.94080
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 139/180
F1 Macro Score: 0.94065
- 7s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 140/180
F1 Macro Score: 0.94053
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 141/180
F1 Macro Score: 0.94074
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 142/180
F1 Macro Score: 0.94064
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0833 - val_acc: 0.9685
Epoch 143/180
F1 Macro Score: 0.94073
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 144/180
F1 Macro Score: 0.94055
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 145/180
F1 Macro Score: 0.94076
- 7s - loss: 0.0773 - acc: 0.9711 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 146/180
F1 Macro Score: 0.94054
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 147/180
F1 Macro Score: 0.94072
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 148/180
F1 Macro Score: 0.94065
- 7s - loss: 0.0771 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 149/180
F1 Macro Score: 0.94073
- 7s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 150/180
F1 Macro Score: 0.94070
- 7s - loss: 0.0769 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 151/180
F1 Macro Score: 0.94068
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 152/180
F1 Macro Score: 0.94055
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 153/180
F1 Macro Score: 0.94060
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 154/180
F1 Macro Score: 0.94058
- 8s - loss: 0.0768 - acc: 0.9713 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 155/180
F1 Macro Score: 0.94071
- 8s - loss: 0.0773 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 156/180
F1 Macro Score: 0.94062
- 7s - loss: 0.0770 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 157/180
F1 Macro Score: 0.94070
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0834 - val_acc: 0.9685
Epoch 158/180
F1 Macro Score: 0.94057
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 159/180
F1 Macro Score: 0.94049
- 7s - loss: 0.0766 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 160/180
F1 Macro Score: 0.94061
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 161/180
F1 Macro Score: 0.94060
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 162/180
F1 Macro Score: 0.94046
- 7s - loss: 0.0772 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 163/180
F1 Macro Score: 0.94053
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 164/180
F1 Macro Score: 0.94036
- 7s - loss: 0.0768 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 165/180
F1 Macro Score: 0.94063
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 166/180
F1 Macro Score: 0.94062
- 7s - loss: 0.0766 - acc: 0.9714 - val_loss: 0.0834 - val_acc: 0.9684
Epoch 167/180
F1 Macro Score: 0.94027
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9683
Epoch 168/180
F1 Macro Score: 0.94071
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 169/180
F1 Macro Score: 0.94061
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 170/180
F1 Macro Score: 0.94066
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 171/180
F1 Macro Score: 0.94061
- 7s - loss: 0.0775 - acc: 0.9712 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 172/180
F1 Macro Score: 0.94064
- 7s - loss: 0.0766 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 173/180
F1 Macro Score: 0.94058
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 174/180
F1 Macro Score: 0.94054
- 7s - loss: 0.0768 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 175/180
F1 Macro Score: 0.94059
- 7s - loss: 0.0767 - acc: 0.9715 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 176/180
F1 Macro Score: 0.94069
- 7s - loss: 0.0766 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9685
Epoch 177/180
F1 Macro Score: 0.94052
- 7s - loss: 0.0767 - acc: 0.9712 - val_loss: 0.0837 - val_acc: 0.9684
Epoch 178/180
F1 Macro Score: 0.94053
- 7s - loss: 0.0767 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 179/180
F1 Macro Score: 0.94058
- 7s - loss: 0.0765 - acc: 0.9714 - val_loss: 0.0835 - val_acc: 0.9684
Epoch 180/180
F1 Macro Score: 0.94059
- 7s - loss: 0.0769 - acc: 0.9713 - val_loss: 0.0835 - val_acc: 0.9684
Training fold 1 completed. macro f1 score : 0.94059
Our training dataset shape is (1000, 4000, 19)
Our validation dataset shape is (250, 4000, 19)
Train on 1000 samples, validate on 250 samples
Epoch 1/180
F1 Macro Score: 0.77329
- 18s - loss: 0.5534 - acc: 0.8433 - val_loss: 0.7989 - val_acc: 0.9029
Epoch 2/180
F1 Macro Score: 0.92960
- 7s - loss: 0.1838 - acc: 0.9570 - val_loss: 0.3918 - val_acc: 0.9656
Epoch 3/180
F1 Macro Score: 0.93509
- 7s - loss: 0.1376 - acc: 0.9644 - val_loss: 0.1898 - val_acc: 0.9676
Epoch 4/180
F1 Macro Score: 0.93554
- 7s - loss: 0.1277 - acc: 0.9651 - val_loss: 0.1339 - val_acc: 0.9678
Epoch 5/180
F1 Macro Score: 0.93814
- 7s - loss: 0.1222 - acc: 0.9656 - val_loss: 0.1057 - val_acc: 0.9690
Epoch 6/180
F1 Macro Score: 0.93775
- 7s - loss: 0.1159 - acc: 0.9660 - val_loss: 0.0999 - val_acc: 0.9690
Epoch 7/180
F1 Macro Score: 0.93842
- 7s - loss: 0.1120 - acc: 0.9662 - val_loss: 0.0940 - val_acc: 0.9693
Epoch 8/180
F1 Macro Score: 0.93867
- 7s - loss: 0.1109 - acc: 0.9662 - val_loss: 0.0919 - val_acc: 0.9693
Epoch 9/180
F1 Macro Score: 0.93819
- 7s - loss: 0.1076 - acc: 0.9664 - val_loss: 0.0924 - val_acc: 0.9691
Epoch 10/180
F1 Macro Score: 0.93710
- 7s - loss: 0.1076 - acc: 0.9663 - val_loss: 0.0942 - val_acc: 0.9688
Epoch 11/180
F1 Macro Score: 0.93789
- 7s - loss: 0.1061 - acc: 0.9663 - val_loss: 0.0914 - val_acc: 0.9690
Epoch 12/180
F1 Macro Score: 0.93780
- 8s - loss: 0.1042 - acc: 0.9664 - val_loss: 0.0916 - val_acc: 0.9689
Epoch 13/180
F1 Macro Score: 0.93929
- 8s - loss: 0.1018 - acc: 0.9666 - val_loss: 0.0876 - val_acc: 0.9696
Epoch 14/180
F1 Macro Score: 0.93938
- 8s - loss: 0.1015 - acc: 0.9666 - val_loss: 0.0865 - val_acc: 0.9696
Epoch 15/180
F1 Macro Score: 0.93749
- 7s - loss: 0.1001 - acc: 0.9666 - val_loss: 0.0906 - val_acc: 0.9687
Epoch 16/180
F1 Macro Score: 0.93876
- 8s - loss: 0.1003 - acc: 0.9666 - val_loss: 0.0869 - val_acc: 0.9694
Epoch 17/180
F1 Macro Score: 0.93847
- 7s - loss: 0.0998 - acc: 0.9666 - val_loss: 0.0873 - val_acc: 0.9694
Epoch 18/180
F1 Macro Score: 0.93730
- 8s - loss: 0.0978 - acc: 0.9668 - val_loss: 0.0892 - val_acc: 0.9687
Epoch 19/180
F1 Macro Score: 0.93846
- 8s - loss: 0.0981 - acc: 0.9667 - val_loss: 0.0852 - val_acc: 0.9694
Epoch 20/180
F1 Macro Score: 0.93787
- 8s - loss: 0.0975 - acc: 0.9666 - val_loss: 0.0870 - val_acc: 0.9691
Epoch 21/180
F1 Macro Score: 0.93768
- 7s - loss: 0.0974 - acc: 0.9667 - val_loss: 0.0907 - val_acc: 0.9687
Epoch 22/180
F1 Macro Score: 0.93835
- 7s - loss: 0.1019 - acc: 0.9663 - val_loss: 0.0862 - val_acc: 0.9692
Epoch 23/180
F1 Macro Score: 0.93906
- 7s - loss: 0.0957 - acc: 0.9669 - val_loss: 0.0834 - val_acc: 0.9696
Epoch 24/180
F1 Macro Score: 0.93872
- 7s - loss: 0.0947 - acc: 0.9669 - val_loss: 0.0846 - val_acc: 0.9694
Epoch 25/180
F1 Macro Score: 0.93923
- 7s - loss: 0.0945 - acc: 0.9670 - val_loss: 0.0828 - val_acc: 0.9697
Epoch 26/180
F1 Macro Score: 0.93838
- 7s - loss: 0.0935 - acc: 0.9670 - val_loss: 0.0841 - val_acc: 0.9693
Epoch 27/180
F1 Macro Score: 0.93939
- 7s - loss: 0.0930 - acc: 0.9670 - val_loss: 0.0833 - val_acc: 0.9696
Epoch 28/180
F1 Macro Score: 0.93659
- 7s - loss: 0.0931 - acc: 0.9670 - val_loss: 0.0964 - val_acc: 0.9669
Epoch 29/180
F1 Macro Score: 0.93881
- 7s - loss: 0.0940 - acc: 0.9671 - val_loss: 0.0835 - val_acc: 0.9694
Epoch 30/180
F1 Macro Score: 0.93559
- 7s - loss: 0.0949 - acc: 0.9668 - val_loss: 0.0986 - val_acc: 0.9676
Epoch 31/180
F1 Macro Score: 0.93852
- 7s - loss: 0.0962 - acc: 0.9669 - val_loss: 0.0836 - val_acc: 0.9695
Epoch 32/180
F1 Macro Score: 0.93976
- 7s - loss: 0.0909 - acc: 0.9675 - val_loss: 0.0811 - val_acc: 0.9699
Epoch 33/180
F1 Macro Score: 0.93971
- 7s - loss: 0.0902 - acc: 0.9675 - val_loss: 0.0811 - val_acc: 0.9699
Epoch 34/180
F1 Macro Score: 0.93952
- 7s - loss: 0.0897 - acc: 0.9676 - val_loss: 0.0808 - val_acc: 0.9698
Epoch 35/180
F1 Macro Score: 0.93984
- 7s - loss: 0.0890 - acc: 0.9676 - val_loss: 0.0806 - val_acc: 0.9700
Epoch 36/180
F1 Macro Score: 0.93987
- 7s - loss: 0.0887 - acc: 0.9677 - val_loss: 0.0803 - val_acc: 0.9700
Epoch 37/180
F1 Macro Score: 0.93946
- 7s - loss: 0.0888 - acc: 0.9678 - val_loss: 0.0816 - val_acc: 0.9698
Epoch 38/180
F1 Macro Score: 0.93982
- 7s - loss: 0.0887 - acc: 0.9678 - val_loss: 0.0802 - val_acc: 0.9701
Epoch 39/180
F1 Macro Score: 0.93976
- 7s - loss: 0.0874 - acc: 0.9681 - val_loss: 0.0805 - val_acc: 0.9701
Epoch 40/180
F1 Macro Score: 0.94001
- 7s - loss: 0.0878 - acc: 0.9680 - val_loss: 0.0798 - val_acc: 0.9702
Epoch 41/180
F1 Macro Score: 0.94035
- 7s - loss: 0.0873 - acc: 0.9681 - val_loss: 0.0791 - val_acc: 0.9703
Epoch 42/180
F1 Macro Score: 0.94025
- 9s - loss: 0.0865 - acc: 0.9683 - val_loss: 0.0791 - val_acc: 0.9703
Epoch 43/180
F1 Macro Score: 0.94051
- 9s - loss: 0.0864 - acc: 0.9683 - val_loss: 0.0787 - val_acc: 0.9705
Epoch 44/180
F1 Macro Score: 0.94028
- 7s - loss: 0.0862 - acc: 0.9683 - val_loss: 0.0791 - val_acc: 0.9704
Epoch 45/180
F1 Macro Score: 0.94030
- 7s - loss: 0.0863 - acc: 0.9684 - val_loss: 0.0793 - val_acc: 0.9703
Epoch 46/180
F1 Macro Score: 0.94069
- 7s - loss: 0.0861 - acc: 0.9684 - val_loss: 0.0787 - val_acc: 0.9705
Epoch 47/180
F1 Macro Score: 0.94083
- 7s - loss: 0.0856 - acc: 0.9686 - val_loss: 0.0786 - val_acc: 0.9706
Epoch 48/180
F1 Macro Score: 0.94083
- 7s - loss: 0.0850 - acc: 0.9688 - val_loss: 0.0782 - val_acc: 0.9706
Epoch 49/180
F1 Macro Score: 0.94124
- 7s - loss: 0.0847 - acc: 0.9689 - val_loss: 0.0778 - val_acc: 0.9708
Epoch 50/180
F1 Macro Score: 0.94096
- 7s - loss: 0.0850 - acc: 0.9688 - val_loss: 0.0784 - val_acc: 0.9706
Epoch 51/180
F1 Macro Score: 0.94140
- 7s - loss: 0.0845 - acc: 0.9689 - val_loss: 0.0780 - val_acc: 0.9708
Epoch 52/180
F1 Macro Score: 0.94145
- 7s - loss: 0.0837 - acc: 0.9691 - val_loss: 0.0774 - val_acc: 0.9709
Epoch 53/180
F1 Macro Score: 0.94088
- 7s - loss: 0.0833 - acc: 0.9692 - val_loss: 0.0779 - val_acc: 0.9707
Epoch 54/180
F1 Macro Score: 0.94158
- 7s - loss: 0.0840 - acc: 0.9691 - val_loss: 0.0774 - val_acc: 0.9709
Epoch 55/180
F1 Macro Score: 0.94140
- 7s - loss: 0.0838 - acc: 0.9691 - val_loss: 0.0778 - val_acc: 0.9709
Epoch 56/180
F1 Macro Score: 0.94131
- 7s - loss: 0.0840 - acc: 0.9691 - val_loss: 0.0778 - val_acc: 0.9708
Epoch 57/180
F1 Macro Score: 0.94079
- 7s - loss: 0.0833 - acc: 0.9692 - val_loss: 0.0783 - val_acc: 0.9706
Epoch 58/180
F1 Macro Score: 0.94166
- 7s - loss: 0.0831 - acc: 0.9693 - val_loss: 0.0773 - val_acc: 0.9709
Epoch 59/180
F1 Macro Score: 0.94174
- 7s - loss: 0.0833 - acc: 0.9693 - val_loss: 0.0775 - val_acc: 0.9709
Epoch 60/180
F1 Macro Score: 0.94141
- 7s - loss: 0.0836 - acc: 0.9691 - val_loss: 0.0778 - val_acc: 0.9708
Epoch 61/180
F1 Macro Score: 0.94170
- 7s - loss: 0.0825 - acc: 0.9694 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 62/180
F1 Macro Score: 0.94180
- 8s - loss: 0.0820 - acc: 0.9695 - val_loss: 0.0770 - val_acc: 0.9710
Epoch 63/180
F1 Macro Score: 0.94169
- 7s - loss: 0.0822 - acc: 0.9695 - val_loss: 0.0772 - val_acc: 0.9711
Epoch 64/180
F1 Macro Score: 0.94187
- 7s - loss: 0.0825 - acc: 0.9693 - val_loss: 0.0769 - val_acc: 0.9711
Epoch 65/180
F1 Macro Score: 0.94166
- 7s - loss: 0.0823 - acc: 0.9696 - val_loss: 0.0770 - val_acc: 0.9710
Epoch 66/180
F1 Macro Score: 0.94191
- 7s - loss: 0.0823 - acc: 0.9695 - val_loss: 0.0772 - val_acc: 0.9710
Epoch 67/180
F1 Macro Score: 0.94134
- 7s - loss: 0.0820 - acc: 0.9696 - val_loss: 0.0774 - val_acc: 0.9708
Epoch 68/180
F1 Macro Score: 0.94161
- 7s - loss: 0.0825 - acc: 0.9694 - val_loss: 0.0774 - val_acc: 0.9708
Epoch 69/180
F1 Macro Score: 0.94170
- 7s - loss: 0.0819 - acc: 0.9696 - val_loss: 0.0772 - val_acc: 0.9710
Epoch 70/180
F1 Macro Score: 0.94167
- 7s - loss: 0.0819 - acc: 0.9697 - val_loss: 0.0772 - val_acc: 0.9709
Epoch 71/180
F1 Macro Score: 0.94097
- 7s - loss: 0.0816 - acc: 0.9697 - val_loss: 0.0779 - val_acc: 0.9707
Epoch 72/180
F1 Macro Score: 0.94158
- 7s - loss: 0.0816 - acc: 0.9697 - val_loss: 0.0771 - val_acc: 0.9709
Epoch 73/180
F1 Macro Score: 0.94166
- 7s - loss: 0.0811 - acc: 0.9698 - val_loss: 0.0770 - val_acc: 0.9710
Epoch 74/180
F1 Macro Score: 0.94166
- 7s - loss: 0.0815 - acc: 0.9697 - val_loss: 0.0774 - val_acc: 0.9709
Epoch 75/180
F1 Macro Score: 0.94164
- 7s - loss: 0.0813 - acc: 0.9697 - val_loss: 0.0771 - val_acc: 0.9710
Epoch 76/180
F1 Macro Score: 0.94179
- 7s - loss: 0.0809 - acc: 0.9698 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 77/180
F1 Macro Score: 0.94181
- 7s - loss: 0.0811 - acc: 0.9698 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 78/180
F1 Macro Score: 0.94150
- 7s - loss: 0.0811 - acc: 0.9698 - val_loss: 0.0769 - val_acc: 0.9709
Epoch 79/180
F1 Macro Score: 0.94150
- 7s - loss: 0.0806 - acc: 0.9698 - val_loss: 0.0772 - val_acc: 0.9709
Epoch 80/180
F1 Macro Score: 0.94142
- 7s - loss: 0.0812 - acc: 0.9698 - val_loss: 0.0772 - val_acc: 0.9709
Epoch 81/180
F1 Macro Score: 0.94187
- 7s - loss: 0.0808 - acc: 0.9699 - val_loss: 0.0769 - val_acc: 0.9711
Epoch 82/180
F1 Macro Score: 0.94169
- 7s - loss: 0.0803 - acc: 0.9700 - val_loss: 0.0770 - val_acc: 0.9709
Epoch 83/180
F1 Macro Score: 0.94187
- 7s - loss: 0.0801 - acc: 0.9701 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 84/180
F1 Macro Score: 0.94138
- 7s - loss: 0.0808 - acc: 0.9699 - val_loss: 0.0774 - val_acc: 0.9709
Epoch 85/180
F1 Macro Score: 0.94171
- 7s - loss: 0.0802 - acc: 0.9700 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 86/180
F1 Macro Score: 0.94175
- 7s - loss: 0.0809 - acc: 0.9698 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 87/180
F1 Macro Score: 0.94119
- 7s - loss: 0.0803 - acc: 0.9701 - val_loss: 0.0772 - val_acc: 0.9708
Epoch 88/180
F1 Macro Score: 0.94152
- 7s - loss: 0.0800 - acc: 0.9700 - val_loss: 0.0770 - val_acc: 0.9709
Epoch 89/180
F1 Macro Score: 0.94163
- 7s - loss: 0.0802 - acc: 0.9700 - val_loss: 0.0769 - val_acc: 0.9709
Epoch 90/180
F1 Macro Score: 0.94167
- 7s - loss: 0.0800 - acc: 0.9701 - val_loss: 0.0770 - val_acc: 0.9710
Epoch 91/180
F1 Macro Score: 0.94175
- 7s - loss: 0.0795 - acc: 0.9702 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 92/180
F1 Macro Score: 0.94182
- 7s - loss: 0.0794 - acc: 0.9702 - val_loss: 0.0767 - val_acc: 0.9710
Epoch 93/180
F1 Macro Score: 0.94178
- 7s - loss: 0.0794 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 94/180
F1 Macro Score: 0.94180
- 7s - loss: 0.0796 - acc: 0.9702 - val_loss: 0.0767 - val_acc: 0.9710
Epoch 95/180
F1 Macro Score: 0.94185
- 7s - loss: 0.0793 - acc: 0.9703 - val_loss: 0.0767 - val_acc: 0.9711
Epoch 96/180
F1 Macro Score: 0.94177
- 7s - loss: 0.0791 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 97/180
F1 Macro Score: 0.94181
- 7s - loss: 0.0790 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 98/180
F1 Macro Score: 0.94183
- 7s - loss: 0.0789 - acc: 0.9703 - val_loss: 0.0767 - val_acc: 0.9710
Epoch 99/180
F1 Macro Score: 0.94180
- 7s - loss: 0.0792 - acc: 0.9702 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 100/180
F1 Macro Score: 0.94181
- 7s - loss: 0.0791 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 101/180
F1 Macro Score: 0.94171
- 7s - loss: 0.0793 - acc: 0.9702 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 102/180
F1 Macro Score: 0.94176
- 7s - loss: 0.0792 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 103/180
F1 Macro Score: 0.94172
- 7s - loss: 0.0795 - acc: 0.9702 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 104/180
F1 Macro Score: 0.94171
- 8s - loss: 0.0790 - acc: 0.9704 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 105/180
F1 Macro Score: 0.94180
- 8s - loss: 0.0790 - acc: 0.9704 - val_loss: 0.0767 - val_acc: 0.9710
Epoch 106/180
F1 Macro Score: 0.94171
- 8s - loss: 0.0790 - acc: 0.9703 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 107/180
F1 Macro Score: 0.94171
- 7s - loss: 0.0790 - acc: 0.9704 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 108/180
F1 Macro Score: 0.94176
- 8s - loss: 0.0793 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 109/180
F1 Macro Score: 0.94189
- 8s - loss: 0.0789 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 110/180
F1 Macro Score: 0.94170
- 7s - loss: 0.0791 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 111/180
F1 Macro Score: 0.94173
- 7s - loss: 0.0787 - acc: 0.9704 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 112/180
F1 Macro Score: 0.94161
- 7s - loss: 0.0793 - acc: 0.9703 - val_loss: 0.0769 - val_acc: 0.9709
Epoch 113/180
F1 Macro Score: 0.94168
- 7s - loss: 0.0795 - acc: 0.9702 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 114/180
F1 Macro Score: 0.94183
- 7s - loss: 0.0790 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
Epoch 115/180
F1 Macro Score: 0.94168
- 7s - loss: 0.0794 - acc: 0.9703 - val_loss: 0.0769 - val_acc: 0.9709
Epoch 116/180
F1 Macro Score: 0.94168
- 7s - loss: 0.0791 - acc: 0.9703 - val_loss: 0.0769 - val_acc: 0.9710
Epoch 117/180
F1 Macro Score: 0.94169
- 7s - loss: 0.0793 - acc: 0.9703 - val_loss: 0.0768 - val_acc: 0.9710
[Epochs 118-179 omitted: training had plateaued, with loss 0.0781-0.0793, val_loss 0.0768-0.0771, val_acc 0.9708-0.9710, and F1 Macro Score oscillating between 0.94146 and 0.94179]
Epoch 180/180
F1 Macro Score: 0.94169
- 7s - loss: 0.0781 - acc: 0.9706 - val_loss: 0.0770 - val_acc: 0.9709
Training fold 2 completed. macro f1 score : 0.94169
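The `F1 Macro Score: ...` line printed alongside each epoch summary is not part of Keras's built-in progress output, so it is presumably produced by a custom callback. The training code itself is not shown in this excerpt; the snippet below is only a minimal sketch of such a callback, assuming a compiled Keras classification model and a held-out validation set. The names `MacroF1Callback`, `X_val`, and `y_val` are hypothetical.

```python
import numpy as np
from sklearn.metrics import f1_score
from tensorflow.keras.callbacks import Callback


class MacroF1Callback(Callback):
    """Hypothetical sketch: print macro-averaged F1 on the validation
    set at the end of every epoch, matching the log format above."""

    def __init__(self, X_val, y_val):
        super().__init__()
        self.X_val = X_val
        self.y_val = y_val  # integer class labels, shape (n_samples,)

    def on_epoch_end(self, epoch, logs=None):
        # Predict class probabilities and take the arg-max as the label.
        y_prob = self.model.predict(self.X_val, verbose=0)
        y_pred = np.argmax(y_prob, axis=-1)
        score = f1_score(self.y_val, y_pred, average='macro')
        print(f'F1 Macro Score: {score:.5f}')
```

Passing this to `model.fit(..., callbacks=[MacroF1Callback(X_val, y_val)], verbose=2)` would also account for the one-line `- 7s - loss: ... - val_acc: ...` epoch summaries in the log.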
Our training dataset shape is (1000, 4000, 19)
Our validation dataset shape is (250, 4000, 19)
Train on 1000 samples, validate on 250 samples
Epoch 1/180
F1 Macro Score: 0.68362
- 19s - loss: 0.4988 - acc: 0.8627 - val_loss: 1.0696 - val_acc: 0.7235
[Epochs 2-179 omitted: F1 Macro Score passed 0.93 by epoch 3, collapsed briefly to 0.42415 at epoch 21 (val_loss 2.4618) before recovering by epoch 23, then plateaued at 0.9375-0.9379 with loss ~0.090, val_loss ~0.084, val_acc 0.9690]
Epoch 180/180
F1 Macro Score: 0.93774
- 8s - loss: 0.0901 - acc: 0.9677 - val_loss: 0.0837 - val_acc: 0.9690
Training fold 3 completed. macro f1 score : 0.93774
Our training dataset shape is (1000, 4000, 19)
Our validation dataset shape is (250, 4000, 19)
Train on 1000 samples, validate on 250 samples
Epoch 1/180
F1 Macro Score: 0.73141
- 22s - loss: 0.5286 - acc: 0.8581 - val_loss: 0.7961 - val_acc: 0.8898
[Epochs 2-179 omitted: F1 Macro Score improved steadily, reaching ~0.940 by epoch 44 and holding at 0.9399-0.9405 thereafter, with loss ~0.078, val_loss ~0.081, val_acc 0.9692]
Epoch 180/180
F1 Macro Score: 0.94027
- 7s - loss: 0.0777 - acc: 0.9707 - val_loss: 0.0809 - val_acc: 0.9692
Training fold 4 completed. macro f1 score : 0.94027
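The repeated `Our training dataset shape is (1000, 4000, 19)` / `Training fold N completed` lines indicate a 5-fold cross-validation loop over 1,250 sequences of 4,000 time steps with 19 features each (1,000 train / 250 validation per fold); the `Train on 1000 samples, validate on 250 samples` line is Keras's own `fit()` banner. The driver loop is not shown in this excerpt; below is a minimal sketch under those assumptions, where `X`, `y`, `build_model()`, the batch size, and the random seed are all hypothetical, and `MacroF1Callback` is the sketch from above.

```python
import numpy as np
from sklearn.metrics import f1_score
from sklearn.model_selection import StratifiedKFold

# Assumed shapes: X is (1250, 4000, 19) sequences, y is (1250,) labels.
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

for fold, (trn_idx, val_idx) in enumerate(skf.split(X, y), start=1):
    X_trn, y_trn = X[trn_idx], y[trn_idx]
    X_val, y_val = X[val_idx], y[val_idx]
    print(f'Our training dataset shape is {X_trn.shape}')
    print(f'Our validation dataset shape is {X_val.shape}')

    model = build_model()  # hypothetical factory returning a compiled Keras model
    model.fit(X_trn, y_trn,
              validation_data=(X_val, y_val),
              epochs=180, batch_size=128, verbose=2,
              callbacks=[MacroF1Callback(X_val, y_val)])

    # Final per-fold score, matching the 'Training fold N completed' lines.
    y_pred = np.argmax(model.predict(X_val, verbose=0), axis=-1)
    print(f'Training fold {fold} completed. macro f1 score : '
          f'{f1_score(y_val, y_pred, average="macro"):.5f}')
```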
Our training dataset shape is (1000, 4000, 19)
Our validation dataset shape is (250, 4000, 19)
Train on 1000 samples, validate on 250 samples
Epoch 1/180
F1 Macro Score: 0.72154
- 24s - loss: 0.5772 - acc: 0.8375 - val_loss: 0.9280 - val_acc: 0.8896
[Epochs 2-161 omitted: F1 Macro Score passed 0.937 by epoch 7, dipped to 0.85357 at epoch 19 (val_loss 0.2296) before recovering within two epochs, then climbed back and settled at 0.9408-0.9411 from roughly epoch 90, with loss ~0.080, val_loss ~0.0795, val_acc 0.9697]
Epoch 162/180
F1 Macro Score: 0.94100
- 8s - loss: 0.0805 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 163/180
F1 Macro Score: 0.94101
- 8s - loss: 0.0800 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 164/180
F1 Macro Score: 0.94100
- 9s - loss: 0.0802 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 165/180
F1 Macro Score: 0.94097
- 9s - loss: 0.0800 - acc: 0.9702 - val_loss: 0.0796 - val_acc: 0.9697
Epoch 166/180
F1 Macro Score: 0.94102
- 8s - loss: 0.0803 - acc: 0.9700 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 167/180
F1 Macro Score: 0.94098
- 8s - loss: 0.0802 - acc: 0.9701 - val_loss: 0.0796 - val_acc: 0.9697
Epoch 168/180
F1 Macro Score: 0.94100
- 8s - loss: 0.0798 - acc: 0.9702 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 169/180
F1 Macro Score: 0.94099
- 8s - loss: 0.0801 - acc: 0.9702 - val_loss: 0.0794 - val_acc: 0.9697
Epoch 170/180
F1 Macro Score: 0.94103
- 8s - loss: 0.0800 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 171/180
F1 Macro Score: 0.94086
- 8s - loss: 0.0813 - acc: 0.9699 - val_loss: 0.0796 - val_acc: 0.9696
Epoch 172/180
F1 Macro Score: 0.94096
- 8s - loss: 0.0803 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9696
Epoch 173/180
F1 Macro Score: 0.94101
- 8s - loss: 0.0810 - acc: 0.9700 - val_loss: 0.0796 - val_acc: 0.9697
Epoch 174/180
F1 Macro Score: 0.94096
- 8s - loss: 0.0803 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 175/180
F1 Macro Score: 0.94097
- 8s - loss: 0.0799 - acc: 0.9702 - val_loss: 0.0796 - val_acc: 0.9697
Epoch 176/180
F1 Macro Score: 0.94099
- 8s - loss: 0.0802 - acc: 0.9702 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 177/180
F1 Macro Score: 0.94098
- 8s - loss: 0.0801 - acc: 0.9702 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 178/180
F1 Macro Score: 0.94092
- 8s - loss: 0.0801 - acc: 0.9701 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 179/180
F1 Macro Score: 0.94101
- 8s - loss: 0.0801 - acc: 0.9702 - val_loss: 0.0795 - val_acc: 0.9697
Epoch 180/180
F1 Macro Score: 0.94095
- 8s - loss: 0.0799 - acc: 0.9701 - val_loss: 0.0794 - val_acc: 0.9697
Training fold 5 completed. macro f1 score : 0.94095
Training completed. oof macro f1 score : 0.94030
save path: ./../data/output/submission_nb035_cv_0.9403.csv
Training completed...
CPU times: user 1h 41min 24s, sys: 11min 25s, total: 1h 52min 49s
Wall time: 1h 57min 57s
| MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
analysis | df_tr = pd.read_csv(PATH_TRAIN)
batch_list = []
for n in range(10):
batchs = np.ones(500000)*n
batch_list.append(batchs.astype(int))
batch_list = np.hstack(batch_list)
df_tr['batch'] = batch_list
# Create the group features
group = group_feat_train(df_tr)
df_tr = pd.concat([df_tr, group], axis=1)
y = df_tr['open_channels'].values
oof = np.argmax(oof_, axis=1).astype(int)
for group in sorted(df_tr['group'].unique()):
idxs = df_tr['group'] == group
oof_grp = oof[idxs].astype(int)
y_grp = y[idxs]
print(f'group_score({group}): {f1_macro(y_grp, oof_grp):4f}') | group_score(0): 0.332464
group_score(1): 0.779841
group_score(2): 0.973168
group_score(3): 0.997029
group_score(4): 0.847571
| MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
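The cell above calls two helpers, `group_feat_train` and `f1_macro`, that are defined elsewhere in this repository and not shown in this excerpt. As a sketch of what `f1_macro` presumably does (an assumption, not the repo's actual definition), macro-averaged F1 weights every `open_channels` class equally:

```python
# Minimal stand-in for the f1_macro helper used above (assumed; the real
# helper lives elsewhere in the repo). Macro-F1 averages per-class F1 scores.
from sklearn.metrics import f1_score

def f1_macro(y_true, y_pred):
    # average='macro': compute F1 for each class, then take the unweighted mean
    return f1_score(y_true, y_pred, average='macro')
```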
ๅฏ่ฆๅ | x_idx = np.arange(len(df_tr))
idxs = y != oof
failed = np.zeros(len(df_tr))
failed[idxs] = 1
n = 200
b = np.ones(n)/n
failed_move = np.convolve(failed, b, mode='same')
fig, axs = plt.subplots(2, 1, figsize=(20, 6))
axs = axs.ravel()
# fig = plt.figure(figsize=(20, 3))
for i_gr, group in enumerate(sorted(df_tr['group'].unique())):
idxs = df_tr['group'] == group
axs[0].plot(np.arange(len(df_tr))[idxs], df_tr['signal'].values[idxs], color=cp[i_gr], label=f'group={group}')
for x in range(10):
axs[0].axvline(x*500000 + 500000, color='gray')
axs[0].text(x*500000 + 250000, 0.6, x)
axs[0].plot(x_idx, failed_move, '.', color='black', label='failed_mv')
axs[0].set_xlim(0, 5500000)
axs[0].legend()
axs[1].plot(x_idx, y)
axs[1].set_xlim(0, 5500000)
# fig.legend()
| _____no_output_____ | MIT | nb/035_submission.ipynb | fkubota/kaggle-University-of-Liverpool-Ion-Switching |
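The `np.convolve(failed, b, mode='same')` step above turns the 0/1 misclassification indicator into a rolling error rate, which is what gets plotted as `failed_mv`. A tiny self-contained demo of that trick:

```python
# Convolving a 0/1 error indicator with a uniform kernel of length n gives
# the local error rate: each output value is the fraction of errors in a
# window of width n centred on that position (mode='same' keeps the length).
import numpy as np

failed = np.array([0, 0, 1, 1, 0, 1, 0, 0], dtype=float)
n = 4
kernel = np.ones(n) / n
print(np.convolve(failed, kernel, mode='same'))
```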
Using Interrupts and asyncio for Buttons and SwitchesThis notebook provides a simple example of using asyncio to interact asynchronously with multiple input devices. A task is created for each input device and coroutines are used to process the results. To demonstrate, we recreate the flashing LEDs example in the getting started notebook, but using interrupts to avoid polling the GPIO devices. The aim is to have holding a button result in the corresponding LED flashing. Initialising the EnvironmentFirst we import and instantiate all required classes to interact with the buttons, switches and LEDs, and ensure the base overlay is loaded. | from pynq import PL
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit") | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Define the flash LED taskThe next step is to create a task that waits for the button to be pressed and flashes the LED until the button is released. The `while True` loop ensures that the coroutine keeps running until cancelled, so that multiple presses of the same button can be handled. | import asyncio
async def flash_led(num):
while True:
await base.buttons[num].wait_for_value_async(1)
while base.buttons[num].read():
base.leds[num].toggle()
await asyncio.sleep(0.1)
base.leds[num].off() | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Create the taskAs there are four buttons we want to check, we create four tasks. The function `asyncio.ensure_future` is used to convert the coroutine to a task and schedule it in the event loop. The tasks are stored in an array so they can be referred to later when we want to cancel them. | tasks = [asyncio.ensure_future(flash_led(i)) for i in range(4)] | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Monitoring the CPU UsageOne of the advantages of interrupt-based I/O is minimised CPU usage while waiting for events. To see how CPU usage is impacted by the flashing LED tasks, we create another task that prints out the current CPU utilisation every 3 seconds. | import psutil
async def print_cpu_usage():
# Calculate the CPU utilisation by the amount of idle time
# each CPU has had in three second intervals
last_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
while True:
await asyncio.sleep(3)
next_idle = [c.idle for c in psutil.cpu_times(percpu=True)]
usage = [(1-(c2-c1)/3) * 100 for c1,c2 in zip(last_idle, next_idle)]
print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage))
last_idle = next_idle
tasks.append(asyncio.ensure_future(print_cpu_usage())) | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
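For comparison, `psutil` can also report utilisation directly. A minimal alternative to the hand-rolled idle-time calculation above (an added sketch, not part of the original notebook), blocking for one 3-second measurement window and assuming two CPUs as in the print format above:

```python
import psutil

# cpu_percent with an interval blocks for that long and compares CPU times
# before and after -- essentially what the coroutine above does by hand.
usage = psutil.cpu_percent(interval=3, percpu=True)
print("CPU Usage: {0:3.2f}%, {1:3.2f}%".format(*usage))
```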
Run the event loopAll of the blocking wait_for commands will run the event loop until the condition is met. All that is needed is to call the blocking `wait_for_value` method on the switch we are using as the termination condition. While waiting for switch 0 to go high, users can press any push button on the board to flash the corresponding LED. While this loop is running, try opening a terminal and running `top` to see that Python is consuming no CPU cycles while waiting for peripherals. As this code runs until switch 0 is high, make sure it is low before running the example. | if base.switches[0].read():
print("Please set switch 0 low before running")
else:
base.switches[0].wait_for_value(1) | CPU Usage: 0.67%, 11.67%
CPU Usage: 0.00%, 0.33%
CPU Usage: 0.00%, 0.33%
CPU Usage: 0.00%, 0.33%
CPU Usage: 0.00%, 0.33%
CPU Usage: 0.00%, 0.33%
| BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Clean upEven though the event loop has stopped running, the tasks are still active and will run again when the event loop is next used. To avoid this, the tasks should be cancelled when they are no longer needed. | [t.cancel() for t in tasks] | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Now if we re-run the event loop, nothing will happen when we press the buttons. The process will block until the switch is set back down to the low position. | base.switches[0].wait_for_value(0) | _____no_output_____ | BSD-3-Clause | Pynq-ZU/base/notebooks/board/asyncio_buttons.ipynb | Xilinx/PYNQ-ZU |
Making Simple Plots Objectives+ Learn how to make a simple 1D plot in Python.+ Learn how to find the maximum/minimum of a function in Python.We will use [Problem 4.B.2](https://youtu.be/w-IGNU2i3F8) of the lecturebook as a motivating example.We find that the moment of the force $\vec{F}$ about point A is:$$\vec{M_A} = (bF\cos\theta - dF\sin\theta)\hat{k}.$$Let's plot the component of the moment as a function of $\theta$.For this, we will use the Python module [matplotlib](https://matplotlib.org). | import numpy as np # for numerical algebra
import matplotlib.pyplot as plt # this is where the plotting capabilities are
# The following line is needed so that the plots are embedded in the Jupyter notebook (remove when not using Jupyter)
%matplotlib inline
# Define a function that computes the moment magnitude as a function of all other parameters
def M_A(theta, b, d, F):
"""
Compute the k component of the moment of F about point A given all the problem parameters.
"""
return b * F * np.cos(theta) - d * F * np.sin(theta)
# Choose some parameters
b = 0.5 # In meters
d = 2. # In meters
F = 2. # In kN
# The thetas on which we will evaluate the moment for plotting
thetas = np.linspace(0, 2 * np.pi, 100)
# The moment on these thetas:
M_As = M_A(thetas, b, d, F)
# Let's plot
plt.plot(thetas / (2. * np.pi) * 360, M_As, lw=2)
plt.xlabel(r'$\theta$ (degrees)')
plt.ylabel('$M_A$ (kN)'); | _____no_output_____ | MIT | making_simple_plots.ipynb | PurdueMechanicalEngineering/me270 |
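The objectives above also mention finding the maximum/minimum of a function. A minimal sketch using the arrays just computed (`thetas`, `M_As`), locating the extrema on the sampled grid with `np.argmax`/`np.argmin`:

```python
# Index of the largest / smallest sampled moment
i_max = np.argmax(M_As)
i_min = np.argmin(M_As)
print("Max M_A =", M_As[i_max], "at theta =", thetas[i_max], "rad")
print("Min M_A =", M_As[i_min], "at theta =", thetas[i_min], "rad")
```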
Now, let's put two lines in the same plot.Let's compare the moments when we change $d$ from 2 meters to 3.5 meters. | # We already have M_A for d=2 m (and all other parameters to whichever values we gave them)
# Let's copy it:
M_As_case_1 = M_As
# And let's compute it again for d=3.5 m
d = 3.5 # In m
M_As_case_2 = M_A(thetas, b, d, F)
# Let's plot both of them in the same figure
plt.plot(thetas / (2. * np.pi) * 360, M_As_case_1, lw=2, label='Case 1')
plt.plot(thetas / (2. * np.pi) * 360, M_As_case_2, '--', lw=2, label='Case 2')
plt.xlabel(r'$\theta$ (degrees)')
plt.ylabel('$M_A$ (kN)')
plt.legend(loc='best') | _____no_output_____ | MIT | making_simple_plots.ipynb | PurdueMechanicalEngineering/me270 |
Finally, let's see how we can make interactive plots.We will use the Python module [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/) and in particular the function [ipywidgets.interact](https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html). | from ipywidgets import interact # Loading the module
# Interact needs a function that does the plotting given the parameters.
# Let's make it:
def make_plots(b=0.5, d=3., F=1.): # X=val defines default values for the function
"""
Make the plot.
"""
thetas = np.linspace(0, 2. * np.pi, 100)
M_As = M_A(thetas, b, d, F)
plt.plot(thetas / (2. * np.pi) * 360, M_As, lw=2, label='Case 1')
plt.ylim([-10., 10.])
plt.xlabel(r'$\theta$ (degrees)')
plt.ylabel('$M_A$ (kN)') | _____no_output_____ | MIT | making_simple_plots.ipynb | PurdueMechanicalEngineering/me270 |
Let's just check that the function works by calling it a few times: | # With no inputs it should use the default values
make_plots()
# You can specify all the inputs like this:
make_plots(2., 3., 2.)
# Or even by name (whatever is not specified gets the default value):
make_plots(F=2.3) | _____no_output_____ | MIT | making_simple_plots.ipynb | PurdueMechanicalEngineering/me270 |
Ok. Let's use interact now: | interact(make_plots,
b=(0., 5., 0.1), # Range for b: (min, max, increment)
d=(0., 5, 0.1), # Range for d
F=(0., 2, 0.1) # Range for F
); | _____no_output_____ | MIT | making_simple_plots.ipynb | PurdueMechanicalEngineering/me270 |
VacationPy---- Note* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. | # Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key | _____no_output_____ | ADSL | VacationPy/VacationPy.ipynb | jgmoore10/python-api-challenge |
Store Part I results into DataFrame* Load the csv exported in Part I to a DataFrame | city_data = pd.read_csv("../output_data/cities.csv")
city_data.head()
| _____no_output_____ | ADSL | VacationPy/VacationPy.ipynb | jgmoore10/python-api-challenge |
Humidity Heatmap* Configure gmaps.* Use the Lat and Lng as locations and Humidity as the weight.* Add Heatmap layer to map. | gmaps.configure(api_key=g_key)
locations = city_data[["Lat", "Lng"]].astype(float)
humidity = city_data["Humidity"].astype(float)
fig = gmaps.figure()
heat_layer = gmaps.heatmap_layer(locations, weights = humidity, dissipating = False, max_intensity = 100, point_radius = 1)
fig.add_layer(heat_layer)
fig | _____no_output_____ | ADSL | VacationPy/VacationPy.ipynb | jgmoore10/python-api-challenge |
Create new DataFrame fitting weather criteria* Narrow down the cities to fit weather conditions.* Drop any rows with null values. | narrowed_city_df = city_data.loc[(city_data["Humidity"]>=70) & (city_data["Wind Speed"]>=10) & \
(city_data["Cloudiness"] <= 20)].dropna()
narrowed_city_df.head() | _____no_output_____ | ADSL | VacationPy/VacationPy.ipynb | jgmoore10/python-api-challenge |
Hotel Map* Store into variable named `hotel_df`.* Add a "Hotel Name" column to the DataFrame.* Set parameters to search for hotels within 5000 meters.* Hit the Google Places API for each city's coordinates.* Store the first hotel result into the DataFrame.* Plot markers on top of the heatmap. | hotel_df = narrowed_city_df.reset_index(drop=True)
hotel_df["Hotel Name"] = ""
hotel_df
# geocoordinates
target_search = "Hotel"
target_radius = 5000
target_type = "lodging"  # the documented Places type for hotels is "lodging"
params={
"radius":target_radius,
"types":target_type,
"keyword":target_search,
"key":g_key
}
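# --- Added sketch (not in the original notebook): the markdown above asks us
# to hit the Places API for each city and store the first hotel name, but the
# request loop is missing from this excerpt. A minimal version using the
# Nearby Search endpoint ("location" must be set per city from its Lat/Lng;
# the Lat/Lng/City column names follow the DataFrame used elsewhere here):
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
for index, row in hotel_df.iterrows():
    params["location"] = f"{row['Lat']},{row['Lng']}"  # per-city coordinates
    response = requests.get(base_url, params=params).json()
    try:
        hotel_df.loc[index, "Hotel Name"] = response["results"][0]["name"]
    except (KeyError, IndexError):
        print(f"No hotel found near {row['City']}... skipping.")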
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# Add marker layer ontop of heat map
markers = gmaps.marker_layer(locations)
fig.add_layer(markers)
# Display figure
fig | _____no_output_____ | ADSL | VacationPy/VacationPy.ipynb | jgmoore10/python-api-challenge |
BEFORE YOU DO ANYTHING...In the terminal:1. Navigate to __inside__ your ILAS_Python repository.2. __COMMIT__ any un-committed work on your personal computer.3. __PULL__ any changes *you* have made using another computer.4. __PULL__ textbook updates (including homework answers). Control Flow Lesson GoalCompose simple programs to control the order in which the operators we have studied so far are executed. ObjectivesControl the flow of a program using:- __control statements__- __loops__ Why we are studying this:Control flow allows us to make __choices__ using our program.Control statements result in a decision being made as to which of __two or more possible paths__ to follow. Lesson structure: - Control Statements - `if` and `else` statements - `for` loops - `while` loops - `break` and `continue` statements - Review Exercises - Summary What is a *__control statement__*?Let's start with an example from the last seminar... Control StatementsIn the last seminar we looked at a simple computer program that returned Boolean (True or False) variables... Based on the current time of day, the program answers two questions:>__Is it lunchtime?__>`True`if it is lunch time.>__Is it time for work?__>`True`if it is `not`:- before work (`time < work_starts`)- after work (`time > work_ends`)- lunchtime (the previous question assigns the value `True` or `False` to variable `lunchtime`). | # Time-telling program
time = 13.05 # current time
work_starts = 8.00 # time work starts
work_ends = 17.00 # time work ends
lunch_starts = 13.00 # time lunch starts
lunch_ends = 14.00 # time lunch ends
# variable lunchtime is True if the time is between the start and end of lunchtime
lunchtime = time >= lunch_starts and time < lunch_ends
# variable work_time is True if the time is not...
work_time = not ( time < work_starts # ... before work
or time > work_ends # ... or after work
or lunchtime) # ... or lunchtime
print("Is it work time?")
print(work_time)
print("Is it lunchtime?")
print(lunchtime) | Is it work time?
False
Is it lunchtime?
True
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
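The `not ( ... or ... or ... )` pattern in the cell above is De Morgan's law in action: negating a chain of `or`s is the same as `and`-ing the negations. A small added sketch, reusing the values from the cell above, makes the equivalence explicit:

```python
# not (A or B or C) is equivalent to (not A) and (not B) and (not C)
time, work_starts, work_ends = 13.05, 8.00, 17.00
lunchtime = True

a = not (time < work_starts or time > work_ends or lunchtime)
b = (time >= work_starts) and (time <= work_ends) and (not lunchtime)
print(a == b)  # True: both express "it is work time"
```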
What if we now want our computer program to do something based on these answers?To do this, we need to use *control statements*.Control statements allow us to make decisions in a program.This decision making is known as *control flow*. Control statements are a fundamental part of programming. Here is a control statement in pseudo code:This is an `if` statement. if A is true Perform task X For example if lunchtime is true Eat lunch We can check if an alternative to the `if` statement is true using an `else if` statement. if A is true Perform task X (only) else if B is true Perform task Y (only) Example: if lunchtime is true Eat lunch else if work_time is true Do work Often it is useful to include an `else` statement.If none of the `if` and `else if` statements are satisfied, the code following the `else` statement will be executed. if A is true Perform task X (only) else if B is true Perform task Y (only) else Perform task Z (only) if lunchtime is true Eat lunch else if work_time is true Do work else Go home Let's get a better understanding of control flow statements by completing some examples. `if` and `else` statementsLet's consider a simple example that demonstrates a Python if-else control statement. It uses the lunch/work example from the previous seminar.__Note:__ In Python, "else if" is written: `elif` | # Time-telling program
time = 13.05 # current time
work_starts = 8.00 # time work starts
work_ends = 17.00 # time work ends
lunch_starts = 13.00 # time lunch starts
lunch_ends = 14.00 # time lunch ends
# variable lunchtime is True if the time is between the start and end of lunchtime
lunchtime = time >= lunch_starts and time < lunch_ends
# variable work_time is True if the time is not...
work_time = not ( time < work_starts # ... before work
or time > work_ends # ... or after work
or lunchtime) # ... or lunchtime
#print("Is it work time?")
#print(work_time)
#print("Is it lunchtime?")
#print(lunchtime)
if lunchtime: # if lunchtime == True:
print("Eat lunch")
elif work_time: # elif work_time == True:
print("Do work")
else:
print("Go home")
| Eat lunch
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
__Remember:__ The program assigns the variables lunchtime and work_time the values `True` or `False`.Therefore when we type: `if lunchtime`the meaning is the same as: `if lunchtime == True` Here is another example, using algebraic operators to modify the value of an initial variable, `x`. The modification of `x` and the message printed depend on the initial value of `x`. | #The input to the program is variable `x`.
x = -10.0 # Initial x value
if x > 0.0:
print('Initial x is greater than zero') #The program prints a message...
x -= 20.0 # ...and modifies `x`.
elif x < 0.0:
print('Initial x is less than zero')
x += 21.0
else:
print('Initial x is not less than zero and not greater than zero, therefore it must be zero')
x *= 2.5
print("Modified x = ", x) | Initial x is less than zero
Modified x = 11.0
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
__Note:__ The program uses the short-cut algebraic operators that you learnt to use in the last seminar. __Try it yourself__In the cell code cell above, try:- changing the operations performed on `x`- changing the value of `x` a few times.Re-run the cell to see the different paths the program can follow. Look carefully at the structure of the `if`, `elif`, `else`, control statement:__The control statement begins with an `if`__, followed by the expression to check. At the end of the `if` statement you must put a colon (`:`) ````pythonif x > 0.0: ```` After the `if` statement, indent the code to be run in the case that the `if` statement is `True`. To end the code to be run, simply stop indenting: ````pythonif x > 0.0: print('Initial x is greater than zero') x -= 20.0```` The indent can be any number of spaces.The number of spaces must be the same for all lines of code to be run if the `if` statement is True.Jupyter Notebooks automatically indent 4 spaces.This is considered best practise. `if x > 0.0` is: - `True`: - The indented code is executed. - The control block is exited. - The program moves past any subsequent `elif` or `else` statements. - `False`: the program moves past the inented code to the next (non-indented) part of the program... In this the next (non-indented) part of the program is `elif` (else if).The elif statement is evaluated.(Notice that the code is structured in the same way as the `if` statement.):```pythonif x > 0.0: print('Initial x is greater than zero') x -= 20.0 elif x < 0.0: print('Initial x is less than zero') x += 21.0``` `elif x < 0.0`:- `True`: - The indented code is executed. - The control block is exited. - The program moves past any subsequent `elif` or `else` statements. - `False`: the program moves past the indented code to the next (non-indented) part of the program. If none of the preceding `if` or `elif` stements are true. e.g. in this example: - `x > 0.0` is `False` - `x < 0.0` is `False`the code following the `else` statement is executed.```pythonif x > 0.0: print('Initial x is greater than zero') x -= 20.0elif x < 0.0: print('Initial x is less than zero') x += 21.0else: print('Initial x is not less than zero and not greater than zero, therefore it must be zero')``` Evaluating data against different criteria is extremely useful for solving real-world mathematical problems. Let's look at a simple example... Real-World Example: currency tradingTo make a comission (profit), a currency trader sells US dollars to travellers above the market rate. The multiplier used to calculate the amount recieved by customer is shown in the table:|Amount (JPY) |Multiplier ||--------------------------------------------|-------------------------|| Less than $100$ | 0.9 | | From $100$ and less than $1,000$ | 0.925 | | From $1,000$ and less than $10,000$ | 0.95 | | From $10,000$ and less than $100,000$ | 0.97 | | Over $100,000$ | 0.98 | The currency trader charges more if the customer pays with cash. If the customer pays with cash, the currency trader reduces the rate by an __additional__ 10% after conversion. (If the transaction is made electronically, they do not). __Current market rate:__ 1 JPY = 0.0091 USD.__Effective rate:__ The rate that the customer receives based on the amount in JPY to be changed. The program calculates the __effective rate__ using: - The reduction based on the values in the table. - An additional 10% reduction (mutiplier = 0.9) if the transaction is made in cash. | JPY = 1_000_000 # The amount in JPY to be changed into USD
cash = False # True if transaction is in cash, otherwise False
market_rate = 0.0091 # 1 JPY is worth this many dollars at the market rate
# Apply the appropriate reduction depending on the amount being sold
if JPY < 10_000:
multiplier = 0.9
elif JPY < 100_000:
    multiplier = 0.925
elif JPY < 1_000_000:
    multiplier = 0.95
elif JPY < 10_000_000:
    multiplier = 0.97
else: # JPY >= 10,000,000
    multiplier = 0.98
# Apply the appropriate reduction depending if the transaction is made in cash or not
if cash:
cash_multiplier = 0.9
else:
cash_multiplier = 1
# Calculate the total amount sold to the customer
USD = JPY * market_rate * multiplier * cash_multiplier
print("Amount in JPY sold:", JPY)
print("Amount in USD purchased:", USD)
print("Effective rate:", USD/JPY) | Amount in JPY sold: 1000000
Amount in USD purchased: 8827.0
Effective rate: 0.008827
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
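To see how the amount bands behave, here is the same logic wrapped in a small helper function (an added sketch; the function name and test amounts are ours, not part of the lesson):

```python
def usd_received(JPY, cash, market_rate=0.0091):
    # Pick the multiplier for the amount band (same thresholds as above)
    if JPY < 10_000:
        multiplier = 0.9
    elif JPY < 100_000:
        multiplier = 0.925
    elif JPY < 1_000_000:
        multiplier = 0.95
    elif JPY < 10_000_000:
        multiplier = 0.97
    else:
        multiplier = 0.98
    cash_multiplier = 0.9 if cash else 1.0
    return JPY * market_rate * multiplier * cash_multiplier

for amount in (5_000, 50_000, 5_000_000):
    print(amount, "JPY ->", round(usd_received(amount, cash=True), 2), "USD")
```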
__Note:__ - We can use multiple `elif` statements within a control block. - We can use multiple `if` statements. When the program executes and exits a control block, it moves to the next `if` statement. - __Readability:__ Underscores _ are placed between digits in long numbers to make them easier to read. You DO NOT need to include underscores for Python to interpret the number correctly. You can place the underscores wherever you like in the sequence of digits that make up the number. __Try it yourself__In your textbook, try changing the values of `JPY` and `cash` a few times.Re-run the cell to see the different paths the program can follow. `for` loops*Loops* are used to execute a command repeatedly.A loop is a block that repeats an operation a specified number of times (loops). To learn about loops we are going to use the function `range()`. `range`The function `range` gives us a sequence of *integer* numbers.`range(3, 6)` returns integer values starting from 3 and ending at 6.i.e.> 3, 4, 5Note this does not include 6. We can change the starting value. For example for integer values starting at 0 and ending at 4: `range(0,4)`returns:> 0, 1, 2, 3`range(4)` is a __shortcut__ for range(0, 4) Simple `for` loops The statement ```pythonfor i in range(0, 6):```says that we want to run the indented code six times. | for i in range(0, 6):
print(i)
| 0
1
2
3
4
5
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
The first time through, the value of i is equal to 0.The second time through, its value is 1.Each loop the value `i` increases by 1 (0, 1, 2, 3, 4, 5) until the last time, when its value is 5. Look carefully at the structure of the `for` loop: - `for` is followed by the condition being checked. - : colon at the end of the `for` statement. - The indented code that follows is run each time the code loops. (The __same number of spaces__ should be used for all indents) - To end the `for` loop, simply stop indenting. | for i in range(-2, 3):
print(i)
print('The end of the loop') | -2
-1
0
1
2
The end of the loop
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
The above loop starts from -2 and executes the indented code for each value of i in the range (-2, -1, 0, 1, 2).When the loop has executed the code for the final value `i = 2`, it moves on to the next unindented line of code. | for n in range(4):
print("----")
print(n, n**2) | ----
0 0
----
1 1
----
2 4
----
3 9
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
The above executes 4 loops.The statement ```pythonfor n in range(4):```says that we want to loop over four integers, starting from 0. Each loop the value `n` increases by 1 (0, 1, 2 3). __Try it yourself__Go back and change the __range__ of input values in the last three cells and observe the change in output. If we want to step by three rather than one: | for n in range(0, 10, 3):
print(n) | 0
3
6
9
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
If we want to step backwards rather than forwards we __must__ include the step size: | for n in range(10, 0, -1):
print(n) | 10
9
8
7
6
5
4
3
2
1
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
For example... | for n in range(10, 0):
print(n) | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
...does not return any values because there are no values that lie between 10 and 0 when counting in the positive direction from 10. __Try it yourself.__In the cell below write a `for` loop that:- starts at `n = 9`- ends at `n = 3` (and includes `n = 3`)- loops __backwards__ through the range in steps of -3 - prints `n`$^2$ at each loop. | # For loop
for n in range(9, 2, -3):
print ("-----")
print(n, n**2) | -----
9 81
-----
6 36
-----
3 9
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
For loops are useful for performing operations on large data sets.We often encounter large data sets in real-world mathematical problems. A simple example of this is converting multiple values using the same mathematical equation to create a look-up table... Real-world Example: conversion table from degrees Fahrenheit to degrees CelsiusWe can use a `for` loop to create a conversion table from degrees Fahrenheit ($T_F$) to degrees Celsius ($T_c$).Conversion formula:$$T_c = 5(T_f - 32)/9$$Computing the conversion from -100 F to 200 F in steps of 20 F (not including 200 F): | print("T_f, T_c")
for Tf in range(-100, 200, 20):
print(Tf, "\t", round(((Tf - 32) * 5 / 9), 3)) | T_f, T_c
-100 -73.333
-80 -62.222
-60 -51.111
-40 -40.0
-20 -28.889
0 -17.778
20 -6.667
40 4.444
60 15.556
80 26.667
100 37.778
120 48.889
140 60.0
160 71.111
180 82.222
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
`while` loopsA __`for`__ loop performs an operation a specified number of times. ```python for x in range(5): print(x)``` A __`while`__ loop performs a task *while* a specified statement is true. ```pythonx = 0while x < 5: print(x)``` The structure of a `while` loop is similar to a `for` loop.- `while` is followed by the condition being checked.- : colon at the end of the `while` statement. - The indented code that follows is repeatedly executed while the `while` condition (e.g. `x < 5`) remains true. It can be quite easy to crash your computer using a `while` loop. e.g. if we don't modify the value of x each time the code loops:```pythonx = 0while x < 5: print(x)```the loop will continue indefinitely, since `x < 5` will never become `False`.This is called an *infinite loop*. To perform the same function as the `for` loop we need to increment the value of `x` within the loop: | x = 0
print("Start of while statement")
while x < 5:
print(x)
x += 1 # Increment x
print("End of while statement") | Start of while statement
0
1
2
3
4
End of while statement
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
`for` loops are often safer when performing an operation on a set range of values. | x = -2
print("Start of for statement")
for y in range(x,5):
print(y)
print("End of for statement") | Start of for statement
-2
-1
0
1
2
3
4
End of for statement
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
Here is another example of a `while` loop. | x = 0.9
while x > 0.001:
# Square x (shortcut x *= x)
x = x * x
print(round(x, 6)) | 0.81
0.6561
0.430467
0.185302
0.034337
0.001179
1e-06
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
If we use an initial value of $x \ge 1$, an infinite loop will be generted.`x` will increase with each loop, meaning `x` will always be greater than 0.001.e.g. ```pythonx = 2while x > 0.001: x = x * x print(x)``` However, using a `for` loop is a less appropriate solution in this case.We may not know beforehand how many steps are required before `x > 0.001` becomes false. To avoid errors, it is good practice to check that $x < 1$ before entering the `while` loop e.g. | x = 0.9
if x < 1:
while x > 0.001:
# Square x (shortcut x *= x)
x = x * x
print(round(x, 6))
else:
print("x is greater than one, infinite loop avoided") | 0.81
0.6561
0.430467
0.185302
0.034337
0.001179
1e-06
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
__Try it for yourself:__In the cell above change the value of x to above or below 1.Observe the output. __Try it for yourself:__In the cell below: - Create a variable,`x`, with the initial value 50 - Each loop: 1. print x 1. reduce the value of x by half - Exit the loop when `x` < 3 | # While loop | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
`break` and `continue`. `break`Sometimes we want to break out of a `for` or `while` loop. For example in a `for` loop we can check if something is true, and then exit the loop prematurely, e.g | for x in range(10):
print(x)
if x == 5:
print("Time to break out")
break | 0
1
2
3
4
5
Time to break out
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
Let's look at how we can use this in a program... The following program __finds prime numbers__.__Prime number:__ A positive integer, greater than 1, that has no positive divisors other than 1 and itself (2, 3, 5, 11, 13, 17....)The program checks (integer) numbers, `n` up to a limit `N` and prints the prime numbers. We can determine in `n` is a prime nunber by diving it by every number in the range 2 to `n`.If any of these calculations has a remainder equal to zero, n is not a prime number. | N = 50 # Check numbers up 50 for primes (excludes 50)
# Loop over all numbers from 2 to 50 (excluding 50)
for n in range(2, N):
# Assume that n is prime
n_is_prime = True
# Check if n divided by (any number in the range 2 to n) returns a remainder equal to 0
for m in range(2, n):
# If the remainder is zero, n is not a prime number
if n % m == 0:
n_is_prime = False
# If n is prime, print to screen
if n_is_prime:
print(n) | 2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
Notice that our program contains a second `for` loop. For each value of n, it loops through incrementing values of m in the range (2 to n):```python Check if n can be divided by m m ranges from 2 to n (excluding n)for m in range(2, n):```before incrementing to the next value of n.We call this a *nested* loop.The indents in the code show where loops are nested. Here it is again without the comments: | N = 50
# for loop 1
for n in range(2, N):
n_is_prime = True
# for loop 2
for m in range(2, n):
if n % m == 0:
n_is_prime = False
if n_is_prime:
print(n) | 2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
As n gets larger, dividing it by *every* number in the range (2, n) becomes more and more inefficient. A `break` statement allows us to exit the loop as soon as a remainder equal to zero is returned (indicating that n is not a prime number). In the program below, a break statement is added.As soon as a number is found to be not prime, the program breaks out of loop 2 and goes to the next value of n in loop 1.By placing `else` *one level up* from `if` the program will iterate through all values of m before printing n if n is prime. | N = 55
# for loop 1
for n in range(2, N):
    # for loop 2
    for m in range(2, n):
        if n % m == 0:
            break
    else:
        # if n is prime: this else belongs to the inner for loop, so it runs
        # only when that loop completed without hitting break (no divisor found)
        print(n) | 2
3
5
7
11
13
17
19
23
29
31
37
41
43
47
53
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
`continue`Sometimes, instead of stopping the loop we want to go to the next iteration in a loop, skipping the remaining code.For this we use `continue`. The example below loops over 20 numbers (0 to 19) and checks if the number is divisible by 4. If the number is not divisible by 4:- it prints a message - it moves to the next value. If the number is divisible by 4 it *continues* to the next value in the loop, without printing. | for j in range(1, 20):
    if j % 4 == 0: # Check remainder of j/4
continue # continue to next value of j
print(j, "is not a multiple of 4") | 1 is not a multiple of 4
2 is not a multiple of 4
3 is not a multiple of 4
5 is not a multiple of 4
6 is not a multiple of 4
7 is not a multiple of 4
9 is not a multiple of 4
10 is not a multiple of 4
11 is not a multiple of 4
13 is not a multiple of 4
14 is not a multiple of 4
15 is not a multiple of 4
17 is not a multiple of 4
18 is not a multiple of 4
19 is not a multiple of 4
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
To compare, if we used `break` instead of `continue`: | for j in range(1, 20):
    if j % 4 == 0: # Check remainder of j/4
        break # break out of the loop entirely
print(j, "is not a multiple of 4") | 1 is not a multiple of 4
2 is not a multiple of 4
3 is not a multiple of 4
| MIT | 3_Control_flow.ipynb | konshte/Python_K |
__Try it yourself__We can use a `for` loop to perform an operation on each character of a string.```Pythonstring = "string"for i in range(len(string)): print(string[i])``` In the cell below, loop through the characters of the string.Use `continue` to only print the letters of the word *sting*. | # Print the letters of the word sting
string = "string" | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Review ExercisesHere are a series of engineering problems for you to practise each of the new Python skills that you have learnt today. Review Exercise: `while` loops.In the cell below, write a while loop that with each loop:- prints the value of `x`- then decreases the value of x by 0.5as long as `x` remains positive.Jump to While Loops | x = 4
while x > 0:
print(x)
x -= 0.5
# Example Solution
while (x > 0):
print(x)
x -= 0.5 | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Review Exercise: `for` loopsIn the cell below, write a `for` loop to print the even numbers from 2 to 20, inclusive. | # for loop to print the even numbers from 2 to 20, inclusive.
for n in range(2, 21):
    if n % 2 == 0:
        print(n)
# Example Solution
for i in range(2, 21, 2):
print(i) | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Review Exercise: `for` loops and `if` statementsIn the cell below, write a for loop to alternately print `Red` then `Blue` 3 times. i.e. Red Blue Red Blue Red Blue | # Alternately print Red and Blue
for n in range(1, 7):
    if n % 2 == 0:
        print("Blue")
    else:
        print("Red")
# Example Solution
colour = "Red"
for n in range(6):
print(colour)
if colour == "Red":
colour = "Blue"
else:
colour = "Red" | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Review Exercise: `continue`In the cell below, loop through the characters of the string.Use `continue` to only print the letters of the word *sing*.Hint: Refer to __Logical Operators__ (Seminar 2). Jump to continue | # Print the letters of the word sing
string = "string"
# Example Solution
string = "string"
for i in range(len(string)):
if string[i] == "r" or string[i] == "t":
continue
print(string[i]) | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Review Exercise: `for` loops and `if`, `else` and `continue` statements.__(A)__ In the cell below, use a for loop to print the square roots of the first 25 odd positive integers. (Remember, the square root of a number, $x$ can be found by $x^{1/2}$)__(B)__ If the number generated is greater than 3 and smaller than 5, print "`skip`" and __`continue`__ to the next iteration *without* printing the number.Hint: Refer to __Logical Operators__ (Seminar 2). Jump to for loopsJump to if and else statementsJump to continue | # square roots of the first 25 odd positive integers
# Example Solution
for x in range(1, 50, 2):
if((x ** (1/2) > 3) and (x ** (1/2) < 5)):
print("skip")
continue
print(x ** (1/2)) | _____no_output_____ | MIT | 3_Control_flow.ipynb | konshte/Python_K |
Prove that for integers $a,\;b,\;\dots$(1) $(a, b) = 1, \; c | a, \; d | b \implies (c, d) = 1$Suppose $(c, d) = e > 1$. Then $e | c$ and $c | a$ implies $e | a$; similarly $e | b$ so $(a, b) > 1$, a contradiction, and therefore $(c, d) = 1$. $\;\;\;\boxdot$(2) $(a, b) = (a, c) = 1 \implies (a, bc) = 1$(3) $(a, b) = 1 \implies (a^n, b^k) = 1 \; \; \forall \; \; n \ge 1, k \ge 1$(4) $(a, b) = 1 \implies (a + b, a - b) = 1 \; or \; 2$(5) $(a, b) = 1 \implies (a + b, a^2 - ab + b^2) = 1 \; or \; 3$(6) $(a, b) = 1, \; d|(a + b) \implies (a, d) = (b, d) = 1$ (7) A rational number $a/b$ with $(a, b) = 1$ is a *reduced fraction*. If the sum of two reduced fractions is an integer, say $(a/b) + (c/d) = n$, prove that $|b| = |d|$.(8) An integer is called *squarefree* if it is not divisible by the square of any prime. Prove that for every $n \ge 1$ there exist uniquely determined $a > 0$ and $b > 0$ such that $n=a^2b$, where $b$ is *squarefree*. ...(11) Prove that $n^4 + 4$ is composite if $n > 1$. ***Solution***I first tried cases for the ones-digit. For example $n$ even gives $n^4 + 4$ also even, and $n$ ending in 1, 3, 7 or 9 gives $n^4 + 4$ ending in 5. However (particularly because the last case does not resolve in this manner) the right thing to try is factoring $n^4 + 4$ in some obvious way, with constant terms 1 and 4 or 2 and 2: $n^4 + 4 = (n^2 + a \cdot n + 2) (n^2 + b \cdot n + 2)$This gives $n^4 + b \cdot n^3 + 2 n^2 + a \cdot n^3 + a \cdot b \cdot n^2 + 2 \cdot a \cdot n + 2 n^2 + 2 \cdot b \cdot n + 4$, i.e. $n^4 + 4$ plus terms that need to be zero: $(b + a)\cdot n^3 + (4 + a \cdot b)\cdot n^2 + (2 \cdot (a + b))\cdot n$This means $a = -b$ and $a \cdot b = -4$. Great: $a = 2$ and $b = -2$. $n^4 + 4 = (n^2 + 2n + 2)(n^2 - 2n + 2)\;\;\;\;\boxdot$ | def pf(n):
    pfn, i = [], 2
    while i * i <= n:  # <= so that e.g. pf(9) correctly finds the factor 3
        while n % i == 0: pfn.append(i); n = n // i
        i = i + 1
    if n > 1: pfn.append(int(n))  # avoid appending a trailing 1
    return pfn
def npf(n): return len(pf(n))
def isprime(n):
if npf(n) == 1: return True
return False
for a in range(3):
s = a * 10 + 5
t = s*s*s*s + 4
if isprime(t): print(str(t) + ' is prime')
else: print(str(t) + ' factors are ' + str(pf(t))) | 629 factors are [17, 37]
50629 factors are [197, 257]
390629 factors are [577, 677]
| MIT | T1_chapter1_exercises.ipynb | robfatland/boojum |
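A quick numeric sanity check of the factorisation derived above (a small added sketch):

```python
# Verify n^4 + 4 == (n^2 + 2n + 2)(n^2 - 2n + 2), and that both factors
# exceed 1 for n > 1 -- which is exactly why n^4 + 4 is composite there.
for n in range(2, 20):
    p, q = n * n + 2 * n + 2, n * n - 2 * n + 2
    assert p * q == n ** 4 + 4
    assert p > 1 and q > 1  # q = (n - 1)^2 + 1 >= 2 when n >= 2
print("factorisation verified for n = 2..19")
```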
... ...(20) Let $d = (826, 1890)$. Use the Euclidean algorithm to compute $d$, then express $d$ as a linear combination of 826 and 1890Solution$1890 = 826 \cdot 2 + 238$$826 = 238 \cdot 3 + 112$$238 = 112 \cdot 2 + 14$$112 = 14 \cdot 8 + 0$$d = 14$$d = u \cdot 826 + v \cdot 1890$ or equivalently $1 = u \cdot 59 + v \cdot 135$Taking $u$ positive it can take on values ${ 4, 9, 14, 19, \dots }$.*--a miracle occurs--*$(d = 14) = 254 \cdot 826 - 111 \cdot 1890$ | 254*826-111*1890 | _____no_output_____ | MIT | T1_chapter1_exercises.ipynb | robfatland/boojum |
Table of Contents1 Plot Validation and Train loss2 Extract relevant Data to df2.1 Get best result2.2 Consider Outliers3 Results by model3.1 Remove Duplicates4 Each variable plotted against loss:5 Investigate "band" in loss-model plot5.1 Extract the different bands and inpsect6 Investigate Duplicates7 Investigate Best | %load_ext autoreload
%autoreload 2
%cd ..
import os
import sys
from notebooks import utils
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
#import pipeline
# parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# sys.path.append(parent_dir) #to import pipeline
%ls experiments
###CHANGE THIS FILE TO THE SUBDIRECTORY OF INTEREST:
#exp_dirs = ["experiments/07b/", "experiments/DA3_2/07a/0", "experiments/DA3_2/07a/1"]
exp_dirs = ["experiments/retrain/"]
results = utils.extract_res_from_files(exp_dirs)
#load data when utils isnt working:
if False:
import pickle
res_fp = "experiments/results/ResNeXt/res.txt"
with open(res_fp, "rb") as f:
results = pickle.load(f) | _____no_output_____ | MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
Plot Validation and Train loss |
ylim = (0, 3000)
ylim2 = (70,100)
utils.plot_results_loss_epochs(results, ylim1=ylim, ylim2=ylim2)
| (2, 3)
| MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
Extract relevant Data to dfUse minimum validation loss as criterion. In theory (if we had it) it would be better to use DA MAE | df_res = utils.create_res_df(results)
df_res_original = df_res.copy() #save original (in case you substitute out)
df_res | _____no_output_____ | MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
Get best result | df_res["valid_loss"].idxmin()
print(df_res.loc[df_res["valid_loss"].idxmin()])
df_res.loc[df_res["valid_loss"].idxmin()]["path"]
| model CLIC
valid_loss 397.938
activation prelu
latent_dims ??
num_layers ??
total_channels None
channels/layer ??
augmentation 1
batch_norm 0
channels see model def
conv_changeover 10
dropout 0
first_channel e
learning_rate 0.0002
path experiments/DA3_2/07a/0
Name: 4, dtype: object
| MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
Consider Outliers | #consider third experiment run (lots of outliers)
df3 = df_res[df_res["path"].str.contains("CAE_zoo3")]
df_outlier = df_res[df_res["valid_loss"] > 150000]
df_outlier | _____no_output_____ | MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
Results by model | relu = df_res[df_res.activation == "relu"]
lrelu = df_res[df_res.activation == "lrelu"]
plt.scatter('model', "valid_loss", data=relu, marker="+", color='r')
plt.scatter('model', "valid_loss", data=lrelu, marker="+", color='g')
plt.ylabel("Loss")
plt.xlabel("Model")
plt.ylim(16000, 70000)
plt.legend(labels=["relu", "lrelu"])
plt.show()
#investigate number of layers
eps = 1e-5
reluNBN = df_res[(df_res.activation == "relu") & (abs(df_res.batch_norm - 0.) < eps)]
reluBN = df_res[(df_res.activation == "relu") & (abs(df_res.batch_norm - 1.) < eps)]
lreluNBN = df_res[(df_res.activation == "lrelu") & (abs(df_res.batch_norm - 0.0) < eps)]
lreluBN = df_res[(df_res.activation == "lrelu") & (abs(df_res.batch_norm - 1.) < eps)]
plt.scatter('model', "valid_loss", data=reluNBN, marker="+", color='r')
plt.scatter('model', "valid_loss", data=reluBN, marker="+", color='g')
plt.scatter('model', "valid_loss", data=lreluNBN, marker="o", color='r')
plt.scatter('model', "valid_loss", data=lreluBN, marker="o", color='g')
plt.ylabel("Loss")
plt.xlabel("Model")
plt.ylim(16000, 70000)
plt.legend(labels=["relu, NBN", "relu, BN", "lrelu, NBN", "lrelu, BN"])
plt.show() | _____no_output_____ | MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |
It turns out that there are lots of duplicates in the above data (as a result of a bug in my code that was giving all models the same number of channels). So remove duplicates and go again: Remove Duplicates | #remove duplicates
columns = list(df_res_original.columns)
columns.remove("model")
columns.remove("path")
print(columns)
df_res_new = df_res_original.loc[df_res_original.astype(str).drop_duplicates(subset=columns, keep="last").index]
#df_res_new = df_res_original.drop_duplicates(subset=columns, keep="last")
df_res_new.shape
df_res = df_res_new
df_res.shape
##Plot same graph again:
#investigate number of layers
relu6 = df_res[(df_res.activation == "relu") & (df_res.num_layers == 6)]
relu11 = df_res[(df_res.activation == "relu") & (df_res.num_layers != 6)]
lrelu6 = df_res[(df_res.activation == "lrelu") & (df_res.num_layers == 6)]
lrelu11 = df_res[(df_res.activation == "lrelu") & (df_res.num_layers != 6)]
plt.scatter('model', "valid_loss", data=relu6, marker="+", color='r')
plt.scatter('model', "valid_loss", data=lrelu6, marker="+", color='g')
plt.scatter('model', "valid_loss", data=relu11, marker="o", color='r')
plt.scatter('model', "valid_loss", data=lrelu11, marker="o", color='g')
plt.ylabel("Loss")
plt.xlabel("Model")
plt.ylim(16000, 60000)
plt.legend(labels=["relu, 6", "lrelu, 6", "relu, not 6", "lrelu, not 6"])
plt.show() | _____no_output_____ | MIT | notebooks/.ipynb_checkpoints/CAE_zoo_analysis-checkpoint.ipynb | scheng1992/Data_Assimilation |