input stringlengths 6 17.2k | output stringclasses 1 value | instruction stringclasses 1 value |
|---|---|---|
= '%Y-%m-%dT%H:%M:%S.%fZ'
time_str = datetime.strptime(row['time'], p)
del row['time']
else:
time_str = None
if 'model_ver' in row.keys():
self.tags['model_ver']= row[ | ||
_data import tabularDataReader
from mlac.timeseries.core.transformer import transformer as profiler
from mlac.timeseries.core.selector import selector
from mlac.timeseries.core.trainer import learner
from mlac.timeseries.core.register import register
from mlac.timeseries.core.deploy import deploy
from mlac.timeseries.core.drift_analysis import drift
from mlac.timeseries.core.functions import global_function
from mlac.timeseries.core.data_reader import data_reader
from mlac.timeseries.core.utility import utility_function
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
class input_drift():
    """Source-code generator for a standalone input-drift detection script.

    ``addInputDriftClass()`` emits an ``inputdrift`` class that loads the
    production model's training data from the mlflow artifact store and runs
    a two-sample Kolmogorov-Smirnov test per numeric feature;
    ``addSuffixCode()`` emits the ``check_drift`` driver and ``__main__``
    guard.  Generated text accumulates in ``codeText``.

    NOTE(review): the string literals below ARE the generated program text —
    their internal spacing is runtime data, not this file's style.
    """
    def __init__(self, tab_size=4):
        # Width (in spaces) of one indentation level used by addStatement().
        self.tab = ' ' * tab_size
        self.codeText = ''
    def addInputDriftClass(self):
        """Return the source text of the generated ``inputdrift`` class.

        The emitted code expects the generated module to import ``mlflow``,
        ``scipy.stats as st``, ``numpy as np``, ``platform``, ``warnings``
        and ``pathlib.Path`` — TODO confirm against the module assembler.
        """
        text = "\
\nclass inputdrift():\
\n\
\n    def __init__(self,base_config):\
\n        self.usecase = base_config['modelName'] + '_' + base_config['modelVersion']\
\n        self.currentDataLocation = base_config['currentDataLocation']\
\n        home = Path.home()\
\n        if platform.system() == 'Windows':\
\n            from pathlib import WindowsPath\
\n            output_data_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'Data'\
\n            output_model_dir = WindowsPath(home)/'AppData'/'Local'/'HCLT'/'AION'/'target'/self.usecase\
\n        else:\
\n            from pathlib import PosixPath\
\n            output_data_dir = PosixPath(home)/'HCLT'/'AION'/'Data'\
\n            output_model_dir = PosixPath(home)/'HCLT'/'AION'/'target'/self.usecase\
\n        if not output_model_dir.exists():\
\n            raise ValueError(f'Configuration file not found at {output_model_dir}')\
\n\
\n        tracking_uri = 'file:///' + str(Path(output_model_dir)/'mlruns')\
\n        registry_uri = 'sqlite:///' + str(Path(output_model_dir)/'mlruns.db')\
\n        mlflow.set_tracking_uri(tracking_uri)\
\n        mlflow.set_registry_uri(registry_uri)\
\n        client = mlflow.tracking.MlflowClient(\
\n            tracking_uri=tracking_uri,\
\n            registry_uri=registry_uri,\
\n        )\
\n        model_version_uri = 'models:/{model_name}/production'.format(model_name=self.usecase)\
\n        model = mlflow.pyfunc.load_model(model_version_uri)\
\n        run = client.get_run(model.metadata.run_id)\
\n        if run.info.artifact_uri.startswith('file:'):\
\n            artifact_path = Path(run.info.artifact_uri[len('file:///') : ])\
\n        else:\
\n            artifact_path = Path(run.info.artifact_uri)\
\n        self.trainingDataPath = artifact_path/(self.usecase + '_data.csv')\
\n\
\n    def get_input_drift(self,current_data, historical_data):\
\n        curr_num_feat = current_data.select_dtypes(include='number')\
\n        hist_num_feat = historical_data.select_dtypes(include='number')\
\n        num_features = [feat for feat in historical_data.columns if feat in curr_num_feat]\
\n        alert_count = 0\
\n        data = {\
\n            'current':{'data':current_data},\
\n            'hist': {'data': historical_data}\
\n        }\
\n        dist_changed_columns = []\
\n        dist_change_message = []\
\n        for feature in num_features:\
\n            curr_static_value = st.ks_2samp( hist_num_feat[feature], curr_num_feat[feature]).pvalue\
\n            if (curr_static_value < 0.05):\
\n                distribution = {}\
\n                distribution['hist'] = self.DistributionFinder( historical_data[feature])\
\n                distribution['curr'] = self.DistributionFinder( current_data[feature])\
\n                if(distribution['hist']['name'] == distribution['curr']['name']):\
\n                    pass\
\n                else:\
\n                    alert_count = alert_count + 1\
\n                    dist_changed_columns.append(feature)\
\n                    changed_column = {}\
\n                    changed_column['Feature'] = feature\
\n                    changed_column['KS_Training'] = curr_static_value\
\n                    changed_column['Training_Distribution'] = distribution['hist']['name']\
\n                    changed_column['New_Distribution'] = distribution['curr']['name']\
\n                    dist_change_message.append(changed_column)\
\n        if alert_count:\
\n            resultStatus = dist_change_message\
\n        else :\
\n            resultStatus='Model is working as expected'\
\n        return(alert_count, resultStatus)\
\n\
\n    def DistributionFinder(self,data):\
\n        best_distribution =''\
\n        best_sse =0.0\
\n        if(data.dtype in ['int','int64']):\
\n            distributions= {'bernoulli':{'algo':st.bernoulli},\
\n                'binom':{'algo':st.binom},\
\n                'geom':{'algo':st.geom},\
\n                'nbinom':{'algo':st.nbinom},\
\n                'poisson':{'algo':st.poisson}\
\n            }\
\n            index, counts = np.unique(data.astype(int),return_counts=True)\
\n            if(len(index)>=2):\
\n                best_sse = np.inf\
\n                y1=[]\
\n                total=sum(counts)\
\n                mean=float(sum(index*counts))/total\
\n                variance=float((sum(index**2*counts) -total*mean**2))/(total-1)\
\n                dispersion=mean/float(variance)\
\n                theta=1/float(dispersion)\
\n                r=mean*(float(theta)/1-theta)\
\n\
\n                for j in counts:\
\n                    y1.append(float(j)/total)\
\n                distributions['bernoulli']['pmf'] = distributions['bernoulli']['algo'].pmf(index,mean)\
\n                distributions['binom']['pmf'] = distributions['binom']['algo'].pmf(index,len(index),p=mean/len(index))\
\n                distributions['geom']['pmf'] = distributions['geom']['algo'].pmf(index,1/float(1+mean))\
\n                distributions['nbinom']['pmf'] = distributions['nbinom']['algo'].pmf(index,mean,r)\
\n                distributions['poisson']['pmf'] = distributions['poisson']['algo'].pmf(index,mean)\
\n\
\n                sselist = []\
\n                for dist in distributions.keys():\
\n                    distributions[dist]['sess'] = np.sum(np.power(y1 - distributions[dist]['pmf'], 2.0))\
\n                    if np.isnan(distributions[dist]['sess']):\
\n                        distributions[dist]['sess'] = float('inf')\
\n                best_dist = min(distributions, key=lambda v: distributions[v]['sess'])\
\n                best_distribution = best_dist\
\n                best_sse = distributions[best_dist]['sess']\
\n\
\n            elif (len(index) == 1):\
\n                best_distribution = 'Constant Data-No Distribution'\
\n                best_sse = 0.0\
\n        elif(data.dtype in ['float64','float32']):\
\n            distributions = [st.uniform,st.expon,st.weibull_max,st.weibull_min,st.chi,st.norm,st.lognorm,st.t,st.gamma,st.beta]\
\n            best_distribution = st.norm.name\
\n            best_sse = np.inf\
\n            nrange = data.max() - data.min()\
\n\
\n            y, x = np.histogram(data.astype(float), bins='auto', density=True)\
\n            x = (x + np.roll(x, -1))[:-1] / 2.0\
\n\
\n            for distribution in distributions:\
\n                with warnings.catch_warnings():\
\n                    warnings.filterwarnings('ignore')\
\n                    params = distribution.fit(data.astype(float))\
\n                    arg = params[:-2]\
\n                    loc = params[-2]\
\n                    scale = params[-1]\
\n                    pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)\
\n                    sse = np.sum(np.power(y - pdf, 2.0))\
\n                    if( sse < best_sse):\
\n                        best_distribution = distribution.name\
\n                        best_sse = sse\
\n\
\n        return {'name':best_distribution, 'sse': best_sse}\
\n\
"
        return text
    def addSuffixCode(self, indent=1):
        """Return the generated ``check_drift`` helper plus ``__main__`` block.

        indent is accepted for interface symmetry with sibling generators
        and is not used here.
        """
        text ="\n\
\ndef check_drift( config):\
\n    inputdriftObj = inputdrift(config)\
\n    historicaldataFrame=pd.read_csv(inputdriftObj.trainingDataPath)\
\n    currentdataFrame=pd.read_csv(inputdriftObj.currentDataLocation)\
\n    dataalertcount,message = inputdriftObj.get_input_drift(currentdataFrame,historicaldataFrame)\
\n    if message == 'Model is working as expected':\
\n        output_json = {'status':'SUCCESS','data':{'Message':'Model is working as expected'}}\
\n    else:\
\n        output_json = {'status':'SUCCESS','data':{'Affected Columns':message}}\
\n    return(output_json)\
\n\
\nif __name__ == '__main__':\
\n    try:\
\n        if len(sys.argv) < 2:\
\n            raise ValueError('config file not present')\
\n        config = sys.argv[1]\
\n        if Path(config).is_file() and Path(config).suffix == '.json':\
\n            with open(config, 'r') as f:\
\n                config = json.load(f)\
\n        else:\
\n            config = json.loads(config)\
\n        output = check_drift(config)\
\n        status = {'Status':'Success','Message':output}\
\n        print('input_drift:'+json.dumps(status))\
\n    except Exception as e:\
\n        status = {'Status':'Failure','Message':str(e)}\
\n        print('input_drift:'+json.dumps(status))"
        return text
    def addStatement(self, statement, indent=1):
        # Append one raw statement line at the given indentation depth.
        self.codeText += '\n' + self.tab * indent + statement
    def generateCode(self):
        # Full script body: generated inputdrift class followed by the driver.
        self.codeText += self.addInputDriftClass()
        self.codeText += self.addSuffixCode()
    def getCode(self):
        """Return the accumulated generated source."""
        return self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class drift():
    """Code generator for the monitoring stage of a timeseries pipeline.

    Emits a ``monitoring()`` function that decides whether retraining is
    required (based on how many new records arrived since the last
    registered run), a ``validateConfig`` helper and a ``__main__`` driver.
    Generated main code accumulates in ``codeText``; helper functions in
    ``function_code``.
    """
    def __init__(self, indent=0, tab_size=4):
        # indent is accepted for interface symmetry with sibling generators.
        self.tab = " " * tab_size
        self.codeText = ""
        self.function_code = ""
        self.input_files = {}
        self.output_files = {}
        self.addInputFiles({'log' : 'aion.log', 'metaData' : 'modelMetaData.json'})
    def addInputFiles(self, files):
        """Merge *files* (logical name -> filename dict) into the input map.

        Raises TypeError when *files* is not a dict.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        self.input_files.update(files)
    def addOutputFiles(self, files):
        """Merge *files* (logical name -> filename dict) into the output map.

        Raises TypeError when *files* is not a dict.
        Bug fix: entries were previously stored in ``input_files``, so
        ``getOutputFiles()`` never reflected registered outputs.
        """
        if not isinstance(files, dict):
            raise TypeError(f"Required dict type got {type(files)} type")
        self.output_files.update(files)
    def getInputFiles(self):
        """Render the input-file map as an ``IOFiles = ...`` source line."""
        if self.input_files:
            return 'IOFiles = ' + json.dumps(self.input_files, indent=4)
        return 'IOFiles = ' + '{ }'
    def getOutputFiles(self):
        """Render the output-file map as an ``output_file = ...`` source line."""
        if self.output_files:
            return 'output_file = ' + json.dumps(self.output_files, indent=4)
        return 'output_file = ' + '{ }'
    def getInputOutputFiles(self, indent=0):
        """Return the IOFiles declaration preceded by a newline, indenting
        each embedded line by *indent* tab stops when requested.

        NOTE(review): unlike the deploy generator, this one deliberately(?)
        emits only the input map — confirm before unifying.
        """
        text = '\n'
        text += self.getInputFiles()
        if indent:
            text = text.replace('\n', self.tab * indent + '\n')
        return text
    def __addValidateConfigCode(self):
        # Source of the generated validateConfig() helper; the generated
        # module is expected to provide `utils` and `Path`.
        text = "\n\
\ndef validateConfig():\
\n    config_file = Path(__file__).parent/'config.json'\
\n    if not Path(config_file).exists():\
\n        raise ValueError(f'Config file is missing: {config_file}')\
\n    config = utils.read_json(config_file)\
\n    return config\
"
        return text
    def addLocalFunctionsCode(self):
        """Append helper-function source (validateConfig) to function_code."""
        self.function_code += self.__addValidateConfigCode()
    def addPrefixCode(self, smaller_is_better=False, indent=1):
        """Append the generated ``monitoring()`` function.

        smaller_is_better and indent are accepted for interface symmetry
        with sibling generators and are not used here.
        """
        self.codeText += """
def monitoring(config, targetPath, log):
    retrain = False
    last_run_id = 0
    retrain_threshold = config.get('retrainThreshold', 100)
    meta_data_file = targetPath / IOFiles['metaData']
    if meta_data_file.exists():
        meta_data = utils.read_json(meta_data_file)
        if not meta_data.get('register', None):
            log.info('Last time Pipeline not executed properly')
            retrain = True
        else:
            last_run_id = meta_data['register']['runId']
            df = utils.read_data(config['dataLocation'])
            df_len = len(df)
            if not meta_data['monitoring'].get('endIndex', None):
                meta_data['monitoring']['endIndex'] = int(meta_data['load_data']['Status']['Records'])
                meta_data['monitoring']['endIndexTemp'] = meta_data['monitoring']['endIndex']
            if meta_data['register'].get('registered', False):
                meta_data['monitoring']['endIndex'] = meta_data['monitoring']['endIndexTemp']
                meta_data['register']['registered'] = False #ack registery
            if (meta_data['monitoring']['endIndex'] + retrain_threshold) < df_len:
                meta_data['monitoring']['endIndexTemp'] = df_len
                retrain = True
    else:
        log.info('Pipeline running first time')
        meta_data = {}
        meta_data['monitoring'] = {}
        retrain = True
    if retrain:
        meta_data['monitoring']['runId'] = last_run_id + 1
        meta_data['monitoring']['retrain'] = retrain
    utils.write_json(meta_data, targetPath/IOFiles['metaData'])
    status = {'Status':'Success','retrain': retrain, 'runId':meta_data['monitoring']['runId']}
    log.info(f'output: {status}')
    return json.dumps(status)
"""
    def getMainCodeModules(self):
        """List the modules the generated monitoring script must import."""
        modules = [{'module':'Path', 'mod_from':'pathlib'}
            ,{'module':'pandas','mod_as':'pd'}
            ,{'module':'json'}
            ]
        return modules
    def addMainCode(self, indent=1):
        """Append the generated ``__main__`` entry point.

        indent is accepted for interface symmetry and is not used here.
        """
        self.codeText += """
if __name__ == '__main__':
    config = validateConfig()
    targetPath = Path('aion') / config['targetPath']
    targetPath.mkdir(parents=True, exist_ok=True)
    log_file = targetPath / IOFiles['log']
    log = utils.logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
    try:
        print(monitoring(config, targetPath, log))
    except Exception as e:
        status = {'Status': 'Failure', 'Message': str(e)}
        print(json.dumps(status))
"""
    def addStatement(self, statement, indent=1):
        """Append one raw statement at the given indentation depth."""
        self.codeText += f"\n{self.tab * indent}{statement}"
    def getCode(self, indent=1):
        """Return helper-function source followed by the main generated code."""
        return self.function_code + '\n' + self.codeText
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import json
class deploy():
def __init__(self, tab_size=4):
    """Set up an empty code buffer and register the default input files.

    tab_size: number of spaces per indentation level in emitted code.
    """
    self.tab = tab_size * ' '
    self.codeText = ''
    self.input_files = {}
    self.output_files = {}
    defaults = {'metaData' : 'modelMetaData.json','log':'predict.log'}
    self.addInputFiles(defaults)
def addInputFiles(self, files):
    """Merge *files* (logical name -> filename dict) into the input map.

    Raises TypeError when *files* is not a dict.
    """
    if not isinstance(files, dict):
        raise TypeError(f"Required dict type got {type(files)} type")
    self.input_files.update(files)
def addOutputFiles(self, files):
    """Merge *files* (logical name -> filename dict) into the output map.

    Raises TypeError when *files* is not a dict.
    Bug fix: entries were previously stored in ``input_files``, so
    ``getOutputFiles()`` (and hence ``getInputOutputFiles()``) never
    reflected registered outputs.
    """
    if not isinstance(files, dict):
        raise TypeError(f"Required dict type got {type(files)} type")
    for k, v in files.items():
        self.output_files[k] = v
def getInputFiles(self):
    """Render the input-file map as an ``IOFiles = ...`` source line."""
    body = json.dumps(self.input_files, indent=4) if self.input_files else '{ }'
    return 'IOFiles = ' + body
def getOutputFiles(self):
    """Render the output-file map as an ``output_file = ...`` source line."""
    body = json.dumps(self.output_files, indent=4) if self.output_files else '{ }'
    return 'output_file = ' + body
def getInputOutputFiles(self, indent=0):
    """Return newline-led IOFiles and output_file declarations, indenting
    each embedded line by *indent* tab stops when requested."""
    combined = '\n'.join(['', self.getInputFiles(), self.getOutputFiles()])
    if indent:
        combined = combined.replace('\n', self.tab * indent + '\n')
    return combined
def addStatement(self, statement, indent=1):
    # Deliberate no-op: this generator emits a fixed prediction-class
    # template via addPredictionCode() and ignores ad-hoc statements.
    # NOTE(review): signature kept for interface parity with the other
    # generator classes in this file — confirm callers expect the no-op.
    pass
def getPredictionCodeModules(self):
    """List the modules the generated prediction script must import."""
    return [
        {'module': 'json'},
        {'module': 'joblib'},
        {'module': 'pandas', 'mod_as': 'pd'},
        {'module': 'numpy', 'mod_as': 'np'},
        {'module': 'Path', 'mod_from': 'pathlib'},
        {'module': 'json_normalize', 'mod_from': 'pandas'},
        {'module': 'load_model', 'mod_from': 'tensorflow.keras.models'},
    ]
def addPredictionCode(self):
    """Append the source of the generated prediction-service ``deploy`` class.

    The emitted class loads the registered timeseries model and normalizer
    from the run metadata and produces multi-step forecasts.  The string
    below is generated program text; it expects ``IOFiles``, ``utils``,
    ``logger``, ``joblib``, ``json``, ``pd``, ``np``, ``Path``,
    ``json_normalize`` and ``load_model`` to exist in the generated module
    — TODO confirm against getPredictionCodeModules().
    """
    self.codeText += """
class deploy():
    def __init__(self, base_config, log=None):
        self.targetPath = (Path('aion') / base_config['targetPath']).resolve()
        if log:
            self.logger = log
        else:
            log_file = self.targetPath / IOFiles['log']
            self.logger = logger(log_file, mode='a', logger_name=Path(__file__).parent.stem)
        try:
            self.initialize(base_config)
        except Exception as e:
            self.logger.error(e, exc_info=True)
    def initialize(self, base_config):
        targetPath = Path('aion') / base_config['targetPath']
        meta_data_file = targetPath / IOFiles['metaData']
        if meta_data_file.exists():
            meta_data = utils.read_json(meta_data_file)
        self.dateTimeFeature = meta_data['training']['dateTimeFeature']
        self.targetFeature = meta_data['training']['target_feature']
        normalization_file = meta_data['transformation']['Status']['Normalization_file']
        self.normalizer = joblib.load(normalization_file)
        self.lag_order = base_config['lag_order']
        self.noofforecasts = base_config['noofforecasts']
        run_id = str(meta_data['register']['runId'])
        model_path = str(targetPath/'runs'/str(meta_data['register']['runId'])/meta_data['register']['model']/'model')
        self.model = load_model(model_path)
        self.model_name = meta_data['register']['model']
    def predict(self, data=None):
        try:
            return self.__predict(data)
        except Exception as e:
            if self.logger:
                self.logger.error(e, exc_info=True)
            raise ValueError(json.dumps({'Status': 'Failure', 'Message': str(e)}))
    def __predict(self, data=None):
        jsonData = json.loads(data)
        dataFrame = json_normalize(jsonData)
        xtrain = dataFrame
        if len(dataFrame) == 0:
            raise ValueError('No data record found')
        df_l = len(dataFrame)
        pred_threshold = 0.1
        max_pred_by_user = round((df_l) * pred_threshold)
        # prediction for 24 steps or next 24 hours
        if self.noofforecasts == -1:
            self.noofforecasts = max_pred_by_user
        no_of_prediction = self.noofforecasts
        if (str(no_of_prediction) > str(max_pred_by_user)):
            no_of_prediction = max_pred_by_user
            noofforecasts = no_of_prediction
        # self.sfeatures.remove(self.datetimeFeature)
        features = self.targetFeature
        if len(features) == 1:
            xt = xtrain[features].values
        else:
            xt = xtrain[features].values
        xt = xt.astype('float32')
        xt = self.normalizer.transform(xt)
        pred_data = xt
        y_future = []
        self.lag_order = int(self.lag_order)
        for i in range(int(no_of_prediction)):
            pdata = pred_data[-self.lag_order:]
            if len(features) == 1:
                pdata = pdata.reshape((1, self.lag_order))
            else:
                pdata = pdata.reshape((1, self.lag_order, len(features)))
            if (len(features) > 1):
                pred = self.model.predict(pdata)
                predout = self.normalizer.inverse_transform(pred)
                y_future.append(predout)
                pred_data = np.append(pred_data, pred, axis=0)
            else:
                pred = self.model.predict(pdata)
                predout = self.normalizer.inverse_transform(pred)
                y_future.append(predout.flatten()[-1])
                pred_data = np.append(pred_data, pred)
        pred = pd.DataFrame(index=range(0, len(y_future)), columns=self.targetFeature)
        for i in range(0, len(y_future)):
            pred.iloc[i] = y_future[i]
        predictions = pred
        forecast_output = predictions.to_json(orient='records')
        return forecast_output
"""
def getCode(self):
    """Return all generated source accumulated so far."""
    generated = self.codeText
    return generated
def getServiceCode(self):
return """
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
import os
from os.path import expanduser
import platform
import threading
import subprocess
import argparse
import re
import cgi
import json
import shutil
import logging
import sys
import time
import seaborn as sns
from pathlib import Path
from predict import deploy
import pandas as pd
import scipy.stats as st
import numpy as np
import warnings
from utility import *
warnings.filterwarnings("ignore")
config_input = None
IOFiles = {
"inputData": "rawData.dat",
"metaData": "modelMetaData.json",
"production": "production.json",
"log": "aion.log",
"monitoring":"monitoring.json",
"prodData": "prodData",
"prodDataGT":"prodDataGT"
}
def DistributionFinder(data):
try:
distributionName = ""
sse = 0.0
KStestStatic = 0.0
dataType = ""
if (data.dtype == "float64" or data.dtype == "float32"):
dataType = "Continuous"
elif (data.dtype == "int"):
dataType = "Discrete"
elif (data.dtype == "int64"):
dataType = "Discrete"
if (dataType == "Discrete"):
distributions = [st.bernoulli, st.binom, st.geom, st.nbinom, st.poisson]
index, counts = np.unique(data.astype(int), return_counts=True)
if (len(index) >= 2):
best_sse = np.inf
y1 = []
total = sum(counts)
mean = float(sum(index * counts)) / total
variance = float((sum(index ** 2 * counts) - total * mean ** 2)) / (total - 1)
dispersion = mean / float(variance)
theta = 1 / float(dispersion)
r = mean * (float(theta) / 1 - theta)
for j in counts:
y1.append(float(j) / total)
pmf1 = st.bernoulli.pmf(index, mean)
pmf2 = st.binom.pmf(index, len(index), p=mean / len(index))
pmf3 = st.geom.pmf(index, 1 / float(1 + mean))
pmf4 = st.nbinom.pmf(index, mean, r)
pmf5 = st.poisson.pmf(index, mean)
sse1 = np.sum(np.power(y1 - pmf1, 2.0))
sse2 = np.sum(np.power(y1 - pmf2, 2.0))
sse3 = np.sum(np.power(y1 - pmf3, 2.0))
sse4 = np.sum(np.power(y1 - pmf4, 2.0))
sse5 = np.sum(np.power(y1 - pmf5, 2.0))
sselist = [sse1, sse2, sse3, sse4, sse5]
best_distribution = 'NA'
for i in range(0, len(sselist)):
if best_sse > sselist[i] > 0:
best_distribution = distributions[i].name
best_sse = sselist[i]
elif (len(index) == 1):
best_distribution = "Constant Data-No Distribution"
best_sse = 0.0
distributionName = best_distribution
sse = best_sse
elif (dataType == "Continuous"):
distributions = [st.uniform, st.expon, st.weibull_max, st.weibull_min, st.chi, st.norm, st.lognorm, st.t,
st.gamma, st.beta]
best_distribution = st.norm.name
best_sse = np.inf
datamin = data.min()
datamax = data.max()
nrange = datamax - datamin
y, x = np.histogram(data.astype(float), bins='auto', density=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
for distribution in distributions:
params = distribution.fit(data.astype(float))
arg = params[:-2]
loc = params[-2]
scale = params[-1]
pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
sse = np.sum(np.power(y - pdf, 2.0))
if (best_sse > sse > 0):
best_distribution = distribution.name
best_sse = sse
distributionName = best_distribution
sse = best_sse
except:
response = str(sys.exc_info()[0])
message = 'Job has Failed' + response
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
print(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb. | ||
tb_lineno))
print(message)
return distributionName, sse
def getDriftDistribution(feature, dataframe, newdataframe=pd.DataFrame()):
import matplotlib.pyplot as plt
import math
import io, base64, urllib
np.seterr(divide='ignore', invalid='ignore')
try:
plt.clf()
except:
pass
plt.rcParams.update({'figure.max_open_warning': 0})
sns.set(color_codes=True)
pandasNumericDtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
if len(feature) > 4:
numneroffeatures = len(feature)
plt.figure(figsize=(10, numneroffeatures*2))
else:
plt.figure(figsize=(10,5))
for i in enumerate(feature):
dataType = dataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
dataframe[i[1]] = pd.Categorical(dataframe[i[1]])
dataframe[i[1]] = dataframe[i[1]].cat.codes
dataframe[i[1]] = dataframe[i[1]].astype(int)
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mode()[0])
else:
dataframe[i[1]] = dataframe[i[1]].fillna(dataframe[i[1]].mean())
plt.subplots_adjust(hspace=0.5, wspace=0.7, top=1)
plt.subplot(math.ceil((len(feature) / 2)), 2, i[0] + 1)
distname, sse = DistributionFinder(dataframe[i[1]])
print(distname)
ax = sns.distplot(dataframe[i[1]], label=distname)
ax.legend(loc='best')
if newdataframe.empty == False:
dataType = newdataframe[i[1]].dtypes
if dataType not in pandasNumericDtypes:
newdataframe[i[1]] = pd.Categorical(newdataframe[i[1]])
newdataframe[i[1]] = newdataframe[i[1]].cat.codes
newdataframe[i[1]] = newdataframe[i[1]].astype(int)
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mode()[0])
else:
newdataframe[i[1]] = newdataframe[i[1]].fillna(newdataframe[i[1]].mean())
distname, sse = DistributionFinder(newdataframe[i[1]])
print(distname)
ax = sns.distplot(newdataframe[i[1]],label=distname)
ax.legend(loc='best')
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
string = base64.b64encode(buf.read())
uri = urllib.parse.quote(string)
return uri
def read_json(file_path):
data = None
with open(file_path,'r') as f:
data = json.load(f)
return data
class HTTPRequestHandler(BaseHTTPRequestHandler):
def do_POST(self):
print('PYTHON ######## REQUEST ####### STARTED')
if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
if ctype == 'application/json':
length = int(self.headers.get('content-length'))
data = self.rfile.read(length)
usecase = self.path.split('/')[-2]
if usecase.lower() == config_input['targetPath'].lower():
operation = self.path.split('/')[-1]
data = json.loads(data)
dataStr = json.dumps(data)
if operation.lower() == 'predict':
output=deployobj.predict(dataStr)
resp = output
elif operation.lower() == 'groundtruth':
gtObj = groundtruth(config_input)
output = gtObj.actual(dataStr)
resp = output
elif operation.lower() == 'delete':
targetPath = Path('aion')/config_input['targetPath']
for file in data:
x = targetPath/file
if x.exists():
os.remove(x)
resp = json.dumps({'Status':'Success'})
else:
outputStr = json.dumps({'Status':'Error','Msg':'Operation not supported'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'Error','Msg':'Wrong URL'})
resp = outputStr
else:
outputStr = json.dumps({'Status':'ERROR','Msg':'Content-Type Not Present'})
resp = outputStr
resp=resp+'\\\\n'
resp=resp.encode()
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(resp)
else:
print('python ==> else1')
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
print('PYTHON ######## REQUEST ####### ENDED')
return
def do_GET(self):
print('PYTHON ######## REQUEST ####### STARTED')
if None != re.search('/AION/', self.path) or None != re.search('/aion/', self.path):
usecase = self.path.split('/')[-2]
self.send_response(200)
self.targetPath = Path('aion')/config_input['targetPath']
meta_data_file = self.targetPath/IOFiles['metaData']
if meta_data_file.exists():
meta_data = read_json(meta_data_file)
else:
raise ValueError(f'Configuration file not found: {meta_data_file}')
production_file = self.targetPath/IOFiles['production']
if production_file.exists():
production_data = read_json(production_file)
else:
raise ValueError(f'Production Details not found: {production_file}')
operation = self.path.split('/')[-1]
if (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'metrices'):
self.send_header('Content-Type', 'text/html')
self.end_headers()
ModelString = production_data['Model']
ModelPerformance = ModelString+'_performance.json'
performance_file = self.targetPath/ModelPerformance
if performance_file.exists():
performance_data = read_json(performance_file)
else:
raise ValueError(f'Production Details not found: {performance_data}')
Scoring_Creteria = performance_data['scoring_criteria']
train_score = round(performance_data['metrices']['train_score'],2)
test_score = round(performance_data['metrices']['test_score'],2)
current_score = 'NA'
monitoring = read_json(self.targetPath/IOFiles['monitoring'])
reader = dataReader(reader_type=monitoring['prod_db_type'],target_path=self.targetPath, config=monitoring['db_config'])
inputDatafile = self.targetPath/IOFiles['inputData']
NoOfPrediction = 0
NoOfGroundTruth = 0
inputdistribution = ''
if reader.file_exists(IOFiles['prodData']):
dfPredict = reader.read(IOFiles['prodData'])
dfinput = pd.read_csv(inputDatafile)
features = meta_data['training']['features']
inputdistribution = getDriftDistribution(features,dfinput,dfPredict)
NoOfPrediction = len(dfPredict)
if reader.file_exists(IOFiles['prodDataGT']):
dfGroundTruth = reader.read(IOFiles['prodDataGT'])
NoOfGroundTruth = len(dfGroundTruth)
common_col = [k for k in dfPredict.columns.tolist() if k in dfGroundTruth.columns.tolist()]
proddataDF = pd.merge(dfPredict, dfGroundTruth, on =common_col,how = 'inner')
if Scoring_Creteria.lower() == 'accuracy':
from sklearn.metrics import accuracy_score
current_score = accuracy_score(proddataDF[config_input['target_feature']], proddataDF['prediction'])
current_score = round((current_score*100),2)
elif Scoring_Creteria.lower() == 'recall':
from sklearn.metrics import accuracy_score
current_score = recall_score(proddataDF[config_input['target_feature']], proddataDF['prediction'],average='macro')
current_score = round((current_score*100),2)
msg = \\"""<html>
<head>
<title>Performance Details</title>
</head>
<style>
table, th, td {border}
</style>
<body>
<h2><b>Deployed Model:</b>{ModelString}</h2>
<br/>
<table style="width:50%">
<tr>
<td>No of Prediction</td>
<td>{NoOfPrediction}</td>
</tr>
<tr>
<td>No of GroundTruth</td>
<td>{NoOfGroundTruth}</td>
</tr>
</table>
<br/>
<table style="width:100%">
<tr>
<th>Score Type</th>
<th>Train Score</th>
<th>Test Score</th>
<th>Production Score</th>
</tr>
<tr>
<td>{Scoring_Creteria}</td>
<td>{train_score}</td>
<td>{test_score}</td>
<td>{current_score}</td>
</tr>
</table>
<br/>
<br/>
<img src="data:image/png;base64,{newDataDrift}" alt="" >
</body>
</html>
\\""".format(border='{border: 1px solid black;}',ModelString=ModelString,Scoring_Creteria=Scoring_Creteria,NoOfPrediction=NoOfPrediction,NoOfGroundTruth=NoOfGroundTruth,train_score=train_score,test_score=test_score,current_score=current_score,newDataDrift=inputdistribution)
elif (usecase.lower() == config_input['targetPath'].lower()) and (operation.lower() == 'logs'):
self.send_header('Content-Type', 'text/plain')
self.end_headers()
log_file = self.targetPath/IOFiles['log']
if log_file.exists():
with open(log_file) as f:
msg = f.read()
f.close()
else:
raise ValueError(f'Log Details not found: {log_file}')
else:
self.send_header('Content-Type', 'application/json')
self.end_headers()
features = meta_data['load_data']['selected_features']
bodydes='['
for x in features:
if bodydes != '[':
bodydes = bodydes+','
bodydes = bodydes+'{"'+x+'":"value"}'
bodydes+=']'
urltext = '/AION/'+config_input['targetPath']+'/predict'
urltextgth='/AION/'+config_input['targetPath']+'/groundtruth'
urltextproduction='/AION/'+config_input['targetPath']+'/metrices'
msg=\\"""
Version:{modelversion}
RunNo: {runNo}
URL for Prediction
==================
URL:{url}
RequestType: POST
Content-Type=application/json
Body: {displaymsg}
Output: prediction,probability(if Applicable),remarks corresponding to each row.
URL for GroundTruth
===================
URL:{urltextgth}
RequestType: POST
Content-Type=application/json
Note: Make Sure that one feature (ID) should be unique in both predict and groundtruth. Otherwise outputdrift will not work
URL for Model In Production Analysis
====================================
URL:{urltextproduction}
RequestType: GET
Content-Type=application/json
\\""".format(modelversion=config_input['modelVersion'],runNo=config_input['deployedRunNo'],url=urltext,urltextgth=urltextgth,urltextproduction=urltextproduction,displaymsg=bodydes)
self.wfile.write(msg.encode())
else:
self.send_response(403)
self.send_header('Content-Type', 'application/json')
self.end_headers()
return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
allow_reuse_address = True
def shutdown(self):
self.socket.close()
HTTPServer.shutdown(self)
class file_status():
def __init__(self, reload_function, params, file, logger):
self.files_status = {}
self.initializeFileStatus(file)
self. | ||
reload_function = reload_function
self.params = params
self.logger = logger
def initializeFileStatus(self, file):
self.files_status = {'path': file, 'time':file.stat().st_mtime}
def is_file_changed(self):
if self.files_status['path'].stat().st_mtime > self.files_status['time']:
self.files_status['time'] = self.files_status['path'].stat().st_mtime
return True
return False
def run(self):
global config_input
while( True):
time.sleep(30)
if self.is_file_changed():
production_details = targetPath/IOFiles['production']
if not production_details.exists():
raise ValueError(f'Model in production details does not exist')
productionmodel = read_json(production_details)
config_file = Path(__file__).parent/'config.json'
if not Path(config_file).exists():
raise ValueError(f'Config file is missing: {config_file}')
config_input = read_json(config_file)
config_input['deployedModel'] = productionmodel['Model']
config_input['deployedRunNo'] = productionmodel['runNo']
self.logger.info('Model changed Reloading.....')
self.logger.info(f'Model: {config_input["deployedModel"]}')
self.logger.info(f'Version: {str(config_input["modelVersion"])}')
self.logger.info(f'runNo: {str(config_input["deployedRunNo"])}')
self.reload_function(config_input)
class SimpleHttpServer():
    """Threaded HTTP front-end plus a watcher thread that hot-reloads the model."""
    def __init__(self, ip, port, model_file_path,reload_function,params, logger):
        self.server = ThreadedHTTPServer((ip,port), HTTPRequestHandler)
        self.status_checker = file_status( reload_function, params, model_file_path, logger)
    def start(self):
        # Serve requests on a daemon thread; watch the model file on a second thread.
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
        self.status_thread = threading.Thread(target=self.status_checker.run)
        self.status_thread.start()
    def waitForThread(self):
        # Block until both worker threads finish.
        for worker in (self.server_thread, self.status_thread):
            worker.join()
    def stop(self):
        self.server.shutdown()
        self.waitForThread()
if __name__=='__main__':
    # Optional CLI overrides for the binding read from config.json.
    parser = argparse.ArgumentParser(description='HTTP Server')
    parser.add_argument('-ip','--ipAddress', help='HTTP Server IP')
    parser.add_argument('-pn','--portNo', type=int, help='Listening port for HTTP Server')
    args = parser.parse_args()
    config_file = Path(__file__).parent/'config.json'
    if not Path(config_file).exists():
        raise ValueError(f'Config file is missing: {config_file}')
    config = read_json(config_file)
    if args.ipAddress:
        config['ipAddress'] = args.ipAddress
    if args.portNo:
        config['portNo'] = args.portNo
    targetPath = Path('aion')/config['targetPath']
    if not targetPath.exists():
        raise ValueError(f'targetPath does not exist')
    # Pick up the model currently marked as "in production".
    production_details = targetPath/IOFiles['production']
    if not production_details.exists():
        raise ValueError(f'Model in production details does not exist')
    productionmodel = read_json(production_details)
    config['deployedModel'] = productionmodel['Model']
    config['deployedRunNo'] = productionmodel['runNo']
    #server = SimpleHttpServer(config['ipAddress'],int(config['portNo']))
    config_input = config
    logging.basicConfig(filename= Path(targetPath)/IOFiles['log'], filemode='a', format='%(asctime)s %(name)s- %(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger(Path(__file__).parent.name)
    # deploy() wraps the model; its initialize method is the reload callback.
    deployobj = deploy(config_input, logger)
    server = SimpleHttpServer(config['ipAddress'],int(config['portNo']),targetPath/IOFiles['production'],deployobj.initialize,config_input, logger)
    logger.info('HTTP Server Running...........')
    logger.info(f"IP Address: {config['ipAddress']}")
    logger.info(f"Port No.: {config['portNo']}")
    # Console banner listing the exposed endpoints.
    print('HTTP Server Running...........')
    print('For Prediction')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/predict')
    print('\\\\nFor GroundTruth')
    print('================')
    print('Request Type: Post')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/groundtruth')
    print('\\\\nFor Help')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/help')
    print('\\\\nFor Model In Production Analysis')
    print('================')
    print('Request Type: Get')
    print('Content-Type: application/json')
    print('URL: /AION/'+config['targetPath']+'/metrices')
    server.start()
    server.waitForThread()
"""<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
# Artefact category -> generated pickle file name for the transformer stage.
output_file_map = {
    'text' : {'text' : 'text_profiler.pkl'},
    'targetEncoder' : {'targetEncoder' : 'targetEncoder.pkl'},
    'featureEncoder' : {'featureEncoder' : 'inputEncoder.pkl'},
    'normalizer' : {'normalizer' : 'normalizer.pkl'}
    }
def add_common_imports(importer):
    """Register the baseline modules every generated script imports."""
    baseline = (
        ('json', None, None),
        ('Path', 'pathlib', None),
        ('pandas', None, 'pd'),
        ('argparse', None, None),
        ('platform', None, None),
    )
    for name, origin, alias in baseline:
        importer.addModule(name, mod_from=origin, mod_as=alias)
def get_transformer_params(config):
    """Return only the settings the transformer stage needs, plus targetPath."""
    wanted = ("modelVersion", "problem_type", "target_feature", "train_features",
              "text_features", "profiler", "test_ratio", "dateTimeFeature")  # BugID:13217
    params = {k: v for k, v in config.items() if k in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_transformer(config):
    """Generate the DataTransformation MLaC package (code, config, docker file)."""
    transformer = profiler()
    importer = importModule()
    function = global_function()
    importModules(importer, transformer.getPrefixModules())
    importer.addModule('warnings')
    transformer.addPrefixCode()
    importModules(importer, transformer.getMainCodeModules())
    transformer.addMainCode()
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'DataTransformation'
    deploy_path.mkdir(parents=True, exist_ok=True)
    generated_files = []
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('transformer')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # Assemble the generated aionCode.py: header, imports, IO map, helpers, body.
    code = file_header(usecase)
    code += "\\nimport os\\nos.path.abspath(os.path.join(__file__, os.pardir))\\n" #chdir to import from current dir
    code += importer.getCode()
    code += '\\nwarnings.filterwarnings("ignore")\\n'
    code += transformer.getInputOutputFiles()
    code += function.getCode()
    transformer.addLocalFunctionsCode()
    code += transformer.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_transformer_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
    create_docker_file('transformer', deploy_path,config['modelName'], generated_files)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
# Artefact category -> generated pickle file name for the selector stage.
output_file_map = {
    'feature_reducer' : {'feature_reducer' : 'feature_reducer.pkl'}
    }
def get_selector_params(config):
    """Return only the settings the feature-selection stage needs, plus targetPath."""
    wanted = ("modelVersion", "problem_type", "target_feature",
              "train_features", "cat_features", "n_components")
    params = {k: v for k, v in config.items() if k in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_selector(config):
    """Generate the FeatureEngineering MLaC package (code, config, docker file)."""
    select = selector()
    importer = importModule()
    function = global_function()
    importModules(importer,select.getPrefixModules())
    importModules(importer, select.getSuffixModules())
    importModules(importer, select.getMainCodeModules())
    select.addPrefixCode()
    select.addSuffixCode()
    select.addMainCode()
    generated_files = []
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'FeatureEngineering'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('selector')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # Assemble the generated aionCode.py from header, imports, IO map and body.
    code = file_header(usecase)
    code += importer.getCode()
    code += select.getInputOutputFiles()
    code += function.getCode()
    select.addLocalFunctionsCode()
    code += select.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_selector_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
    create_docker_file('selector', deploy_path,config['modelName'], generated_files)
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from mlac.timeseries.app import utility as utils
def get_model_name(algo, method):
    """Derive the registry model name from the algorithm and selection method."""
    suffixes = {'modelBased': 'MLBased', 'statisticalBased': 'StatisticsBased'}
    if method in suffixes:
        return algo + '_' + suffixes[method]
    return algo
def get_training_params(config, algo):
    """Slice the training-stage settings out of config for a single algorithm."""
    wanted = ("modelVersion", "problem_type", "target_feature", "train_features",
              "scoring_criteria", "test_ratio", "optimization_param", "dateTimeFeature")  # BugID:13217
    params = {k: v for k, v in config.items() if k in wanted}
    # Keep only the requested algorithm's hyperparameters.
    params['algorithms'] = {algo: config['algorithms'][algo]}
    params['targetPath'] = config['modelName']
    return params
def update_score_comparer(scorer):
    """Record whether the chosen scoring metric is minimised or maximised.

    Bug fix: the condition was previously split into a broken statement
    (`if scorer` / `.lower() in ...`); rejoined into one expression.
    """
    # Metrics where a smaller value means a better model.
    smaller_is_better_scorer = ['neg_mean_squared_error','mse','neg_root_mean_squared_error','rmse','neg_mean_absolute_error','mae']
    if scorer.lower() in smaller_is_better_scorer:
        utils.update_variable('smaller_is_better', True)
    else:
        utils.update_variable('smaller_is_better', False)
def run_trainer(config):
    """Generate the ModelTraining_<algo> MLaC package for the configured algorithm."""
    trainer = learner()
    importer = importModule()
    function = global_function()
    utils.importModules(importer,trainer.getPrefixModules())
    update_score_comparer(config['scoring_criteria'])
    # Exactly one algorithm is expected per training package.
    model_name = list(config['algorithms'].keys())[0]
    if model_name == 'MLP':
        utils.importModules(importer,trainer.getMlpCodeModules())
        trainer.addMlpCode()
    elif model_name == 'LSTM':
        utils.importModules(importer,trainer.getLstmCodeModules())
        trainer.addLstmCode()
    trainer.addMainCode()
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/('ModelTraining'+'_' + model_name)
    deploy_path.mkdir(parents=True, exist_ok=True)
    generated_files = []
    # create the utility file
    importer.addLocalModule('utility', mod_as='utils')
    utility_obj = utility_function('train')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(utils.file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(utils.file_header(usecase))
    generated_files.append("__init__.py")
    importer.addModule("warnings")
    # Assemble the generated aionCode.py body.
    code = importer.getCode()
    code += 'warnings.filterwarnings("ignore")\\n'
    code += f"\\nmodel_name = '{model_name}'\\n"
    # Remember this model so the register stage can compare all trained models.
    utils.append_variable('models_name',model_name)
    out_files = {'log':f'{model_name}_aion.log','model':f'{model_name}_model.pkl','metrics':'metrics.json','metaDataOutput':f'{model_name}_modelMetaData.json','production':'production.json'}
    trainer.addOutputFiles(out_files)
    code += trainer.getInputOutputFiles()
    code += function.getCode()
    trainer.addLocalFunctionsCode()
    code += trainer.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    with open (deploy_path/"config.json", "w") as f:
        json.dump(get_training_params(config, model_name), f, indent=4)
    generated_files.append("config.json")
    utils.create_docker_file('train', deploy_path,config['modelName'], generated_files)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
import platform
from mlac.timeseries.core import *
from .utility import *
# Baseline module specs injected into every generated data-ingestion script.
imported_modules = [
    {'module': 'json', 'mod_from': None, 'mod_as': None},
    {'module': 'Path', 'mod_from': 'pathlib', 'mod_as': None},
    {'module': 'pandas', 'mod_from': None, 'mod_as': 'pd'},
    {'module': 'argparse', 'mod_from': None, 'mod_as': None},
    {'module': 'platform', 'mod_from': None, 'mod_as': None }
    ]
def get_load_data_params(config):
    """Return only the settings the data-ingestion stage needs, plus targetPath."""
    wanted = ("modelVersion", "problem_type", "target_feature",
              "selected_features", "dateTimeFeature", "dataLocation")
    params = {k: v for k, v in config.items() if k in wanted}
    params['targetPath'] = config['modelName']
    return params
def run_loader(config):
    """Generate the DataIngestion MLaC package (reader, code, config, docker file)."""
    generated_files = []
    importer = importModule()
    loader = tabularDataReader()
    importModules(importer, imported_modules)
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'DataIngestion'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('*', mod_from='utility')
    utility_obj = utility_function('load_data')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create the production data reader file
    importer.addLocalModule('dataReader', mod_from='data_reader')
    # s3 support is generated only when configured for this use case.
    readers = ['sqlite','influx']
    if 's3' in config.keys():
        readers.append('s3')
    reader_obj = data_reader(readers)
    with open(deploy_path/"data_reader.py", 'w') as f:
        f.write(file_header(usecase) + reader_obj.get_code())
    generated_files.append("data_reader.py")
    # create empty init file to make a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    # Assemble the generated aionCode.py body.
    code = file_header(usecase)
    code += importer.getCode()
    code += loader.getInputOutputFiles()
    loader.addLocalFunctionsCode()
    loader.addLoadDataCode()
    loader.addMainCode()
    code += loader.getCode()
    with open(deploy_path/"aionCode.py", "w") as f:
        f.write(code)
    generated_files.append("aionCode.py")
    with open(deploy_path/"requirements.txt", "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer(), reader_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    config_file = deploy_path/"config.json"
    config_data = get_load_data_params(config)
    with open (config_file, "w") as f:
        json.dump(config_data, f, indent=4)
    generated_files.append("config.json")
    create_docker_file('load_data', deploy_path,config['modelName'],generated_files)
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
from pathlib import Path
import json
from mlac.timeseries.core import *
from .utility import *
def get_register_params(config, models):
    """Return the registry-stage settings plus the list of trained models."""
    wanted = ("modelVersion", "problem_type")
    params = {k: v for k, v in config.items() if k in wanted}
    params['targetPath'] = config['modelName']
    params['models'] = models
    return params
def run_register(config):
    """Generate the ModelRegistry MLaC package (code, config, docker file)."""
    importer = importModule()
    registration = register(importer)
    # Models and scoring direction were recorded by the trainer stage.
    models = get_variable('models_name')
    smaller_is_better = get_variable('smaller_is_better', False)
    registration.addLocalFunctionsCode(models)
    registration.addPrefixCode(smaller_is_better)
    registration.addMainCode(models)
    importModules(importer, registration.getMainCodeModules())
    importer.addModule('warnings')
    generated_files = []
    usecase = config['modelName']+'_'+config['modelVersion']
    deploy_path = Path(config["deploy_path"])/'MLaC'/'ModelRegistry'
    deploy_path.mkdir(parents=True, exist_ok=True)
    # create the utility file
    importer.addLocalModule('utility', mod_as='utils')
    utility_obj = utility_function('register')
    with open(deploy_path/"utility.py", 'w') as f:
        f.write(file_header(usecase) + utility_obj.get_code())
    generated_files.append("utility.py")
    # create empty init file required for creating a package
    with open(deploy_path/"__init__.py", 'w') as f:
        f.write(file_header(usecase))
    generated_files.append("__init__.py")
    code = importer.getCode()
    code += '\\nwarnings.filterwarnings("ignore")\\n'
    code += registration.getInputOutputFiles()
    code += registration.getCode()
    # create serving file
    with open(deploy_path/"aionCode.py", 'w') as f:
        f.write(file_header(usecase) + code)
    generated_files.append("aionCode.py")
    # create requirements file
    req_file = deploy_path/"requirements.txt"
    with open(req_file, "w") as f:
        req=importer.getBaseModule(extra_importers=[utility_obj.get_importer()])
        f.write(req)
    generated_files.append("requirements.txt")
    # create config file
    with open (deploy_path/"config.json", "w") as f:
        json.dump(get_register_params(config, models), f, indent=4)
    generated_files.append("config.json")
    # create docker file
    create_docker_file('register', deploy_path,config['modelName'], generated_files)
<s> """
/**
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* © Copyright HCL Technologies Ltd. 2021, 2022
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*/
"""
import datetime
from pathlib import Path
# Shared registry used to pass values between code-generation stages.
variables = {}
def update_variable(name, value):
    """Store *value* under *name* in the shared registry."""
    variables[name] = value
def get_variable(name, default=None):
    """Fetch a stored value, falling back to *default* when absent."""
    return variables.get(name, default)
def append_variable(name, value):
    """Accumulate values under *name*, promoting scalars to lists as needed."""
    existing = get_variable(name)
    if not existing:
        update_variable(name, [value])
    elif not isinstance(existing, list):
        update_variable(name, [existing, value])
    else:
        existing.append(value)
        update_variable(name, existing)
def addDropFeature(feature, features_list, coder, indent=1):
    """Emit code that removes *feature* from *features_list* when present."""
    guard = f'if {feature} in {features_list}:'
    coder.addStatement(guard, indent=indent)
    coder.addStatement(f'{features_list}.remove({feature})', indent=indent+1)
def importModules(importer, modules_list):
    """Register each module spec ({'module', 'mod_from', 'mod_as'}) on *importer*."""
    for spec in modules_list:
        importer.addModule(spec['module'],
                           mod_from=spec.get('mod_from', None),
                           mod_as=spec.get('mod_as', None))
def file_header(use_case, module_name=None):
    """Build the shebang + auto-generated banner placed at the top of generated files."""
    stamp = datetime.datetime.now().isoformat(timespec='seconds', sep=' ')
    shebang = "#!/usr/bin/env python\\n# -*- coding: utf-8 -*-\\n"
    banner = f"'''\\nThis file is automatically generated by AION for {use_case} usecase.\\nFile generation time: {stamp}\\n'''"
    return shebang + banner
def get_module_mapping(module):
    """Return the import spec {'module', 'mod_from'} for a known estimator name, else None."""
    origins = {
        'LogisticRegression': 'sklearn.linear_model',
        'GaussianNB': 'sklearn.naive_bayes',
        'DecisionTreeClassifier': 'sklearn.tree',
        'SVC': 'sklearn.svm',
        'KNeighborsClassifier': 'sklearn.neighbors',
        'GradientBoostingClassifier': 'sklearn.ensemble',
        'RandomForestClassifier': 'sklearn.ensemble',
        'XGBClassifier': 'xgboost',
        'LGBMClassifier': 'lightgbm',
        'CatBoostClassifier': 'catboost',
        'LinearRegression': 'sklearn.linear_model',
        'Lasso': 'sklearn.linear_model',
        'Ridge': 'sklearn.linear_model',
        'DecisionTreeRegressor': 'sklearn.tree',
        'RandomForestRegressor': 'sklearn.ensemble',
        'XGBRegressor': 'xgboost',
        'LGBMRegressor': 'lightgbm',
        'CatBoostRegressor': 'catboost',
    }
    origin = origins.get(module)
    if origin is None:
        return None
    return {'module': module, 'mod_from': origin}
def create_docker_file(name, path,usecasename,files=[],text_feature=False):
text = ""
if name == 'load_data':
text='FROM python:3.8-slim-buster'
text+='\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
for file in files:
text+=f'\\nCOPY {file} {file}'
text+='\\n'
text+='RUN pip install --no-cache-dir -r requirements.txt'
elif name == 'transformer':
text='FROM python:3.8-slim-buster\\n'
text+='LABEL "usecase"="'+str(usecasename)+'"'
text | ||
+='\\n'
text+='LABEL "usecase_test"="'+str(usecasename)+'_test'+'"'
text+='\\n'
for file in files:
text+=f'\\nCOPY {file} {file}'
if text_feature:
text+='COPY AIX-0.1-py3-none-any.whl AIX-0.1-py3 | ||
_names = {}
encoders = {}
dataFrame = dataFrame.replace('Unknown', 'NA')
dataFrame = dataFrame.replace(np.nan, 'NA')
try:
# Label-Encoding
for feature in dataFrame.columns:
le = LabelEncoder()
le.fit(data_encoded[feature])
data_encoded[feature] = le.transform(data_encoded[feature])
categorical_names[feature] = le.classes_
encoders[feature] = le
privileged_class = np.where(categorical_names[protected_feature] == privileged_className)[0]
target_feature_count = len(data_encoded[target_feature].value_counts())
# Check if it's BinaryLabel
if target_feature_count == 2:
binaryLabelDataset = aif360.datasets.BinaryLabelDataset(
favorable_label='1',
unfavorable_label='0',
df=data_encoded,
label_names=[target_feature],
protected_attribute_names=[protected_feature])
data_orig = binaryLabelDataset
# Check if it's Non-BinaryLabel
if target_feature_count > 2:
data_orig = StandardDataset(data_encoded,
label_name=target_feature,
favorable_classes=[1],
protected_attribute_names=[protected_feature],
privileged_classes=[privileged_class])
if algorithm == 'DIR':
DIR = DisparateImpactRemover(repair_level=0.9)
data_transf_train = DIR.fit_transform(data_orig)
# log.info('Status:-|... DIR applied on input dataset')
else:
privileged_groups, unprivileged_groups = self.get_attributes(data_orig, selected_attr=[protected_feature])
RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
data_transf_train = RW.fit_transform(data_orig)
# log.info('Status:-|... Reweighing applied on input dataset')
transf_dataFrame = data_transf_train.convert_to_dataframe()[0]
data_decoded = transf_dataFrame.copy().astype('int')
for column in data_decoded.columns:
data_decoded[column] = encoders[column].inverse_transform(data_decoded[column])
debiased_dataFrame = data_decoded
except Exception as e:
print(e)
debiased_dataFrame = dataFrame
return debiased_dataFrame
<s><s> import warnings
import sys
warnings.simplefilter(action='ignore', category=FutureWarning)
import xgboost as xgb
import dask.array as da
import shutil
import dask.distributed
import dask.dataframe as dd
import dask_ml
import logging
from sklearn.metrics import accuracy_score, recall_score, \\
roc_auc_score, precision_score, f1_score, \\
mean_squared_error, mean_absolute_error, \\
r2_score, classification_report, confusion_matrix, \\
mean_absolute_percentage_error
import lightgbm as lgb
import re
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
from dask_ml.impute import SimpleImputer
from dask_ml.compose import ColumnTransformer
from dask_ml.decomposition import TruncatedSVD, PCA
from dask_ml.preprocessing import StandardScaler, \\
MinMaxScaler, \\
OneHotEncoder, LabelEncoder
from dask_ml.wrappers import ParallelPostFit
import numpy as np
import json
import time
from sklearn.ensemble import IsolationForest
import joblib
import pickle as pkl
import os
# Accumulates artefact locations written out for the prediction stage.
predict_config={}
# Dask tuning: terminate workers only at 99% memory, 128 MiB chunks,
# generous admin tick limit for long-running tasks.
dask.config.set({"distributed.workers.memory.terminate": 0.99})
dask.config.set({"array.chunk-size": "128 MiB"})
dask.config.set({"distributed.admin.tick.limit": "3h"})
# dask.config.set({"distributed.workers.memory.pause": 0.9})
class MinImputer(BaseEstimator, TransformerMixin):
    """Fill missing values with each column's minimum."""
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, y=None):
        return X.fillna(value=X.min())
class MaxImputer(BaseEstimator, TransformerMixin):
    """Fill missing values with each column's maximum."""
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, y=None):
        return X.fillna(value=X.max())
class DropImputer(BaseEstimator, TransformerMixin):
    """Drop every row that contains a missing value."""
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, y=None):
        return X.dropna()
class ModeCategoricalImputer(BaseEstimator, TransformerMixin):
    """Fill missing categorical values with each column's mode."""
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, y=None):
        return X.fillna(value=X.mode())
class IsoForestOutlierExtractor(TransformerMixin):
    """Fit an IsolationForest on X using the dask joblib backend.

    NOTE(review): the inlier mask ``y_pred_train`` is computed but never
    applied — ``transform`` returns X unchanged, so no outliers are actually
    removed. Confirm whether row filtering was intended here.
    """
    def fit(self, X, y=None):
        return self
    def transform(self, X, y):
        lcf = IsolationForest()
        with joblib.parallel_backend('dask'):
            lcf.fit(X)
        y_pred_train = lcf.predict(X)
        # IsolationForest returns 1 for inliers, -1 for outliers.
        y_pred_train = y_pred_train == 1
        return X
def load_config_json(json_file):
    """Parse *json_file* and return its contents."""
    with open(json_file, 'r') as fh:
        return json.loads(fh.read())
def load_data_dask(data_file, npartitions=500):
    """Read a CSV lazily into a dask dataframe and repartition it."""
    frame = dd.read_csv(data_file,
                        assume_missing=True,
                        parse_dates=True, infer_datetime_format=True,
                        sample=1000000,
                        na_values=['-','?'])
    # Spread the data over the requested number of partitions.
    return frame.repartition(npartitions)
def get_dask_eda(df_dask):
    """Eagerly compute summary statistics and correlations for a dask dataframe."""
    summary = df_dask.describe().compute()
    correlations = df_dask.corr().compute()
    return summary, correlations
def normalization(config):
    """Pick the scaler requested in the profiler config, or None when disabled."""
    opts = config["advance"]["profiler"]["normalization"]
    chosen = None
    if opts["minMax"] == "True":
        chosen = MinMaxScaler()
    # standardScaler wins when both flags are set (matches original order).
    if opts["standardScaler"] == "True":
        chosen = StandardScaler()
    return chosen
def categorical_encoding(config):
    """Pick the categorical encoder requested in the profiler config, or None."""
    opts = config["advance"]["profiler"]["categoryEncoding"]
    chosen = None
    if opts["OneHotEncoding"] == "True":
        chosen = OneHotEncoder()
    # LabelEncoding wins when both flags are set (matches original order).
    if opts["LabelEncoding"] == "True":
        chosen = LabelEncoder()
    return chosen
def numeric_feature_imputing(config):
    """Pick the numeric-column imputer requested in the profiler config, or None."""
    chosen = None
    strategy = config["advance"]["profiler"]["numericalFillMethod"]
    # Later flags override earlier ones, matching the original cascade.
    if strategy["Median"] == "True":
        print("Median Simple Imputer")
        chosen = SimpleImputer(strategy='median')
    if strategy["Mean"] == "True":
        print("Mean Simple Imputer")
        chosen = SimpleImputer(strategy='mean')
    if strategy["Min"] == "True":
        print("Min Simple Imputer")
        chosen = MinImputer()
    if strategy["Max"] == "True":
        print("Max Simple Imputer")
        chosen = MaxImputer()
    if strategy["Zero"] == "True":
        print("Zero Simple Imputer")
        chosen = SimpleImputer(strategy='constant',
                               fill_value=0)
    return chosen
def categorical_feature_imputing(config):
    """Pick the categorical-column imputer requested in the profiler config, or None."""
    chosen = None
    strategy = config["advance"]["profiler"]["categoricalFillMethod"]
    # Later flags override earlier ones, matching the original cascade.
    if strategy["MostFrequent"] == "True":
        chosen = SimpleImputer(strategy='most_frequent')
    if strategy["Mode"] == "True":
        chosen = ModeCategoricalImputer()
    if strategy["Zero"] == "True":
        chosen = SimpleImputer(strategy='constant',
                               fill_value=0)
    return chosen
def preprocessing_pipeline(config, X_train):
    """Assemble the ColumnTransformer used to preprocess the training data."""
    print("Start preprocessing")
    scaler_method = normalization(config)
    encoding_method = categorical_encoding(config)
    imputer_numeric_method = numeric_feature_imputing(config)
    imputer_categorical_method = categorical_feature_imputing(config)
    numeric_pipeline = Pipeline(steps=[('impute', imputer_numeric_method),
                                       ('scale', scaler_method)])
    # Built for completeness; the category branch is currently disabled below.
    categorical_pipeline = Pipeline(steps=[('impute', imputer_categorical_method),
                                           ('encoding', encoding_method)])
    numerical_features = X_train._get_numeric_data().columns.values.tolist()
    categorical_features = list(set(X_train.columns) - set(X_train._get_numeric_data().columns))
    print("numerical_features: ", numerical_features)
    print("categorical_features: ", categorical_features)
    full_processor = ColumnTransformer(transformers=[
        ('number', numeric_pipeline, numerical_features),
        # ('category', categorical_pipeline, categorical_features)
    ])
    return full_processor
def full_pipeline(X_train, X_test, config):
    """Fit preprocessing (and optional dimensionality reduction) on train, apply to test.

    Side effects: persists profiler/selector pickles under the model location
    and records their file names in the module-level ``predict_config``.
    """
    full_processor = preprocessing_pipeline(config, X_train)
    reduce_dim = config["advance"] \\
        ["selector"]["featureEngineering"]
    feature_reduce = None
    if reduce_dim["SVD"] == "True":
        feature_reduce = TruncatedSVD(n_components=3)
    if reduce_dim["PCA"] == "True":
        feature_reduce = PCA(n_components=3)
    X_train = full_processor.fit_transform(X_train)
    # joblib.dump(full_processor, 'full_processor_pipeline.pkl')
    deploy_location = config["basic"]["modelLocation"]
    profiler_file = os.path.join(deploy_location,'model','profiler.pkl')
    selector_file = os.path.join(deploy_location,'model','selector.pkl')
    save_pkl(full_processor, profiler_file)
    X_test = full_processor.transform(X_test)
    predict_config['profilerLocation'] = 'profiler.pkl'
    if feature_reduce != None:
        # Reducers need a dask array with known chunk lengths; convert, reduce,
        # then convert back to a dask dataframe.
        X_train = feature_reduce.fit_transform(X_train.to_dask_array(lengths=True))
        save_pkl(feature_reduce, selector_file)
        predict_config['selectorLocation'] = 'selector.pkl'
        # joblib.dump(feature_reduce, 'feature_reduce_pipeline.pkl')
        X_test = feature_reduce.transform(X_test.to_dask_array(lengths=True))
        X_train = dd.from_dask_array(X_train)
        X_test = dd.from_dask_array(X_test)
    else:
        predict_config['selectorLocation'] = ''
    return X_train, X_test
def train_xgb_classification(client, X_train, y_train, X_test, config):
    """Train a distributed XGBoost classifier, persist model and config,
    and return (model, preprocessed X_train, preprocessed X_test)."""
    print("Training XGBoost classification")
    hp = (config["advance"]
                ["distributedlearner_config"]
                ["modelParams"]
                ["classifierModelParams"]
                ["Distributed Extreme Gradient Boosting (XGBoost)"])
    dask_model = xgb.dask.DaskXGBClassifier(
        tree_method=hp["tree_method"],
        n_estimators=int(hp["n_estimators"]),
        max_depth=int(hp["max_depth"]),
        gamma=float(hp["gamma"]),
        min_child_weight=float(hp["min_child_weight"]),
        subsample=float(hp["subsample"]),
        colsample_bytree=float(hp["colsample_bytree"]),
        learning_rate=float(hp["learning_rate"]),
        reg_alpha=float(hp["reg_alpha"]),
        reg_lambda=float(hp["reg_lambda"]),
        random_state=int(hp["random_state"]),
        verbosity=3)
    dask_model.client = client
    X_train, X_test = full_pipeline(X_train, X_test, config)
    dask_model.fit(X_train, y_train)
    save_model(config, dask_model)
    save_config(config)
    return dask_model, X_train, X_test
def train_xgb_regression(client, X_train, y_train, X_test, config):
    """Train a distributed XGBoost regressor, persist model and config,
    and return (model, preprocessed X_train, preprocessed X_test)."""
    hp = (config["advance"]
                ["distributedlearner_config"]
                ["modelParams"]
                ["regressorModelParams"]
                ["Distributed Extreme Gradient Boosting (XGBoost)"])
    print("Training XGBoost regression")
    dask_model = xgb.dask.DaskXGBRegressor(
        tree_method=hp["tree_method"],
        n_estimators=int(hp["n_estimators"]),
        max_depth=int(hp["max_depth"]),
        gamma=float(hp["gamma"]),
        min_child_weight=float(hp["min_child_weight"]),
        subsample=float(hp["subsample"]),
        colsample_bytree=float(hp["colsample_bytree"]),
        learning_rate=float(hp["learning_rate"]),
        reg_alpha=float(hp["reg_alpha"]),
        reg_lambda=float(hp["reg_lambda"]),
        random_state=int(hp["random_state"]),
        verbosity=3)
    dask_model.client = client
    X_train, X_test = full_pipeline(X_train, X_test, config)
    dask_model.fit(X_train, y_train)
    save_model(config, dask_model)
    save_config(config)
    return dask_model, X_train, X_test
def train_lgbm_regression(client, X_train, y_train, X_test, config):
    """Train a distributed LightGBM regressor, persist model and config,
    and return (model, preprocessed X_train, preprocessed X_test)."""
    print("Training lightGBM regression")
    hp = (config["advance"]
                ["distributedlearner_config"]
                ["modelParams"]
                ["regressorModelParams"]
                ["Distributed Light Gradient Boosting (LightGBM)"])
    dask_model = lgb.DaskLGBMRegressor(
        client=client,
        n_estimators=int(hp["n_estimators"]),
        num_leaves=int(hp["num_leaves"]),
        max_depth=int(hp["max_depth"]),
        learning_rate=float(hp["learning_rate"]),
        min_child_samples=int(hp["min_child_samples"]),
        reg_alpha=int(hp["reg_alpha"]),
        subsample=float(hp["subsample"]),
        reg_lambda=int(hp["reg_lambda"]),
        colsample_bytree=float(hp["colsample_bytree"]),
        n_jobs=4,
        verbosity=3)
    X_train, X_test = full_pipeline(X_train, X_test, config)
    dask_model.fit(X_train, y_train)
    save_model(config, dask_model)
    save_config(config)
    return dask_model, X_train, X_test
def train_lgbm_classification(client, X_train, y_train, X_test, config):
    """Train a distributed LightGBM classifier, persist model and config,
    and return (model, preprocessed X_train, preprocessed X_test)."""
    print("Training lightGBM classification")
    hp = (config["advance"]
                ["distributedlearner_config"]
                ["modelParams"]
                ["classifierModelParams"]
                ["Distributed Light Gradient Boosting (LightGBM)"])
    dask_model = lgb.DaskLGBMClassifier(
        client=client,
        num_leaves=int(hp["num_leaves"]),
        learning_rate=float(hp["learning_rate"]),
        feature_fraction=float(hp["feature_fraction"]),
        bagging_fraction=float(hp["bagging_fraction"]),
        bagging_freq=int(hp["bagging_freq"]),
        max_depth=int(hp["max_depth"]),
        min_data_in_leaf=int(hp["min_data_in_leaf"]),
        n_estimators=int(hp["n_estimators"]),
        verbosity=3)
    X_train, X_test = full_pipeline(X_train, X_test, config)
    dask_model.fit(X_train, y_train)
    save_model(config, dask_model)
    save_config(config)
    return dask_model, X_train, X_test
def evaluate_model_classification(model, config, X_test, y_test, class_names):
    """Score a fitted classifier on (X_test, y_test).

    Returns (score_type, score, classification_report_dict, confusion_matrix,
    roc_auc). When several metric flags are "True", the last one checked wins
    as the reported (score_type, score); all selected metrics are logged.
    """
    metrics = config["basic"]["scoringCriteria"]["classification"]
    y_test = y_test.to_dask_array().compute()
    log = logging.getLogger('eion')
    X_test = X_test.to_dask_array(lengths=True)
    y_pred = model.predict(X_test)
    # Defaults keep the return well-defined even when no metric flag is set;
    # the original left "type"/"score" unbound (NameError) in that case, and
    # "type" shadowed the builtin.
    score_type = ''
    score = 0
    if metrics["Accuracy"] == "True":
        score = accuracy_score(y_test, y_pred) * 100.0
        score_type = 'Accuracy'
        log.info('Status:-|... Accuracy Score '+str(score))
    if metrics["Recall"] == "True":
        # NOTE(review): default (binary) averaging — raises for multi-class
        # targets; confirm these flags are only used for binary problems.
        score = recall_score(y_test, y_pred)
        score_type = 'Recall'
        log.info('Status:-|... Recall Score '+str(score))
    if metrics["Precision"] == "True":
        score = precision_score(y_test, y_pred)
        score_type = 'Precision'
        log.info('Status:-|... Precision Score '+str(score))
    if metrics["F1_Score"] == "True":
        score = f1_score(y_test, y_pred)
        score_type = 'F1'
        log.info('Status:-|... F1 Score '+str(score))
    y_pred_prob = model.predict_proba(X_test)
    if len(class_names) == 2:
        # Binary case: AUC from hard predictions (as in the original code).
        roc_auc = roc_auc_score(y_test, y_pred)
    else:
        roc_auc = roc_auc_score(y_test, y_pred_prob, multi_class='ovr')
    if metrics["ROC_AUC"] == "True":
        score = roc_auc
        score_type = 'ROC_AUC'
        log.info('Status:-|... ROC AUC Score '+str(score))
    class_report = classification_report(y_test, y_pred, output_dict=True, target_names=class_names)
    conf_matrix = confusion_matrix(y_test, y_pred)
    return score_type, score, class_report, conf_matrix, roc_auc
def evaluate_model_regression(model, config, X_test, y_test):
    """Score a fitted regressor on (X_test, y_test).

    Returns (score_type, score, mse, rmse, norm_rmse, r2, mae, mape). When
    several metric flags are "True", the last one checked wins as the
    reported (score_type, score); all metrics are always computed.
    """
    metrics = config["basic"]["scoringCriteria"]["regression"]
    y_pred = model.predict(X_test).compute()
    y_test = y_test.to_dask_array().compute()
    # (The original also converted X_test to a dask array here, but the
    # result was never used — prediction had already happened above.)
    log = logging.getLogger('eion')
    mse = mean_squared_error(y_test, y_pred)
    rmse = mean_squared_error(y_test, y_pred, squared=False)
    norm_rmse = rmse * 100 / (y_test.max() - y_test.min())
    mape = mean_absolute_percentage_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    # Defaults keep the return well-defined even when no metric flag is set;
    # the original left "type"/"score" unbound (NameError) in that case.
    score_type = ''
    score = 0
    if metrics["Mean Squared Error"] == "True":
        score_type = 'Mean Squared Error'
        score = mse
        log.info('Status:-|... Mean Squared Error '+str(score))
    if metrics["Root Mean Squared Error"] == "True":
        score_type = 'Root Mean Squared Error'
        score = rmse
        log.info('Status:-|... Root Mean Square Error '+str(score))
    if metrics["R-Squared"] == "True":
        score_type = 'R-Squared'
        score = r2
        log.info('Status:-|... R Squared Error '+str(score))
    if metrics["Mean Absolute Error"] == "True":
        score_type = 'Mean Absolute Error'
        score = mae
        log.info('Status:-|... Mean Absolute Error '+str(score))
    return score_type, score, mse, rmse, norm_rmse, r2, mae, mape
def save_config(config):
    """Write the accumulated global predict_config as JSON to
    <modelLocation>/etc/config.json."""
    deploy_location = config["basic"]["modelLocation"]
    saved_model_file = os.path.join(deploy_location, 'etc', 'config.json')
    print(predict_config)
    # "with" closes the file; the original's explicit f.close() inside the
    # with-block was redundant.
    with open(saved_model_file, 'w') as f:
        json.dump(predict_config, f)
def save_model(config, model):
    """Pickle the trained model to <modelLocation>/model/<type>_<version>.sav
    and record the file name in the global predict_config."""
    model_name = config["basic"]["modelName"]
    model_version = config["basic"]["modelVersion"]
    analysis_type = config["basic"]["analysisType"]
    deploy_location = config["basic"]["modelLocation"]
    # Exactly one of these flags is expected to be "True" in a valid config.
    if analysis_type["classification"] == "True":
        problem_type = "classification"
    if analysis_type["regression"] == "True":
        problem_type = "regression"
    print("model_name", model_name)
    print("model_version", model_version)
    print("problem_type", problem_type)
    print("deploy_location", deploy_location)
    file_name = problem_type + '_' + model_version + ".sav"
    saved_model = os.path.join(deploy_location, 'model', file_name)
    # BUG FIX: the original printed the function object ("save_model")
    # instead of the target path.
    print("Save trained model to directory: ", saved_model)
    with open(saved_model, 'wb') as f:
        pkl.dump(model, f)
    predict_config['modelLocation'] = file_name
def save_pkl(model, filename):
    """Serialize *model* to *filename* using the highest pickle protocol."""
    with open(filename, 'wb') as handle:
        pkl.dump(model, handle, protocol=pkl.HIGHEST_PROTOCOL)
def dask_findiforestOutlier(X):
    """Return a boolean mask of inliers in X according to an IsolationForest
    (True = keep the row, False = outlier)."""
    print("Outlier removal with Isolation Forest...")
    forest = IsolationForest(n_estimators=100)
    with joblib.parallel_backend('dask'):
        forest.fit(X)
        predicted = forest.fit_predict(X)
        return predicted != -1
def _fill_conf_matrix_dict(dest, conf_matrix, class_names):
    """Fill *dest* with {'act:<cls>': {'pre:<cls>': count}} rows built from a
    confusion matrix (helper shared by both classification branches)."""
    for i in range(len(conf_matrix)):
        row = {}
        for j in range(len(conf_matrix[i])):
            row['pre:' + str(class_names[j])] = int(conf_matrix[i][j])
        dest['act:' + str(class_names[i])] = row
def training(configFile):
    """End-to-end distributed training driver.

    Reads the AION configuration, starts a local Dask cluster, loads and
    prepares the data, trains the configured XGBoost/LightGBM classifier or
    regressor, evaluates it, and writes the model artefacts, logs and an
    output.json summary under the deploy location.
    """
    start_time = time.time()
    config = load_config_json(configFile)
    data_dir = config["basic"]["dataLocation"]
    # Dask cluster sizing from the advanced config section.
    n_workers = int(config["advance"]["distributedlearner_config"]["n_workers"])
    npartitions = int(config["advance"]["distributedlearner_config"]["npartitions"])
    threads_per_worker = int(config["advance"]["distributedlearner_config"]["threads_per_worker"])
    # Mirror inference-time settings into the global predict_config.
    predict_config['modelName'] = config["basic"]["modelName"]
    predict_config['modelVersion'] = config["basic"]["modelVersion"]
    predict_config['targetFeature'] = config["basic"]["targetFeature"]
    predict_config['trainingFeatures'] = config["basic"]["trainingFeatures"]
    predict_config['dataLocation'] = config["basic"]["dataLocation"]
    predict_config['n_workers'] = n_workers
    predict_config['npartitions'] = npartitions
    predict_config['threads_per_worker'] = threads_per_worker
    if config['basic']['analysisType']["classification"] == "True":
        problemType = "classification"
        oProblemType = "Distributed Classification"
    if config['basic']['analysisType']["regression"] == "True":
        problemType = "regression"
        oProblemType = "Distributed Regression"
    predict_config['analysisType'] = problemType
    predict_config['scoringCriteria'] = ''
    target_feature = config["basic"]["targetFeature"]
    training_features = config["basic"]["trainingFeatures"]
    deploy_location = config["basic"]["deployLocation"]
    # Which algorithm/problem combinations are enabled in this run.
    is_xgb_class = config["basic"]["algorithms"]["classification"]["Distributed Extreme Gradient Boosting (XGBoost)"]
    is_lgbm_class = config["basic"]["algorithms"]["classification"]["Distributed Light Gradient Boosting (LightGBM)"]
    is_xgb_regress = config["basic"]["algorithms"]["regression"]["Distributed Extreme Gradient Boosting (XGBoost)"]
    is_lgbm_regress = config["basic"]["algorithms"]["regression"]["Distributed Light Gradient Boosting (LightGBM)"]
    if is_xgb_class == "True" or is_xgb_regress == "True":
        algorithm = "Distributed Extreme Gradient Boosting (XGBoost)"
        predict_config['algorithm'] = algorithm
    if is_lgbm_class == "True" or is_lgbm_regress == "True":
        algorithm = "Distributed Light Gradient Boosting (LightGBM)"
        predict_config['algorithm'] = algorithm
    cluster = dask.distributed.LocalCluster(n_workers=n_workers,
                                            threads_per_worker=threads_per_worker)
    client = dask.distributed.Client(cluster)
    df_dask = load_data_dask(data_dir, npartitions=npartitions)
    # Build the <deploy>/<model>/<version> folder structure; an existing
    # version folder is wiped and recreated.
    deployFolder = config["basic"]["deployLocation"]
    modelName = config["basic"]["modelName"]
    modelName = modelName.replace(" ", "_")
    modelVersion = config["basic"]["modelVersion"]
    modelLocation = os.path.join(deployFolder, modelName)
    os.makedirs(modelLocation, exist_ok=True)
    deployLocation = os.path.join(modelLocation, modelVersion)
    predict_config['deployLocation'] = deployLocation
    try:
        os.makedirs(deployLocation)
    except OSError:
        shutil.rmtree(deployLocation)
        time.sleep(2)
        os.makedirs(deployLocation)
    modelFolderLocation = os.path.join(deployLocation, 'model')
    try:
        os.makedirs(modelFolderLocation)
    except OSError:
        print("\\nModel Folder Already Exists")
    etcFolderLocation = os.path.join(deployLocation, 'etc')
    try:
        os.makedirs(etcFolderLocation)
    except OSError:
        # BUG FIX: the original message began with the broken escape "\\E".
        print("\\nETC Folder Already Exists")
    logFolderLocation = os.path.join(deployLocation, 'log')
    try:
        os.makedirs(logFolderLocation)
    except OSError:
        print("\\nLog Folder Already Exists")
    logFileName = os.path.join(logFolderLocation, 'model_training_logs.log')
    outputjsonFile = os.path.join(deployLocation, 'etc', 'output.json')
    # Route the 'eion' logger to this run's log file only.
    filehandler = logging.FileHandler(logFileName, 'w', 'utf-8')
    formatter = logging.Formatter('%(message)s')
    filehandler.setFormatter(formatter)
    log = logging.getLogger('eion')
    log.propagate = False
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            log.removeHandler(hdlr)
    log.addHandler(filehandler)
    log.setLevel(logging.INFO)
    log.info('Status:-|... Distributed Learning Started')
    config['basic']['modelLocation'] = deployLocation
    # Rows without a target value are unusable for supervised training.
    df_dask = df_dask.dropna(subset=[target_feature])
    if is_xgb_class == "True" or is_lgbm_class == "True":
        # Encode the categorical target as integer codes and persist the
        # code -> label mapping for inference.
        df_dask = df_dask.categorize(columns=[target_feature])
        df_dask[target_feature] = df_dask[target_feature].astype('category')
        df_dask[target_feature] = df_dask[target_feature].cat.as_known()
        label_mapping = dict(enumerate(df_dask[target_feature].cat.categories))
        df_dask[target_feature] = df_dask[target_feature].cat.codes
        label_mapping_file = os.path.join(deployLocation, 'etc', 'label_mapping.json')
        with open(label_mapping_file, 'w') as f:
            json.dump(label_mapping, f)
    if config["advance"]["profiler"]["removeDuplicate"] == "True":
        df_dask = df_dask.drop_duplicates()
    # Need to dropna for case of categoricalFillMethod
    trainingFeatures = config["basic"]["trainingFeatures"].split(',')
    if target_feature not in trainingFeatures:
        trainingFeatures.append(target_feature)
    df_dask = df_dask[trainingFeatures]
    y = df_dask[target_feature]
    X = df_dask.drop(target_feature, axis=1)
    print("after X.shape, y.shape", X.shape, y.shape)
    X_train, X_test, y_train, y_test = dask_ml.model_selection.train_test_split(
        X, y, test_size=0.2, random_state=0)
    trainingFeatures = config["basic"]["trainingFeatures"].split(',')
    outputJson = None
    conf_matrix_dict = {}
    train_conf_matrix_dict = {}
    try:
        if is_xgb_class == "True":
            modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
            dask_model, X_train, X_test = train_xgb_classification(client, X_train, y_train, X_test, config)
            class_names = list(label_mapping.values())
            _, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(
                dask_model, config, X_train, y_train, class_names)
            scoringCreteria, score, class_report, conf_matrix, roc_auc = evaluate_model_classification(
                dask_model, config, X_test, y_test, class_names)
            _fill_conf_matrix_dict(conf_matrix_dict, conf_matrix, class_names)
            _fill_conf_matrix_dict(train_conf_matrix_dict, train_conf_matrix, class_names)
            outputJson = {'status': 'SUCCESS', 'data': {
                'ModelType': oProblemType,
                'deployLocation': deployLocation,
                'BestModel': modelName,
                'BestScore': score,
                'ScoreType': scoringCreteria,
                'matrix': {'ConfusionMatrix': conf_matrix_dict, 'ClassificationReport': class_report, 'ROC_AUC_SCORE': roc_auc},
                'trainmatrix': {'ConfusionMatrix': train_conf_matrix_dict, 'ClassificationReport': train_class_report, 'ROC_AUC_SCORE': train_roc_auc},
                'featuresused': trainingFeatures,
                'targetFeature': target_feature,
                'EvaluatedModels': [{'Model': modelName, 'Score': score}],
                'LogFile': logFileName}}
        if is_lgbm_class == "True":
            modelName = 'Distributed Light Gradient Boosting (LightGBM)'
            dask_model, X_train, X_test = train_lgbm_classification(client, X_train, y_train, X_test, config)
            class_names = list(label_mapping.values())
            _, _, train_class_report, train_conf_matrix, train_roc_auc = evaluate_model_classification(
                dask_model, config, X_train, y_train, class_names)
            scoringCreteria, score, class_report, conf_matrix, roc_auc = evaluate_model_classification(
                dask_model, config, X_test, y_test, class_names)
            _fill_conf_matrix_dict(conf_matrix_dict, conf_matrix, class_names)
            _fill_conf_matrix_dict(train_conf_matrix_dict, train_conf_matrix, class_names)
            outputJson = {'status': 'SUCCESS', 'data': {
                'ModelType': oProblemType,
                'deployLocation': deployLocation,
                'BestModel': modelName,
                'BestScore': score,
                'ScoreType': scoringCreteria,
                'matrix': {'ConfusionMatrix': conf_matrix_dict, 'ClassificationReport': class_report, 'ROC_AUC_SCORE': roc_auc},
                'trainmatrix': {'ConfusionMatrix': train_conf_matrix_dict, 'ClassificationReport': train_class_report, 'ROC_AUC_SCORE': train_roc_auc},
                'featuresused': trainingFeatures,
                'targetFeature': target_feature,
                'EvaluatedModels': [{'Model': modelName, 'Score': score}],
                'LogFile': logFileName}}
        if is_xgb_regress == "True":
            modelName = 'Distributed Extreme Gradient Boosting (XGBoost)'
            dask_model, X_train, X_test = train_xgb_regression(client, X_train, y_train, X_test, config)
            _, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(
                dask_model, config, X_train, y_train)
            scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(
                dask_model, config, X_test, y_test)
            outputJson = {'status': 'SUCCESS', 'data': {
                'ModelType': oProblemType,
                'deployLocation': deployLocation,
                'BestModel': modelName,
                'BestScore': score,
                'ScoreType': scoringCreteria,
                'matrix': {'MAE': mae, 'R2Score': r2, 'MSE': mse, 'MAPE': mape, 'RMSE': rmse, 'Normalised RMSE(%)': norm_rmse},
                'trainmatrix': {'MAE': train_mae, 'R2Score': train_r2, 'MSE': train_mse, 'MAPE': train_mape, 'RMSE': train_rmse, 'Normalised RMSE(%)': train_norm_rmse},
                'featuresused': trainingFeatures,
                'targetFeature': target_feature,
                'EvaluatedModels': [{'Model': modelName, 'Score': score}],
                'LogFile': logFileName}}
        if is_lgbm_regress == "True":
            modelName = 'Distributed Light Gradient Boosting (LightGBM)'
            dask_model, X_train, X_test = train_lgbm_regression(client, X_train, y_train, X_test, config)
            _, _, train_mse, train_rmse, train_norm_rmse, train_r2, train_mae, train_mape = evaluate_model_regression(
                dask_model, config, X_train, y_train)
            scoringCreteria, score, mse, rmse, norm_rmse, r2, mae, mape = evaluate_model_regression(
                dask_model, config, X_test, y_test)
            outputJson = {'status': 'SUCCESS', 'data': {
                'ModelType': oProblemType,
                'deployLocation': deployLocation,
                'BestModel': modelName,
                'BestScore': score,
                'ScoreType': scoringCreteria,
                'matrix': {'MAE': mae, 'R2Score': r2, 'MSE': mse, 'MAPE': mape, 'RMSE': rmse, 'Normalised RMSE(%)': norm_rmse},
                'trainmatrix': {'MAE': train_mae, 'R2Score': train_r2, 'MSE': train_mse, 'MAPE': train_mape, 'RMSE': train_rmse, 'Normalised RMSE(%)': train_norm_rmse},
                'featuresused': trainingFeatures,
                'targetFeature': target_feature,
                'EvaluatedModels': [{'Model': modelName, 'Score': score}],
                'LogFile': logFileName}}
        # Ship the prediction entry script alongside the trained model.
        src = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'utilities', 'dl_aion_predict.py')
        shutil.copy2(src, deployLocation)
        os.rename(os.path.join(deployLocation, 'dl_aion_predict.py'),
                  os.path.join(deployLocation, 'aion_predict.py'))
    except Exception as e:
        outputJson = {"status": "FAIL", "message": str(e)}
        print(e)
    client.close()
    cluster.close()
    log.info('Status:-|... Distributed Learning Completed')
    with open(outputjsonFile, 'w') as f:
        json.dump(outputJson, f)
    output_json = json.dumps(outputJson)
    log.info('aion_learner_status:'+str(output_json))
    for hdlr in log.handlers[:]:  # remove the existing file handlers
        if isinstance(hdlr, logging.FileHandler):
            hdlr.close()
            log.removeHandler(hdlr)
    print("\\n")
    print("aion_learner_status:", output_json)
    print("\\n")
    end_time = time.time()
    print("--- %s processing time (sec) ---" % (end_time - start_time))
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter, CoxPHFitter
from lifelines.utils import datetimes_to_durations
import logging
import numpy as np
import re
import sys
import os
class SurvivalAnalysis(object):
def __init__(self, df, pipe, method, event_column, duration_column, filterExpression, train_features_type,start=None, end=None):
pd.options.display.width = 30
self.df = df
self.pipe = pipe
self.train_features_type = train_features_type
self.filterExpression = filterExpression
self.covariateExpression = filterExpression
self.method = method
self.event_column = event_column
if start is not None and end is not None:
self.df['duration'], _ = datetimes_to_durations(start, end)
self.duration_column = 'duration'
else:
self.duration_column = duration_column
self.models = []
self.score = 0
self.log = logging.getLogger('eion')
self.plots = []
def transform_filter_expression(self, covariate, covariate_input):
'''
Filter expression given by user will be encoded if it is categorical and if it is a numerical feature that
is normalised in data profiler, in filter expression feature also it will be converted to normalised value
'''
cols = list(self.df.columns)
if self.duration_column in cols:
cols.remove(self.duration_column)
if self.event_column in cols:
cols.remove(self.event_column)
df_filter = pd.DataFrame([{covariate:covariate_input}], columns=cols)
df_filter[covariate] = df_filter[covariate].astype(self.train_features_type[covariate])
df_transform_array = self.pipe.transform(df_filter)
df_transform = pd.DataFrame(df_transform_array, columns=cols)
return df_transform[covariate].iloc[0]
def learn(self):
self.log.info('\\n---------- SurvivalAnalysis learner has started ----------')
self.log.info('\\n---------- SurvivalAnalysis learner method is "%s" ----------' % self.method)
if self.method.lower() in ['kaplanmeierfitter', 'kaplanmeier', 'kaplan-meier', 'kaplan meier', 'kaplan', 'km',
'kmf']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" | ||
has started ----------' % self.method)
kmf = KaplanMeierFitter()
T = self.df[self.duration_column]
E = self.df[self.event_column]
self.log.info('\\n T : \\n%s' % str(T))
self.log.info('\\n E : \\n%s' % str(E))
K = kmf.fit(T, E)
kmf_sf = K.survival_function_
kmf_sf_json = self.survival_probability_to_json(kmf_sf)
self.models.append(K)
if isinstance(self.filterExpression, str):
df_f, df_n, refined_filter_expression = self.parse_filterExpression()
kmf1 = KaplanMeierFitter()
kmf2 = KaplanMeierFitter()
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has started----------' % self.method)
T1 = df_f[self.duration_column]
E1 = df_f[self.event_column]
T2 = df_n[self.duration_column]
E2 = df_n[self.event_column]
kmf1.fit(T1, E1)
fig, ax = plt.subplots(1, 1)
ax = kmf1.plot_survival_function(ax=ax, label='%s' % refined_filter_expression)
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for filter expression has ended----------' % self.method)
plt.title("KM Survival Functions - Filter vs Negation")
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for negation has started----------' % self.method)
kmf2.fit(T2, E2)
ax = kmf2.plot_survival_function(ax=ax, label='~%s' % refined_filter_expression)
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" fitting for negation has ended----------' % self.method)
self.models.extend([kmf1, kmf2])
kmf1_sf = kmf1.survival_function_
kmf2_sf = kmf2.survival_function_
kmf1_sf_json = self.survival_probability_to_json(kmf1_sf)
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf1_sf_json
else:
fig, ax = plt.subplots(1, 1)
ax = kmf_sf.plot(ax=ax)
plt.title("KM Survival Functions")
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: KaplanMeierFitter')
return kmf_sf_json
elif self.method.lower() in ['coxphfitter', 'coxregression', 'cox-regression', 'cox regression',
'coxproportionalhazard', 'coxph', 'cox', 'cph']:
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has started ----------' % self.method)
cph = CoxPHFitter(penalizer=0.1)
self.df = self.drop_constant_features(self.df)
C = cph.fit(self.df, self.duration_column, self.event_column)
self.models.append(C)
cph_sf = C.baseline_survival_
self.score = C.score(self.df, scoring_method="concordance_index")
self.log.info(
'\\n---------- SurvivalAnalysis learner "%s" score is "%s"----------' % (self.method, str(self.score)))
cph_sf_json = self.survival_probability_to_json(cph_sf)
if isinstance(self.covariateExpression, str):
covariate, covariate_inputs, covariate_values = self.parse_covariateExpression()
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.tight_layout()
ax1 = C.plot(ax=ax1, hazard_ratios=True)
self.log.info('\\n Summary : \\n%s' % str(C.summary))
ax1.set_title("COX hazard ratio")
ax2 = C.plot_partial_effects_on_outcome(covariate, covariate_values, ax=ax2)
mylabels = [covariate + '=' + str(x) for x in covariate_inputs]
mylabels.append('baseline')
ax2.legend(labels=mylabels)
ax2.set_title("Covariate Plot")
self.plots.append(fig)
else:
fig = plt.figure()
ax1 = C.plot(hazard_ratios=True)
self.log.info('\\n Summary : \\n%s' % str(C.summary))
plt.title("COX hazard ratio")
self.plots.append(fig)
self.log.info('\\n---------- SurvivalAnalysis learner method "%s" has ended ----------' % self.method)
self.log.info('\\n---------- SurvivalAnalysis learner has ended ----------')
self.log.info('Status:- |... Algorithm applied: CoxPHFitter')
return cph_sf_json
def parse_filterExpression(self):
import operator
self.log.info('\\n---------- Filter Expression parsing has started ----------')
self.log.info('Filter Expression provided : %s' % self.filterExpression)
self.log.info('Shape before filter : %s' % str(self.df.shape))
f = self.filterExpression.split('&')
f = list(filter(None, f))
if len(f) == 1:
p = '[<>=!]=?'
op = re.findall(p, self.filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\\'').strip('\\"') for x in self.filterExpression.split(op)]
refined_filter_expression = covariate + op + covariate_input
self.log.info('Final refined filter : %s' % refined_filter_expression)
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
try:
fv = self.transform_filter_expression(covariate, covariate_input)
df_f = self.df[ops[op](self.df[covariate], fv)]
self.log.info('Shape after filter : %s' % str(df_f.shape))
df_n = self.df[~self.df[covariate].isin(df_f[covariate])]
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
except Exception:
self.log.info('\\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type == IndexError or ValueError or KeyError:
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
else:
full_f = []
try:
for filterExpression in f:
p = '[<>=!]=?'
op = re.findall(p, filterExpression)[0]
covariate, covariate_input = [x.strip().strip('\\'').strip('\\"') for x in filterExpression.split(op)]
full_f.append(covariate + op + covariate_input)
ops = {"==": operator.eq, ">": operator.gt, "<": operator.lt, ">=": operator.ge, "<=": operator.le,
"!=": operator.ne}
fv = self.transform_filter_expression(covariate, covariate_input)
df_f = self.df[ops[op](self.df[covariate], fv)]
df_n = self.df[~self.df[covariate].isin(df_f[covariate])]
refined_filter_expression = " & ".join(full_f)
self.log.info('Final refined filter : %s' % refined_filter_expression)
self.log.info('Shape after filter : %s' % str(df_f.shape))
self.log.info('Shape of negation : %s' % str(df_n.shape))
self.log.info('---------- Filter Expression has ended ----------')
return df_f, df_n, refined_filter_expression
# except (IndexError, ValueError, KeyError):
except Exception:
self.log.info('\\n-----> Filter Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type == IndexError or ValueError or KeyError:
self.log.info('----->Given filter expression '+ self.filterExpression +' is invalid')
self.log.info('Valid examples are "A>100", "B==category1", "C>=10 && C<=20" etc..')
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
def parse_covariateExpression(self):
self.log.info('\\n---------- Covariate Expression parsing has started ----------')
self.log.info('\\n Covariate Expression provided : %s' % self.covariateExpression)
import ast
p = '[=:]'
try:
op = re.findall(p, self.covariateExpression)[0]
covariate, covariate_inputs = [x.strip().strip('\\'').strip('\\"') for x in
self.covariateExpression.split(op)]
covariate_inputs = ast.literal_eval(covariate_inputs)
covariate_values = [self.transform_filter_expression(covariate, x) for x in covariate_inputs]
self.log.info('\\n---------- Covariate Expression parsing has ended ----------')
return covariate, covariate_inputs, covariate_values
except Exception:
self.log.info('\\n-----> Covariate Expression parsing encountered error!!!')
exc_type, exc_obj, exc_tb = sys.exc_info()
if exc_type == IndexError or ValueError or KeyError:
self.log.info('----->Given covariate expression '+ self.filterExpression +' is invalid')
self.log.info("\\n Valid examples are A=['Yes','No'] or B=[100,500,1000]")
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
self.log.info(str(exc_type)+' '+str(fname) + ' ' + str(exc_tb.tb_lineno))
raise Exception(str(exc_type)+str(exc_obj))
def survival_probability_to_json(self, sf):
'''
sf = Survival function i.e. KaplanMeierFitter.survival_function_ or CoxPHFitter.baseline_survival_
returns json of survival probabilities
'''
sf = sf[sf.columns[0]].apply(lambda x: "%4.2f" % (x * 100))
self.log.info('\\n Survival probabilities : \\n%s' % str(sf))
sf = sf.reset_index()
sf = sf.sort_values(sf.columns[0])
sf_json = sf.to_json(orient='records')
self.log.info('\\n Survival probability json : \\n%s' % str(sf_json))
return sf_json
def drop_constant_features(self, df):
dropped = []
for col in df.columns:
if (len(df[col].unique()) == 1) and (col not in [self.duration_column, self.event_column]):
df.drop(col, inplace=True, axis=1)
dropped.append(col)
if len(dropped) != 0:
self.log.info('\\n Dropping constant features %s' % str(col))
self.log.info('\\n After dropping constant features : \\n%s' % str(df))
return df
def predict(self):
if self.method == 'KaplanMeierFitter':
return self.model.predict(self.test[self.duration_column])
elif self.method == 'CoxPHFitter':
res = []
for idx, row in self.test.iterrows():
res.append(
self.model.predict_survival_function(self.test, times=row[self.model.duration_col])[idx].values[0])
return pd.DataFrame(res)
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''<s> import os
from typing import List
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def split_csv(fp: str = "./data/creditcard.csv", fp_dest: str = "./data",
              name: str = "credit", test_size: float = 0.5, strat_col: str = "Class") -> None:
    """Splits a csv file in two, in a stratified fashion.
    Format for filenames will be `{name}0.csv` and `{name}1.csv`.

    :param fp: The path at which the csv file is located.
    :type fp: str
    :param fp_dest: The path to save the train and test files.
    :type fp_dest: str
    :param name: The prefix for the files.
    :type name: str
    :param test_size: The fraction of total size for the test file.
    :type test_size: float
    :param strat_col: The column in the original csv file to stratify.
    :type strat_col: str
    :return: None, two files located at `fp_dest`.
    :rtype: NoneType
    """
    if not os.path.isfile(fp):
        raise FileNotFoundError(f"File at {fp} does not exist.")
    if not os.path.isdir(fp_dest):
        raise ValueError(f"Directory at {fp_dest} does not exist.")
    # Bug fix: annotation was `test_size: int` although the value is a fraction
    # in (0, 1) -- corrected to `float` to match the default and this check.
    if not 0 < test_size < 1:
        raise ValueError(f"{test_size} is not in interval 0 < x < 1.")
    df = pd.read_csv(fp)
    if strat_col not in df.columns:
        raise ValueError(f"Stratify column {strat_col} not found in DataFrame.")
    # Stratified split keeps the class distribution identical in both halves.
    train, test = train_test_split(df, test_size=test_size, stratify=df[strat_col])
    train.to_csv(f"{fp_dest}/{name}0.csv", index=False)
    test.to_csv(f"{fp_dest}/{name}1.csv", index=False)
def rounded_dict(d: dict, precision: int = 6) -> dict:
    """Round every value of a dictionary to `precision` decimal places.

    :param d: Dictionary containing only floats or ints as values
    :type d: dict
    :param precision: Number of digits kept after the decimal point
    :type precision: int
    :return: New dictionary with rounded values
    :rtype: dict
    """
    rounded = {}
    for key, value in d.items():
        rounded[key] = round(value, precision)
    return rounded
def imbalance_ratio(y: np.ndarray, min_classes: List[int] = [1], maj_classes: List[int] = [0]) -> float:
"""Calculates imbalance ratio of minority class(es) and majority class(es).
:param y: y-vector with labels.
:type y: np.ndarray
:param min_classes: The labels of the minority classes
:type min_classes: list
:param maj_classes: The labels of the minority classes
:type maj_classes: list
:return: The imbalance ratio
:rtype: float
"""
return np.isin(y, min_classes).sum() / np.isin(y, maj_classes).sum()
<s>
import os
import numpy as np
import pandas as pd
import time
from DeepRL.agents.ddqn import TrainDDQN
from DeepRL.agents.dqn import TrainDQN
from DeepRL.dataprocess import get_train_test_val
from DeepRL.utils import rounded_dict
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data
def PredictRL(input_csv_file, model_load_path, RL_hparams_config_file, RL_Algo_Name):
    """Evaluate a DQN/DDQN reinforcement-learning classifier on device telemetry.

    Reads the telemetry csv, keeps rows for "Device_1", derives a binary target
    from `device_status` (0.0 for NORMAL, 1.0 otherwise), min-max normalizes the
    features, builds the agent and prints its evaluation metrics.

    :param input_csv_file: path to the telemetry csv file
    :param model_load_path: directory containing (or created for) the saved model
    :param RL_hparams_config_file: config dict with hyperparameters under 'DeepRL'
    :param RL_Algo_Name: "DDQN" or "DQN"
    :raises ValueError: if `RL_Algo_Name` is neither "DDQN" nor "DQN"
    """
    if not (os.path.exists(model_load_path)):
        os.makedirs(model_load_path)
    cfg = RL_hparams_config_file['DeepRL']
    episodes = cfg['episodes'] # Total number of episodes
    warmup_steps = cfg['warmup_steps'] # Amount of warmup steps to collect data with random policy
    memory_length = warmup_steps # Max length of the Replay Memory
    batch_size = cfg['batch_size']
    collect_steps_per_episode = cfg['collect_steps_per_episode']
    collect_every = cfg['collect_every']
    target_update_period = cfg['target_update_period'] # Period to overwrite the target Q-network with the default Q-network
    target_update_tau = cfg['target_update_tau'] # Soften the target model update
    n_step_update = cfg['n_step_update']
    learning_rate = cfg['learning_rate'] # Learning rate
    gamma = cfg['gamma'] # Discount factor
    min_epsilon = cfg['min_epsilon'] # Minimal and final chance of choosing random action
    decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`
    data = pd.read_csv(input_csv_file)
    device5 = data[data['device_id'] == "Device_1"]
    device5 = device5.drop(['device_id'], axis = 1)
    device5.reset_index(drop=True, inplace=True)
    # Vectorized binary target (replaces the former per-row Python loop).
    device5['target'] = np.where(device5['device_status'] == "NORMAL", 0.0, 1.0)
    device5 = device5.drop(['device_status'], axis = 1)
    # First column (assumed to be a timestamp/id -- TODO confirm) is excluded.
    X_test = device5.iloc[:,1:-1].astype(np.float32)
    y_test = device5.iloc[:,-1].astype(np.int32)
    # Min-max normalization of the features.
    mini, maxi = X_test.min(axis=0), X_test.max(axis=0)
    X_test -= mini
    X_test /= maxi - mini
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    layers = [Dense(128, activation="relu"),
              Dense(64, activation="relu"),
              Dense(32, activation="relu"),
              Dense(2, activation=None)]
    if(RL_Algo_Name == "DDQN"):
        model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,
                          target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,
                          memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path)
    elif(RL_Algo_Name == "DQN"):
        model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,
                         target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,
                         memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_load_path)
    else:
        # Bug fix: previously fell through with `model` undefined (NameError).
        raise ValueError(f"Unknown RL algorithm name: {RL_Algo_Name}")
    model.compile_model(X_test, y_test, layers)
    model.q_net.summary()
    stats = model.evaluate(X_test, y_test)
    print(rounded_dict(stats))
<s> import os
from typing import List, Tuple
import numpy as np
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tensorflow.keras.datasets import cifar10, fashion_mnist, imdb, mnist
from tensorflow.keras.preprocessing.sequence import pad_sequences
TrainTestData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
TrainTestValData = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
def load_image(data_source: str) -> TrainTestData:
    """
    Loads one of the following image datasets: {mnist, famnist, cifar10}.
    Normalizes the data. Returns X and y for both train and test datasets.
    Dtypes of X's and y's will be `float32` and `int32` to be compatible with `tf_agents`.

    :param data_source: Either mnist, famnist or cifar10
    :type data_source: str
    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: tuple
    """
    if data_source == "mnist":
        loader, target_shape = mnist, (-1, 28, 28, 1)
    elif data_source == "famnist":
        loader, target_shape = fashion_mnist, (-1, 28, 28, 1)
    elif data_source == "cifar10":
        loader, target_shape = cifar10, (-1, 32, 32, 3)
    else:
        raise ValueError("No valid `data_source`.")
    (X_train, y_train), (X_test, y_test) = loader.load_data()
    # float32 is the dtype expected by the environment's observation spec.
    X_train = X_train.reshape(target_shape).astype(np.float32)
    X_test = X_test.reshape(target_shape).astype(np.float32)
    # Scale pixel values from [0, 255] down to [0, 1].
    X_train /= 255
    X_test /= 255
    y_train = y_train.reshape(-1).astype(np.int32)
    y_test = y_test.reshape(-1).astype(np.int32)
    return X_train, y_train, X_test, y_test
def load_csv(fp_train: str, fp_test: str, label_col: str, drop_cols: List[str], normalization: bool = False) -> TrainTestData:
    """
    Loads any csv-file from local filepaths. Returns X and y for both train and test datasets.
    Option to normalize the data with min-max normalization.
    Only csv-files with float32 values for the features and int32 values for the labels supported.
    Source for dataset: https://mimic-iv.mit.edu/

    :param fp_train: Location of the train csv-file
    :type fp_train: str
    :param fp_test: Location of the test csv-file
    :type fp_test: str
    :param label_col: The name of the column containing the labels of the data
    :type label_col: str
    :param drop_cols: Names of columns to drop; `label_col` is dropped automatically
    :type drop_cols: List of strings
    :param normalization: Normalize the data with min-max normalization?
    :type normalization: bool
    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
    """
    for path, arg_name in ((fp_train, "fp_train"), (fp_test, "fp_test")):
        if not os.path.isfile(path):
            raise FileNotFoundError(f"`{arg_name}` {path} does not exist.")
    if not isinstance(normalization, bool):
        raise TypeError(f"`normalization` must be of type `bool`, not {type(normalization)}")
    # DataFrames directly converted to float32; labels re-cast to int32.
    train_df = read_csv(fp_train).astype(np.float32)
    test_df = read_csv(fp_test).astype(np.float32)
    y_train = train_df[label_col].astype(np.int32)
    y_test = test_df[label_col].astype(np.int32)
    unwanted = drop_cols + [label_col]
    train_df.drop(columns=unwanted, inplace=True)
    test_df.drop(columns=unwanted, inplace=True)
    # Other data sources are already normalized. RGB values are always in range 0 to 255.
    if normalization:
        col_min, col_max = train_df.min(axis=0), train_df.max(axis=0)
        span = col_max - col_min
        # Train statistics are applied to both splits to avoid leakage.
        train_df = (train_df - col_min) / span
        test_df = (test_df - col_min) / span
    return train_df.values, y_train.values, test_df.values, y_test.values  # Numpy arrays
def load_imdb(config: Tuple[int, int] = (5_000, 500)) -> TrainTestData:
    """Loads the IMDB dataset. Returns X and y for both train and test datasets.

    :param config: Tuple of (number of most frequent words, max length of each sequence)
    :type config: tuple
    :return: Tuple of (X_train, y_train, X_test, y_test) containing original split of train/test
    :rtype: tuple
    """
    if not isinstance(config, (tuple, list)):
        raise TypeError(f"{type(config)} is no valid datatype for `config`.")
    if len(config) != 2:
        raise ValueError("Tuple length of `config` must be 2.")
    # De Morgan: "not all > 0" == "any <= 0"
    if any(i <= 0 for i in config):
        raise ValueError("All integers of `config` must be > 0.")
    num_words, max_len = config
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=num_words)
    # Pad/truncate every review to a fixed sequence length.
    X_train = pad_sequences(X_train, maxlen=max_len)
    X_test = pad_sequences(X_test, maxlen=max_len)
    return X_train, y_train.astype(np.int32), X_test, y_test.astype(np.int32)
def get_train_test_val(X_train: np.ndarray, y_train: np.ndarray, X_test: np.ndarray, y_test: np.ndarray, min_classes: List[int],
                       maj_classes: List[int], imb_ratio: float = None, imb_test: bool = True, val_frac: float = 0.25,
                       print_stats: bool = True) -> TrainTestValData:
    """
    Imbalances the data and divides it into train, test and validation sets.
    Each individual set ends up with approximately the given `imb_ratio`.

    :param X_train: The X_train data
    :param y_train: The y_train data
    :param X_test: The X_test data
    :param y_test: The y_test data
    :param min_classes: List of labels of all minority classes
    :param maj_classes: List of labels of all majority classes
    :param imb_ratio: Minority/majority ratio: len(minority) / len(majority).
        When None, data is not imbalanced and is only relabeled to 1's and 0's.
    :param imb_test: Imbalance the test dataset?
    :param val_frac: Fraction taken from X_train/y_train for X_val/y_val
    :param print_stats: Print the imbalance ratio of the imbalanced data?
    :return: Tuple of (X_train, y_train, X_test, y_test, X_val, y_val)
    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]
    """
    if not 0 < val_frac < 1:
        raise ValueError(f"{val_frac} is not in interval 0 < x < 1.")
    if not isinstance(print_stats, bool):
        raise TypeError(f"`print_stats` must be of type `bool`, not {type(print_stats)}.")
    X_train, y_train = imbalance_data(X_train, y_train, min_classes, maj_classes, imb_ratio=imb_ratio)
    # Only imbalance the test split when requested; None leaves it untouched.
    test_ratio = imb_ratio if imb_test else None
    X_test, y_test = imbalance_data(X_test, y_test, min_classes, maj_classes, imb_ratio=test_ratio)
    # stratify=y_train keeps the class balance equal between train and validation.
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_frac, stratify=y_train)
    if print_stats:
        p_train, p_test, p_val = (((y == 1).sum(), imbalance_ratio(y)) for y in (y_train, y_test, y_val))
        print(f"Imbalance ratio `p`:\\n"
              f"\\ttrain: n={p_train[0]}, p={p_train[1]:.6f}\\n"
              f"\\ttest: n={p_test[0]}, p={p_test[1]:.6f}\\n"
              f"\\tvalidation: n={p_val[0]}, p={p_val[1]:.6f}")
    return X_train, y_train, X_test, y_test, X_val, y_val
def imbalance_data(X: np.ndarray, y: np.ndarray, min_class: List[int], maj_class: List[int],
                   imb_ratio: float = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Split data into minority and majority; only rows labeled in {min_class, maj_class} are kept.
    Minority rows may be subsampled to match the requested imbalance rate.
    If the dataset's initial ratio is already below `imb_ratio`, it is left unchanged.
    If `imb_ratio` is None, data is not imbalanced and is only relabeled to 1's and 0's.
    """
    for arg_name, arr in (("X", X), ("y", y)):
        if not isinstance(arr, np.ndarray):
            raise TypeError(f"`{arg_name}` must be of type `np.ndarray` not {type(arr)}")
    if X.shape[0] != y.shape[0]:
        raise ValueError("`X` and `y` must contain the same amount of rows.")
    if not isinstance(min_class, (list, tuple)):
        raise TypeError("`min_class` must be of type list or tuple.")
    if not isinstance(maj_class, (list, tuple)):
        raise TypeError("`maj_class` must be of type list or tuple.")
    if (imb_ratio is not None) and not (0 < imb_ratio < 1):
        raise ValueError(f"{imb_ratio} is not in interval 0 < imb_ratio < 1.")
    # None means "relabel only": a ratio of 1 keeps every minority row.
    ratio = 1 if imb_ratio is None else imb_ratio
    minority = X[np.isin(y, min_class)]
    majority = X[np.isin(y, maj_class)]
    # Number of minority rows needed for the requested ratio, capped at what exists.
    keep = min(int(majority.shape[0] * ratio), minority.shape[0])
    minority = minority[np.random.choice(minority.shape[0], keep, replace=False), :]
    # Majority rows are relabeled 0, minority rows 1.
    X_imb = np.concatenate([majority, minority]).astype(np.float32)
    y_imb = np.concatenate((np.zeros(majority.shape[0]), np.ones(minority.shape[0]))).astype(np.int32)
    X_imb, y_imb = shuffle(X_imb, y_imb)
    return X_imb, y_imb
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import os
import numpy as np
import pandas as pd
import time
import sys
import logging
from reinforcement.agents.ddqn import TrainDDQN
from reinforcement.agents.dqn import TrainDQN
from reinforcement.utils import rounded_dict
from tensorflow.keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
from learner.machinelearning import machinelearning
from learner.aion_matrix import aion_matrix
from reinforcement.metrics import network_predictions
from learner.machinelearning import machinelearning
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # CPU is faster than GPU on structured data
#def TrainRL(input_csv_file, model_save_path, rl_config, RL_Algo_Name):
class ReinformentLearning():
    """Trainer wrapper for reinforcement-learning based classification (DQN / DDQN)."""

    def __init__(self,rlConfig,scoreParam,modelType):
        """
        :param rlConfig: dict of RL hyperparameters (episodes, warmup_steps, ...)
        :param scoreParam: name of the scoring metric used to evaluate the model
        :param modelType: problem type tag (kept for callers; unused here)
        """
        self.rl_config= rlConfig
        self.scoreParam = scoreParam
        self.log = logging.getLogger('eion')
        self.modelType = modelType

    def TrainRL(self,xtrain,ytrain,xtest,ytest,algorithm,deployLocation):
        """Train a DQN or DDQN agent on the given split and score it on the test set.

        :param xtrain/ytrain: training features and labels
        :param xtest/ytest: test features and labels
        :param algorithm: "DQN" or "DDQN"
        :param deployLocation: deployment directory (logs are written to its 'log' subdir)
        :return: tuple (network, rl_config, score, algorithm, -1, -1, -1),
                 or None when training fails (the error is logged).
        """
        try:
            # Hold out 20% of the training data for validation, keeping class balance.
            X_train, xval, y_train, yval = train_test_split(xtrain, ytrain, test_size=0.2, stratify=ytrain)
            X_train = np.array(X_train)
            y_train = np.array(y_train)
            xval = np.array(xval)
            yval = np.array(yval)
            xtest = np.array(xtest)
            ytest = np.array(ytest)
            objClf = aion_matrix()
            episodes = self.rl_config['episodes'] # Total number of episodes
            warmup_steps = self.rl_config['warmup_steps'] # Amount of warmup steps to collect data with random policy
            memory_length = warmup_steps # Max length of the Replay Memory
            batch_size = self.rl_config['batch_size']
            collect_steps_per_episode = self.rl_config['collect_steps_per_episode']
            collect_every = self.rl_config['collect_every']
            target_update_period = self.rl_config['target_update_period'] # Period to overwrite the target Q-network with the default Q-network
            target_update_tau = self.rl_config['target_update_tau'] # Soften the target model update
            n_step_update = self.rl_config['n_step_update']
            learning_rate = self.rl_config['learning_rate'] # Learning rate
            gamma = self.rl_config['gamma'] # Discount factor
            min_epsilon = self.rl_config['min_epsilon'] # Minimal and final chance of choosing random action
            decay_episodes = episodes // 10 # Number of episodes to decay from 1.0 to `min_epsilon`
            # Output layer width follows the number of distinct classes.
            layers = [Dense(128, activation="relu"),
                      Dense(64, activation="relu"),
                      Dense(32, activation="relu"),
                      Dense(len(np.unique(y_train)), activation=None)]
            logFilePath=os.path.join(deployLocation,'log')
            if algorithm == "DQN":
                model_save_path = os.path.dirname(__file__)
                model = TrainDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update, model_path=model_save_path,log_dir=logFilePath)
            elif algorithm == "DDQN":
                # Bug fix: this was a separate `if`; an unknown algorithm previously
                # fell through to scoring with `model`/`predictedytest` undefined.
                model = TrainDDQN(episodes, warmup_steps, learning_rate, gamma, min_epsilon, decay_episodes, target_update_period=target_update_period,target_update_tau=target_update_tau, batch_size=batch_size, collect_steps_per_episode=collect_steps_per_episode,memory_length=memory_length, collect_every=collect_every, n_step_update=n_step_update,log_dir=logFilePath)
            else:
                raise ValueError('Unsupported RL algorithm: ' + str(algorithm))
            model.compile_model(X_train, y_train, layers)
            model.q_net.summary()
            model.train(xval, yval)
            network = model.get_network()
            predictedytest = network_predictions(network, np.array(xtest))
            score = objClf.get_score(self.scoreParam,ytest,predictedytest)
            score = round(score,2)
            return (network,self.rl_config,score,algorithm,-1,-1,-1)
        except Exception as inst:
            self.log.info( '\\n-----> RL Failed!!!.'+str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s><s> import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import (auc, average_precision_score, confusion_matrix,
f1_score, precision_recall_curve, roc_curve,precision_score,recall_score)
from tensorflow import constant
from tf_agents.trajectories import time_step
def network_predictions(network, X: np.ndarray) -> np.ndarray:
    """Computes y_pred using a given network.
    Input is array of data entries.

    :param network: The network to use to calculate metrics
    :type network: (Q)Network
    :param X: X data, input to network
    :type X: np.ndarray
    :return: Numpy array of predicted targets for given X
    :rtype: np.ndarray
    """
    # Bug fix: return annotation was `dict` but the function returns np.ndarray
    # (as the docstring already stated).
    if not isinstance(X, np.ndarray):
        raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}")
    # The whole batch is treated as FIRST steps: one-shot classification episodes.
    q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False)
    return np.argmax(q.numpy(), axis=1)  # Max action for each x in X
def decision_function(network, X: np.ndarray) -> np.ndarray:
    """Computes the score for the predicted class of each x in X using a given network.
    Input is array of data entries.

    :param network: The network to use to calculate the score per x in X
    :type network: (Q)Network
    :param X: X data, input to network
    :type X: np.ndarray
    :return: Numpy array of scores for given X
    :rtype: np.ndarray
    """
    # Bug fix: return annotation was `dict` but the function returns np.ndarray
    # (as the docstring already stated).
    if not isinstance(X, np.ndarray):
        raise ValueError(f"`X` must be of type `np.ndarray` not {type(X)}")
    q, _ = network(X, step_type=constant([time_step.StepType.FIRST] * X.shape[0]), training=False)
    return np.max(q.numpy(), axis=1)  # Value of max action for each x in X
def classification_metrics(y_true: list, y_pred: list) -> dict:
    """Computes metrics using y_true and y_pred.

    :param y_true: True labels
    :type y_true: np.ndarray
    :param y_pred: Predicted labels, corresponding to y_true
    :type y_pred: np.ndarray
    :return: Dictionary containing macro-averaged F1, Precision and Recall
    :rtype: dict
    """
    # Doc fix: the docstring previously promised G-mean/TP/TN/FP/FN which are
    # not computed here.
    if not isinstance(y_true, (list, tuple, np.ndarray)):
        raise ValueError(f"`y_true` must be of type `list` not {type(y_true)}")
    if not isinstance(y_pred, (list, tuple, np.ndarray)):
        raise ValueError(f"`y_pred` must be of type `list` not {type(y_pred)}")
    if len(y_true) != len(y_pred):
        # Bug fix: message previously referred to `X` and `y`.
        raise ValueError("`y_true` and `y_pred` must be of same length.")
    # Macro averaging weighs every class equally, which suits imbalanced data.
    F1 = f1_score(y_true, y_pred, average='macro')
    recall = recall_score(y_true,y_pred,average='macro')
    precision = precision_score(y_true,y_pred,average='macro')
    return {"F1": F1, "Precision": precision, "Recall": recall}
def plot_pr_curve(network, X_test: np.ndarray, y_test: np.ndarray,
                  X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover
    """Plots PR curve of X_test and y_test of given network.
    Optionally plots PR curve of X_train and y_train.
    Average precision is shown in the legend.

    :param network: The network to use to calculate the PR curve
    :type network: (Q)Network
    :param X_test: X data, input to network
    :type X_test: np.ndarray
    :param y_test: True labels for `X_test`
    :type y_test: np.ndarray
    :param X_train: Optional X data to plot a second (train) PR curve
    :type X_train: np.ndarray
    :param y_train: True labels for `X_train`
    :type y_train: np.ndarray
    :return: None
    :rtype: NoneType
    """
    # Diagonal from (0, 1) to (1, 0) drawn as a reference baseline.
    plt.plot((0, 1), (1, 0), color="black", linestyle="--", label="Baseline")
    # TODO: Consider changing baseline
    # Train curve is optional: drawn only when both X_train and y_train are given.
    if X_train is not None and y_train is not None:
        y_val_score = decision_function(network, X_train)
        val_precision, val_recall, _ = precision_recall_curve(y_train, y_val_score)
        val_AP = average_precision_score(y_train, y_val_score)
        plt.plot(val_recall, val_precision, label=f"Train AP: {val_AP:.3f}")
    # Scores come from the network's max Q-value per sample (decision_function).
    y_test_score = decision_function(network, X_test)
    test_precision, test_recall, _ = precision_recall_curve(y_test, y_test_score)
    test_AP = average_precision_score(y_test, y_test_score)
    plt.plot(test_recall, test_precision, label=f"Test AP: {test_AP:.3f}")
    # Slightly padded limits so curves touching 0/1 remain visible.
    plt.xlim((-0.05, 1.05))
    plt.ylim((-0.05, 1.05))
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.title("PR Curve")
    plt.gca().set_aspect("equal", adjustable="box")
    plt.legend(loc="lower left")
    plt.grid(True)
    plt.show()
def plot_roc_curve(network, X_test: np.ndarray, y_test: np.ndarray,
                   X_train: np.ndarray = None, y_train: np.ndarray = None) -> None: # pragma: no cover
    """Plots ROC curve of X_test and y_test of given network.
    Optionally plots ROC curve of X_train and y_train.
    AUROC is shown in the legend.

    :param network: The network to use to calculate the ROC curve
    :type network: (Q)Network
    :param X_test: X data, input to network
    :type X_test: np.ndarray
    :param y_test: True labels for `X_test`
    :type y_test: np.ndarray
    :param X_train: Optional X data to plot a second (train) ROC curve
    :type X_train: np.ndarray
    :param y_train: True labels for `X_train`
    :type y_train: np.ndarray
    :return: None
    :rtype: NoneType
    """
    # Diagonal from (0, 0) to (1, 1): the ROC of a random classifier.
    plt.plot((0, 1), (0, 1), color="black", linestyle="--", label="Baseline")
    # TODO: Consider changing baseline
    # Train curve is optional: drawn only when both X_train and y_train are given.
    if X_train is not None and y_train is not None:
        y_train_score = decision_function(network, X_train)
        fpr_train, tpr_train, _ = roc_curve(y_train, y_train_score)
        plt.plot(fpr_train, tpr_train, label=f"Train AUROC: {auc(fpr_train, tpr_train):.2f}")
    # Scores come from the network's max Q-value per sample (decision_function).
    y_test_score = decision_function(network, X_test)
    fpr_test, tpr_test, _ = roc_curve(y_test, y_test_score)
    plt.plot(fpr_test, tpr_test, label=f"Test AUROC: {auc(fpr_test, tpr_test):.2f}")
    # Slightly padded limits so curves touching 0/1 remain visible.
    plt.xlim((-0.05, 1.05))
    plt.ylim((-0.05, 1.05))
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("ROC Curve")
    plt.gca().set_aspect("equal", adjustable="box")
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.show()
def plot_confusion_matrix(TP: int, FN: int, FP: int, TN: int) -> None: # pragma: no cover
    """Plots confusion matrix of given TP, FN, FP, TN.

    :param TP: True Positive
    :type TP: int
    :param FN: False Negative
    :type FN: int
    :param FP: False Positive
    :type FP: int
    :param TN: True Negative
    :type TN: int
    :return: None
    :rtype: NoneType
    """
    if not all(isinstance(i, (int, np.integer)) for i in (TP, FN, FP, TN)):
        raise ValueError("Not all arguments are integers.")
    # Positive class is "Minority", negative class is "Majority";
    # fmt="_d" renders counts with underscore thousands separators.
    ticklabels = ("Minority", "Majority")
    sns.heatmap(((TP, FN), (FP, TN)), annot=True, fmt="_d", cmap="viridis", xticklabels=ticklabels, yticklabels=ticklabels)
    plt.title("Confusion matrix")
    plt.xlabel("Predicted labels")
    plt.ylabel("True labels")
    plt.show()
<s> import os
import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
from tf_agents.agents.dqn.dqn_agent import DdqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\
TFUniformReplayBuffer
from tf_agents.utils import common
class TrainDDQN():
"""Wrapper for DDQN training, validation, saving etc."""
def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int,
model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None,
collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None:
"""
Wrapper to make training easier.
Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial
:param episodes: Number of training episodes
:type episodes: int
:param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts
:type warmup_steps: int
:param learning_rate: Learning Rate for the Adam Optimizer
:type learning_rate: float
:param gamma: Discount factor for the Q-values
:type gamma: float
:param min_epsilon: Lowest and final value for epsilon
:type min_epsilon: float
:param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
:type decay_episodes: int
:param model_path: Location to save the trained model
:type model_path: str
:param log_dir: Location to save the logs, usefull for TensorBoard
:type log_dir: str
:param batch_size: Number of samples in minibatch to train on each step
:type batch_size: int
:param memory_length: Maximum size of the Replay Buffer
:type memory_length: int
:param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode
:type collect_steps_per_episode: int
:param collect_every: Step interval to collect data during training
:type collect_every: int
:param val_every: Validate the model every X episodes using the `collect_metrics()` function
:type val_every: int
:param target_update_period: Update the target Q-network every X episodes
:type target_update_period: int
:param target_update_tau: Parameter for softening the `target_update_period`
:type target_update_tau: float
:param progressbar: Enable or disable the progressbar for collecting data and training
:type progressbar: bool
:return: None
:rtype: NoneType
"""
self.episodes = episodes # Total episodes
self.warmup_steps = warmup_steps # Amount of warmup steps before training
self.batch_size = batch_size # Batch size of Replay Memory
self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode
self.collect_every = collect_every # Step interval to collect data during training
self.learning_rate = learning_rate # Learning Rate
self.gamma = gamma # Discount factor
self.min_epsilon = min_epsilon # Minimal chance of choosing random action
self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON`
self.target_update_period = target_update_period # Period for soft updates
self.target_update_tau = target_update_tau
self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training
self.n_step_update = n_step_update
self.gradient_clipping = gradient_clipping # Clip the loss
self.compiled = False
NOW = "DDQN" #datetime.now().strftime("%Y%m%d_%H%M%S")
if memory_length is not None:
self.memory_length = memory_length # Max Replay Memory length
else:
self.memory_length = warmup_steps
if val_every is not None:
self.val_every = val_every # Validate the policy every `val_every` episodes
else:
self.val_every = self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50
if model_path is not None:
#if os.path.exists(model_path + "/" + NOW + ".pkl"):
# os.remove(model_path + "/" + NOW + ".pkl")
self.model_path = model_path + "/" + NOW + ".pkl"
else:
self.model_path = "./models/" + NOW + ".pkl"
if log_dir is None:
log_dir = "./logs/" + NOW
self.writer = tf.summary.create_file_writer(log_dir)
def compile_model(self, X_train, y_train, layers: list = [], imb_ratio: float = None, loss_fn=common.element_wise_squared_loss) -> None:
"""Initializes the neural networks, DDQN-agent, collect policies and replay buffer.
:param X_train: Training data for the model.
:type X_train: np.ndarray
:param y_train: Labels corresponding to `X_train`. 1 for the positive class, 0 for the negative class.
:param y_train: np.ndarray
:param layers: List of layers to feed into the TF-agents custom Sequential(!) layer.
:type layers: list
:param imb_ratio: The imbalance ratio of the data.
:type imb_ratio: float
:param loss_fn: Callable loss function
:type loss_ | ||
fn: tf.compat.v1.losses
:return: None
:rtype: NoneType
"""
if imb_ratio is None:
imb_ratio = imbalance_ratio(y_train)
self.train_env = TFPyEnvironment(ClassifierEnv(X_train, y_train, imb_ratio))
self.global_episode = tf.Variable(0, name="global_episode", dtype=np.int64, trainable=False) # Global train episode counter
# Custom epsilon decay: https://github.com/tensorflow/agents/issues/339
epsilon_decay = tf.compat.v1.train.polynomial_decay(
1.0, self.global_episode, self.decay_episodes, end_learning_rate=self.min_epsilon)
self.q_net = Sequential(layers, self.train_env.observation_spec())
self.agent = DdqnAgent(self.train_env.time_step_spec(),
self.train_env.action_spec(),
q_network=self.q_net,
optimizer=Adam(learning_rate=self.learning_rate),
td_errors_loss_fn=loss_fn,
train_step_counter=self.global_episode,
target_update_period=self.target_update_period,
target_update_tau=self.target_update_tau,
gamma=self.gamma,
epsilon_greedy=epsilon_decay,
n_step_update=self.n_step_update,
gradient_clipping=self.gradient_clipping)
self.agent.initialize()
self.random_policy = RandomTFPolicy(self.train_env.time_step_spec(), self.train_env.action_spec())
self.replay_buffer = TFUniformReplayBuffer(data_spec=self.agent.collect_data_spec,
batch_size=self.train_env.batch_size,
max_length=self.memory_length)
self.warmup_driver = DynamicStepDriver(self.train_env,
self.random_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.warmup_steps) # Uses a random policy
self.collect_driver = DynamicStepDriver(self.train_env,
self.agent.collect_policy,
observers=[self.replay_buffer.add_batch],
num_steps=self.collect_steps_per_episode) # Uses the epsilon-greedy policy of the agent
self.agent.train = common.function(self.agent.train) # Optimalization
self.warmup_driver.run = common.function(self.warmup_driver.run)
self.collect_driver.run = common.function(self.collect_driver.run)
self.compiled = True
    def train(self, *args) -> None:
        """Starts the training of the model. Includes warmup period, metrics collection and model saving.

        Flow: (1) fill the replay buffer with `warmup_steps` random transitions,
        (2) per episode optionally collect steps with the epsilon-greedy policy,
        (3) train on a sampled batch, (4) every `val_every` episodes log the loss
        and run `collect_metrics()`.

        :param *args: All arguments will be passed to `collect_metrics()`.
            This can be useful to pass callables, testing environments or validation data.
            Overwrite the TrainDDQN.collect_metrics() function to use your own *args.
        :type *args: Any
        :return: None
        :rtype: NoneType, last step is saving the model as a side-effect
        """
        assert self.compiled, "Model must be compiled with model.compile_model(X_train, y_train, layers) before training."
        # Warmup period, fill memory with random actions
        if self.progressbar:
            print(f"\\033[92mCollecting data for {self.warmup_steps:_} steps... This might take a few minutes...\\033[0m")
        self.warmup_driver.run(time_step=None, policy_state=self.random_policy.get_initial_state(self.train_env.batch_size))
        if self.progressbar:
            print(f"\\033[92m{self.replay_buffer.num_frames():_} frames collected!\\033[0m")
        # Dataset of (n_step_update + 1)-step experience windows, prefetched for throughput
        dataset = self.replay_buffer.as_dataset(sample_batch_size=self.batch_size, num_steps=self.n_step_update + 1,
                                                num_parallel_calls=data.experimental.AUTOTUNE).prefetch(data.experimental.AUTOTUNE)
        iterator = iter(dataset)
        def _train():
            # One optimizer step on a sampled experience batch; returns the scalar loss.
            experiences, _ = next(iterator)
            return self.agent.train(experiences).loss
        _train = common.function(_train)  # Optimalization
        ts = None
        policy_state = self.agent.collect_policy.get_initial_state(self.train_env.batch_size)
        self.collect_metrics(*args)  # Initial collection for step 0
        for _ in range(self.episodes):
            # `global_episode` is the agent's train_step_counter (see compile_model);
            # NOTE(review): presumably it advances once per _train() call — confirm with TF-Agents.
            if not self.global_episode % self.collect_every:
                # Collect a few steps using collect_policy and save to `replay_buffer`
                if self.collect_steps_per_episode != 0:
                    ts, policy_state = self.collect_driver.run(time_step=ts, policy_state=policy_state)
            # Sample a batch of data from `replay_buffer` and update the agent's network
            train_loss = _train()
            if not self.global_episode % self.val_every:
                with self.writer.as_default():
                    tf.summary.scalar("train_loss", train_loss, step=self.global_episode)
                self.collect_metrics(*args)
def collect_metrics(self, X_val: np.ndarray, y_val: np.ndarray, save_best: str = None):
"""Collects metrics using the trained Q-network.
:param X_val: Features of validation data, same shape as X_train
:type X_val: np.ndarray
:param y_val: Labels of validation data, same shape as y_train
:type y_val: np.ndarray
:param save_best: Saving the best model of all validation runs based on given metric:
Choose one of: {Gmean, F1, Precision, Recall, TP, TN, FP, FN}
This improves stability since the model at the last episode is not guaranteed to be the best model.
:type save_best: str
"""
y_pred = network_predictions(self.agent._target_q_network, X_val)
stats = classification_metrics(y_val, y_pred)
avgQ = np.mean(decision_function(self.agent._target_q_network, X_val)) # Max action for each x in X
if save_best is not None:
if not hasattr(self, "best_score"): # If no best model yet
self.best_score = 0.0
if stats.get(save_best) >= self.best_score: # Overwrite best model
self.save_network() # Saving directly to avoid shallow copy without trained weights
self.best_score = stats.get(save_best)
with self.writer.as_default():
tf.summary.scalar("AverageQ", avgQ, step=self.global_episode) # Average Q-value for this epoch
for k, v in stats.items():
tf.summary.scalar(k, v, step=self.global_episode)
def evaluate(self,X_train,y_train, X_test, y_test):
"""
Final evaluation of trained Q-network with X_test and y_test.
Optional PR and ROC curve comparison to X_train, y_train to ensure no overfitting is taking place.
:param X_test: Features of test data, same shape as X_train
:type X_test: np.ndarray
:param y_test: Labels of test data, same shape as y_train
:type y_test: np.ndarray
:param X_train: Features of train data
:type X_train: np.ndarray
:param y_train: Labels of train data
:type y_train: np.ndarray
"""
#if hasattr(self, "best_score"):
# print(f"\\033[92mBest score: {self.best_score:6f}!\\033[0m")
# network = self.load_network(self.model_path) # Load best saved model
#else:
# network = self.agent._target_q_network # Load latest target model
#network = self.load_network(self.model_path)
#if (X_train is not None) and (y_train is not None):
# plot_pr_curve(network, X_test, y_test, X_train, y_train)
# plot_roc_curve(network, X_test, y_test, X_train, y_train)
y_pred = network_predictions(self.agent._target_q_network, X_test)
return classification_metrics(y_test, y_pred)
def get_network(self):
#network = self.load_network(self.model_path)
return self.agent._target_q_network
def save_network(self, filename_rl): #usnish
"""Saves Q-network as pickle to `model_path`."""
with open(self.filename_rl, "wb") as f: # Save Q-network as pickle
pickle.dump(self.agent._target_q_network, f)
@staticmethod
def load_network(fp: str):
"""Static method to load Q-network pickle from given filepath.
:param fp: Filepath to the saved pickle of the network
:type fp: str
:returns: The network-object loaded from a pickle file.
:rtype: tensorflow.keras.models.Model
"""
with open(fp, "rb") as f: # Load the Q-network
network = pickle.load(f)
return network
<s><s> import os
import pickle
from datetime import datetime
import numpy as np
import tensorflow as tf
from reinforcement.environments.classifierenv import ClassifierEnv
from reinforcement.metrics import (classification_metrics, decision_function,
network_predictions, plot_pr_curve, plot_roc_curve)
from reinforcement.utils import imbalance_ratio
from tensorflow import data
from tensorflow.keras.optimizers import Adam
#from tf_agents.agents.dqn.dqn_agent import DdqnAgent
from tf_agents.agents import DqnAgent
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.networks.sequential import Sequential
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers.tf_uniform_replay_buffer import \\
TFUniformReplayBuffer
from tf_agents.utils import common
class TrainDQN():
"""Wrapper for DDQN training, validation, saving etc."""
def __init__(self, episodes: int, warmup_steps: int, learning_rate: float, gamma: float, min_epsilon: float, decay_episodes: int,
model_path: str = None, log_dir: str = None, batch_size: int = 64, memory_length: int = None,
collect_steps_per_episode: int = 1, val_every: int = None, target_update_period: int = 1, target_update_tau: float = 1.0,
progressbar: bool = True, n_step_update: int = 1, gradient_clipping: float = 1.0, collect_every: int = 1) -> None:
"""
Wrapper to make training easier.
Code is partly based of https://www.tensorflow.org/agents/tutorials/1_dqn_tutorial
:param episodes: Number of training episodes
:type episodes: int
:param warmup_steps: Number of episodes to fill Replay Buffer with random state-action pairs before training starts
:type warmup_steps: int
:param learning_rate: Learning Rate for the Adam Optimizer
:type learning_rate: float
:param gamma: Discount factor for the Q-values
:type gamma: float
:param min_epsilon: Lowest and final value for epsilon
:type min_epsilon: float
:param decay_episodes: Amount of episodes to decay from 1 to `min_epsilon`
:type decay_episodes: int
:param model_path: Location to save the trained model
:type model_path: str
:param log_dir: Location to save the logs, usefull for TensorBoard
:type log_dir: str
:param batch_size: Number of samples in minibatch to train on each step
:type batch_size: int
:param memory_length: Maximum size of the Replay Buffer
:type memory_length: int
:param collect_steps_per_episode: Amount of data to collect for Replay Buffer each episiode
:type collect_steps_per_episode: int
:param collect_every: Step interval to collect data during training
:type collect_every: int
:param val_every: Validate the model every X episodes using the `collect_metrics()` function
:type val_every: int
:param target_update_period: Update the target Q-network every X episodes
:type target_update_period: int
:param target_update_tau: Parameter for softening the `target_update_period`
:type target_update_tau: float
:param progressbar: Enable or disable the progressbar for collecting data and training
:type progressbar: bool
:return: None
:rtype: NoneType
"""
self.episodes = episodes # Total episodes
self.warmup_steps = warmup_steps # Amount of warmup steps before training
self.batch_size = batch_size # Batch size of Replay Memory
self.collect_steps_per_episode = collect_steps_per_episode # Amount of steps to collect data each episode
self.collect_every = collect_every # Step interval to collect data during training
self.learning_rate = learning_rate # Learning Rate
self.gamma = gamma # Discount factor
self.min_epsilon = min_epsilon # Minimal chance of choosing random action
self.decay_episodes = decay_episodes # Number of episodes to decay from 1.0 to `EPSILON`
self.target_update_period = target_update_period # Period for soft updates
self.target_update_tau = target_update_tau
self.progressbar = progressbar # Enable or disable the progressbar for collecting data and training
self.n_step_update = n_step_update
self.gradient_clipping = gradient_clipping # Clip the loss
self.compiled = False
NOW = "DQN" #datetime.now().strftime("%Y%m%d_%H%M%S")
if memory_length is not None:
self.memory_length = memory_length # Max Replay Memory length
else:
self.memory_length = warmup_steps
if val_every is not None:
self.val_every = val_every # Validate the policy every `val_every` episodes
else:
self.val_every | ||
= self.episodes // min(50, self.episodes) # Can't validate the model 50 times if self.episodes < 50
if model_path is not None:
#if os.path.exists(model_path + "/" + NOW + ".pkl"):
# os.remove(model_path + "/" + NOW + " | ||
when minority class is misclassified
else: # Majority
reward = -self.imb_ratio # False Positive
if self.episode_step == self.X_train.shape[0] - 1: # If last step in data
self._episode_ended = True
self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint
if self._episode_ended:
return ts.termination(self._state, reward)
else:
return ts.transition(self._state, reward)
<s><s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
# For time-series pmdarima (pyramid-arima) auto-ARIMA module
from pmdarima.arima import auto_arima
import pmdarima as pm
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
#from sklearn.metrics import mean_absolute_percentage_error
from sklearn.linear_model import LinearRegression
from math import sqrt
import warnings
# For serialization.
#from sklearn.externals import joblib
import pickle
import os,sys
# For ploting (mathlab)
import matplotlib.pyplot as plt
#Import eion config manager module
import logging
from sklearn import metrics
from sklearn.metrics import accuracy_score
import time
import os
import sys
# Eion arima module
# Eion arima module
class eion_arima ():
    """pmdarima auto-ARIMA wrapper for the Eion time-series pipeline.

    Reads the auto_arima search space from a config dict, fits a stepwise
    auto-ARIMA model, validates it on a hold-out tail split, then refits on
    the full series. Errors are logged rather than raised, matching the
    surrounding codebase's error-handling style.
    """
    #Constructor
    def __init__(self,configfile,testpercentage,sesonalityChecks,stationaryChecks): # eaobj - eion arima class object
        """Load auto_arima hyperparameters from `configfile` (dict of strings).

        :param configfile: dict with auto_arima search bounds (start_p, max_q, m, ...)
        :param testpercentage: percentage used for the train side of the validation split
        :param sesonalityChecks: unused here; seasonality is read from the config
        :param stationaryChecks: forwarded to auto_arima as `stationary`
        """
        # Create the logger first so the except-branch can always use it (the
        # original assigned it last inside the try, so an early failure raised
        # AttributeError instead of being logged).
        self.log = logging.getLogger('eion')
        try:
            tsarima_params = configfile
            self.testpercentage = testpercentage
            # Search-space bounds for the stepwise (p,d,q)(P,D,Q,m) order selection
            self.start_p= int(tsarima_params['start_p'])
            self.start_q= int(tsarima_params['start_q'])
            self.max_p= int(tsarima_params['max_p'])
            self.max_q= int(tsarima_params['max_q'])
            self.max_d= int(tsarima_params['max_d'])
            self.max_order= int(tsarima_params['max_order'])
            self.start_Q= int(tsarima_params['start_Q'])
            self.max_P= int(tsarima_params['max_P'])
            self.max_D= int(tsarima_params['max_D'])
            self.max_Q= int(tsarima_params['max_Q'])
            self.m= int(tsarima_params['m'])
            self.start_P= int(tsarima_params['start_P'])
            self.seasonal= tsarima_params['seasonal']
            self.stationary=stationaryChecks
            # `seasonal` arrives as a string; map 'true'/'false' (any case) to a
            # bool, defaulting to True for blank or unrecognised values.
            if self.seasonal and not self.seasonal.isspace():
                if (self.seasonal.lower() == 'true'):
                    self.seasonal=True
                elif (self.seasonal.lower() == 'false'):
                    self.seasonal=False
                else:
                    self.seasonal=True
            else:
                self.seasonal=True
            self.d= int(tsarima_params['d'])
            self.D= int(tsarima_params['D'])
            self.error_action= tsarima_params['error_action']
            self.suppress_warnings= tsarima_params['suppress_warnings']
            self.stepwise= tsarima_params['stepwise']
        except Exception as inst:
            self.log.info('<!------------- Arima INIT Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
    def mean_absolute_percentage_error(self,y_true, y_pred):
        """Return the mean absolute percentage error (in %) of y_pred vs y_true.

        Bug fix: the epsilon is now added to the DENOMINATOR, `(y_true + eps)`,
        to guard against division by zero. The original added it outside the
        division (`.../ y_true + eps`), which left zero-valued targets
        producing inf/nan.
        """
        try:
            y_true, y_pred=np.array(y_true), np.array(y_pred)
            return np.mean(np.abs((y_true - y_pred) / (y_true + sys.float_info.epsilon))) * 100
        except Exception as inst:
            self.log.info('<------------- mean_absolute_percentage_error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
    def eion_arima(self,train_data):
        """Fit auto_arima on `train_data`, validate on a tail split, refit on all data.

        :param train_data: pandas Series/DataFrame holding the target series
        :return: (fitted_model, mae, rmse, mse, r2, aic_score, mape, valid, a_prediction)
        """
        try:
            start = time.time()
            # Stepwise search over the configured order space
            auto_arima_stepwise_fit = pm.auto_arima(train_data, start_p=self.start_p, start_q=self.start_q,max_p=self.max_p, max_q=self.max_q,max_d=self.max_d,max_P=self.max_P,max_D=self.max_D,max_Q=self.max_Q,max_order=self.max_order, m=self.m,start_P=self.start_P,start_Q=self.start_Q, seasonal=self.seasonal,stationary=self.stationary,d=self.d, D=self.D,error_action=self.error_action,suppress_warnings=self.suppress_warnings,stepwise=self.stepwise)
            aic_score = auto_arima_stepwise_fit.aic()
            self.log.info('------->AIC Score: '+str(aic_score))
            self.log.info('\\n--------- Fit Summary --------------')
            self.log.info (auto_arima_stepwise_fit.summary())
            self.log.info('--------- Fit Summary End--------------\\n')
            self.log.info("\\n--------------- Modal Validation Start ---------------")
            # NOTE(review): the train split keeps (100 - testpercentage)% of the
            # data, i.e. `testpercentage` behaves as the VALIDATION share — confirm.
            size = int(len(train_data) * (100 - self.testpercentage)/100)
            train = train_data.loc[0:size]
            valid = train_data.loc[size:len(train_data)]
            self.log.info("------->Train Data Shape: "+str(train.shape))
            self.log.info("------->Valid Data Shape"+str(valid.shape))
            # Fit on the training slice, then forecast len(valid) periods ahead
            modelfit = auto_arima_stepwise_fit.fit(train)
            a_prediction = auto_arima_stepwise_fit.predict(valid.shape[0])
            mae = metrics.mean_absolute_error(valid, a_prediction)
            self.log.info ("------->MAE: "+str(mae))
            mape = self.mean_absolute_percentage_error(valid, a_prediction)
            self.log.info ("------->MAPE :"+str(mape))
            rmse = sqrt(mean_squared_error(valid,a_prediction))
            mse = mean_squared_error(valid,a_prediction)
            self.log.info ("------->RMSE :"+str(rmse))
            self.log.info ("------->MSE :"+str(mse))
            from sklearn.metrics import r2_score
            r2 = r2_score(valid,a_prediction)
            # Refit on the complete series so the returned model uses all data
            auto_arima_stepwise_fit.fit(train_data)
            self.log.info("------------- Validate Model End----------------\\n")
            executionTime=time.time() - start
            self.log.info('-------> Time: '+str(executionTime)+'\\n')
            return auto_arima_stepwise_fit,mae,rmse,mse,r2,aic_score,mape,valid,a_prediction
        except Exception as inst:
            self.log.info('<!------------- Arima Execute Error ---------------> '+str(inst))
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import json
#Python sklearn & std libraries
import numpy as np
import pandas as pd
from time_series.ts_arima_eion import eion_arima
from statsmodels.tsa.vector_ar.vecm import coint_johansen
from statsmodels.tsa.vector_ar.var_model import VAR
from math import *
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from math import sqrt
import logging
import os
import sys
import time
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from pandas import read_csv
from statsmodels.tsa.stattools import adfuller
import pmdarima as pm
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.stats.stattools import durbin_watson
from sklearn.utils import check_array
class timeseriesModelTests():
    """Statistical pre-checks for multivariate time-series modelling:
    ADF stationarity (with up to two rounds of differencing), Granger
    causality and Johansen co-integration. All methods log their results
    and swallow exceptions, matching the surrounding codebase's style."""
    def __init__(self,data,targetFeature,datetimeFeature,count):
        # data: input DataFrame; targetFeature: comma-separated target column names;
        # datetimeFeature: datetime column name; count: caller-supplied counter.
        #self.tsConfig = tsConfig
        #self.modelconfig = modelconfig
        #self.modelList = modelList
        self.data = data
        self.targetFeature = targetFeature
        self.dateTimeFeature = datetimeFeature
        self.count=count
        self.log = logging.getLogger('eion')
    def StatinaryChecks(self,dictDiffCount):
        """ADF-test each target feature; difference non-stationary series (at most
        twice), recording the applied diff order per feature in `dictDiffCount`.
        Returns the (possibly differenced) self.data and the updated dict."""
        self.log.info("\\n---------------Start Stationary Checks-----------")
        tFeature = self.targetFeature.split(',')
        tFeature.append(self.dateTimeFeature)
        self.data=self.data[tFeature]  # restrict frame to targets + datetime column
        tFeature.remove(self.dateTimeFeature)
        lengthtFeature=len(tFeature)  # NOTE(review): unused below
        diffCount=0  # NOTE(review): unused; dictDiffCount carries the per-feature counts
        try :
            for features in (tFeature):
                XSt = self.data[features]
                XSt=XSt.values
                # adfuller returns (adf_stat, p-value, ..., critical values at index 4)
                resultSt = adfuller(XSt,autolag='AIC')
                stationaryFlag = False
                #print(resultSt)
                self.log.info('-------> Features: '+str(features))
                self.log.info('----------> ADF Statistic: '+str(resultSt[0]))
                self.log.info('----------> p-value: %f' % resultSt[1])
                # p <= 0.05 rejects the unit-root null => series treated as stationary
                if resultSt[1]<= 0.05:
                    self.log.info("-------------> Converted As Stationary Data")
                    stationaryFlag = True
                else:
                    self.log.info("-------------> Stationary Conversion Required")
                    stationaryFlag = False
                self.log.info('----------> Critical Values')
                for key, value in resultSt[4].items():
                    self.log.info('----------> '+str(key)+': '+str(value))
                if stationaryFlag == False:
                    # First-order differencing; dropna removes the NaN row each diff introduces
                    self.data[features]=self.data[features].diff()
                    self.data=self.data.dropna()
                    dictDiffCount[features]=1
                    XStt = self.data[features]
                    XStt=XStt.values
                    resultStt = adfuller(XStt)
                    if resultStt[1] > 0.05:
                        # Still non-stationary: difference a second (final) time
                        self.data[features]=self.data[features].diff()
                        self.data=self.data.dropna()
                        dictDiffCount[features]=2
                        XSttt = self.data[features]
                        XSttt=XSttt.values
                        resultSttt = adfuller(XSttt)
                        if resultSttt[1]<= 0.05:
                            stationaryFlag = True
                    else:
                        stationaryFlag = True
                    self.log.info("------------->"+str(dictDiffCount))
                if stationaryFlag == True:
                    self.log.info("----------> Equals to Stationary Data")
                else:
                    self.log.info("----------> Not Equal To Stationary Data")
            self.log.info("-------> Stationary data diff()")
            self.log.info(dictDiffCount)
            self.log.info("---------------Start Stationary Checks Ends-----------\\n")
            return self.data,dictDiffCount
        except Exception as inst:
            self.log.info('<!------------- Time Series Stationary Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
    def varTimeseriesModelTests(self,data):
        """Run pairwise Granger causality tests over the target features (up to
        lag 15) and return the raw statsmodels result dict."""
        try :
            tFeature = self.targetFeature.split(',')
            self.log.info("\\n--------- Start Granger Causality Test Results ------------")
            gtest=grangercausalitytests(data[tFeature], maxlag=15, addconst=True, verbose=True)
            self.log.info("-------> GrangerCausalitytest Results "+str(gtest.values()))
            self.log.info("--------- End Granger Causality Test Results ------------\\n")
            return gtest
        except Exception as inst:
            self.log.info('<!------------- Time Series Granger Causality testTest Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
    def grangersCausationMatrix(self,data, variables, test='ssr_chi2test', verbose=False):
        """Build the matrix of minimum Granger-causality p-values (rows = response
        `_y`, columns = predictor `_x`) and count significant off-diagonal cells.
        Returns (df, countVariables)."""
        try :
            countVariables=0
            self.log.info(len(variables))
            self.log.info("\\n--------------Start GrangersCausationMatrix---------------")
            df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)
            for c in df.columns:
                for r in df.index:
                    # Minimum p-value across lags 1..12 for "c Granger-causes r"
                    test_result = grangercausalitytests(data[[r, c]], maxlag=12, verbose=False)
                    p_values = [round(test_result[i+1][0][test][1],4) for i in range(12)]
                    if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}')
                    min_p_value = np.min(p_values)
                    df.loc[r, c] = min_p_value
            df.columns = [var + '_x' for var in variables]
            df.index = [var + '_y' for var in variables]
            self.log.info(df)
            for i in range(len(variables)):
                for j in range(len(variables)):
                    # NOTE(review): the condition tests df.iloc[i][j] twice; the second
                    # term was presumably meant to be df.iloc[j][i] (bidirectional
                    # causality) — confirm intent before changing.
                    if i!=j and df.iloc[i][j]<0.05 and df.iloc[i][j]<0.05:
                        countVariables=countVariables+1
            self.log.info("--------------End GrangersCausationMatrix---------------\\n")
            return df,countVariables
        except Exception as inst:
            self.log.info('<!------------- Time Series grangersCausationMatrix Test Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
            return df,countVariables
    def coIntegrationTest(self,data):
        """Johansen co-integration test on the non-datetime columns of `data`;
        returns the number of co-integrating relations found (0 when none)."""
        try :
            tdata = data.drop([self.dateTimeFeature], axis=1)
            tdata.index = data[self.dateTimeFeature]
            cols = tdata.columns
            self.log.info("\\n-------------- Start of the Co Integration test ---------------")
            lenTargetFeature=len(self.targetFeature)  # NOTE(review): length of the raw string, not the feature count
            countIntegrationFeature=0
            N, l = tdata.shape
            jres = coint_johansen(tdata, 0, 1)
            trstat = jres.lr1  # trace statistics
            tsignf = jres.cvt  # critical values (columns: 90/95/99%)
            # r = largest i+1 whose trace statistic exceeds the 95% critical value
            for i in range(l):
                if trstat[i] > tsignf[i, 1]:
                    r = i + 1
            # NOTE(review): if no statistic exceeds the critical value, `r` is unbound
            # here and the resulting NameError is swallowed by the except block — confirm.
            jres.r = r
            jres.evecr = jres.evec[:, :r]
            jres.r = r
            countIntegrationFeature=jres.r
            jres.evecr = jres.evec[:, :r]
            self.log.info('------->coint_johansen trace statistics: '+str(trstat))
            self.log.info('------->coint_johansen critical values:')
            self.log.info(tsignf)
            self.log.info("------->There are "+str(countIntegrationFeature)+" Co-Integration vectors")
            self.log.info("-------------- End of the Co Integration test ---------------\\n")
            return countIntegrationFeature
        except Exception as inst:
            self.log.info('<!------------- Time Series Co-Integration Test Error ---------------> ')
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)
            self.log.info(str(exc_type)+' '+str(fname)+' '+str(exc_tb.tb_lineno))
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
# import os
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import math
from sklearn.metrics import mean_squared_error
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM
import logging
# import kerastuner
import keras_tuner
#from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband
import warnings
warnings.simplefilter("ignore", UserWarning)
# from keras.models import load_model
# from tensorflow.keras.optimizers import SGD
# from tensorflow.keras.utils import load_model
from tensorflow.keras.models import load_model
class timeseriesDLUnivariate:
def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature,modelName):
self.look_back=None
#Preprocessed dataframe
# self.df=df
self.savedmodelname=None
self.deploy_location=None
self.epochs=None
self.batch_size=None
self.hidden_layers=None
self.optimizer=None
self.activation_fn=None
self.loss_fn=None
self.first_layer=None
self.dropout=None
self.model_name=None
self.hpt_train=None
##Below is model type (MLP or lstm)
self.model_type=modelName
#self.dataFolderLocation=str(dataFolderLocation)
##Added for ts hpt
self.tuner_algorithm=""
self.dl_params = configfile
# self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = testpercentage
self.log = logging.getLogger('eion')
#To extract dict key,values
def extract_params(self,dict):
self.dict=dict
for k,v in self.dict.items():
return k,v
##Get deep learning model hyperparameter from advanced config
    def getdlparams(self):
        """Parse the deep-learning hyperparameters in `self.dl_params` into
        instance attributes; on any per-key failure, fall back to a full set
        of defaults and continue with the next key."""
        val=self.dl_params
        self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>')
        self.log.info("              "+str(val))
        for k,v in val.items():
            try:
                if (k == "tuner_algorithm"):
                    self.tuner_algorithm=str(v)
                elif (k == "activation"):
                    self.activation_fn=str(v)
                elif (k == "optimizer"):
                    self.optimizer=str(v)
                elif (k == "loss"):
                    self.loss_fn=str(v)
                elif (k == "first_layer"):
                    # NOTE(review): isinstance(k, list) tests the KEY (a str), so this
                    # always takes the split branch — presumably `v` was intended; confirm.
                    if not isinstance(k,list):
                        self.first_layer=str(v).split(',')
                    else:
                        self.first_layer=k
                elif (k == "lag_order"):
                    # NOTE(review): when the isinstance branch is taken this rebinds the
                    # loop variable k and never sets self.look_back — looks like a latent
                    # bug; confirm intent. In practice the else branch runs (k is a str).
                    if isinstance(k,list):
                        k = ''.join(v)
                        k=int(float(str(v)))
                    else:
                        self.look_back=int(float(str(v)))
                elif (k == "hidden_layers"):
                    self.hidden_layers=int(v)
                elif (k == "dropout"):
                    # NOTE(review): same key-vs-value isinstance issue as "first_layer".
                    if not isinstance(k,list):
                        self.dropout=str(v).split(',')
                    else:
                        self.dropout=k
                elif (k == "batch_size"):
                    self.batch_size=int(v)
                elif (k == "epochs"):
                    self.epochs=int(v)
                elif (k == "model_name"):
                    self.model_name=str(v)
            except Exception as e:
                # A malformed value resets EVERY hyperparameter to defaults, then
                # continues parsing the remaining keys.
                self.log.info('Exception occured in deeep learn param reading, setting up default params.')
                self.activation_fn="relu"
                self.optimizer="adam"
                self.loss_fn="mean_squared_error"
                self.first_layer=[8,512]
                self.hidden_layers=1
                self.look_back=int(2)
                self.dropout=[0.1,0.5]
                self.batch_size=2
                self.epochs=50
                self.model_name="lstmmodel.h5"
                continue
## Just use this if user need to create dataframe from input data.
def createdf(self,df):
target=""
# splitting reframed to X and Y considering the first column to be out target featureX=reframed.drop(['var1(t)'],axis=1)
X=df.drop([target],axis=1)
Y=df[target]
X_values=X.values
Y_values=Y.values
n_predict=len(Y_values)
train_X,train_Y = X_values[:(X_values.shape[0]-n_predict),:],Y_values[:(X_values.shape[0]-n_predict)]
test_X,test_Y = X_values[(X_values.shape[0]-n_predict):,:],Y_values[(X_values.shape[0]-n_predict):]
#reshaping train and test to feed to LSTM
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
return train_X,train_Y,test_X,test_Y
# convert an array of values into a dataset matrix
def numpydf(self,dataset, look_back):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
# x,y=numpy.array(dataX), numpy.array(dataY)
return np.array(dataX), np.array(dataY)
def model_save(self,model):
import os.path
savedmodelname=self.model_name
path = os.path.join(self.deploy_location,savedmodelname)
model.save(path)
return (savedmodelname)
## MLP model buid
    def mlpDL(self,df):
        """Train and evaluate a univariate MLP timeseries model.

        Pipeline: min-max scale the series, 80/20 train/test split,
        keras-tuner hyperparameter search (algorithm picked by
        self.tuner_algorithm), refit the best model, inverse-transform the
        predictions and compute MSE/RMSE/R2/MAE on the test split.

        Args:
            df: single-column DataFrame holding the target feature
                — assumed numeric and time-indexed; TODO confirm with caller.

        Returns:
            (mse_mlp, rmse_mlp, r2, mae, best_hmodel, df_predicted,
            look_back, scaler)

        NOTE(review): if the body raises before the metrics are computed, the
        outer except only logs, and the final return then raises NameError on
        the unbound locals — confirm callers guard this path.
        """
        self.log.info("MLP timeseries learning starts.....")
        try:
            self.getdlparams()
            # look_back = self.look_back
            dataset = df.values
            dataset = dataset.astype('float32')
            ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags.
            ##number of lag calculated just for reference ,not used now.
            #Dont delete this, just use in future.
            from statsmodels.tsa.stattools import kpss
            statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature])
            self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t"+str(n_lags))
            # scale to [0, 1]; the same scaler is returned for inverse transforms at predict time
            scaler = MinMaxScaler(feature_range=(0, 1))
            dataset = scaler.fit_transform(dataset)
            # split into train and test sets
            train_size = int(len(dataset) * 0.80)
            test_size = len(dataset) - train_size
            train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
            self.hpt_train=train
            tuner_alg=self.tuner_algorithm
            try:
                ## Remove untitled_project dir in AION root folder created by previous tuner search run
                import shutil
                shutil.rmtree(r".\\untitled_project")
            except:
                pass
            # NOTE(review): no else branch — an unrecognised tuner name leaves
            # `tuner` unbound and the outer except handles the NameError.
            if (tuner_alg.lower()=="randomsearch"):
                tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
            elif (tuner_alg.lower()=="bayesianoptimization"):
                tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
            elif (tuner_alg.lower()=="hyperband"):
                tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
            # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis]))
            stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
            try:
                tuner.search(x=train,y=train,validation_data=(test,test),callbacks=[stop_early])
            except:
                tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early])
            # best_model=tuner.get_best_models(num_models=1)[0]
            best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
            best_first_layer=best_hps.get('units')
            best_dropout=best_hps.get('Dropout_rate')
            best_learning_rate=float(best_hps.get('learning_rate'))
            self.log.info("best hyperparameter values for mlp: \\n"+str(best_hps.values))
            look_back = 1 ## Because univariate problemtype
            trainX, trainY = self.numpydf(train, look_back)
            testX, testY = self.numpydf(test, look_back)
            best_hmodel=tuner.hypermodel.build(best_hps)
            ##Added for mlp issue,because tuner build also need to compile.
            try:
                best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer)
            except:
                pass
            model_fit = best_hmodel.fit(trainX, trainY, epochs=self.epochs, batch_size=self.batch_size, verbose=2)
            val_acc_per_epoch = model_fit.history['loss']
            best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1
            self.log.info("MLP best epochs value:\\n"+str(best_epoch))
            trainScore = best_hmodel.evaluate(trainX, trainY, verbose=0)
            testScore = best_hmodel.evaluate(testX, testY, verbose=0)
            #Scoring values for the model
            mse_eval=testScore
            try:
                #If mse_eval is list of values
                min_v=min(mse_eval)
            except:
                #If mse_eval is single value
                min_v=mse_eval
            # NOTE(review): rmse_eval is computed but never used below.
            rmse_eval = math.sqrt(min_v)
            # generate predictions for training
            trainPredict = best_hmodel.predict(trainX)
            #print(testX)
            testPredict = best_hmodel.predict(testX)
            #print(testPredict)
            # invert predictions, because we used mimanmax scaler
            trainY = scaler.inverse_transform([trainY])
            trainPredict = scaler.inverse_transform(trainPredict)
            ## For test data
            testY = scaler.inverse_transform([testY])
            testPredict = scaler.inverse_transform(testPredict)
            ## Creating dataframe for actual,predictions
            predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred'])
            actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual'])
            df_predicted=pd.concat([actual,predictions],axis=1)
            #print(df_predicted)
            from math import sqrt
            from sklearn.metrics import mean_squared_error
            try:
                mse_mlp = mean_squared_error(testY.T,testPredict)
                rmse_mlp=sqrt(mse_mlp)
                self.log.info('mse_mlp: '+str(mse_mlp))
                self.log.info('rmse_mlp: '+str(rmse_mlp))
                from sklearn.metrics import r2_score
                from sklearn.metrics import mean_absolute_error
                r2 = r2_score(testY.T,testPredict)
                mae = mean_absolute_error(testY.T,testPredict)
                self.log.info('r2_mlp: '+str(r2))
                self.log.info('mae_mlp: '+str(mae))
            except Exception as e:
                import traceback
                self.log.info("MLP dataframe creation error traceback: \\n"+str(traceback.print_exc()))
                self.log.info(e)
            # df_predicted.to_csv('mlp_prediction.csv')
        except Exception as e:
            self.log.info("MLP timeseries model traceback error msg e: "+str(e))
        # NOTE(review): this success log runs even when the except above fired.
        self.log.info("MLP training successfully completed.\\n")
        return mse_mlp,rmse_mlp,r2,mae,best_hmodel,df_predicted,look_back,scaler
## Added function for hyperparam tuning (TFSTask:7033)
def build_model(self,hp):
try:
loss=self.loss_fn
optimizer=self.optimizer
try:
if optimizer.lower() == "adam":
optimizer=tf.keras.optimizers.Adam
elif(optimizer.lower() == "adadelta"):
optimizer=tf.keras.optimizers.experimental.Adadelta
elif(optimizer.lower() == "nadam"):
optimizer=tf.keras.optimizers.experimental.Nadam
elif(optimizer.lower() == "adagrad"):
optimizer=tf.keras.optimizers.experimental.Adagrad
elif(optimizer.lower() == "adamax"):
optimizer=tf.keras.optimizers.experimental.Adamax
elif(optimizer.lower() == "rmsprop"):
optimizer=tf.keras.optimizers.experimental.RMSprop
elif(optimizer.lower() == "sgd"):
optimizer=tf.keras.optimizers.experimental.SGD
else:
optimizer=tf.keras.optimizers.Adam
except:
optimizer=tf.keras.optimizers.Adam
pass
first_layer_min=round(int(self.first_layer[0]))
first_layer_max=round(int(self.first_layer[1]))
dropout_min=float(self.dropout[0])
dropout_max=float(self.dropout[1])
model=tf.keras.Sequential()
if (self.model_type.lower() == 'lstm'):
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.hpt_train.shape[1]),
activation=hp.Choice('dense_activation',values=['relu'])))
elif (self.model_type.lower() == 'mlp'):
# model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(hp.Int('time_steps',min_value=look_back_min,max_value=look_back_max,step=1)),
# activation='relu'))
##input_dim is 1 because mlp is for univariate.
model.add(Dense(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_dim=(1),activation='relu'))
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
model.add(Dense(units=1))
model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[loss])
except Exception as e:
import traceback
self.log.info("lstm errorbuild_model traceback: \\n"+str(traceback.print_exc()))
return model
##LSTM timeseries function call
    def ts_lstm(self,df):
        """Train and evaluate a univariate LSTM timeseries model.

        Same pipeline as mlpDL: min-max scaling, 80/20 split, keras-tuner
        hyperparameter search, refit of the best model, and inverse-scaled
        test-set metrics (MSE/RMSE/R2/MAE).

        Args:
            df: single-column DataFrame holding the target feature
                — assumed numeric and time-indexed; TODO confirm with caller.

        Returns:
            On success: ('Success', mse_lstm, rmse_lstm, r2, mae, best_hmodel,
            df_predicted, look_back, scaler).
            On failure: ('Error', 0, 0, 0, 0, None, empty DataFrame, 0, None).
        """
        self.log.info("lstm network model learning starts.....\\n")
        try:
            self.getdlparams()
            dataset = df.values
            dataset = dataset.astype('float32')
            ##The below Kwiatkowski-Phillips-Schmidt-Shin (kpss) statsmodel lib used for stationary check as well getting number of lags.
            ##number of lag calculated just for reference ,not used now.
            #Dont delete this, just use in future.
            from statsmodels.tsa.stattools import kpss
            statistic, p_value, n_lags, critical_values = kpss(df[self.targetFeature])
            self.log.info("Based on kpss statsmodel, lag order (time steps to calculate next prediction) is: \\t"+str(n_lags))
            # normalize the dataset
            scaler = MinMaxScaler(feature_range=(0, 1))
            dataset = scaler.fit_transform(dataset)
            # split into train and test sets
            train_size = int(len(dataset) * 0.80)
            test_size = len(dataset) - train_size
            train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
            self.hpt_train=train
            tuner_alg=self.tuner_algorithm
            try:
                ## Remove untitled_project dir in AION root folder created by previous tuner search run
                import shutil
                shutil.rmtree(r".\\untitled_project")
            except:
                pass
            # NOTE(review): no else branch — an unrecognised tuner name leaves
            # `tuner` unbound and the outer except handles the NameError.
            if (tuner_alg.lower()=="randomsearch"):
                tuner=RandomSearch(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
            elif (tuner_alg.lower()=="bayesianoptimization"):
                tuner=BayesianOptimization(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_trials=5,executions_per_trial=3)
            elif (tuner_alg.lower()=="hyperband"):
                tuner=Hyperband(self.build_model,keras_tuner.Objective("val_loss", direction="min"),max_epochs=50,factor=3)
            # tuner.search(X[...,np.new_axis],y,epochs=2,validation_data=(y[...,np.newaxis]))
            from keras.callbacks import EarlyStopping
            stop_early = EarlyStopping(monitor='val_loss', patience=5)
            ##Need both x and y with same dimention.
            tuner.search(x=train,y=train,validation_split=0.2,callbacks=[stop_early])
            # tuner.search(x=train,y=test,validation_data=(test,test),callbacks=[stop_early])
            best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
            best_time_steps=self.look_back
            self.log.info("best lag order or lookback (time_steps) for LSTM: \\n"+str(best_time_steps))
            self.log.info("best hyperparameter values for LSTM: \\n"+str(best_hps.values))
            look_back = best_time_steps
            trainX, trainY = self.numpydf(train, look_back)
            testX, testY = self.numpydf(test, look_back)
            # reshape input to be [samples, time steps, features]
            trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
            testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
            #create and fit the LSTM network
            best_hmodel=tuner.hypermodel.build(best_hps)
            # tuner-built model is recompiled with the configured loss/optimizer;
            # compile failures are deliberately ignored (model stays tuner-compiled)
            try:
                best_hmodel.compile(loss=self.loss_fn, optimizer=self.optimizer)
            except:
                pass
            model_fit = best_hmodel.fit(trainX, trainY, validation_split=0.2, epochs=self.epochs, batch_size=self.batch_size, verbose=2)
            val_acc_per_epoch = model_fit.history['loss']
            best_epoch = val_acc_per_epoch.index(min(val_acc_per_epoch)) + 1
            self.log.info("best epochs value:\\n"+str(best_epoch))
            # best_hmodel=tuner.hypermodel.build(best_hps)
            # best_hmodel.fit(x=trainX,y=trainY,validation_split=0.2,epochs=best_epoch)
            ##Using model_evaluate,calculate mse
            # mse_eval = model.evaluate(testX, testY, verbose=0)
            mse_eval = best_hmodel.evaluate(testX, testY, verbose=0)
            try:
                #If mse_eval is list of values
                min_v=min(mse_eval)
            except:
                #If mse_eval is single value
                min_v=mse_eval
            # NOTE(review): rmse_eval is computed but never used below.
            rmse_eval=math.sqrt(min_v)
            # self.log.info('LSTM mse:'+str(mse_eval))
            # self.log.info('LSTM rmse:'+str(rmse_eval))
            # lstm time series predictions
            trainPredict = best_hmodel.predict(trainX)
            testPredict = best_hmodel.predict(testX)
            # invert predictions, because we used mim=nmax scaler
            trainY = scaler.inverse_transform([trainY])
            trainPredict = scaler.inverse_transform(trainPredict)
            testY = scaler.inverse_transform([testY])
            testPredict = scaler.inverse_transform(testPredict)
            ## Creating dataframe for actual,predictions
            predictions = pd.DataFrame(testPredict, columns=[self.targetFeature+'_pred'])
            actual = pd.DataFrame(testY.T, columns=[self.targetFeature+'_actual'])
            df_predicted=pd.concat([actual,predictions],axis=1)
            from math import sqrt
            from sklearn.metrics import mean_squared_error
            try:
                mse_lstm=None
                mse_lstm = mean_squared_error(testY.T,testPredict)
                rmse_lstm=sqrt(mse_lstm)
                self.log.info("mse_lstm: "+str(mse_lstm))
                self.log.info("rmse_lstm: "+str(rmse_lstm))
                from sklearn.metrics import r2_score
                from sklearn.metrics import mean_absolute_error
                r2 = r2_score(testY.T,testPredict)
                mae = mean_absolute_error(testY.T,testPredict)
                self.log.info('r2_lstm: '+str(r2))
                self.log.info('mae_lstm: '+str(mae))
            except Exception as e:
                self.log.info("lstm error loss fns"+str(e))
                return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
        except Exception as e:
            import traceback
            self.log.info("lstm training error traceback: \\n"+str(traceback.print_exc()))
            return 'Error',0,0,0,0,None,pd.DataFrame(),0,None
        return 'Success',mse_lstm,rmse_lstm,r2,mae,best_hmodel,df_predicted,look_back,scaler
if __name__ == '__main__':
    # Manual smoke-test entry point; in production this module is driven by the
    # AION pipeline through the timeseries learner classes. The triple-quoted
    # block below is inert sample-driver code kept for local experimentation.
    print('Inside timeseriesDLUnivariate main....\\n')
    # tsdl_obj = timeseriesDLUnivariate()
    ## for testing purpose
    '''
    df1= pd.read_csv(r"C:\\aiontest\\testPrograms\\Data\\energydemand.csv",encoding='utf-8', engine='python')
    dateTimeFeature = "utcTimeStamp"
    targetFeature="temperature"
    try:
        df1[dateTimeFeature] = pd.to_datetime(df1[dateTimeFeature]) #, format = '%d/%m/%Y %H.%M')
    except:
        pass
    tdata = df1.drop([dateTimeFeature], axis=1)
    tdata.index = df1[dateTimeFeature]
    tdata = pd.DataFrame(tdata[targetFeature])
    cols = tdata.columns
    mse,rmse,model = tsdl_obj.mlpDL(tdata)
    lmse,lrmse,lstmmodel = tsdl_obj.ts_lstm(tdata)
    print("mlp mse: \\n",mse)
    print("mlp rmse: \\n",rmse)
    print("lstm mse: \\n",lmse)
    print("lstm rmse: \\n",lrmse)
    savedmodelname=tsdl_obj.model_save(lstmmodel)
    '''
<s> '''
*
* =============================================================================
* COPYRIGHT NOTICE
* =============================================================================
* @ Copyright HCL Technologies Ltd. 2021, 2022,2023
* Proprietary and confidential. All information contained herein is, and
* remains the property of HCL Technologies Limited. Copying or reproducing the
* contents of this file, via any medium is strictly prohibited unless prior
* written permission is obtained from HCL Technologies Limited.
*
'''
import pandas as pd
import os
import numpy as np
import numpy
import pandas
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
import logging
import tensorflow as tf
from tensorflow.keras.layers import Dropout
import math
import tensorflow as tf
import keras_tuner
#from keras_tuner.engine.hyperparameters import HyperParameters
from keras_tuner.tuners import RandomSearch,BayesianOptimization ,Hyperband
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
import warnings
warnings.simplefilter("ignore", UserWarning)
class timeseriesDLMultivariate:
def __init__(self,configfile,testpercentage,targetFeature,dateTimeFeature):
self.look_back=None
# self.df=df
self.epochs=None
self.batch_size=None
self.hidden_layers=None
self.optimizer=None
self.activation_fn="relu"
self.loss_fn=None
self.first_layer=None
self.dropout=None
self.model_name=None
self.dl_params = configfile
# self.data=data
self.targetFeature=targetFeature
self.dateTimeFeature=dateTimeFeature
self.testpercentage = float(testpercentage)
self.log = logging.getLogger('eion')
##Added for ts hpt (TFSTask:7033)
self.tuner_algorithm=""
self.num_features=0
##Get deep learning model hyperparameter from advanced config
def getdlparams(self):
val=self.dl_params
self.log.info('-------> The given mlp/lstm timeseries algorithm parameters:>>')
self.log.info(" "+str(val))
for k,v in val.items():
try:
if (k == "tuner_algorithm"):
self.tuner_algorithm=str(v)
elif (k == "activation"):
self.activation_fn=str(v)
elif (k == "optimizer"):
self.optimizer=str(v)
elif (k == "loss"):
self.loss_fn=str(v)
elif (k == "first_layer"):
if not isinstance(k,list):
self.first_layer=str(v).split(',')
else:
self.first_layer=k
elif (k == "lag_order"):
if isinstance(k,list):
k = ''.join(v)
k=int(float(str(v)))
else:
self.look_back=int(float(str(v)))
elif (k == "hidden_layers"):
self.hidden_layers=int(v)
elif (k == "dropout"):
if not isinstance(k,list):
self.dropout=str(v).split(',')
else:
self.dropout=k
elif (k == "batch_size"):
self.batch_size=int(v)
elif (k == "epochs"):
self.epochs=int(v)
elif (k == "model_name"):
self.model_name=str(v)
except Exception as e:
self.log.info('Exception occured in deeep learn param reading, setting up default params.')
self.activation_fn="relu"
self.optimizer="adam"
self.loss_fn="mean_squared_error"
self.first_layer=[8,512]
self.hidden_layers=1
self.look_back=int(2)
self.dropout=[0.1,0.5]
self.batch_size=2
self.epochs=50
self.model_name="lstmmodel.h5"
continue
# Reshape the data to the required input shape of the LSTM model
def create_dataset(self,X, y, n_steps):
Xs, ys = [], []
for i in range(len(X) - n_steps):
v = X.iloc[i:(i + n_steps)].values
Xs.append(v)
ys.append(y.iloc[i + n_steps])
return np.array(Xs), np.array(ys)
## Added function for hyperparam tuning (TFSTask:7033)
def build_model(self,hp):
n_features = len(self.targetFeature)
try:
loss=self.loss_fn
optimizer=self.optimizer
# self.getdlparams()
try:
if optimizer.lower() == "adam":
optimizer=tensorflow.keras.optimizers.Adam
elif(optimizer.lower() == "adadelta"):
optimizer=tensorflow.keras.optimizers.experimental.Adadelta
elif(optimizer.lower() == "nadam"):
optimizer=tensorflow.keras.optimizers.experimental.Nadam
elif(optimizer.lower() == "adagrad"):
optimizer=tensorflow.keras.optimizers.experimental.Adagrad
elif(optimizer.lower() == "adamax"):
optimizer=tensorflow.keras.optimizers.experimental.Adamax
elif(optimizer.lower() == "rmsprop"):
optimizer=tensorflow.keras.optimizers.experimental.RMSprop
elif(optimizer.lower() == "sgd"):
optimizer=tensorflow.keras.optimizers.experimental.SGD
else:
optimizer=tensorflow.keras.optimizers.Adam
except:
optimizer=tf.keras.optimizers.Adam
pass
# look_back_min=int(self.look_back[0])
# look_back_max=int(self.look_back[1])
first_layer_min=round(int(self.first_layer[0]))
first_layer_max=round(int(self.first_layer[1]))
dropout_min=float(self.dropout[0])
dropout_max=float(self.dropout[1])
model=tf.keras.Sequential()
try:
model.add(LSTM(units=hp.Int('units',min_value=first_layer_min,max_value=first_layer_max,step=16),input_shape=(self.look_back,self.num_features)))
except Exception as e:
import traceback
self.log.info("lstm build traceback: \\n"+str(traceback.print_exc()))
return model
model.add(Dropout(hp.Float('Dropout_rate',min_value=dropout_min,max_value=dropout_max,step=0.1)))
model.add(Dense(units=n_features))
model.compile(optimizer=optimizer(hp.Choice('learning_rate',values=[1e-1,1e-2,1e-3,1e-4])),loss=loss,metrics=[self.loss_fn])
except Exception as e:
self.log.info(",Hyperparam tuning build_model err msg: \\n"+ str(e))
return model
##Multivariate lstm prediction function (lstm model, train, prediction, metrics)
def lstm_multivariate(self,df):
try:
self.getdlparams()
n_features = len(self.targetFeature)
self.num_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.