|
|
class ImageMulticlassClassification: |
|
|
    def __init__(self, imgWidth=300, imgHeight=300, batchSize=32):
        """Image classifier wrapper around TensorFlow/Keras.

        All dependencies are imported lazily here and the modules are stored on
        the instance (self.tf, self.np, ...), so merely importing this module
        does not pull in TensorFlow.

        Parameters:
            imgWidth:  width images are resized to, in pixels.
            imgHeight: height images are resized to, in pixels.
            batchSize: per-device batch size; multiplied by the GPU count when
                       at least one GPU is visible.
        """
        from time import time
        import tensorflow as tf
        import matplotlib.pyplot as plt
        import pathlib
        import datetime
        from sklearn.metrics import roc_curve, auc, roc_auc_score
        import os
        import keras
        import numpy as np
        import pandas as pd
        import tarfile
        import sklearn

        # Keep module/function references on the instance so every method can
        # use them without module-level imports.
        self.time = time
        self.sklearn = sklearn
        self.tf = tf
        self.plt = plt
        self.pathlib = pathlib
        self.datetime = datetime
        self.roc_curve = roc_curve
        self.roc_auc_score = roc_auc_score
        self.auc = auc  # NOTE: this is the sklearn `auc` *function*, not a score
        self.os = os
        self.keras = keras
        self.np = np
        self.AUTOTUNE = tf.data.AUTOTUNE  # tuning hint for tf.data pipelines
        self.pd = pd
        self.tarfile = tarfile

        # Image geometry used both for dataset loading and the model input shape.
        self.imgWidth = imgWidth
        self.imgHeight = imgHeight
        # Scale the global batch size with the number of visible GPUs.
        self.numGPU = len(self.tf.config.list_physical_devices('GPU'))
        if self.numGPU > 0:
            self.batchSize = batchSize * self.numGPU
        else:
            self.batchSize = batchSize
        # Populated later by model_make / training / evaluation methods.
        self.Model = None
        self.time_callback = None
        self.history = None
        self.confusionMatrix = None
        self.validation_label = None
        self.trainDataset = None
        self.validationDataset = None
        self.accuracy = None
        self.recall = None
        self.precision = None
        self.f1Score = None
        self.modelName = ""
|
|
|
|
|
|
|
|
def data_MakeDataset(self, datasetUrl=None, datasetPath=None, datasetDirectoryName="Dataset Covid19 Training", ratioValidation=0.2): |
|
|
""" |
|
|
Purpose: |
|
|
- Make dataset from parameter |
|
|
|
|
|
Parameter: |
|
|
- datasetUrl: url of dataset |
|
|
- type: string |
|
|
- example: "https://storage.googleapis.com/fdataset/Dataset%20Covid19%20Training.tgz" |
|
|
- datasetPath: path of dataset |
|
|
- type: string |
|
|
- example: "C:/Users/User/Desktop/Dataset Covid19 Training.tgz" |
|
|
- datasetDirectoryName: name of dataset directory |
|
|
- type: string |
|
|
- example: "Dataset Covid19 Training" |
|
|
- ratioValidation: ratio of validation data |
|
|
- type: float |
|
|
- example: 0.2 |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
if datasetUrl is not None: |
|
|
dataset_url = datasetUrl |
|
|
data_dir = self.tf.keras.utils.get_file(datasetDirectoryName, origin=dataset_url, untar=True) |
|
|
data_dir = self.pathlib.Path(data_dir) |
|
|
elif datasetPath is not None: |
|
|
currentPath = self.os.getcwd() |
|
|
if self.os.path.exists(currentPath + "/" + datasetDirectoryName): |
|
|
|
|
|
self.os.system("rm -rf " + currentPath + "/" + datasetDirectoryName) |
|
|
|
|
|
my_tar = self.tarfile.open(datasetPath) |
|
|
|
|
|
my_tar.extractall(currentPath) |
|
|
my_tar.close() |
|
|
data_dir = self.pathlib.Path(f'{currentPath}/{datasetDirectoryName}/') |
|
|
|
|
|
image_count = len(list(data_dir.glob('*/*.jpg'))) |
|
|
|
|
|
train_ds = self.tf.keras.preprocessing.image_dataset_from_directory( |
|
|
data_dir, |
|
|
seed=123, |
|
|
subset="training", |
|
|
validation_split=ratioValidation, |
|
|
image_size=(self.imgWidth, self.imgHeight), |
|
|
batch_size=self.batchSize) |
|
|
|
|
|
val_ds = self.tf.keras.preprocessing.image_dataset_from_directory( |
|
|
data_dir, |
|
|
seed=123, |
|
|
subset="validation", |
|
|
validation_split=ratioValidation, |
|
|
image_size=(self.imgWidth, self.imgHeight), |
|
|
batch_size=self.batchSize) |
|
|
|
|
|
self.trainDataset = train_ds.cache().shuffle(1000).prefetch(buffer_size=self.AUTOTUNE) |
|
|
self.validationDataset = val_ds.cache().prefetch(buffer_size=self.AUTOTUNE) |
|
|
|
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def data_PreprocessingDataset(self, typeRandomFlip="horizontal_and_vertical", RandomRotation=0.3, RandomZoom=0.2, shuffleTrainDataset=True, augmentTrainDataset=True): |
|
|
""" |
|
|
Purpose: |
|
|
- Preprocessing dataset |
|
|
|
|
|
Parameter: |
|
|
- typeRandomFlip: type of random flip |
|
|
- type: string |
|
|
- example: "horizontal_and_vertical" |
|
|
- options: "horizontal", "vertical", "horizontal_and_vertical" |
|
|
- RandomRotation: random rotation |
|
|
- type: float |
|
|
- example: 0.3 |
|
|
- RandomZoom: random zoom |
|
|
- type: float |
|
|
- example: 0.2 |
|
|
- shuffleTrainDataset: shuffle train dataset |
|
|
- type: bool |
|
|
- example: True |
|
|
- augmentTrainDataset: augment train dataset |
|
|
- type: bool |
|
|
- example: True |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
rescale = self.tf.keras.layers.Rescaling(1.0 / 255, input_shape=(self.imgWidth, self.imgHeight, 3)) |
|
|
|
|
|
data_augmentation = self.tf.keras.Sequential( |
|
|
[ |
|
|
self.tf.keras.layers.RandomFlip(typeRandomFlip, input_shape=(self.imgWidth,self.imgHeight,3)), |
|
|
self.tf.keras.layers.RandomRotation(RandomRotation), |
|
|
self.tf.keras.layers.RandomZoom(RandomZoom), |
|
|
] |
|
|
) |
|
|
|
|
|
|
|
|
def prepare(ds, shuffle=False, augment=False): |
|
|
|
|
|
ds = ds.map(lambda x, y: (rescale(x), y), num_parallel_calls=self.AUTOTUNE) |
|
|
|
|
|
if shuffle: |
|
|
ds = ds.shuffle(1024) |
|
|
|
|
|
|
|
|
if augment: |
|
|
ds = ds.map(lambda x, y: (data_augmentation(x), y), num_parallel_calls=self.AUTOTUNE,) |
|
|
|
|
|
|
|
|
return ds.prefetch(buffer_size=self.AUTOTUNE) |
|
|
|
|
|
self.trainDataset = prepare(self.trainDataset, shuffle=shuffleTrainDataset, augment=augmentTrainDataset) |
|
|
self.validationDataset = prepare(self.validationDataset) |
|
|
|
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def data_GetLabelFromDataset(self, dataset): |
|
|
""" |
|
|
Purpose: |
|
|
- Get label from dataset |
|
|
|
|
|
Parameter: |
|
|
- dataset: dataset |
|
|
- type: tf.data.Dataset |
|
|
- example: trainDataset |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success", "label":array([0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, |
|
|
0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, |
|
|
1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, |
|
|
1, 1, 0, 0, 0, 0, 0, 0], dtype=int32)} |
|
|
""" |
|
|
try: |
|
|
label = self.np.concatenate([y for x, y in dataset], axis=0) |
|
|
return {"success":True, "code":200, "detail":"success", "label":label} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def model_make(self, model=None): |
|
|
""" |
|
|
Purpose: |
|
|
- Make default model |
|
|
|
|
|
Parameter: |
|
|
- model: model |
|
|
- type: tf.keras.Model |
|
|
- example: model |
|
|
- default: None |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success", "model":model} |
|
|
""" |
|
|
try: |
|
|
if model is None: |
|
|
model = self.tf.keras.Sequential() |
|
|
base_model = self.tf.keras.applications.DenseNet121(include_top=False, input_shape=(self.imgWidth, self.imgHeight, 3)) |
|
|
base_model.trainable=True |
|
|
model.add(base_model) |
|
|
model.add(self.tf.keras.layers.Dropout(0.4)) |
|
|
model.add(self.tf.keras.layers.Flatten()) |
|
|
model.add(self.tf.keras.layers.Dense(128,activation='relu')) |
|
|
model.add(self.tf.keras.layers.Dropout(0.5)) |
|
|
model.add(self.tf.keras.layers.Dense(32,activation='relu')) |
|
|
model.add(self.tf.keras.layers.Dense(1, activation="sigmoid")) |
|
|
self.Model = model |
|
|
else: |
|
|
self.Model = model |
|
|
return {"success":True, "code":200, "detail":"success", "model":self.Model} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def training_model(self, epochs=10, lossFunction="binary_crossentropy", optimizer="adam", metrics=["accuracy"], device='/GPU:0', modelName=None): |
|
|
""" |
|
|
Purpose: |
|
|
- Training model |
|
|
|
|
|
Parameter: |
|
|
- model: model |
|
|
- type: tf.keras.Model |
|
|
- example: model |
|
|
- default: True |
|
|
- epochs: epochs |
|
|
- type: int |
|
|
- example: 10 |
|
|
- lossFunction: loss function |
|
|
- type: string |
|
|
- example: "binary_crossentropy" |
|
|
- options: "binary_crossentropy", "categorical_crossentropy", "sparse_categorical_crossentropy" |
|
|
- optimizer: optimizer |
|
|
- type: string |
|
|
- example: "adam" |
|
|
- options: "adam", "adamax", "nadam", "rmsprop", "sgd", tf.keras.optimizers.RMSprop(learning_rate=1e-4) |
|
|
- metrics: metrics |
|
|
- type: list |
|
|
- example: ["accuracy"] |
|
|
- device: device |
|
|
- type: string |
|
|
- example: "/GPU:0" |
|
|
- options: "/CPU:0", "/GPU:0" |
|
|
- modelName: model name |
|
|
- type: string |
|
|
- example: "model" |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
if modelName is not None: |
|
|
self.modelName = modelName |
|
|
|
|
|
self.time_callback = TimeHistory() |
|
|
self.Model.compile( |
|
|
loss=lossFunction, |
|
|
optimizer=optimizer, |
|
|
metrics=metrics, |
|
|
) |
|
|
|
|
|
print(self.Model.summary()) |
|
|
|
|
|
with self.tf.device(device): |
|
|
self.history = self.Model.fit( |
|
|
self.trainDataset, validation_data=self.validationDataset, epochs=epochs, verbose=1, callbacks=[self.time_callback] |
|
|
) |
|
|
|
|
|
dataFrameHistory = self.pd.DataFrame({"training_loss":self.history.history["loss"], "training_accuracy":self.history.history["accuracy"], "validation_loss":self.history.history["val_loss"], "validation_accuracy":self.history.history["val_accuracy"], "training_time":self.time_callback.times}) |
|
|
dataFrameHistory.to_excel(f"report_{self.modelName}.xlsx") |
|
|
|
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def training_model_multiGPU(self, epochs=10, lossFunction="binary_crossentropy", optimizer="adam", metrics=["accuracy"], device='/GPU:0', modelName=None): |
|
|
""" |
|
|
Purpose: |
|
|
- Training model with multi GPU support, with mirrored strategy |
|
|
|
|
|
Parameter: |
|
|
- model: model |
|
|
- type: tf.keras.Model |
|
|
- example: model |
|
|
- default: True |
|
|
- epochs: epochs |
|
|
- type: int |
|
|
- example: 10 |
|
|
- lossFunction: loss function |
|
|
- type: string |
|
|
- example: "binary_crossentropy" |
|
|
- options: "binary_crossentropy", "categorical_crossentropy", "sparse_categorical_crossentropy" |
|
|
- optimizer: optimizer |
|
|
- type: string |
|
|
- example: "adam" |
|
|
- options: "adam", "adamax", "nadam", "rmsprop", "sgd", tf.keras.optimizers.RMSprop(learning_rate=1e-4) |
|
|
- metrics: metrics |
|
|
- type: list |
|
|
- example: ["accuracy"] |
|
|
- device: device |
|
|
- type: string |
|
|
- example: "/GPU:0" |
|
|
- options: "/CPU:0", "/GPU:0" |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
if modelName is not None: |
|
|
self.modelName = modelName |
|
|
|
|
|
self.time_callback = TimeHistory() |
|
|
|
|
|
print(self.Model.summary()) |
|
|
strategy = self.tf.distribute.MirroredStrategy() |
|
|
with strategy.scope(): |
|
|
model = self.Model |
|
|
model.compile(loss=lossFunction, optimizer=optimizer, metrics=metrics) |
|
|
|
|
|
self.history = model.fit(self.trainDataset, validation_data=self.validationDataset, epochs=epochs, verbose=1, callbacks=[self.time_callback]) |
|
|
|
|
|
dataFrameHistory = self.pd.DataFrame({"training_loss":self.history.history["loss"], "training_accuracy":self.history.history["accuracy"], "validation_loss":self.history.history["val_loss"], "validation_accuracy":self.history.history["val_accuracy"], "training_time":self.time_callback.times}) |
|
|
dataFrameHistory.to_excel(f"report_{self.modelName}.xlsx") |
|
|
|
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def evaluation(self, labelName=["COVID19", "NORMAL"]): |
|
|
""" |
|
|
Purpose: |
|
|
- Evaluation model with confusionMatrix, precision, recall, f1Score, accuracy |
|
|
|
|
|
Parameter: |
|
|
- labelName: label name |
|
|
- type: list |
|
|
- example: ["COVID19", "NORMAL"] |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success", "confusionMatrix":confusionMatrix, "precision":precision, "recall":recall, "f1Score":f1Score, "accuracy":accuracy} |
|
|
""" |
|
|
try: |
|
|
self.Model.evaluate(self.validationDataset) |
|
|
prediction_result = self.Model.predict(self.validationDataset) |
|
|
prediction_result = self.np.argmax(prediction_result, axis=1) |
|
|
self.validation_label = self.np.concatenate([y for x, y in self.validationDataset], axis=0) |
|
|
self.confusionMatrix = self.tf.math.confusion_matrix(labels=self.validation_label, predictions=prediction_result).numpy() |
|
|
self.accuracy = self.sklearn.metrics.accuracy_score(self.validation_label, prediction_result) |
|
|
self.precision = self.sklearn.metrics.precision_score(self.validation_label, prediction_result, average="macro", zero_division=0) |
|
|
self.recall = self.sklearn.metrics.recall_score(self.validation_label, prediction_result, average="macro") |
|
|
self.f1Score = self.sklearn.metrics.f1_score(self.validation_label, prediction_result, average="macro") |
|
|
self.__drawConfusionMatrix(labelName) |
|
|
self.__drawROC() |
|
|
dataFrameScore = self.pd.DataFrame({"accuracy":[self.accuracy], "recall":[self.recall], "precision":[self.precision], "f1Score":[self.f1Score]}) |
|
|
dataFrameScore.to_excel(f"reportScore_{self.modelName}.xlsx") |
|
|
self.__drawHistoryAccuracy() |
|
|
self.__drawHistoryLoss() |
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
|
|
|
def __drawConfusionMatrix(self, labelName=["COVID19", "NORMAL"]): |
|
|
|
|
|
labelName.sort() |
|
|
fig, ax = self.plt.subplots() |
|
|
im = ax.imshow(self.confusionMatrix) |
|
|
ax.figure.colorbar(im, ax=ax) |
|
|
ax.set(xticks=self.np.arange(self.confusionMatrix.shape[1]), yticks=self.np.arange(self.confusionMatrix.shape[0]), xticklabels=labelName, yticklabels=labelName, title="Confusion Matrix", ylabel="True label", xlabel="Predicted label") |
|
|
ax.set_xlabel("Predicted") |
|
|
ax.set_ylabel("True") |
|
|
self.plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") |
|
|
for i in range(self.confusionMatrix.shape[0]): |
|
|
for j in range(self.confusionMatrix.shape[1]): |
|
|
ax.text(j, i, self.confusionMatrix[i, j], ha="center", va="center", color="w") |
|
|
self.plt.tight_layout() |
|
|
self.plt.savefig(f"confusionMatrix_{self.modelName}.png") |
|
|
self.plt.show() |
|
|
self.plt.close() |
|
|
|
|
|
dataFrameConfusionMatrix = self.pd.DataFrame(self.confusionMatrix) |
|
|
dataFrameConfusionMatrix.to_excel(f"confusionMatrix_{self.modelName}.xlsx") |
|
|
|
|
|
def __drawROC(self): |
|
|
""" |
|
|
Purpose: |
|
|
- Draw ROC curve like this sample https://scikit-learn.org/stable/_images/sphx_glr_plot_roc_001.png for multi class |
|
|
""" |
|
|
predictResult = self.Model.predict(self.validationDataset) |
|
|
fpr, tpr, thresholds = self.sklearn.metrics.roc_curve(self.validation_label, predictResult[:, 1], pos_label=1) |
|
|
self.auc = self.sklearn.metrics.auc(fpr, tpr) |
|
|
fig, ax = self.plt.subplots() |
|
|
ax.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % self.auc) |
|
|
ax.plot([0, 1], [0, 1], "k--") |
|
|
ax.set_xlim([0.0, 1.0]) |
|
|
ax.set_ylim([0.0, 1.05]) |
|
|
ax.set_xlabel("False Positive Rate") |
|
|
ax.set_ylabel("True Positive Rate") |
|
|
ax.set_title("Receiver operating characteristic") |
|
|
ax.legend(loc="best") |
|
|
self.plt.savefig(f"ROC_{self.modelName}.png") |
|
|
self.plt.show() |
|
|
self.plt.close() |
|
|
|
|
|
dataFrameROC = self.pd.DataFrame({"fpr":fpr, "tpr":tpr, "thresholds":thresholds, "auc":self.auc}) |
|
|
dataFrameROC.to_excel(f"ROC_{self.modelName}.xlsx") |
|
|
|
|
|
def __drawHistoryAccuracy(self): |
|
|
""" |
|
|
Purpose: |
|
|
- Draw history accuracy with training and validation dataset |
|
|
""" |
|
|
fig, ax = self.plt.subplots() |
|
|
ax.plot(self.history.history["accuracy"], label="training dataset") |
|
|
ax.plot(self.history.history["val_accuracy"], label="validation dataset") |
|
|
ax.set_xlabel("Epoch") |
|
|
ax.set_ylabel("Accuracy") |
|
|
ax.set_title("Accuracy") |
|
|
ax.legend(loc="best") |
|
|
self.plt.savefig(f"historyAccuracy_{self.modelName}.png") |
|
|
self.plt.show() |
|
|
self.plt.close() |
|
|
|
|
|
def __drawHistoryLoss(self): |
|
|
""" |
|
|
Purpose: |
|
|
- Draw history loss with training and validation dataset |
|
|
""" |
|
|
fig, ax = self.plt.subplots() |
|
|
ax.plot(self.history.history["loss"], label="training dataset") |
|
|
ax.plot(self.history.history["val_loss"], label="validation dataset") |
|
|
ax.set_xlabel("Epoch") |
|
|
ax.set_ylabel("Loss") |
|
|
ax.set_title("Loss") |
|
|
ax.legend(loc="best") |
|
|
self.plt.savefig(f"historyLoss_{self.modelName}.png") |
|
|
self.plt.show() |
|
|
self.plt.close() |
|
|
|
|
|
def import_data_Dataset(self, trainDataset, validationDataset): |
|
|
""" |
|
|
Purpose: |
|
|
- Import dataset |
|
|
|
|
|
Parameter: |
|
|
- trainDataset: dataset |
|
|
- type: tf.data.Dataset |
|
|
- example: trainDataset |
|
|
- validationDataset: dataset |
|
|
- type: tf.data.Dataset |
|
|
- example: validationDataset |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
self.trainDataset = trainDataset |
|
|
self.validationDataset = validationDataset |
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def saveModelWithWeight(self, fileName): |
|
|
""" |
|
|
Purpose: |
|
|
- Save model with weight |
|
|
|
|
|
Parameter: |
|
|
- fileName: file name |
|
|
- type: string |
|
|
- example: "my_model" |
|
|
- options: "my_model", "gs://bucket/my_model" |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
self.Model.save(fileName) |
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
def loadModelWithWeightAndCustomObject(self, fileName, customObject): |
|
|
""" |
|
|
Purpose: |
|
|
- Load model with weight and custom object |
|
|
|
|
|
Parameter: |
|
|
- fileName: file name |
|
|
- type: string |
|
|
- example: "my_model" |
|
|
- options: "my_model", "gs://bucket/my_model" |
|
|
- customObject: custom object |
|
|
- type: dict |
|
|
- example: {"MyCustomObject":MyCustomObject} |
|
|
|
|
|
Return: |
|
|
- {"success":True, "code":200, "detail":"success"} |
|
|
""" |
|
|
try: |
|
|
self.Model = self.tf.keras.models.load_model(fileName, custom_objects=customObject) |
|
|
return {"success":True, "code":200, "detail":"success"} |
|
|
except Exception as e: |
|
|
return {"success":False, "code":500, "detail":str(e)} |
|
|
|
|
|
import tensorflow as tf |
|
|
from time import time |
|
|
class TimeHistory(tf.keras.callbacks.Callback):
    """Keras callback that records wall-clock seconds per epoch in self.times.

    The training methods above read self.times after fit() to build their
    Excel history reports.
    """

    def on_train_begin(self, logs=None):
        # Fresh list per training run. `logs=None` replaces the mutable-default
        # `logs={}` anti-pattern (a dict shared across every invocation).
        self.times = []

    def on_epoch_begin(self, epoch, logs=None):
        # Parameter renamed from the misleading `batch`: Keras passes the epoch
        # index positionally to the epoch-level hooks.
        self.epoch_time_start = time()

    def on_epoch_end(self, epoch, logs=None):
        self.times.append(time() - self.epoch_time_start)