code
stringlengths
17
6.64M
def plot_acc(history, title=None):
    """Plot training/validation accuracy curves from a Keras history.

    Accepts either a history dict or a Keras History object.
    """
    hist = history if isinstance(history, dict) else history.history
    for key in ('acc', 'val_acc'):
        plt.plot(hist[key])
    if title is not None:
        plt.title(title)
    plt.ylabel('정확도')
    plt.xlabel('에포크')
    plt.legend(['ν•™μŠ΅ 데이터 μ„±λŠ₯', '검증 데이터 μ„±λŠ₯'], loc=0)
def plot_loss(history, title=None):
    """Plot training/validation loss curves from a Keras history.

    Accepts either a history dict or a Keras History object.
    """
    hist = history if isinstance(history, dict) else history.history
    for key in ('loss', 'val_loss'):
        plt.plot(hist[key])
    if title is not None:
        plt.title(title)
    plt.ylabel('손싀')
    plt.xlabel('에포크')
    plt.legend(['ν•™μŠ΅ 데이터 μ„±λŠ₯', '검증 데이터 μ„±λŠ₯'], loc=0)
def plot_history(history):
    """Draw accuracy and loss side by side in a single wide figure."""
    plt.figure(figsize=(15, 5))
    for idx, drawer in enumerate((plot_acc, plot_loss), start=1):
        plt.subplot(1, 2, idx)
        drawer(history)
def plot_loss_acc(history):
    """Show the loss plot, then the accuracy plot, each in its own window."""
    panels = ((plot_loss, '(a) 손싀 좔이'),
              (plot_acc, '(b) 정확도 좔이'))
    for drawer, title in panels:
        drawer(history, title)
        plt.show()
def plot_acc_loss(history):
    """Show the accuracy plot, then the loss plot, each in its own window."""
    panels = ((plot_acc, '(a) 정확도 좔이'),
              (plot_loss, '(b) 손싀 좔이'))
    for drawer, title in panels:
        drawer(history, title)
        plt.show()
class ARGS:
    """Empty namespace class; instances hold arbitrary ad-hoc attributes."""
    pass
def kproc(x):
    """Element-wise polynomial x**2 + 2*x + 1 (mathematically (x + 1)**2)."""
    return x ** 2 + 2 * x + 1
def kshape(input_shape):
    """Shape function for kproc: the output shape equals the input shape."""
    return input_shape
class Mult(Layer):
    """Custom Keras layer computing x**2 + 2*x + 1 element-wise."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, x):
        # Same polynomial as kproc, expressed as a layer.
        return x ** 2 + 2 * x + 1
def kproc_concat(x):
    """Concatenate x with the absolute and squared deviations from its axis-1 mean."""
    m = K.mean(x, axis=1, keepdims=True)
    dev = x - m
    return K.concatenate([x, K.abs(dev), K.square(dev)], axis=1)
def kshape_concat(input_shape):
    """Shape function for kproc_concat: the axis-1 size triples."""
    shape = list(input_shape)
    shape[1] = shape[1] * 3
    return tuple(shape)
class DNN(models.Sequential):
    """Two-hidden-layer fully connected softmax classifier."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def main():
    """Train the DNN on the prepared data and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    # Derive the input width from the data instead of hard-coding 784,
    # which does not match CIFAR-10's flattened feature count.
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer fully connected softmax classifier."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer fully connected softmax classifier."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.05),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0.2),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.05),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0.1),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.05),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0.05),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.01),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0.05),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.01),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with dropout after each hidden layer."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(0.01),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(0.01),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the dropout DNN on CIFAR-10 and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DNN(models.Sequential):
    """Two-hidden-layer classifier with parameterized dropout rates Pd_l."""

    def __init__(self, Nin, Nh_l, Pd_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dropout(Pd_l[0]),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dropout(Pd_l[1]),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H, C = X_train.shape
    n_features = W * H * C
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the parameterized-dropout DNN on CIFAR-10 and plot the history."""
    Nh_l = [100, 50]
    Pd_l = [0.0, 0.0]
    n_classes = 10
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Pd_l, n_classes)
    history = model.fit(X_train, Y_train, epochs=100, batch_size=100,
                        validation_split=0.2)
    test_metrics = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', test_metrics)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class AE(models.Model):
    """Single-hidden-layer autoencoder with accessors for its two halves."""

    def __init__(self, x_nodes=784, z_dim=36):
        x = layers.Input(shape=(x_nodes,))
        z = layers.Dense(z_dim, activation='relu')(x)
        y = layers.Dense(x_nodes, activation='sigmoid')(z)
        super().__init__(x, y)
        # Keep the graph tensors so Encoder/Decoder can be built later.
        self.x = x
        self.z = z
        self.z_dim = z_dim
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])

    def Encoder(self):
        """Model mapping an input vector to its latent code."""
        return models.Model(self.x, self.z)

    def Decoder(self):
        """Model feeding a latent code through the trained output layer."""
        z_in = layers.Input(shape=(self.z_dim,))
        return models.Model(z_in, self.layers[-1](z_in))
def show_ae(autoencoder):
    """Plot originals, latent codes, and reconstructions for 10 test images.

    NOTE(review): relies on a module-level `x_test` (presumably MNIST test
    images, shape (N, 784)) — confirm it is defined before calling; TODO
    consider passing it as a parameter.
    """
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)
    n = 10
    plt.figure(figsize=(20, 6))
    for i in range(n):
        # Row 1: original images.
        ax = plt.subplot(3, n, (i + 1))
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 2: latent code shown as a stem plot.
        ax = plt.subplot(3, n, ((i + 1) + n))
        plt.stem(encoded_imgs[i].reshape((- 1)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 3: reconstructed images.
        ax = plt.subplot(3, n, (((i + 1) + n) + n))
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20):
    """Train the dense autoencoder and visualize its reconstructions."""
    x_nodes = 784
    z_dim = 36
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    autoencoder = AE(x_nodes, z_dim)
    # BUG FIX: the original fit on undefined lowercase x_train/x_test;
    # an autoencoder trains on the images themselves, i.e. X_train/X_test.
    history = autoencoder.fit(X_train, X_train, epochs=epochs,
                              batch_size=256, shuffle=True,
                              validation_data=(X_test, X_test))
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
    show_ae(autoencoder)
    plt.show()
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Shorthand for layers.Conv2D defaulting to 'same' padding and ReLU."""
    return layers.Conv2D(filters, kernel_size,
                         padding=padding, activation=activation)
class AE(models.Model):
    """Convolutional autoencoder.

    NOTE(review): the default org_shape=(1, 28, 28) looks channels-first —
    confirm it matches the backend's image_data_format before use.
    """

    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        # Encoder: two conv+pool stages, then a 7x7 conv to a 1-channel code.
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        z = Conv2D(1, (7, 7))(x)
        # Decoder: conv+upsample stages mirroring the encoder, ending in a
        # sigmoid so outputs lie in [0, 1] like the scaled inputs.
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])
def show_ae(autoencoder, data):
    """Show 10 original test images above their autoencoder reconstructions."""
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    # Drop the channel axis; its position depends on the backend data format.
    if (backend.image_data_format() == 'channels_first'):
        (N, n_ch, n_i, n_j) = x_test.shape
    else:
        (N, n_i, n_j, n_ch) = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # Top row: originals.
        ax = plt.subplot(2, n, (i + 1))
        plt.imshow(x_test[i], cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Bottom row: reconstructions.
        ax = plt.subplot(2, n, ((i + 1) + n))
        plt.imshow(decoded_imgs[i], cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20, batch_size=128):
    """Train the convolutional autoencoder on DATA() and show reconstructions."""
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train,
                              epochs=epochs, batch_size=batch_size,
                              shuffle=True, validation_split=0.2)
    for drawer in (plot_acc, plot_loss):
        drawer(history)
        plt.show()
    show_ae(autoencoder, data)
    plt.show()
class Data:
    """IMDB sentiment data with sequences padded/truncated to a fixed length."""

    def __init__(self, max_features=20000, maxlen=80):
        (x_train, y_train), (x_test, y_test) = imdb.load_data(
            num_words=max_features)
        self.x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
        self.y_train = y_train
        self.x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
        self.y_test = y_test
class RNN_LSTM(models.Model):
    """Embedding -> LSTM -> sigmoid binary sentiment classifier."""

    def __init__(self, max_features, maxlen):
        x = layers.Input((maxlen,))
        h = layers.Embedding(max_features, 128)(x)
        h = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(h)
        y = layers.Dense(1, activation='sigmoid')(h)
        super().__init__(x, y)
        self.compile(loss='binary_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
class Machine:
    """Bundle the IMDB data with the LSTM model and run training/evaluation."""

    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)

    def run(self, epochs=3, batch_size=32):
        """Fit on the train split, then report accuracy/loss on the test split."""
        data, model = self.data, self.model
        print('Training stage')
        print('==============')
        model.fit(data.x_train, data.y_train, batch_size=batch_size,
                  epochs=epochs,
                  validation_data=(data.x_test, data.y_test))
        score, acc = model.evaluate(data.x_test, data.y_test,
                                    batch_size=batch_size)
        print('Test performance: accuracy={0}, loss={1}'.format(acc, score))
class GAN():
    """Discriminator D, generator G, and the stacked G->D model GD.

    ni_D is the sample vector length; nh_D / nh_G are the hidden widths.
    """

    def __init__(self, ni_D, nh_D, nh_G):
        D = models.Sequential()
        D.add(Dense(nh_D, activation='relu', input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        G = models.Sequential()
        G.add(Reshape((ni_D, 1), input_shape=(ni_D,)))
        G.add(Conv1D(nh_G, 1))
        G.add(Conv1D(nh_G, 1))
        G.add(Conv1D(1, 1))
        G.add(Flatten())
        model_compile(G)
        GD = models.Sequential()
        GD.add(G)
        GD.add(D)
        # Freeze D while compiling the stacked model so generator updates do
        # not modify the discriminator; unfreeze afterwards for D's own step.
        D.trainable = False
        model_compile(GD)
        D.trainable = True
        (self.D, self.G, self.GD) = (D, G, GD)

    def D_train_on_batch(self, Real, Gen):
        """One discriminator step: label real samples 1, generated samples 0."""
        D = self.D
        X = np.concatenate([Real, Gen], axis=0)
        y = (([1] * Real.shape[0]) + ([0] * Gen.shape[0]))
        D.train_on_batch(X, y)

    def GD_train_on_batch(self, Z):
        """One generator step through the stacked model, targeting label 1."""
        (GD, D) = (self.GD, self.D)
        y = ([1] * Z.shape[0])
        GD.train_on_batch(Z, y)
class Data:
    """Batch samplers: real data ~ Normal(mu, sigma), inputs ~ Uniform[0, 1)."""

    def __init__(self, mu, sigma, ni_D):
        def real_sample(n_batch):
            # (n_batch, ni_D) Gaussian samples.
            return np.random.normal(mu, sigma, (n_batch, ni_D))

        def in_sample(n_batch):
            # (n_batch, ni_D) uniform noise fed to the generator.
            return np.random.rand(n_batch, ni_D)

        self.real_sample = real_sample
        self.in_sample = in_sample
class Machine():
    """Orchestrate adversarial training of the GAN on 1-D Gaussian data."""

    def __init__(self, n_batch=10, ni_D=100):
        self.data = Data(0, 1, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        """One discriminator update on a real batch vs a generated batch."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Real = data.real_sample(n_batch)
        Z = data.in_sample(n_batch)
        Gen = gan.G.predict(Z)
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        """One generator update through the frozen discriminator."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        self.train_D()
        self.train_GD()

    def train(self, epochs):
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        """Generate n_test new samples; returns (Gen, Z)."""
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return (Gen, Z)

    def show_hist(self, Real, Gen, Z):
        """Overlay histograms of real, generated, and input samples."""
        plt.hist(Real.reshape(-1), histtype='step', label='Real')
        plt.hist(Gen.reshape(-1), histtype='step', label='Generated')
        plt.hist(Z.reshape(-1), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        data = self.data
        (Gen, Z) = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)

    def run(self, epochs, n_test):
        """Train the GAN, then compare real and generated distributions."""
        self.train(epochs)
        self.test_and_show(n_test)

    def run_loop(self, n_iter=100, epochs_each=1000, n_test=1000):
        """Repeat train-and-show stages n_iter times."""
        for ii in range(n_iter):
            print('Stage', ii)
            # BUG FIX: the original called the global name `machine.run(...)`;
            # it must run on this instance via self.
            self.run(epochs_each, n_test)
            plt.show()
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Shorthand for layers.Conv2D defaulting to 'same' padding and ReLU."""
    return layers.Conv2D(filters, kernel_size,
                         padding=padding, activation=activation)
class AE(models.Model):
    """Convolutional autoencoder.

    NOTE(review): the default org_shape=(1, 28, 28) looks channels-first —
    confirm it matches the backend's image_data_format before use.
    """

    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        # Encoder: two conv+pool stages, then a 7x7 conv to a 1-channel code.
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        z = Conv2D(1, (7, 7))(x)
        # Decoder: conv+upsample stages mirroring the encoder, ending in a
        # sigmoid so outputs lie in [0, 1] like the scaled inputs.
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])
def show_ae(autoencoder, data):
    """Show 10 original test images above their autoencoder reconstructions."""
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    # Drop the channel axis; its position depends on the backend data format.
    if (backend.image_data_format() == 'channels_first'):
        (N, n_ch, n_i, n_j) = x_test.shape
    else:
        (N, n_i, n_j, n_ch) = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        # Top row: originals.
        ax = plt.subplot(2, n, (i + 1))
        plt.imshow(x_test[i], cmap='YlGnBu')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Bottom row: reconstructions.
        ax = plt.subplot(2, n, ((i + 1) + n))
        plt.imshow(decoded_imgs[i], cmap='YlGnBu')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20, batch_size=128):
    """Train the convolutional autoencoder on DATA() and show reconstructions."""
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train,
                              epochs=epochs, batch_size=batch_size,
                              shuffle=True, validation_split=0.2)
    for drawer in (plot_acc, plot_loss):
        drawer(history)
        plt.show()
    show_ae(autoencoder, data)
    plt.show()
def ANN_models_func(Nin, Nh, Nout):
    """Build a one-hidden-layer softmax classifier with the functional API."""
    x = layers.Input(shape=(Nin,))
    hidden = layers.Dense(Nh)(x)
    h = layers.Activation('relu')(hidden)
    logits = layers.Dense(Nout)(h)
    y = layers.Activation('softmax')(logits)
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def ANN_seq_func(Nin, Nh, Nout):
    """Build a one-hidden-layer softmax classifier with the Sequential API."""
    model = models.Sequential()
    for layer in (layers.Dense(Nh, activation='relu', input_shape=(Nin,)),
                  layers.Dense(Nout, activation='softmax')):
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
class ANN_models_class(models.Model):
    """One-hidden-layer softmax classifier as a functional-API Model subclass."""

    def __init__(self, Nin, Nh, Nout):
        x = layers.Input(shape=(Nin,))
        h = layers.Activation('relu')(layers.Dense(Nh)(x))
        y = layers.Activation('softmax')(layers.Dense(Nout)(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
class ANN_seq_class(models.Sequential):
    """One-hidden-layer softmax classifier as a Sequential subclass."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        for layer in (layers.Dense(Nh, activation='relu', input_shape=(Nin,)),
                      layers.Dense(Nout, activation='softmax')):
            self.add(layer)
        self.compile(loss='categorical_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
def Data_func():
    """Load MNIST, one-hot encode the labels, flatten and scale images to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, W, H = X_train.shape
    n_features = W * H
    X_train = X_train.reshape(-1, n_features) / 255.0
    X_test = X_test.reshape(-1, n_features) / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def plot_loss(history):
    """Plot train (dotted) and validation (solid) loss in black."""
    hist = history.history
    plt.plot(hist['loss'], ':k')
    plt.plot(hist['val_loss'], '-k')
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc=0)
def plot_acc(history):
    """Plot train (dotted) and validation (solid) accuracy in black."""
    hist = history.history
    plt.plot(hist['acc'], ':k')
    plt.plot(hist['val_acc'], '-k')
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc=0)
def main():
    """Train the sequential ANN on MNIST and plot the learning curves."""
    Nin = 784
    Nh = 100
    n_classes = 10
    model = ANN_seq_class(Nin, Nh, n_classes)
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100,
                        validation_split=0.2)
    test_metrics = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', test_metrics)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
class Machine(aicnn.Machine):
    """CIFAR-10 specialization of the generic aicnn.Machine pipeline."""

    def __init__(self):
        # The second tuple (held-out split) is unpacked but not used here.
        (X, y), (x_test, y_test) = datasets.cifar10.load_data()
        super().__init__(X, y, nb_classes=10)
def main():
    """Build the CIFAR-10 machine and run it with default settings."""
    machine = Machine()
    machine.run()
class CNN(models.Sequential):
    """Conv-conv-pool CNN with one dense hidden layer and dropout."""

    def __init__(self, input_shape, num_classes):
        super().__init__()
        stack = [
            layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                          input_shape=input_shape),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Dropout(0.25),
            layers.Flatten(),
            layers.Dense(128, activation='relu'),
            layers.Dropout(0.5),
            layers.Dense(num_classes, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer='rmsprop', metrics=['accuracy'])
class DATA():
    """MNIST prepared for a CNN: channel axis added, pixels scaled, labels one-hot."""

    def __init__(self):
        num_classes = 10
        ((x_train, y_train), (x_test, y_test)) = datasets.mnist.load_data()
        (img_rows, img_cols) = x_train.shape[1:]
        # Insert the channel axis where the active backend expects it.
        if (backend.image_data_format() == 'channels_first'):
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        # Scale pixel intensities from [0, 255] to [0, 1].
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        self.input_shape = input_shape
        self.num_classes = num_classes
        (self.x_train, self.y_train) = (x_train, y_train)
        (self.x_test, self.y_test) = (x_test, y_test)
class DNN(models.Sequential):
    """Two-hidden-layer fully connected softmax classifier."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        stack = [
            layers.Dense(Nh_l[0], activation='relu',
                         input_shape=(Nin,), name='Hidden-1'),
            layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'),
            layers.Dense(Nout, activation='softmax'),
        ]
        for layer in stack:
            self.add(layer)
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam', metrics=['accuracy'])
def Data_func():
    """Load CIFAR-10, one-hot encode the labels, flatten and scale images.

    Returns ((X_train, Y_train), (X_test, Y_test)) with each image flattened
    to a W*H*C feature vector scaled to [0, 1].
    """
    (X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    L, W, H, C = X_train.shape
    # BUG FIX: the original reshaped to (-1, W*H), ignoring the C color
    # channels; that multiplies the row count by C and misaligns images
    # with their labels. Flatten to the full W*H*C feature vector instead.
    X_train = X_train.reshape(-1, W * H * C)
    X_test = X_test.reshape(-1, W * H * C)
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    return (X_train, Y_train), (X_test, Y_test)
def main():
    """Train the DNN on the prepared data and report test performance."""
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    (X_train, Y_train), (X_test, Y_test) = Data_func()
    # Derive the input width from the data instead of hard-coding 784,
    # which does not match CIFAR-10's flattened feature count.
    model = DNN(X_train.shape[1], Nh_l, Nout)
    # BUG FIX: the original passed undefined lowercase y_train/y_test;
    # Data_func returns the one-hot targets as Y_train/Y_test.
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100,
                        validation_split=0.2)
    performance_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performance_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
class DataSet():
    """Train/test splits of image data with optional scaling and one-hot labels."""

    def __init__(self, X, y, nb_classes, scaling=True, test_size=0.2,
                 random_state=0):
        """X is originally a vector; it is transformed to 2D images with a channel (3D).

        scaling: min-max scale features using statistics fit on the train split.
        test_size: fraction of samples held out for testing.
        """
        self.X = X
        self.add_channels()
        X = self.X
        # BUG FIX: the original hard-coded test_size=0.2 here, silently
        # ignoring the test_size parameter.
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            X, y, test_size=test_size, random_state=random_state)
        print(X_train.shape, y_train.shape)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        if scaling:
            # Fit the scaler on the train split only; reuse it for test.
            scaler = MinMaxScaler()
            n = X_train.shape[0]
            X_train = scaler.fit_transform(
                X_train.reshape(n, -1)).reshape(X_train.shape)
            n = X_test.shape[0]
            X_test = scaler.transform(
                X_test.reshape(n, -1)).reshape(X_test.shape)
            self.scaler = scaler
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_test.shape[0], 'test samples')
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        self.X_train, self.X_test = X_train, X_test
        self.Y_train, self.Y_test = Y_train, Y_test
        self.y_train, self.y_test = y_train, y_test

    def add_channels(self):
        """Append a channel axis to 3D (N, rows, cols) grayscale arrays in place."""
        X = self.X
        if len(X.shape) == 3:
            N, img_rows, img_cols = X.shape
            # Channel position depends on the backend's dim ordering.
            if K.image_dim_ordering() == 'th':
                X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                input_shape = (1, img_rows, img_cols)
            else:
                X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                input_shape = (img_rows, img_cols, 1)
        else:
            # Already has a channel axis; keep the per-sample shape.
            input_shape = X.shape[1:]
        self.X = X
        self.input_shape = input_shape
class CNN(Model):
    """Functional CNN classifier exposing conv (cl_part) and dense (fl_part) taps.

    NOTE(review): the author names the self parameter `model` throughout.
    """

    def __init__(model, nb_classes, in_shape=None):
        model.nb_classes = nb_classes
        model.in_shape = in_shape
        # The graph endpoints model.x / model.y must exist before Model.__init__.
        model.build_model()
        super().__init__(model.x, model.y)
        model.compile()

    def build_model(model):
        """Construct the conv stack and record tap points and endpoints."""
        nb_classes = model.nb_classes
        in_shape = model.in_shape
        x = Input(in_shape)
        h = Conv2D(32, kernel_size=(3, 3), activation='relu',
                   input_shape=in_shape)(x)
        h = Conv2D(64, (3, 3), activation='relu')(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)
        h = Dropout(0.25)(h)
        h = Flatten()(h)
        z_cl = h  # tap after the convolutional part
        h = Dense(128, activation='relu')(h)
        h = Dropout(0.5)(h)
        z_fl = h  # tap after the last hidden dense layer
        y = Dense(nb_classes, activation='softmax', name='preds')(h)
        # Sub-models from the input to each tap point.
        model.cl_part = Model(x, z_cl)
        model.fl_part = Model(x, z_fl)
        (model.x, model.y) = (x, y)

    def compile(model):
        """Compile with categorical cross-entropy and Adadelta."""
        Model.compile(model, loss='categorical_crossentropy',
                      optimizer='adadelta', metrics=['accuracy'])
class Machine():
    """End-to-end pipeline: prepare data, build CNN, train, evaluate, save, plot."""

    def __init__(self, X, y, nb_classes=2, fig=True):
        self.nb_classes = nb_classes
        self.set_data(X, y)
        self.set_model()
        self.fig = fig  # whether run() draws the accuracy/loss figure

    def set_data(self, X, y):
        """Wrap raw arrays in a DataSet (split, scaled, one-hot labels)."""
        nb_classes = self.nb_classes
        self.data = DataSet(X, y, nb_classes)
        print('data.input_shape', self.data.input_shape)

    def set_model(self):
        """Build the CNN for the dataset's input shape and class count."""
        nb_classes = self.nb_classes
        data = self.data
        self.model = CNN(nb_classes=nb_classes, in_shape=data.input_shape)

    def fit(self, epochs=10, batch_size=128, verbose=1):
        """Train the model; returns the Keras History object."""
        data = self.data
        model = self.model
        history = model.fit(data.X_train, data.Y_train, batch_size=batch_size,
                            epochs=epochs, verbose=verbose,
                            validation_data=(data.X_test, data.Y_test))
        return history

    def run(self, epochs=10, batch_size=128, verbose=1):
        """Train, evaluate, persist history/weights, optionally plot; returns folder name."""
        data = self.data
        model = self.model
        fig = self.fig
        history = self.fit(epochs=epochs, batch_size=batch_size,
                           verbose=verbose)
        score = model.evaluate(data.X_test, data.Y_test, verbose=0)
        print('Confusion matrix')
        Y_test_pred = model.predict(data.X_test, verbose=0)
        y_test_pred = np.argmax(Y_test_pred, axis=1)
        print(metrics.confusion_matrix(data.y_test, y_test_pred))
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # Save training history and weights into a uniquely named folder.
        suffix = sfile.unique_filename('datatime')
        foldname = ('output_' + suffix)
        os.makedirs(foldname)
        skeras.save_history_history('history_history.npy', history.history,
                                    fold=foldname)
        model.save_weights(os.path.join(foldname, 'dl_model.h5'))
        print('Output results are saved in', foldname)
        if fig:
            plt.figure(figsize=(12, 4))
            plt.subplot(1, 2, 1)
            skeras.plot_acc(history)
            plt.subplot(1, 2, 2)
            skeras.plot_loss(history)
            plt.show()
        self.history = history
        return foldname
def load_data(fname='international-airline-passengers.csv'):
    """Load the airline-passenger series, plot it, and return a normalized copy.

    Normalization: subtract the mean, divide by the standard deviation,
    then divide by 5 (i.e. scale by 5 sigma).
    """
    dataset = pd.read_csv(fname, usecols=[1], engine='python', skipfooter=3)
    data = dataset.values.reshape((- 1))
    plt.plot(data)
    plt.xlabel('Time')
    plt.ylabel('#Passengers')
    plt.title('Original Data')
    plt.show()
    # Center and scale so values fall roughly within [-1, 1].
    data_dn = (((data - np.mean(data)) / np.std(data)) / 5)
    plt.plot(data_dn)
    plt.xlabel('Time')
    plt.ylabel('Normalized #Passengers')
    plt.title('Normalized data by $E[]$ and $5\\sigma$')
    plt.show()
    return data_dn
def get_Xy(data, D=12):
    """Slice a 1-D series into sliding-window samples for sequence regression.

    Parameters
    ----------
    data : 1-D array-like series of length N.
    D : window length; each sample is D consecutive values and the target
        is the value immediately following the window.

    Returns
    -------
    (X, y) with X of shape (N - D, D, 1) and y of shape (N - D,).
    """
    X_l = []
    y_l = []
    N = len(data)
    assert (N > D), 'N should be larger than D, where N is len(data)'
    # BUG FIX: range(N - D) uses every complete window; the original
    # range(N - D - 1) silently dropped the last (window, target) pair.
    for ii in range(N - D):
        X_l.append(data[ii:(ii + D)])
        y_l.append(data[(ii + D)])
    X = np.array(X_l)
    # LSTM layers expect a trailing feature axis: (samples, timesteps, 1).
    X = X.reshape(X.shape[0], X.shape[1], 1)
    y = np.array(y_l)
    print(X.shape, y.shape)
    return (X, y)
class Dataset():
    """Loads the passenger series and exposes full plus train/test window splits."""

    def __init__(self, fname='international-airline-passengers.csv', D=12):
        series = load_data(fname=fname)
        X, y = get_Xy(series, D=D)
        # Fixed random_state keeps the 80/20 split reproducible across runs.
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            X, y, test_size=0.2, random_state=42)
        self.X, self.y = X, y
        self.X_train, self.X_test = X_train, X_test
        self.y_train, self.y_test = y_train, y_test
def rnn_model(shape):
    """Build and compile a small LSTM regressor for inputs of the given shape."""
    inputs = layers.Input(shape=shape)
    hidden = layers.LSTM(10)(inputs)
    outputs = layers.Dense(1)(hidden)
    model = models.Model(inputs, outputs)
    model.compile('adam', 'mean_squared_error')
    model.summary()
    return model
class Machine():
    """End-to-end driver: build the airline Dataset and LSTM model, train, plot."""

    def __init__(self):
        self.data = Dataset()
        # Per-sample input shape, i.e. (D, 1).
        shape = self.data.X.shape[1:]
        self.model = rnn_model(shape)

    def run(self, epochs=400):
        """Train, then plot loss history, validation fit, and whole-series fit."""
        d = self.data
        X_train, X_test = d.X_train, d.X_test
        y_train, y_test = d.y_train, d.y_test
        X, y = d.X, d.y
        m = self.model
        # validation_data must be a tuple (not a list) on recent Keras versions.
        h = m.fit(X_train, y_train, epochs=epochs,
                  validation_data=(X_test, y_test), verbose=0)
        skeras.plot_loss(h)
        plt.title('History of training')
        plt.show()
        yp = m.predict(X_test)
        print('Loss:', m.evaluate(X_test, y_test))
        # BUG FIX: the original swapped the legend entries (and misspelled
        # 'Original') -- the model output was labeled as the ground truth.
        plt.plot(yp, label='Prediction')
        plt.plot(y_test, label='Original')
        plt.legend(loc=0)
        plt.title('Validation Results')
        plt.show()
        yp = m.predict(X)
        plt.plot(yp, label='Prediction')
        plt.plot(y, label='Original')
        plt.legend(loc=0)
        plt.title('All Results')
        plt.show()
class Data():
    """Loads the IMDB sentiment dataset and pads every review to a fixed length."""

    def __init__(self, max_features=20000, maxlen=80):
        (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
        # Truncate/pad each review to exactly maxlen tokens.
        self.x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
        self.x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
        self.y_train = y_train
        self.y_test = y_test
class RNN_LSTM(models.Model):
    """Embedding -> LSTM -> sigmoid binary sentiment classifier."""

    def __init__(self, max_features, maxlen):
        inputs = layers.Input((maxlen,))
        embedded = layers.Embedding(max_features, 128)(inputs)
        encoded = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(embedded)
        outputs = layers.Dense(1, activation='sigmoid')(encoded)
        super().__init__(inputs, outputs)
        self.compile(loss='binary_crossentropy', optimizer='adam',
                     metrics=['accuracy'])
class Machine():
    """Ties the IMDB data and LSTM model together for a train/evaluate run."""

    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)

    def run(self, epochs=3, batch_size=32):
        data, model = self.data, self.model
        print('Training stage')
        print('==============')
        model.fit(data.x_train, data.y_train,
                  batch_size=batch_size, epochs=epochs,
                  validation_data=(data.x_test, data.y_test))
        score, acc = model.evaluate(data.x_test, data.y_test,
                                    batch_size=batch_size)
        print('Test performance: accuracy={0}, loss={1}'.format(acc, score))
def main():
    """Entry point: build the IMDB machine and run a short training session."""
    machine = Machine()
    machine.run()
class AE(models.Model):
    """Single-hidden-layer fully connected autoencoder for flattened images."""

    def __init__(self, x_nodes=784, z_dim=36):
        inputs = layers.Input(shape=(x_nodes,))
        latent = layers.Dense(z_dim, activation='relu')(inputs)
        outputs = layers.Dense(x_nodes, activation='sigmoid')(latent)
        super().__init__(inputs, outputs)
        # Keep tensor handles so Encoder()/Decoder() can build sub-models later.
        self.x = inputs
        self.z = latent
        self.z_dim = z_dim
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])

    def Encoder(self):
        """Sub-model mapping an input vector to its latent code."""
        return models.Model(self.x, self.z)

    def Decoder(self):
        """Sub-model mapping a latent code back to input space.

        Reuses the trained output layer so the decoder shares its weights.
        """
        latent_in = layers.Input(shape=(self.z_dim,))
        output_layer = self.layers[-1]
        return models.Model(latent_in, output_layer(latent_in))
def show_ae(autoencoder):
    """Visualize inputs, latent codes, and reconstructions for 10 test digits.

    NOTE(review): reads the module-level global `x_test` (prepared elsewhere
    in this file) instead of taking the data as a parameter -- confirm it is
    defined before calling. Assumes 28x28 (MNIST-style) images.
    """
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)
    n = 10
    plt.figure(figsize=(20, 6))
    for i in range(n):
        # Row 1: original image.
        ax = plt.subplot(3, n, (i + 1))
        plt.imshow(x_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 2: latent code shown as a stem plot.
        ax = plt.subplot(3, n, ((i + 1) + n))
        plt.stem(encoded_imgs[i].reshape((- 1)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 3: reconstruction decoded from the latent code.
        ax = plt.subplot(3, n, (((i + 1) + n) + n))
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
def main():
    """Train the dense autoencoder on module-level MNIST arrays and plot results."""
    x_nodes = 784
    z_dim = 36
    autoencoder = AE(x_nodes, z_dim)
    # x_train / x_test are module-level globals prepared elsewhere in the file.
    history = autoencoder.fit(x_train, x_train,
                              epochs=10, batch_size=256, shuffle=True,
                              validation_data=(x_test, x_test))
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
    show_ae(autoencoder)
    plt.show()
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Shorthand for layers.Conv2D with 'same' padding and ReLU defaults.

    NOTE(review): intentionally shadows keras.layers.Conv2D in this module.
    """
    return layers.Conv2D(filters, kernel_size,
                         padding=padding, activation=activation)
class AE(models.Model):
    """Convolutional autoencoder: two pooling stages down, two upsampling stages up."""

    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)

        # Encoder: shrink spatial resolution twice, ending in a 1-channel code.
        h = Conv2D(4, (3, 3))(original)
        h = layers.MaxPooling2D((2, 2), padding='same')(h)
        h = Conv2D(8, (3, 3))(h)
        h = layers.MaxPooling2D((2, 2), padding='same')(h)
        z = Conv2D(1, (7, 7))(h)

        # Decoder: mirror the encoder with upsampling back to the input size.
        g = Conv2D(16, (3, 3))(z)
        g = layers.UpSampling2D((2, 2))(g)
        g = Conv2D(8, (3, 3))(g)
        g = layers.UpSampling2D((2, 2))(g)
        g = Conv2D(4, (3, 3))(g)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(g)

        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy',
                     metrics=['accuracy'])
def show_ae(autoencoder, data):
    """Plot 10 test images (top row) against their reconstructions (bottom row)."""
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    # Strip the channel axis, wherever the backend places it.
    if backend.image_data_format() == 'channels_first':
        N, n_ch, n_i, n_j = x_test.shape
    else:
        N, n_i, n_j, n_ch = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        for row, imgs in enumerate((x_test, decoded_imgs)):
            ax = plt.subplot(2, n, i + 1 + row * n)
            plt.imshow(imgs[i], cmap='YlGnBu')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.show()
def main(epochs=20, batch_size=128):
    """Train the convolutional AE on DATA() and show curves plus reconstructions."""
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train,
                              epochs=epochs, batch_size=batch_size,
                              shuffle=True, validation_split=0.2)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
    show_ae(autoencoder, data)
    plt.show()
def add_decorate(x):
    """Append per-sample squared deviations along the last (feature) axis.

    axis = -1 is the last dimension; the output doubles its size.
    """
    mean = K.mean(x, axis=-1, keepdims=True)
    sq_dev = K.square(x - mean)
    return K.concatenate([x, sq_dev], axis=-1)
def add_decorate_shape(input_shape):
    """Output shape for add_decorate: the feature (second) axis is doubled."""
    assert len(input_shape) == 2
    batch, features = input_shape
    return (batch, features * 2)
def model_compile(model):
    """Compile `model` for binary discrimination.

    NOTE(review): relies on a module-level `adam` optimizer defined elsewhere.
    """
    return model.compile(loss='binary_crossentropy',
                         optimizer=adam,
                         metrics=['accuracy'])
class GAN():
    """Simple 1-D GAN: a per-element Conv1D generator and a dense discriminator.

    ni_D : length of each sample vector fed to the discriminator.
    nh_D : hidden units in the discriminator.
    nh_G : hidden channels in the generator.
    """

    def __init__(self, ni_D, nh_D, nh_G):
        self.ni_D = ni_D
        self.nh_D = nh_D
        self.nh_G = nh_G
        self.D = self.gen_D()
        self.G = self.gen_G()
        self.GD = self.make_GD()

    def gen_D(self):
        """Discriminator: statistic-augmented input -> 2 ReLU layers -> sigmoid."""
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        # add_decorate appends squared deviations so D also sees a spread statistic.
        D.add(Lambda(add_decorate, output_shape=add_decorate_shape, input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D

    def gen_G(self):
        """Generator: 1x1 Conv1D stack mapping a noise vector to a sample vector."""
        ni_D = self.ni_D
        # BUG FIX: originally read self.nh_D here, silently ignoring the nh_G
        # constructor argument (masked in the book because both were 50).
        nh_G = self.nh_G
        G = models.Sequential()
        G.add(Reshape((ni_D, 1), input_shape=(ni_D,)))
        G.add(Conv1D(nh_G, 1, activation='relu'))
        G.add(Conv1D(nh_G, 1, activation='sigmoid'))
        G.add(Conv1D(1, 1))
        G.add(Flatten())
        model_compile(G)
        return G

    def make_GD(self):
        """Stacked G->D model used to train G; D is frozen inside this stack only."""
        G, D = self.G, self.D
        GD = models.Sequential()
        GD.add(G)
        GD.add(D)
        D.trainable = False
        model_compile(GD)
        D.trainable = True
        return GD

    def D_train_on_batch(self, Real, Gen):
        """One discriminator step: real samples labeled 1, generated labeled 0."""
        D = self.D
        X = np.concatenate([Real, Gen], axis=0)
        y = np.array(([1] * Real.shape[0]) + ([0] * Gen.shape[0]))
        D.train_on_batch(X, y)

    def GD_train_on_batch(self, Z):
        """One generator step: push D(G(Z)) toward the 'real' label (1)."""
        GD = self.GD
        y = np.array([1] * Z.shape[0])
        GD.train_on_batch(Z, y)
class Data():
    """Sample factories for the 1-D GAN.

    real_sample(n): n Gaussian vectors N(mu, sigma) of length ni_D.
    in_sample(n):   n uniform-[0, 1) noise vectors of length ni_D.
    """

    def __init__(self, mu, sigma, ni_D):
        def real_sample(n_batch):
            return np.random.normal(mu, sigma, (n_batch, ni_D))

        def in_sample(n_batch):
            return np.random.rand(n_batch, ni_D)

        self.real_sample = real_sample
        self.in_sample = in_sample
class Machine():
    """Training driver for the 1-D GAN: alternates D and G updates and plots histograms."""

    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        # Per-round update counts: discriminator once, generator five times.
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        """One discriminator update on a fresh real batch plus a fresh fake batch."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Real = data.real_sample(n_batch)
        Z = data.in_sample(n_batch)
        Gen = gan.G.predict(Z)
        # D's weights must be trainable for its own update step.
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        """One generator update through the stacked GD model (D frozen)."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        """One adversarial round: n_iter_D discriminator steps, then n_iter_G generator steps."""
        for it in range(self.n_iter_D):
            self.train_D()
        for it in range(self.n_iter_G):
            self.train_GD()

    def train(self, epochs):
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        '\n        generate a new image\n        '
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return (Gen, Z)

    def show_hist(self, Real, Gen, Z):
        """Overlay histograms of real samples, generated samples, and input noise."""
        plt.hist(Real.reshape((- 1)), histtype='step', label='Real')
        plt.hist(Gen.reshape((- 1)), histtype='step', label='Generated')
        plt.hist(Z.reshape((- 1)), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        """Generate n_test samples, plot them against fresh real samples, print stats."""
        data = self.data
        (Gen, Z) = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)
        Machine.print_stat(Real, Gen)

    def run_epochs(self, epochs, n_test):
        '\n        train GAN and show the results\n        for showing, the original and the artificial results will be compared\n        '
        self.train(epochs)
        self.test_and_show(n_test)

    def run(self, n_repeat=200, n_show=200, n_test=100):
        """Repeat n_repeat stages of n_show training epochs, showing results each stage."""
        for ii in range(n_repeat):
            print('Stage', ii, '(Epoch: {})'.format((ii * n_show)))
            self.run_epochs(n_show, n_test)
            plt.show()

    @staticmethod
    def print_stat(Real, Gen):
        # Compare the first two moments of real vs generated distributions.
        def stat(d):
            return (np.mean(d), np.std(d))
        print('Mean and Std of Real:', stat(Real))
        print('Mean and Std of Gen:', stat(Gen))
class GAN_Pure(GAN):
    """GAN variant whose discriminator sees raw inputs (no add_decorate statistics)."""

    def __init__(self, ni_D, nh_D, nh_G):
        super().__init__(ni_D, nh_D, nh_G)

    def gen_D(self):
        """Plain dense discriminator without the statistic-augmentation Lambda."""
        D = models.Sequential()
        D.add(Dense(self.nh_D, activation='relu', input_shape=(self.ni_D,)))
        D.add(Dense(self.nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D
class Machine_Pure(Machine):
    """Machine variant that drives GAN_Pure instead of GAN."""

    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        # BUG FIX: the inherited train_each() reads n_iter_D / n_iter_G, but
        # this override never set them (and does not call super().__init__),
        # so training crashed with AttributeError. Mirror Machine's defaults.
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN_Pure(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch
def main():
    """Run the statistic-augmented GAN demo with single-sample batches."""
    trainer = Machine(n_batch=1, ni_D=100)
    trainer.run(n_repeat=200, n_show=200, n_test=100)