code stringlengths 17 6.64M |
|---|
def list_norm_inplace(buff):
    """Standardize buff in place to zero mean and unit standard deviation.

    Fix: if all entries are equal (std == 0), the original divided by zero,
    filling the buffer with NaN/inf; now only the mean is removed.
    """
    r_mean = np.mean(buff)
    r_std = np.std(buff)
    if r_std == 0:
        r_std = 1.0  # degenerate case: all entries equal; just center them
    for ii in range(len(buff)):
        buff[ii] = (buff[ii] - r_mean) / r_std
|
def plot_durations(episode_durations):
    """Plot episode durations and, once 100+ episodes exist, a 100-episode moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = TC.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    if (len(durations_t) >= 100):
        # unfold(0, 100, 1) yields length-100 sliding windows; mean(1) averages each window.
        means = durations_t.unfold(0, 100, 1).mean(1).view((- 1))
        # Pad with 99 zeros so the moving-average curve aligns with the raw curve's x-axis.
        means = TC.cat((TC.zeros(99), means))
        plt.plot(means.numpy())
    plt.show()
|
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100):
    """Record the finished episode's length (ii + 1 steps) and refresh the plot every ee_duration episodes."""
    episode_durations.append((ii + 1))
    if (((ee + 1) % ee_duration) == 0):
        clear_output()  # IPython display helper; presumably notebook context -- TODO confirm
        plot_durations(episode_durations)
|
class PGNET(nn.Module):
    """Policy network: maps a state vector to the probability of taking action 1.

    Architecture: Linear(num_state, 24) -> ReLU -> Linear(24, 36) -> ReLU
    -> Linear(36, 1) -> sigmoid.
    """

    def __init__(self, num_state):
        super(PGNET, self).__init__()
        self.fc_in = nn.Linear(num_state, 24)
        self.fc_hidden = nn.Linear(24, 36)
        self.fc_out = nn.Linear(36, 1)

    def forward(self, x):
        hidden = F.relu(self.fc_hidden(F.relu(self.fc_in(x))))
        return TC.sigmoid(self.fc_out(hidden))
|
class PGNET_MACHINE(PGNET):
    """REINFORCE-style policy-gradient trainer wrapping PGNET for a binary-action env.

    Collects (reward, state, action) transitions over num_batch episodes,
    then trains once on discounted, normalized returns.
    """

    def __init__(self, num_state, render_flag=False):
        # Hyperparameters.
        self.forget_factor = 0.99      # discount factor gamma
        self.learning_rate = 0.01
        self.num_episode = 5000
        self.num_batch = 5             # episodes between training updates
        self.render_flag = render_flag
        self.steps_in_batch = 0        # transitions accumulated since last update
        self.episode_durations = []
        super(PGNET_MACHINE, self).__init__(num_state)
        self.optimizer = TC.optim.RMSprop(self.parameters(), lr=self.learning_rate)
        self.init_buff()

    def forward(self, state):
        """Map a numpy state to a Bernoulli action distribution over {0, 1}."""
        state_var = Variable(TC.from_numpy(state).float())
        prob = super(PGNET_MACHINE, self).forward(state_var)
        return Bernoulli(prob)

    def push_buff_done(self, reward, state, action, done_flag=False):
        """Append one transition; a terminal step stores reward 0 to mark the episode boundary."""
        if done_flag:
            self.reward_buff.append(0)
        else:
            self.reward_buff.append(reward)
        self.state_buff.append(state)
        self.action_buff.append(action)

    def pull_buff(self, ii):
        """Return the ii-th buffered (reward, state, action) triple."""
        return (self.reward_buff[ii], self.state_buff[ii], self.action_buff[ii])

    def init_buff(self):
        """Clear the transition buffers."""
        self.reward_buff = []
        self.state_buff = []
        self.action_buff = []

    def transform_discount_reward(self, steps):
        """Replace raw rewards with discounted returns (restarting at the 0 markers), then normalize in place."""
        future_reward = 0
        for ii in reversed(range(steps)):
            if (self.reward_buff[ii] == 0):
                future_reward = 0  # episode boundary: restart the return accumulation
            else:
                future_reward = ((future_reward * self.forget_factor) + self.reward_buff[ii])
            self.reward_buff[ii] = future_reward
        list_norm_inplace(self.reward_buff)

    def train(self, steps):
        """One REINFORCE update over the buffered transitions; clears the buffers afterwards."""
        self.transform_discount_reward(steps)
        self.optimizer.zero_grad()
        for ii in range(steps):
            (reward, state, action) = self.pull_buff(ii)
            action_var = Variable(TC.FloatTensor([float(action)]))
            policy = self.forward(state)
            # Loss is -log pi(a|s) * return; gradients accumulate across all steps.
            loss = ((- policy.log_prob(action_var)) * reward)
            loss.backward()
        self.optimizer.step()
        self.init_buff()

    def step(self, env, state, ee, ii, ee_duration=100):
        """Sample an action, advance the env one step, and buffer the transition."""
        policy = self.forward(state)
        action = policy.sample().data.numpy().astype(int)[0]
        (next_state, reward, done_flag, _) = env.step(action)
        if self.render_flag:
            env.render()
        self.push_buff_done(reward, state, action, done_flag)
        self.steps_in_batch += 1
        state = next_state
        return (state, done_flag)

    def run_episode(self, env, ee):
        """Play one episode to termination, recording its duration for plotting."""
        state = env.reset()
        for ii in count():
            (state, done_flag) = self.step(env, state, ee, ii, ee_duration=100)
            if done_flag:
                plot_durations_ii(ii, self.episode_durations, ee, ee_duration=100)
                break

    def train_episode(self, ee):
        """Train once every num_batch episodes on the accumulated transitions."""
        if ((ee > 0) and ((ee % self.num_batch) == 0)):
            self.train(self.steps_in_batch)
            self.steps_in_batch = 0

    def run(self, env):
        """Main loop: run and periodically train for num_episode episodes."""
        for ee in range(self.num_episode):
            self.run_episode(env, ee)
            self.train_episode(ee)
|
def main():
    """Train the policy-gradient agent on CartPole-v0."""
    env = gym.make('CartPole-v0')
    mypgnet = PGNET_MACHINE(env.observation_space.shape[0], render_flag=False)
    mypgnet.run(env)
    env.close()
|
def list_norm_inplace(buff):
    """Standardize buff in place to zero mean and unit standard deviation.

    Fix: if all entries are equal (std == 0), the original divided by zero,
    filling the buffer with NaN/inf; now only the mean is removed.
    """
    r_mean = np.mean(buff)
    r_std = np.std(buff)
    if r_std == 0:
        r_std = 1.0  # degenerate case: all entries equal; just center them
    for ii in range(len(buff)):
        buff[ii] = (buff[ii] - r_mean) / r_std
|
def plot_durations(episode_durations):
    """Plot episode durations and, once 100+ episodes exist, a 100-episode moving average."""
    plt.figure(2)
    plt.clf()
    durations_t = TC.FloatTensor(episode_durations)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    if (len(durations_t) >= 100):
        # Sliding windows of length 100, averaged; padded with 99 zeros for x-axis alignment.
        means = durations_t.unfold(0, 100, 1).mean(1).view((- 1))
        means = TC.cat((TC.zeros(99), means))
        plt.plot(means.numpy())
    plt.show()
|
def plot_durations_ii(ii, episode_durations, ee, ee_duration=100):
    """Record the finished episode's length (ii + 1 steps) and refresh the plot every ee_duration episodes."""
    episode_durations.append((ii + 1))
    if (((ee + 1) % ee_duration) == 0):
        clear_output()  # IPython display helper; presumably notebook context -- TODO confirm
        plot_durations(episode_durations)
|
class PGNET(nn.Module):
    """Three-layer MLP policy head producing the probability of action 1."""

    def __init__(self, num_state):
        super(PGNET, self).__init__()
        self.fc_in = nn.Linear(num_state, 24)
        self.fc_hidden = nn.Linear(24, 36)
        self.fc_out = nn.Linear(36, 1)

    def forward(self, x):
        out = self.fc_in(x)
        out = F.relu(out)
        out = F.relu(self.fc_hidden(out))
        # Sigmoid squashes the single logit into a probability in (0, 1).
        return TC.sigmoid(self.fc_out(out))
|
class PGNET_AGENT(PGNET):
    """REINFORCE-style policy-gradient trainer wrapping PGNET for a binary-action env.

    Collects (reward, state, action) transitions over num_batch episodes,
    then trains once on discounted, normalized returns.
    """

    def __init__(self, num_state, render_flag=False):
        # Hyperparameters.
        self.forget_factor = 0.99      # discount factor gamma
        self.learning_rate = 0.01
        self.num_episode = 5000
        self.num_batch = 5             # episodes between training updates
        self.render_flag = render_flag
        self.steps_in_batch = 0        # transitions accumulated since last update
        self.episode_durations = []
        super().__init__(num_state)
        self.optimizer = TC.optim.RMSprop(self.parameters(), lr=self.learning_rate)
        self.init_buff()

    def forward(self, state):
        """Map a numpy state to a Bernoulli action distribution over {0, 1}."""
        state_var = Variable(TC.from_numpy(state).float())
        prob = super().forward(state_var)
        return Bernoulli(prob)

    def push_buff_done(self, reward, state, action, done_flag=False):
        """Append one transition; a terminal step stores reward 0 to mark the episode boundary."""
        if done_flag:
            self.reward_buff.append(0)
        else:
            self.reward_buff.append(reward)
        self.state_buff.append(state)
        self.action_buff.append(action)

    def pull_buff(self, ii):
        """Return the ii-th buffered (reward, state, action) triple."""
        return (self.reward_buff[ii], self.state_buff[ii], self.action_buff[ii])

    def init_buff(self):
        """Clear the transition buffers."""
        self.reward_buff = []
        self.state_buff = []
        self.action_buff = []

    def transform_discount_reward(self, steps):
        """Replace raw rewards with discounted returns (restarting at the 0 markers), then normalize in place."""
        future_reward = 0
        for ii in reversed(range(steps)):
            if (self.reward_buff[ii] == 0):
                future_reward = 0  # episode boundary: restart the return accumulation
            else:
                future_reward = ((future_reward * self.forget_factor) + self.reward_buff[ii])
            self.reward_buff[ii] = future_reward
        list_norm_inplace(self.reward_buff)

    def train(self, steps):
        """One REINFORCE update over the buffered transitions; clears the buffers afterwards."""
        self.transform_discount_reward(steps)
        self.optimizer.zero_grad()
        for ii in range(steps):
            (reward, state, action) = self.pull_buff(ii)
            action_var = Variable(TC.FloatTensor([float(action)]))
            policy = self.forward(state)
            # Loss is -log pi(a|s) * return; gradients accumulate across all steps.
            loss = ((- policy.log_prob(action_var)) * reward)
            loss.backward()
        self.optimizer.step()
        self.init_buff()

    def step(self, env, state, ee, ii, ee_duration=100):
        """Sample an action, advance the env one step, and buffer the transition."""
        policy = self.forward(state)
        action = policy.sample().data.numpy().astype(int)[0]
        (next_state, reward, done_flag, _) = env.step(action)
        if self.render_flag:
            env.render()
        self.push_buff_done(reward, state, action, done_flag)
        self.steps_in_batch += 1
        state = next_state
        return (state, done_flag)

    def run_episode(self, env, ee):
        """Play one episode to termination, recording its duration for plotting."""
        state = env.reset()
        for ii in count():
            (state, done_flag) = self.step(env, state, ee, ii, ee_duration=100)
            if done_flag:
                plot_durations_ii(ii, self.episode_durations, ee, ee_duration=100)
                break

    def train_episode(self, ee):
        """Train once every num_batch episodes on the accumulated transitions."""
        if ((ee > 0) and ((ee % self.num_batch) == 0)):
            self.train(self.steps_in_batch)
            self.steps_in_batch = 0

    def run(self, env):
        """Main loop: run and periodically train for num_episode episodes."""
        for ee in range(self.num_episode):
            self.run_episode(env, ee)
            self.train_episode(ee)
|
class CDENSE(Layer):
    """Complex-valued dense layer operating on [real, imag] tensor pairs.

    Weights are stored as separate real/imag parts and combined via
    tf.complex at build time. igu/iz are initializers defined elsewhere
    in the file -- TODO confirm their definitions.
    """

    def __init__(self, No, **kwargs):
        # No: number of complex output units.
        self.No = No
        super().__init__(**kwargs)

    def build(self, inshape_l):
        # inshape_l is the list of the two input shapes ([real, imag]); only the first is used.
        inshape = inshape_l[0]
        self.w_r = self.add_weight('w_r', (inshape[1], self.No), initializer=igu)
        self.w_i = self.add_weight('w_i', (inshape[1], self.No), initializer=igu)
        self.b_r = self.add_weight('b_r', (self.No,), initializer=iz)
        self.b_i = self.add_weight('b_i', (self.No,), initializer=iz)
        self.w = tf.complex(self.w_r, self.w_i)
        self.b = tf.complex(self.b_r, self.b_i)
        super().build(inshape)

    def call(self, x_l):
        # Combine the pair into one complex tensor, apply y = xW + b, then split back.
        (x_r, x_i) = x_l
        x = tf.complex(x_r, x_i)
        y = (tf.matmul(x, self.w) + self.b)
        y_r = tf.real(y)  # NOTE(review): tf.real/tf.imag are TF1-era names (tf.math.real in TF2) -- confirm TF version
        y_i = tf.imag(y)
        return [y_r, y_i]

    def compute_output_shape(self, inshape_l):
        return [(inshape_l[0], self.No), (inshape_l[0], self.No)]
|
def modeling(input_shape):
    """Build a Keras model with one CDENSE(1) layer taking/returning [real, imag] pairs."""
    x_r = keras.layers.Input(input_shape)
    x_i = keras.layers.Input(input_shape)
    [y_r, y_i] = CDENSE(1, input_shape=(1,))([x_r, x_i])
    return keras.models.Model([x_r, x_i], [y_r, y_i])
|
def cfit(model, x, y, **kwargs):
    """Fit a real-pair model on complex data by splitting x and y into [real, imag] lists."""
    def split(a):
        return [np.real(a), np.imag(a)]
    return model.fit(split(x), split(y), **kwargs)
|
def cpredict(model, x, **kwargs):
    """Predict complex outputs: feed [real, imag] parts and recombine the two outputs.

    Note: kwargs are accepted for interface symmetry with cfit but are not
    forwarded to model.predict (matching the original behavior).
    """
    y_r, y_i = model.predict([np.real(x), np.imag(x)])
    return y_r + (1j * y_i)
|
def cget_weights(model):
    """Recombine the model's four real weight arrays into complex ([w], [b])."""
    w_r, w_i, b_r, b_i = model.get_weights()
    w = w_r + (1j * w_i)
    b = b_r + (1j * b_i)
    return ([w], [b])
|
def cmain():
    """Fit the one-unit complex dense model to y = (2+1j)x + (1+2j) and print predictions/weights."""
    model = modeling((1,))
    # NOTE(review): keras.optimizers.sgd (lowercase) is an old-Keras alias; newer versions expose SGD -- confirm version.
    model.compile(keras.optimizers.sgd(), 'mse')
    x = (np.array([0, 1, 2, 3, 4]) + (1j * np.array([4, 3, 2, 1, 0])))
    y = ((x * (2 + 1j)) + (1 + 2j))
    # Train on the first two points only; evaluate generalization on the rest.
    h = cfit(model, x[:2], y[:2], epochs=5000, verbose=0)
    y_pred = cpredict(model, x[2:])
    print('Targets:', y[2:])
    print(y_pred)
    [w, b] = cget_weights(model)
    print('weight:', w)
    print('bias:', b)
|
def ANN_models_func(Nin, Nh, Nout):
    """Functional-API classifier: Nin -> Dense(Nh)+ReLU -> Dense(Nout)+softmax, compiled for classification."""
    x = layers.Input(shape=(Nin,))
    h = layers.Activation('relu')(layers.Dense(Nh)(x))
    y = layers.Activation('softmax')(layers.Dense(Nout)(h))
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
def ANN_seq_func(Nin, Nh, Nout):
    """Sequential-API version of the same one-hidden-layer classifier as ANN_models_func."""
    model = models.Sequential()
    model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
    model.add(layers.Dense(Nout, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
class ANN_models_class(models.Model):
    """Subclass-style functional classifier: Nin -> Dense(Nh)+ReLU -> Dense(Nout)+softmax."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class ANN_seq_class(models.Sequential):
    """Sequential-subclass version of the one-hidden-layer softmax classifier."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load MNIST: one-hot encode labels, flatten 28x28 images to 784-vectors scaled to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy from a Keras History object (or its .history dict)."""
    if (not isinstance(history, dict)):
        history = history.history
    # NOTE(review): 'acc'/'val_acc' keys are from Keras < 2.3; newer versions use 'accuracy' -- confirm version.
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss from a Keras History object (or its .history dict)."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def main():
    """Train the one-hidden-layer MNIST classifier and plot loss/accuracy curves."""
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
|
class ANN(models.Model):
    """One-hidden-layer regression network: Nin -> Dense(Nh)+ReLU -> Dense(Nout), MSE/SGD."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = output(h)  # linear output for regression
        super().__init__(x, y)
        self.compile(loss='mse', optimizer='sgd')
|
def Data_func():
    """Load Boston housing data with features min-max scaled on the training split."""
    ((X_train, y_train), (X_test, y_test)) = datasets.boston_housing.load_data()
    scaler = preprocessing.MinMaxScaler()
    # Fit the scaler on train only; reuse it for test to avoid leakage.
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    return ((X_train, y_train), (X_test, y_test))
|
def main():
    """Train the regression ANN on Boston housing data and plot the loss curve."""
    Nin = 13   # number of Boston housing features
    Nh = 5
    Nout = 1
    model = ANN(Nin, Nh, Nout)
    ((X_train, y_train), (X_test, y_test)) = Data_func()
    history = model.fit(X_train, y_train, epochs=100, batch_size=100, validation_split=0.2, verbose=2)
    performace_test = model.evaluate(X_test, y_test, batch_size=100)
    print('\nTest Loss -> {:.2f}'.format(performace_test))
    plot_loss(history)
    plt.show()
|
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier (Nh_l[0], Nh_l[1] units)."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier (duplicate of the definition above in this file)."""

    def __init__(self, Nin, Nh_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load MNIST: one-hot encode labels, flatten images to 784-vectors scaled to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def main():
    """Train the two-hidden-layer MNIST DNN and plot accuracy/loss curves."""
    Nin = 784
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    history = model.fit(X_train, Y_train, epochs=10, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_acc(history)
    plt.show()
    plot_loss(history)
    plt.show()
|
class DNN(models.Sequential):
    """Two-hidden-layer softmax classifier with dropout rates Pd_l after each hidden layer."""

    def __init__(self, Nin, Nh_l, Pd_l, Nout):
        super().__init__()
        self.add(layers.Dense(Nh_l[0], activation='relu', input_shape=(Nin,), name='Hidden-1'))
        self.add(layers.Dropout(Pd_l[0]))
        self.add(layers.Dense(Nh_l[1], activation='relu', name='Hidden-2'))
        self.add(layers.Dropout(Pd_l[1]))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load CIFAR-10: one-hot encode labels, flatten 32x32x3 images scaled to [0, 1]."""
    ((X_train, y_train), (X_test, y_test)) = datasets.cifar10.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H, C) = X_train.shape
    X_train = X_train.reshape((- 1), ((W * H) * C))
    X_test = X_test.reshape((- 1), ((W * H) * C))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def main(Pd_l=[0.0, 0.0]):
    """Train the dropout DNN on CIFAR-10; Pd_l gives the two dropout rates.

    NOTE(review): mutable default argument is read-only here, so it is harmless.
    """
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    model = DNN(X_train.shape[1], Nh_l, Pd_l, Nout)
    history = model.fit(X_train, Y_train, epochs=100, batch_size=100, validation_split=0.2)
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_acc(history, '(a) νμ΅μ ν΅ν μ νλμ λ³ν')
    plt.show()
    plot_loss(history, '(b) νμ΅μ ν΅ν μμ€μ λ³ν')
    plt.show()
|
class CNN(models.Sequential):
    """LeNet-style CNN: two conv layers, max-pool, dropout, dense head with softmax."""

    def __init__(self, input_shape, num_classes):
        super().__init__()
        self.add(layers.Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
        self.add(layers.Conv2D(64, (3, 3), activation='relu'))
        self.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.add(layers.Dropout(0.25))
        self.add(layers.Flatten())
        self.add(layers.Dense(128, activation='relu'))
        self.add(layers.Dropout(0.5))
        self.add(layers.Dense(num_classes, activation='softmax'))
        self.compile(loss=keras.losses.categorical_crossentropy, optimizer='rmsprop', metrics=['accuracy'])
|
class DATA():
    """MNIST container for the CNN: reshapes to NCHW/NHWC per backend, scales to [0, 1], one-hot labels."""

    def __init__(self):
        num_classes = 10
        ((x_train, y_train), (x_test, y_test)) = datasets.mnist.load_data()
        (img_rows, img_cols) = x_train.shape[1:]
        # Channel axis position depends on the Keras backend configuration.
        if (backend.image_data_format() == 'channels_first'):
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        y_train = keras.utils.to_categorical(y_train, num_classes)
        y_test = keras.utils.to_categorical(y_test, num_classes)
        self.input_shape = input_shape
        self.num_classes = num_classes
        (self.x_train, self.y_train) = (x_train, y_train)
        (self.x_test, self.y_test) = (x_test, y_test)
|
def main():
    """Train the CNN on MNIST, report test score, and plot the learning curves."""
    batch_size = 128
    epochs = 10
    data = DATA()
    model = CNN(data.input_shape, data.num_classes)
    history = model.fit(data.x_train, data.y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2)
    score = model.evaluate(data.x_test, data.y_test)
    print()
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
|
class Machine(aicnn.Machine):
    """aicnn.Machine specialized to CIFAR-10 (test split from load_data is unused here)."""

    def __init__(self):
        ((X, y), (x_test, y_test)) = datasets.cifar10.load_data()
        super().__init__(X, y, nb_classes=10)
|
def main():
    """Run the CIFAR-10 aicnn machine."""
    m = Machine()
    m.run()
|
class Data():
    """IMDB sentiment data, vocabulary-capped and padded/truncated to maxlen tokens."""

    def __init__(self, max_features=20000, maxlen=80):
        ((x_train, y_train), (x_test, y_test)) = imdb.load_data(num_words=max_features)
        x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
        x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
        (self.x_train, self.y_train) = (x_train, y_train)
        (self.x_test, self.y_test) = (x_test, y_test)
|
class RNN_LSTM(models.Model):
    """Binary sentiment classifier: Embedding(128) -> LSTM(128, dropout) -> Dense(1, sigmoid)."""

    def __init__(self, max_features, maxlen):
        x = layers.Input((maxlen,))
        h = layers.Embedding(max_features, 128)(x)
        h = layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2)(h)
        y = layers.Dense(1, activation='sigmoid')(h)
        super().__init__(x, y)
        self.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class Machine():
    """Driver that trains RNN_LSTM on the IMDB data and reports test accuracy."""

    def __init__(self, max_features=20000, maxlen=80):
        self.data = Data(max_features, maxlen)
        self.model = RNN_LSTM(max_features, maxlen)

    def run(self, epochs=3, batch_size=32):
        """Fit on the train split (validating on test) and print final test metrics."""
        data = self.data
        model = self.model
        print('Training stage')
        print('==============')
        model.fit(data.x_train, data.y_train, batch_size=batch_size, epochs=epochs, validation_data=(data.x_test, data.y_test))
        (score, acc) = model.evaluate(data.x_test, data.y_test, batch_size=batch_size)
        print('Test performance: accuracy={0}, loss={1}'.format(acc, score))
|
def main():
    """Run the IMDB LSTM machine with default settings."""
    m = Machine()
    m.run()
|
def main():
    """Run the airline-passengers RNN machine for 400 epochs."""
    machine = Machine()
    machine.run(epochs=400)
|
class Machine():
    """Train and visualize the airline-passengers LSTM regressor."""

    def __init__(self):
        self.data = Dataset()
        shape = self.data.X.shape[1:]
        self.model = rnn_model(shape)

    def run(self, epochs=400):
        """Fit the model, then plot validation and whole-series predictions.

        Fix: the original swapped the legend labels (predictions were labeled
        'Origial' — also a typo — and ground truth 'Prediction'); the labels
        now match the curves actually plotted.
        """
        d = self.data
        (X_train, X_test, y_train, y_test) = (d.X_train, d.X_test, d.y_train, d.y_test)
        (X, y) = (d.X, d.y)
        m = self.model
        h = m.fit(X_train, y_train, epochs=epochs, validation_data=[X_test, y_test], verbose=0)
        skeras.plot_loss(h)
        plt.title('History of training')
        plt.show()
        yp = m.predict(X_test)
        print('Loss:', m.evaluate(X_test, y_test))
        plt.plot(yp, label='Prediction')
        plt.plot(y_test, label='Original')
        plt.legend(loc=0)
        plt.title('Validation Results')
        plt.show()
        yp = m.predict(X_test).reshape((- 1))
        print('Loss:', m.evaluate(X_test, y_test))
        print(yp.shape, y_test.shape)
        # Side-by-side bar comparison of original vs predicted values.
        df = pd.DataFrame()
        df['Sample'] = (list(range(len(y_test))) * 2)
        df['Normalized #Passengers'] = np.concatenate([y_test, yp], axis=0)
        df['Type'] = ((['Original'] * len(y_test)) + (['Prediction'] * len(yp)))
        plt.figure(figsize=(7, 5))
        sns.barplot(x='Sample', y='Normalized #Passengers', hue='Type', data=df)
        plt.ylabel('Normalized #Passengers')
        plt.show()
        yp = m.predict(X)
        plt.plot(yp, label='Prediction')
        plt.plot(y, label='Original')
        plt.legend(loc=0)
        plt.title('All Results')
        plt.show()
|
def rnn_model(shape):
    """Build and compile a small regressor: Input(shape) -> LSTM(10) -> Dense(1), Adam + MSE."""
    m_x = layers.Input(shape=shape)
    m_h = layers.LSTM(10)(m_x)
    m_y = layers.Dense(1)(m_h)
    m = models.Model(m_x, m_y)
    m.compile('adam', 'mean_squared_error')
    m.summary()
    return m
|
class Dataset():
    """Windowed airline-passengers dataset with an 80/20 random train/test split."""

    def __init__(self, fname='international-airline-passengers.csv', D=12):
        # D: window length (months of history per sample).
        data_dn = load_data(fname=fname)
        (X, y) = get_Xy(data_dn, D=D)
        (X_train, X_test, y_train, y_test) = model_selection.train_test_split(X, y, test_size=0.2, random_state=42)
        (self.X, self.y) = (X, y)
        (self.X_train, self.X_test, self.y_train, self.y_test) = (X_train, X_test, y_train, y_test)
|
def load_data(fname='international-airline-passengers.csv'):
    """Load the passenger series from CSV, plot it, and return it standardized then divided by 5."""
    dataset = pd.read_csv(fname, usecols=[1], engine='python', skipfooter=3)
    data = dataset.values.reshape((- 1))
    plt.plot(data)
    plt.xlabel('Time')
    plt.ylabel('#Passengers')
    plt.title('Original Data')
    plt.show()
    # Normalize: zero mean, then scale so values span roughly +-1 (std/5).
    data_dn = (((data - np.mean(data)) / np.std(data)) / 5)
    plt.plot(data_dn)
    plt.xlabel('Time')
    plt.ylabel('Normalized #Passengers')
    plt.title('Normalized data by $E[]$ and $5\\sigma$')
    plt.show()
    return data_dn
|
def get_Xy(data, D=12):
    """Build sliding-window samples: X[i] = data[i:i+D] with shape (samples, D, 1), y[i] = data[i+D]."""
    N = len(data)
    assert (N > D), 'N should be larger than D, where N is len(data)'
    n_samples = N - D - 1
    windows = [data[start:start + D] for start in range(n_samples)]
    targets = [data[start + D] for start in range(n_samples)]
    X = np.array(windows)
    X = X.reshape(X.shape[0], X.shape[1], 1)  # add the trailing feature axis for the LSTM
    y = np.array(targets)
    print(X.shape, y.shape)
    return (X, y)
|
class AE(models.Model):
    """Single-hidden-layer autoencoder: x_nodes -> z_dim (ReLU) -> x_nodes (sigmoid)."""

    def __init__(self, x_nodes=784, z_dim=36):
        x_shape = (x_nodes,)
        x = layers.Input(shape=x_shape)
        z = layers.Dense(z_dim, activation='relu')(x)
        y = layers.Dense(x_nodes, activation='sigmoid')(z)
        super().__init__(x, y)
        # Keep handles to the symbolic tensors for the Encoder/Decoder submodels.
        self.x = x
        self.z = z
        self.z_dim = z_dim
        self.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

    def Encoder(self):
        """Submodel mapping input to the latent code."""
        return models.Model(self.x, self.z)

    def Decoder(self):
        """Submodel reusing the trained output layer to decode latent codes."""
        z_shape = (self.z_dim,)
        z = layers.Input(shape=z_shape)
        y_layer = self.layers[(- 1)]
        y = y_layer(z)
        return models.Model(z, y)
|
def show_ae(autoencoder):
    """Visualize the dense AE on 10 samples: originals (row 1), latent codes (row 2), reconstructions (row 3).

    NOTE(review): reads the global X_test defined elsewhere in the file -- confirm it is in scope.
    """
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    encoded_imgs = encoder.predict(X_test)
    decoded_imgs = decoder.predict(encoded_imgs)
    n = 10
    plt.figure(figsize=(20, 6))
    for i in range(n):
        # Row 1: original image.
        ax = plt.subplot(3, n, (i + 1))
        plt.imshow(X_test[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 2: latent code as a stem plot.
        ax = plt.subplot(3, n, ((i + 1) + n))
        plt.stem(encoded_imgs[i].reshape((- 1)))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 3: reconstruction.
        ax = plt.subplot(3, n, (((i + 1) + n) + n))
        plt.imshow(decoded_imgs[i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
|
def main():
    """Train the dense autoencoder on MNIST and visualize reconstructions.

    NOTE(review): relies on globals X_train/X_test defined elsewhere in the file -- confirm.
    """
    x_nodes = 784
    z_dim = 36
    autoencoder = AE(x_nodes, z_dim)
    history = autoencoder.fit(X_train, X_train, epochs=10, batch_size=256, shuffle=True, validation_data=(X_test, X_test))
    plot_acc(history, '(a) νμ΅ κ²½κ³Όμ λ°λ₯Έ μ νλ λ³ν μΆμ΄')
    plt.show()
    plot_loss(history, '(b) νμ΅ κ²½κ³Όμ λ°λ₯Έ μμ€κ° λ³ν μΆμ΄')
    plt.show()
    show_ae(autoencoder)
    plt.show()
|
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Thin wrapper over layers.Conv2D with 'same' padding and ReLU defaults (shadows the Keras layer name)."""
    return layers.Conv2D(filters, kernel_size, padding=padding, activation=activation)
|
class AE(models.Model):
    """Convolutional autoencoder; the bottleneck is a single-channel conv with a 7x7 kernel."""

    def __init__(self, org_shape=(1, 28, 28)):
        original = layers.Input(shape=org_shape)
        # Encoder: two conv + pool stages, then the 1-channel bottleneck.
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        z = Conv2D(1, (7, 7))(x)
        # Decoder: conv + upsample stages mirroring the encoder.
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
|
def show_ae(autoencoder, data):
    """Show 10 test images (top row) against their reconstructions (bottom row)."""
    x_test = data.x_test
    decoded_imgs = autoencoder.predict(x_test)
    print(decoded_imgs.shape, data.x_test.shape)
    # Collapse the channel axis (1 channel) regardless of backend data format.
    if (backend.image_data_format() == 'channels_first'):
        (N, n_ch, n_i, n_j) = x_test.shape
    else:
        (N, n_i, n_j, n_ch) = x_test.shape
    x_test = x_test.reshape(N, n_i, n_j)
    decoded_imgs = decoded_imgs.reshape(decoded_imgs.shape[0], n_i, n_j)
    n = 10
    plt.figure(figsize=(20, 4))
    for i in range(n):
        ax = plt.subplot(2, n, (i + 1))
        plt.imshow(x_test[i], cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, ((i + 1) + n))
        plt.imshow(decoded_imgs[i], cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
|
def main(epochs=20, batch_size=128):
    """Train the convolutional autoencoder on the DATA images and visualize results."""
    data = DATA()
    autoencoder = AE(data.input_shape)
    history = autoencoder.fit(data.x_train, data.x_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_split=0.2)
    plot_acc(history, '(a) μ νλ νμ΅ κ³‘μ ')
    plt.show()
    plot_loss(history, '(b) μμ€ νμ΅ κ³‘μ ')
    plt.show()
    show_ae(autoencoder, data)
    plt.show()
|
def add_decorate(x):
    """Concatenate x with its squared deviation from the per-sample mean; last axis doubles.

    axis = -1 --> last dimension in an array
    """
    m = K.mean(x, axis=(- 1), keepdims=True)
    d = K.square((x - m))
    return K.concatenate([x, d], axis=(- 1))
|
def add_decorate_shape(input_shape):
    """Output shape for add_decorate: the feature (second) dimension doubles."""
    assert (len(input_shape) == 2)
    batch, features = input_shape
    return (batch, features * 2)
|
def model_compile(model):
    """Compile a GAN submodel with binary crossentropy; `adam` is a module-level optimizer defined elsewhere -- TODO confirm."""
    return model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
|
class GAN():
    """Vanilla 1-D GAN: discriminator D, generator G, and the stacked GD model."""

    def __init__(self, ni_D, nh_D, nh_G):
        self.ni_D = ni_D  # sample dimensionality
        self.nh_D = nh_D  # discriminator hidden units
        self.nh_G = nh_G  # generator hidden channels
        self.D = self.gen_D()
        self.G = self.gen_G()
        self.GD = self.make_GD()

    def gen_D(self):
        """Discriminator: input decorated with squared deviations, two ReLU layers, sigmoid output."""
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        D.add(Lambda(add_decorate, output_shape=add_decorate_shape, input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D

    def gen_G(self):
        """Generator: 1x1 Conv1D stack mapping uniform noise to samples.

        Fix: the hidden width was taken from self.nh_D (copy-paste bug);
        it now uses the generator's own nh_G parameter.
        """
        ni_D = self.ni_D
        nh_G = self.nh_G
        G = models.Sequential()
        G.add(Reshape((ni_D, 1), input_shape=(ni_D,)))
        G.add(Conv1D(nh_G, 1, activation='relu'))
        G.add(Conv1D(nh_G, 1, activation='sigmoid'))
        G.add(Conv1D(1, 1))
        G.add(Flatten())
        model_compile(G)
        return G

    def make_GD(self):
        """Stack G then D; D is frozen only while compiling the combined model."""
        (G, D) = (self.G, self.D)
        GD = models.Sequential()
        GD.add(G)
        GD.add(D)
        D.trainable = False
        model_compile(GD)
        D.trainable = True
        return GD

    def D_train_on_batch(self, Real, Gen):
        """Train D on one batch: real samples labeled 1, generated labeled 0."""
        D = self.D
        X = np.concatenate([Real, Gen], axis=0)
        y = np.array((([1] * Real.shape[0]) + ([0] * Gen.shape[0])))
        D.train_on_batch(X, y)

    def GD_train_on_batch(self, Z):
        """Train G through the frozen D: generated samples are labeled as real (1)."""
        GD = self.GD
        y = np.array(([1] * Z.shape[0]))
        GD.train_on_batch(Z, y)
|
class Data():
    """Samplers for the toy 1-D GAN data."""

    def __init__(self, mu, sigma, ni_D):
        # Real data: Gaussian N(mu, sigma); generator input: uniform on [0, 1).
        def real_sample(n_batch):
            return np.random.normal(mu, sigma, (n_batch, ni_D))

        def in_sample(n_batch):
            return np.random.rand(n_batch, ni_D)

        self.real_sample = real_sample
        self.in_sample = in_sample
|
class Machine():
    """Driver that alternates discriminator and generator training on the toy 1-D data."""

    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        self.n_iter_D = 1   # discriminator updates per round
        self.n_iter_G = 5   # generator updates per round
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        """One discriminator update on a fresh real batch plus a freshly generated batch."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Real = data.real_sample(n_batch)
        Z = data.in_sample(n_batch)
        Gen = gan.G.predict(Z)
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        """One generator update through the frozen discriminator."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        """One adversarial round: n_iter_D D-steps then n_iter_G G-steps."""
        for it in range(self.n_iter_D):
            self.train_D()
        for it in range(self.n_iter_G):
            self.train_GD()

    def train(self, epochs):
        """Run `epochs` adversarial rounds."""
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        """Generate n_test samples from G; returns (generated, noise input)."""
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return (Gen, Z)

    def show_hist(self, Real, Gen, Z):
        """Overlay histograms of real, generated, and input-noise values."""
        plt.hist(Real.reshape((- 1)), histtype='step', label='Real')
        plt.hist(Gen.reshape((- 1)), histtype='step', label='Generated')
        plt.hist(Z.reshape((- 1)), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        """Sample from G, compare with fresh real data, and print summary stats."""
        data = self.data
        (Gen, Z) = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)
        Machine.print_stat(Real, Gen)

    def run_epochs(self, epochs, n_test):
        """Train the GAN for `epochs` rounds, then visualize real vs generated distributions."""
        self.train(epochs)
        self.test_and_show(n_test)

    def run(self, n_repeat=200, n_show=200, n_test=100):
        """Repeat train-and-visualize stages: n_repeat stages of n_show epochs each."""
        for ii in range(n_repeat):
            print('Stage', ii, '(Epoch: {})'.format((ii * n_show)))
            self.run_epochs(n_show, n_test)
            plt.show()

    @staticmethod
    def print_stat(Real, Gen):
        """Print mean/std of the real and generated batches."""
        def stat(d):
            return (np.mean(d), np.std(d))
        print('Mean and Std of Real:', stat(Real))
        print('Mean and Std of Gen:', stat(Gen))
|
class GAN_Pure(GAN):
    """GAN variant whose discriminator sees the raw input (no add_decorate preprocessing)."""

    def __init__(self, ni_D, nh_D, nh_G):
        """Discriminator input is not added (decorated)."""
        super().__init__(ni_D, nh_D, nh_G)

    def gen_D(self):
        """Plain MLP discriminator on the raw ni_D-dimensional input."""
        ni_D = self.ni_D
        nh_D = self.nh_D
        D = models.Sequential()
        D.add(Dense(nh_D, activation='relu', input_shape=(ni_D,)))
        D.add(Dense(nh_D, activation='relu'))
        D.add(Dense(1, activation='sigmoid'))
        model_compile(D)
        return D
|
class Machine_Pure(Machine):
    """Machine variant using GAN_Pure (no input decoration in the discriminator)."""

    def __init__(self, n_batch=10, ni_D=100):
        data_mean = 4
        data_stddev = 1.25
        # Fix: the base-class train_each() reads n_iter_D / n_iter_G, which this
        # override never set (AttributeError at runtime in the original).
        self.n_iter_D = 1
        self.n_iter_G = 5
        self.data = Data(data_mean, data_stddev, ni_D)
        self.gan = GAN_Pure(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch
|
def main():
    """Run the toy GAN training/visualization loop with single-sample batches."""
    machine = Machine(n_batch=1, ni_D=100)
    machine.run(n_repeat=200, n_show=200, n_test=100)
|
class Machine(aigen.Machine_Generator):
    """aigen generator machine on a 2% random subset of CIFAR-10 with rotation augmentation."""

    def __init__(self):
        ((x_train, y_train), (x_test, y_test)) = datasets.cifar10.load_data()
        # Keep only 2% of the training data (the second split output).
        (_, X, _, y) = model_selection.train_test_split(x_train, y_train, test_size=0.02)
        X = X.astype(float)
        gen_param_dict = {'rotation_range': 10}
        super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict)
|
def main():
    """Run the augmented CIFAR-10 generator machine."""
    m = Machine()
    m.run()
|
class Machine(aiprt.Machine_Generator):
    """aiprt generator machine on a 2% random subset of CIFAR-10 (no augmentation params)."""

    def __init__(self):
        ((x_train, y_train), (x_test, y_test)) = datasets.cifar10.load_data()
        (_, X, _, y) = model_selection.train_test_split(x_train, y_train, test_size=0.02)
        X = X.astype(float)
        super().__init__(X, y, nb_classes=10)
|
def main():
    """Run the aiprt CIFAR-10 machine."""
    m = Machine()
    m.run()
|
def Lambda_with_lambda():
    """Demonstrate a Keras Lambda layer wrapping a plain lambda that adds 1 elementwise."""
    from keras.layers import Lambda, Input
    from keras.models import Model
    x = Input((1,))
    y = Lambda((lambda x: (x + 1)))(x)
    m = Model(x, y)
    yp = m.predict_on_batch([1, 2, 3])
    print('np.array([1,2,3]) + 1:')
    print(yp)
|
def Lambda_function():
    """Demonstrate a Keras Lambda layer with a named function computing x**2 + 2x + 1 = (x+1)**2."""
    from keras.layers import Lambda, Input
    from keras.models import Model

    def kproc(x):
        return (((x ** 2) + (2 * x)) + 1)

    def kshape(input_shape):
        # Elementwise op: output shape equals input shape.
        return input_shape
    x = Input((1,))
    y = Lambda(kproc, kshape)(x)
    m = Model(x, y)
    yp = m.predict_on_batch([1, 2, 3])
    # Fix: the original banner said "+ 1", but kproc computes (x + 1) ** 2.
    print('(np.array([1,2,3]) + 1) ** 2:')
    print(yp)
|
def Backend_for_Lambda():
    """Lambda layer using Keras backend ops: concatenate x with |x-mean| and (x-mean)^2 (width triples)."""
    from keras.layers import Lambda, Input
    from keras.models import Model
    from keras import backend as K

    def kproc_concat(x):
        m = K.mean(x, axis=1, keepdims=True)
        d1 = K.abs((x - m))
        d2 = K.square((x - m))
        return K.concatenate([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        # Feature dimension triples after the concatenation.
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)
    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch([[1, 2, 3], [3, 4, 8]])
    print(yp)
|
def TF_for_Lamda():
    """Same concat-with-deviations Lambda demo, but built from raw TensorFlow ops.

    NOTE(review): the function-name typo ('Lamda') is kept for caller compatibility;
    tf.reduce_mean's keep_dims argument is the TF1 spelling (keepdims in TF2) -- confirm version.
    """
    from keras.layers import Lambda, Input
    from keras.models import Model
    import tensorflow as tf

    def kproc_concat(x):
        m = tf.reduce_mean(x, axis=1, keep_dims=True)
        d1 = tf.abs((x - m))
        d2 = tf.square((x - m))
        return tf.concat([x, d1, d2], axis=1)

    def kshape_concat(input_shape):
        # Feature dimension triples after the concatenation.
        output_shape = list(input_shape)
        output_shape[1] *= 3
        return tuple(output_shape)
    x = Input((3,))
    y = Lambda(kproc_concat, kshape_concat)(x)
    m = Model(x, y)
    yp = m.predict_on_batch([[1, 2, 3], [3, 4, 8]])
    print(yp)
|
def main():
    """Run all four Lambda-layer demos in sequence."""
    print('Lambda with lambda')
    Lambda_with_lambda()
    print('Lambda function')
    Lambda_function()
    print('Backend for Lambda')
    Backend_for_Lambda()
    print('TF for Lambda')
    TF_for_Lamda()
|
class SFC(Layer):
    """Simple fully-connected custom Keras layer: y = xW + b.

    igu/iz are weight/bias initializers defined elsewhere in the file -- TODO confirm.
    """

    def __init__(self, No, **kwargs):
        # No: number of output units.
        self.No = No
        super().__init__(**kwargs)

    def build(self, inshape):
        self.w = self.add_weight('w', (inshape[1], self.No), initializer=igu)
        self.b = self.add_weight('b', (self.No,), initializer=iz)
        super().build(inshape)

    def call(self, x):
        return (K.dot(x, self.w) + self.b)

    def compute_output_shape(self, inshape):
        return (inshape[0], self.No)
|
def main():
    """Fit SFC on y = 2x + 1 using two points, then predict the held-out rest.

    NOTE(review): this redefines an earlier `main` in the same module; only
    this later definition survives at import time.
    """
    x = np.array([0, 1, 2, 3, 4])
    y = (x * 2) + 1
    model = keras.models.Sequential()
    model.add(SFC(1, input_shape=(1,)))
    model.compile('SGD', 'mse')
    # Train on the first two samples only; the remaining three are held out.
    model.fit(x[:2], y[:2], epochs=1000, verbose=0)
    print('Targets:', y[2:])
    print('Predictions:', model.predict(x[2:]).flatten())
|
class DNN():
    """Two-hidden-layer MLP built as a raw TF1 graph using Keras layers.

    Exposes input placeholders (X_ph, L_ph) and the loss / train /
    accuracy / variable-init graph ops as attributes.
    """

    def __init__(self, Nin, Nh_l, Nout):
        # Graph inputs: feature vectors and one-hot labels.
        self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
        self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
        # Hidden stack: Dense -> Dropout -> Dense -> Dropout.
        hidden = Dense(Nh_l[0], activation='relu')(self.X_ph)
        hidden = Dropout(0.5)(hidden)
        hidden = Dense(Nh_l[1], activation='relu')(hidden)
        hidden = Dropout(0.25)(hidden)
        self.Y_tf = Dense(Nout, activation='softmax')(hidden)
        self.Loss_tf = tf.reduce_mean(categorical_crossentropy(self.L_ph, self.Y_tf))
        self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
        self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
        self.Init_tf = tf.global_variables_initializer()
|
def Data_func():
    """Load MNIST, one-hot encode labels, flatten images, scale to [0, 1]."""
    (X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    _, n_rows, n_cols = X_train.shape
    n_pixels = n_rows * n_cols
    # Flatten each image to a vector and normalize pixel intensities.
    X_train = X_train.reshape(-1, n_pixels) / 255.0
    X_test = X_test.reshape(-1, n_pixels) / 255.0
    return ((X_train, Y_train), (X_test, Y_test))
|
def run(model, data, sess, epochs, batch_size=100):
    """Train `model` on `data` for `epochs`, evaluating on the test set each epoch.

    model: DNN instance (TF1 graph style).
    data: ((X_train, Y_train), (X_test, Y_test)).
    sess: an open tf.Session.
    """
    ((X_train, Y_train), (X_test, Y_test)) = data
    sess.run(model.Init_tf)
    with sess.as_default():
        N_tr = X_train.shape[0]
        for epoch in range(epochs):
            for b in range(N_tr // batch_size):
                # BUG FIX: the slice was [batch_size*(b-1) : batch_size*b],
                # which is EMPTY for b == 0 and selects the wrong window for
                # every batch; use [b*bs : (b+1)*bs].
                start = b * batch_size
                X_tr_b = X_train[start:start + batch_size]
                Y_tr_b = Y_train[start:start + batch_size]
                model.Train_tf.run(feed_dict={model.X_ph: X_tr_b, model.L_ph: Y_tr_b, K.learning_phase(): 1})
            # Evaluate on the full test set with dropout disabled (phase 0).
            loss = sess.run(model.Loss_tf, feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
            acc = model.Acc_tf.eval(feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
            print('Epoch {0}: loss = {1:.3f}, acc = {2:.3f}'.format(epoch, loss, np.mean(acc)))
|
def main():
    """Build the DNN, load MNIST, and train for 10 epochs."""
    Nin = 784
    Nh_l = [100, 50]
    number_of_class = 10
    Nout = number_of_class
    data = Data_func()
    model = DNN(Nin, Nh_l, Nout)
    # BUG FIX: `sess` was referenced without ever being defined in this scope
    # (NameError unless a global session happened to exist); create one here
    # and make sure it is released.
    sess = tf.Session()
    try:
        run(model, data, sess, 10, 100)
    finally:
        sess.close()
|
class CNN(Model):
    """LeNet-style convnet classifier built with the Keras functional API.

    Also exposes `cl_part` (convolutional feature extractor) and `fl_part`
    (flattened dense-feature extractor) sub-models for inspection.
    """

    def __init__(self, nb_classes, in_shape=None):
        self.nb_classes = nb_classes
        self.in_shape = in_shape
        self.build_model()
        super().__init__(self.x, self.y)
        self.compile()

    def build_model(self):
        x = Input(self.in_shape)
        # Conv stack: two 3x3 convolutions, 2x2 max-pool, dropout, flatten.
        h = Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=self.in_shape)(x)
        h = Conv2D(64, (3, 3), activation='relu')(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)
        h = Dropout(0.25)(h)
        h = Flatten()(h)
        z_cl = h  # end of the convolutional part
        h = Dense(128, activation='relu')(h)
        h = Dropout(0.5)(h)
        z_fl = h  # end of the dense-feature part
        y = Dense(self.nb_classes, activation='softmax', name='preds')(h)
        self.cl_part = Model(x, z_cl)
        self.fl_part = Model(x, z_fl)
        self.x, self.y = x, y

    def compile(self):
        Model.compile(self, loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
|
class DataSet():
    """Split (X, y) into train/test, add a channel axis, scale, one-hot labels."""

    def __init__(self, X, y, nb_classes, scaling=True, test_size=0.2, random_state=0):
        """
        X is originally vector. Hence, it will be transformed
        to 2D images with a channel (i.e, 3D).
        """
        self.X = X
        self.add_channels()
        X = self.X
        # BUG FIX: the split previously hard-coded test_size=0.2, silently
        # ignoring the constructor's `test_size` argument.
        (X_train, X_test, y_train, y_test) = model_selection.train_test_split(
            X, y, test_size=test_size, random_state=random_state)
        print(X_train.shape, y_train.shape)
        X_train = X_train.astype('float32')
        X_test = X_test.astype('float32')
        if scaling:
            # Fit the scaler on training data only, then reuse on the test set.
            scaler = MinMaxScaler()
            n = X_train.shape[0]
            X_train = scaler.fit_transform(X_train.reshape(n, -1)).reshape(X_train.shape)
            n = X_test.shape[0]
            X_test = scaler.transform(X_test.reshape(n, -1)).reshape(X_test.shape)
            self.scaler = scaler
        print('X_train shape:', X_train.shape)
        print(X_train.shape[0], 'train samples')
        print(X_test.shape[0], 'test samples')
        Y_train = np_utils.to_categorical(y_train, nb_classes)
        Y_test = np_utils.to_categorical(y_test, nb_classes)
        (self.X_train, self.X_test) = (X_train, X_test)
        (self.Y_train, self.Y_test) = (Y_train, Y_test)
        (self.y_train, self.y_test) = (y_train, y_test)

    def add_channels(self):
        """Reshape grayscale (N, H, W) data to carry an explicit channel axis."""
        X = self.X
        if len(X.shape) == 3:
            (N, img_rows, img_cols) = X.shape
            if K.image_dim_ordering() == 'th':
                # Theano ordering: channels first.
                X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                input_shape = (1, img_rows, img_cols)
            else:
                # TensorFlow ordering: channels last.
                X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                input_shape = (img_rows, img_cols, 1)
        else:
            # Already has a channel axis.
            input_shape = X.shape[1:]
        self.X = X
        self.input_shape = input_shape
|
class Machine():
    """End-to-end harness: wraps DataSet + CNN, trains, evaluates, saves results."""

    def __init__(self, X, y, nb_classes=2, fig=True):
        self.nb_classes = nb_classes
        self.set_data(X, y)
        self.set_model()
        self.fig = fig  # whether to plot accuracy/loss curves after run()

    def set_data(self, X, y):
        nb_classes = self.nb_classes
        self.data = DataSet(X, y, nb_classes)
        print('data.input_shape', self.data.input_shape)

    def set_model(self):
        nb_classes = self.nb_classes
        data = self.data
        self.model = CNN(nb_classes=nb_classes, in_shape=data.input_shape)

    def fit(self, epochs=10, batch_size=128, verbose=1):
        """Train the model, validating against the held-out test split."""
        data = self.data
        model = self.model
        history = model.fit(data.X_train, data.Y_train, batch_size=batch_size,
                            epochs=epochs, verbose=verbose,
                            validation_data=(data.X_test, data.Y_test))
        return history

    def run(self, epochs=100, batch_size=128, verbose=1):
        """Train, evaluate, print a confusion matrix, and save artifacts.

        Returns the name of the folder the artifacts were written to.
        """
        data = self.data
        model = self.model
        fig = self.fig
        history = self.fit(epochs=epochs, batch_size=batch_size, verbose=verbose)
        score = model.evaluate(data.X_test, data.Y_test, verbose=0)
        print('Confusion matrix')
        Y_test_pred = model.predict(data.X_test, verbose=0)
        y_test_pred = np.argmax(Y_test_pred, axis=1)
        print(metrics.confusion_matrix(data.y_test, y_test_pred))
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
        # BUG FIX: 'datatime' was a typo; sfile.unique_filename falls back to a
        # UUID for unrecognized types, so the intended timestamp suffix was
        # never produced.
        suffix = sfile.unique_filename('datetime')
        foldname = 'output_' + suffix
        os.makedirs(foldname)
        skeras.save_history_history('history_history.npy', history.history, fold=foldname)
        model.save_weights(os.path.join(foldname, 'dl_model.h5'))
        print('Output results are saved in', foldname)
        if fig:
            skeras.plot_acc_loss(history)
        self.history = history
        return foldname
|
class Machine_Generator(aicnn.Machine):
    """Machine variant that trains through an ImageDataGenerator pipeline."""

    def __init__(self, X, y, nb_classes=2, steps_per_epoch=10, fig=True, gen_param_dict=None):
        super().__init__(X, y, nb_classes=nb_classes, fig=fig)
        self.set_generator(steps_per_epoch=steps_per_epoch, gen_param_dict=gen_param_dict)

    def set_generator(self, steps_per_epoch=10, gen_param_dict=None):
        # Build the augmentation generator; a None config means identity
        # augmentation (an ImageDataGenerator with all defaults).
        params = gen_param_dict if gen_param_dict is not None else {}
        self.generator = ImageDataGenerator(**params)
        print(self.data.X_train.shape)
        self.generator.fit(self.data.X_train, seed=0)
        self.steps_per_epoch = steps_per_epoch

    def fit(self, epochs=10, batch_size=64, verbose=1):
        """Train via generator batches, validating on the raw test split."""
        data = self.data
        flow = self.generator.flow(data.X_train, data.Y_train, batch_size=batch_size)
        history = self.model.fit_generator(flow, epochs=epochs,
                                           steps_per_epoch=self.steps_per_epoch,
                                           validation_data=(data.X_test, data.Y_test))
        return history
|
class CNN(aicnn.CNN):
    """Transfer-learning CNN: frozen pretrained backbone + trainable head."""

    def __init__(self, input_shape, nb_classes, n_dense=128, p_dropout=0.5,
                 BN_flag=False, PretrainedModel=VGG16):
        """
        If BN_flag is True, BN is used instaed of Dropout
        """
        # Stash configuration before the base __init__ invokes build_model().
        self.in_shape = input_shape
        self.n_dense = n_dense
        self.p_dropout = p_dropout
        self.PretrainedModel = PretrainedModel
        self.BN_flag = BN_flag
        super().__init__(nb_classes)

    def build_model(self):
        base_model = self.PretrainedModel(weights='imagenet', include_top=False,
                                          input_shape=self.in_shape)
        x = base_model.input
        h = base_model.output
        z_cl = h  # pretrained convolutional features
        h = self.topmodel(h)
        z_fl = h  # top-model dense features
        y = Dense(self.nb_classes, activation='softmax', name='preds')(h)
        # Freeze the pretrained backbone; only the top model trains.
        for layer in base_model.layers:
            layer.trainable = False
        self.cl_part = Model(x, z_cl)
        self.fl_part = Model(x, z_fl)
        self.x = x
        self.y = y

    def topmodel(self, h):
        """
        Define topmodel
        if BN_Flag is True, BN is used instead of Dropout
        """
        h = GlobalAveragePooling2D()(h)
        h = Dense(self.n_dense, activation='relu')(h)
        if self.BN_flag:
            h = BatchNormalization()(h)
        else:
            h = Dropout(self.p_dropout)(h)
        return h
|
class DataSet(aicnn.DataSet):
    # DataSet that replicates single-channel input to `n_channels` copies so a
    # pretrained ImageNet backbone (which expects RGB) can consume grayscale
    # data, then applies the pretrained model's preprocess_input.
    def __init__(self, X, y, nb_classes, n_channels=3, scaling=True, test_size=0.2, random_state=0):
        # n_channels: target channel count; 3 triplicates grayscale data.
        self.n_channels = n_channels
        super().__init__(X, y, nb_classes, scaling=scaling, test_size=test_size, random_state=random_state)
    def add_channels(self):
        # Hook invoked by aicnn.DataSet.__init__ before splitting/scaling.
        n_channels = self.n_channels
        if (n_channels == 1):
            # Single-channel request: base-class reshape is sufficient.
            super().add_channels()
        else:
            # NOTE(review): the replication below concatenates exactly three
            # copies, so it assumes n_channels == 3 — confirm for other values.
            X = self.X
            if (X.ndim < 4):
                # No channel axis yet: data is (N, rows, cols).
                (N, img_rows, img_cols) = X.shape
                if (K.image_dim_ordering() == 'th'):
                    # Theano ordering: channels-first.
                    X = X.reshape(X.shape[0], 1, img_rows, img_cols)
                    X = np.concatenate([X, X, X], axis=1)
                    input_shape = (n_channels, img_rows, img_cols)
                else:
                    # TensorFlow ordering: channels-last.
                    X = X.reshape(X.shape[0], img_rows, img_cols, 1)
                    X = np.concatenate([X, X, X], axis=3)
                    input_shape = (img_rows, img_cols, n_channels)
            elif (K.image_dim_ordering() == 'th'):
                # Existing channels-first data; triplicate only if single-channel.
                # NOTE(review): when Ch != 1, input_shape is never assigned in
                # this branch, so the assignment below raises NameError —
                # latent bug, left untouched here.
                (N, Ch, img_rows, img_cols) = X.shape
                if (Ch == 1):
                    X = np.concatenate([X, X, X], axis=1)
                    input_shape = (n_channels, img_rows, img_cols)
            else:
                # Existing channels-last data; triplicate only if single-channel.
                # NOTE(review): same Ch != 1 NameError hazard as above.
                (N, img_rows, img_cols, Ch) = X.shape
                if (Ch == 1):
                    X = np.concatenate([X, X, X], axis=3)
                    input_shape = (img_rows, img_cols, n_channels)
            # Apply the pretrained network's expected input preprocessing.
            X = preprocess_input(X)
            self.X = X
            self.input_shape = input_shape
|
class Machine_Generator(aigen.Machine_Generator):
    """
    This Machine Generator is for pretrained approach.
    """

    def __init__(self, X, y, nb_classes=2, steps_per_epoch=10, n_dense=128,
                 p_dropout=0.5, BN_flag=False, scaling=False,
                 PretrainedModel=VGG16, fig=True, gen_param_dict=None):
        """
        scaling becomes False for DataSet
        """
        # Stash configuration first: the base __init__ calls set_data() and
        # set_model(), which read these attributes.
        self.scaling = scaling
        self.n_dense = n_dense
        self.p_dropout = p_dropout
        self.BN_flag = BN_flag
        self.PretrainedModel = PretrainedModel
        super().__init__(X, y, nb_classes=nb_classes, steps_per_epoch=steps_per_epoch,
                         fig=fig, gen_param_dict=gen_param_dict)

    def set_data(self, X, y):
        # Force 3-channel data so the pretrained (ImageNet) backbone accepts it.
        self.data = DataSet(X, y, self.nb_classes, n_channels=3, scaling=self.scaling)

    def set_model(self):
        self.model = CNN(self.data.input_shape, self.nb_classes,
                         n_dense=self.n_dense, p_dropout=self.p_dropout,
                         BN_flag=self.BN_flag, PretrainedModel=self.PretrainedModel)
|
def coeff_determination(y_true, y_pred):
    """R^2 (coefficient of determination) metric via Keras backend ops."""
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    # Epsilon guards against division by zero for constant targets.
    return 1 - residual_ss / (total_ss + K.epsilon())
|
def unique_filename(type='uuid'):
    """Return a unique filename stem: a timestamp for 'datetime', else a UUID4."""
    if type == 'datetime':
        # yymmdd_HHMMSS, e.g. '240131_235959'.
        return datetime.datetime.now().strftime('%y%m%d_%H%M%S')
    return str(uuid.uuid4())
|
def makenewfold(prefix='output_', type='datetime'):
    """Create and return a new uniquely-named folder `<prefix><unique suffix>`.

    BUG FIX: both `prefix` and `type` were previously ignored — the folder
    name was hard-coded to 'output_' + a datetime suffix regardless of the
    arguments. Defaults preserve the old behavior.
    """
    foldname = prefix + unique_filename(type)
    os.makedirs(foldname)
    return foldname
|
def save_history_history(fname, history_history, fold=''):
    """Persist a Keras history dict to `fold/fname` in numpy's .npy format."""
    target = os.path.join(fold, fname)
    np.save(target, history_history)
|
def load_history_history(fname, fold=''):
    """Load a history dict previously written by save_history_history.

    BUG FIX: numpy >= 1.16.3 defaults np.load to allow_pickle=False, so
    loading the pickled dict raised ValueError; pass allow_pickle=True
    explicitly (the file is produced by this codebase, so it is trusted).
    """
    history_history = np.load(os.path.join(fold, fname), allow_pickle=True).item(0)
    return history_history
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy curves from a Keras History (or its dict)."""
    if not isinstance(history, dict):
        history = history.history
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    if title is not None:
        plt.title(title)
    # BUG FIX: y-axis label was misspelled 'Accracy'.
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training data', 'Validation data'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss curves from a Keras History (or its dict)."""
    hist = history if isinstance(history, dict) else history.history
    plt.plot(hist['loss'])
    plt.plot(hist['val_loss'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training data', 'Validation data'], loc=0)
|
def plot_history(history):
    """Show accuracy and loss curves side by side in a single figure."""
    plt.figure(figsize=(15, 5))
    for panel, plotter in enumerate((plot_acc, plot_loss), start=1):
        plt.subplot(1, 2, panel)
        plotter(history)
|
def plot_loss_acc(history):
    """Show the loss figure, then the accuracy figure."""
    plot_loss(history, '(a) Loss trajectory')
    plt.show()
    # BUG FIX: title was misspelled 'Accracy'.
    plot_acc(history, '(b) Accuracy trajectory')
    plt.show()
|
def plot_acc_loss(history):
    """Show the accuracy figure, then the loss figure."""
    # BUG FIX: title was misspelled 'Accracy'.
    plot_acc(history, '(a) Accuracy trajectory')
    plt.show()
    plot_loss(history, '(b) Loss trajectory')
    plt.show()
|
def save_history_history(fname, history_history, fold=''):
    """Persist a history dict as .npy under `fold`.

    NOTE(review): duplicate of the identical definition earlier in this
    module; this later one wins at import time.
    """
    out_path = os.path.join(fold, fname)
    np.save(out_path, history_history)
|
def load_history_history(fname, fold=''):
    """Load a history dict previously written by save_history_history.

    BUG FIX: numpy >= 1.16.3 defaults np.load to allow_pickle=False, so
    loading the pickled dict raised ValueError; pass allow_pickle=True
    explicitly (the file is produced by this codebase, so it is trusted).
    """
    history_history = np.load(os.path.join(fold, fname), allow_pickle=True).item(0)
    return history_history
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.