# Aggregated code samples: Keras/PyTorch tutorial models, SYQ quantized layers,
# Sphinx hooks, a plotting CLI, and tensorpack-style RL environment utilities.
def Conv2D(filters, kernel_size, padding='same', activation='relu'):
    """Shorthand for layers.Conv2D with 'same' padding and ReLU as defaults."""
    conv_kwargs = {'padding': padding, 'activation': activation}
    return layers.Conv2D(filters, kernel_size, **conv_kwargs)
|
class AE(models.Model):
    """Convolutional autoencoder for 28x28 single-channel images.

    Encoder: two Conv2D + MaxPooling stages; decoder: Conv2D + UpSampling
    stages ending in a sigmoid single-channel reconstruction, trained with
    binary cross-entropy.
    """

    def __init__(self, org_shape=(1, 28, 28)):
        # NOTE(review): the default org_shape is channels-first — confirm it
        # matches backend.image_data_format() at the call site.
        original = layers.Input(shape=org_shape)
        # Encoder: two conv + pool stages.
        x = Conv2D(4, (3, 3))(original)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(8, (3, 3))(x)
        x = layers.MaxPooling2D((2, 2), padding='same')(x)
        # Bottleneck: single-channel feature map.
        z = Conv2D(1, (7, 7))(x)
        # Decoder: conv + upsample stages mirroring the encoder.
        y = Conv2D(16, (3, 3))(z)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(8, (3, 3))(y)
        y = layers.UpSampling2D((2, 2))(y)
        y = Conv2D(4, (3, 3))(y)
        decoded = Conv2D(1, (3, 3), activation='sigmoid')(y)
        super().__init__(original, decoded)
        self.compile(optimizer='adadelta', loss='binary_crossentropy')
|
class DATA():
    """Loads MNIST, reshapes per backend channel order, and scales to [0, 1].

    Exposes: input_shape, num_classes, x_train/y_train, x_test/y_test
    (labels one-hot encoded).
    """

    def __init__(self):
        num_classes = 10
        ((x_train, y_train), (x_test, y_test)) = datasets.mnist.load_data()
        (img_rows, img_cols) = x_train.shape[1:]
        # Insert the channel axis where the active backend expects it.
        if (backend.image_data_format() == 'channels_first'):
            x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
            x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
            input_shape = (1, img_rows, img_cols)
        else:
            x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
            x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
            input_shape = (img_rows, img_cols, 1)
        # Scale pixel values from [0, 255] to [0, 1].
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train /= 255
        x_test /= 255
        # One-hot encode the integer class labels.
        y_train = utils.to_categorical(y_train, num_classes)
        y_test = utils.to_categorical(y_test, num_classes)
        self.input_shape = input_shape
        self.num_classes = num_classes
        (self.x_train, self.y_train) = (x_train, y_train)
        (self.x_test, self.y_test) = (x_test, y_test)
|
def plot_loss(history):
    """Plot training and validation loss curves from a Keras History object."""
    for key in ('loss', 'val_loss'):
        plt.plot(history.history[key])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
|
class ANN(models.Model):
    """One-hidden-layer regression network: Dense+ReLU, linear output, MSE/SGD."""

    def __init__(self, Nin, Nh, Nout):
        x = layers.Input(shape=(Nin,))
        h = layers.Activation('relu')(layers.Dense(Nh)(x))
        y = layers.Dense(Nout)(h)
        super().__init__(x, y)
        self.compile(loss='mse', optimizer='sgd')
|
def Data_func():
    """Load Boston housing data with features min-max scaled.

    The scaler is fit on the training split only, then applied to both splits.
    """
    ((X_train, y_train), (X_test, y_test)) = datasets.boston_housing.load_data()
    scaler = preprocessing.MinMaxScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    return ((X_train, y_train), (X_test, y_test))
|
def main():
    """Train the regression ANN on Boston housing data and plot its loss."""
    model = ANN(13, 5, 1)
    (X_train, y_train), (X_test, y_test) = Data_func()
    history = model.fit(X_train, y_train, epochs=100, batch_size=100,
                        validation_split=0.2, verbose=2)
    test_loss = model.evaluate(X_test, y_test, batch_size=100)
    print('\nTest Loss -> {:.2f}'.format(test_loss))
    plot_loss(history)
    plt.show()
|
def ANN_models_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (functional API)."""
    inputs = layers.Input(shape=(Nin,))
    hidden = layers.Dense(Nh)(inputs)
    hidden = layers.Activation('relu')(hidden)
    outputs = layers.Dense(Nout)(hidden)
    outputs = layers.Activation('softmax')(outputs)
    model = models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
|
def ANN_seq_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (Sequential API)."""
    model = models.Sequential([
        layers.Dense(Nh, activation='relu', input_shape=(Nin,)),
        layers.Dense(Nout, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
|
class ANN_models_class(models.Model):
    """Functional-API MLP classifier: Dense+ReLU hidden layer, softmax output."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class ANN_seq_class(models.Sequential):
    """Sequential-API MLP classifier: Dense+ReLU hidden layer, softmax output."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load MNIST, flatten images to 784-vectors, scale to [0, 1], one-hot labels."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    # NOTE(review): this variant uses np_utils while later copies use utils —
    # confirm which Keras utility module this file actually imports.
    Y_train = np_utils.to_categorical(y_train)
    Y_test = np_utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def plot_loss(history):
    """Plot training and validation loss from a Keras History object."""
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc=0)
|
def plot_acc(history):
    """Plot training and validation accuracy from a Keras History object."""
    # NOTE(review): uses the legacy 'acc'/'val_acc' history keys (Keras < 2.3);
    # newer versions record 'accuracy'/'val_accuracy' — confirm the version used.
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc=0)
|
def main():
    """Train the sequential MNIST classifier and plot loss/accuracy curves."""
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100, validation_split=0.2)
    # evaluate() returns [loss, accuracy] given the compiled metrics.
    performace_test = model.evaluate(X_test, Y_test, batch_size=100)
    print('Test Loss and Accuracy ->', performace_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy; accepts a History object or a dict."""
    metrics = history if isinstance(history, dict) else history.history
    plt.plot(metrics['accuracy'])
    plt.plot(metrics['val_accuracy'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss; accepts a History object or a dict."""
    metrics = history if isinstance(history, dict) else history.history
    plt.plot(metrics['loss'])
    plt.plot(metrics['val_loss'])
    if title is not None:
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
class History():
    """Minimal stand-in for a Keras History: per-metric lists keyed by name."""

    def __init__(self):
        # Build fresh lists per instance (dict.fromkeys would share one list).
        self.history = {key: [] for key in
                        ('accuracy', 'loss', 'val_accuracy', 'val_loss')}
|
class Metrics_Mean():
    """Running mean over recorded loss values (reads each loss object's .data)."""

    def __init__(self):
        self.reset_states()

    def reset_states(self):
        """Drop all values recorded so far."""
        self.buff = []

    def __call__(self, loss):
        # Record the raw value carried by the loss object.
        self.buff.append(loss.data)

    def result(self):
        """Mean of everything recorded since the last reset."""
        return np.mean(self.buff)
|
class Metrics_CategoricalAccuracy():
    """Running categorical accuracy: argmax(predictions) compared to labels."""

    def __init__(self):
        self.reset_states()

    def reset_states(self):
        """Zero both the correct-decision count and the total-label count."""
        self.correct = 0
        self.L = 0

    def __call__(self, labels, predictions):
        # Per-row argmax over class scores gives the predicted class index.
        decisions = predictions.data.max(1)[1]
        self.correct += decisions.eq(labels.data).cpu().sum()
        self.L += len(labels.data)

    def result(self):
        """Fraction of correct decisions accumulated since the last reset."""
        return float(self.correct) / self.L
|
class ANN_models_class(nn.Module):
    """Two-layer MLP classifier: flatten -> Linear+ReLU -> Linear+softmax."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.hidden = nn.Linear(Nin, Nh)
        self.last = nn.Linear(Nh, Nout)
        self.Nin = Nin

    def forward(self, x):
        # Flatten whatever batch shape arrives into (batch, Nin).
        flat = x.view(-1, self.Nin)
        hidden_out = F.relu(self.hidden(flat))
        return F.softmax(self.last(hidden_out), dim=1)
|
def Data_func():
    """Build MNIST train/test DataLoaders (downloads to ~/pytorch_data).

    NOTE(review): batch_size is read from an enclosing/global scope — confirm
    it is defined before this function is called.
    """
    train_dataset = datasets.MNIST('~/pytorch_data', train=True, download=True, transform=transforms.ToTensor())
    test_dataset = datasets.MNIST('~/pytorch_data', train=False, transform=transforms.ToTensor())
    train_ds = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_ds = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    return (train_ds, test_ds)
|
def ANN_models_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (functional API)."""
    x = layers.Input(shape=(Nin,))
    h = layers.Activation('relu')(layers.Dense(Nh)(x))
    y = layers.Activation('softmax')(layers.Dense(Nout)(h))
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
def ANN_seq_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (Sequential API)."""
    model = models.Sequential()
    model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
    model.add(layers.Dense(Nout, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
class ANN_models_class(models.Model):
    """Functional-API MLP classifier (duplicate of the earlier definition)."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class ANN_seq_class(models.Sequential):
    """Sequential-API MLP classifier (duplicate of the earlier definition)."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load MNIST, flatten images to 784-vectors, scale to [0, 1], one-hot labels."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = utils.to_categorical(y_train)
    Y_test = utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    # Flatten each W x H image into a single row vector.
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    # NOTE(review): legacy 'acc'/'val_acc' keys (Keras < 2.3); other copies in
    # this file use 'accuracy'/'val_accuracy'.
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def main():
    """Train the sequential MNIST classifier and plot loss/accuracy curves."""
    Nin = 784
    Nh = 100
    number_of_class = 10
    Nout = number_of_class
    model = ANN_seq_class(Nin, Nh, Nout)
    ((X_train, Y_train), (X_test, Y_test)) = Data_func()
    history = model.fit(X_train, Y_train, epochs=15, batch_size=100, validation_split=0.2)
    # evaluate() returns [loss, accuracy] given the compiled metrics.
    performace_test = model.evaluate(X_test, Y_test, batch_size=100, verbose=0)
    print('Test Loss and Accuracy ->', performace_test)
    plot_loss(history)
    plt.show()
    plot_acc(history)
    plt.show()
|
def ANN_models_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (functional API)."""
    x = layers.Input(shape=(Nin,))
    h = layers.Activation('relu')(layers.Dense(Nh)(x))
    y = layers.Activation('softmax')(layers.Dense(Nout)(h))
    model = models.Model(x, y)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
def ANN_seq_func(Nin, Nh, Nout):
    """Build and compile a one-hidden-layer softmax classifier (Sequential API)."""
    model = models.Sequential()
    model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
    model.add(layers.Dense(Nout, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
|
class ANN_models_class(models.Model):
    """Functional-API MLP classifier (duplicate of the earlier definition)."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
class ANN_seq_class(models.Sequential):
    """Sequential-API MLP classifier (duplicate of the earlier definition)."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
        self.add(layers.Dense(Nout, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
def Data_func():
    """Load MNIST, flatten images to 784-vectors, scale to [0, 1], one-hot labels."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = utils.to_categorical(y_train)
    Y_test = utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['accuracy'])
    plt.plot(history['val_accuracy'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
class ANN_models_class(models.Model):
    """Subclassed two-layer MLP classifier: Dense+ReLU hidden, Dense+softmax out."""

    def __init__(self, Nin, Nh, Nout):
        super().__init__()
        self.hidden = layers.Dense(Nh)
        self.last = layers.Dense(Nout)
        # Create the (stateless) activation layers once here rather than
        # re-instantiating them on every forward pass inside call().
        self.relu = layers.Activation('relu')
        self.softmax = layers.Activation('softmax')

    def call(self, x):
        """Forward pass: hidden Dense + ReLU, then output Dense + softmax."""
        h = self.relu(self.hidden(x))
        y = self.softmax(self.last(h))
        return y
|
def Data_func():
    """Load MNIST, flatten images to 784-vectors, scale to [0, 1], one-hot labels."""
    ((X_train, y_train), (X_test, y_test)) = datasets.mnist.load_data()
    Y_train = utils.to_categorical(y_train)
    Y_test = utils.to_categorical(y_test)
    (L, W, H) = X_train.shape
    X_train = X_train.reshape((- 1), (W * H))
    X_test = X_test.reshape((- 1), (W * H))
    X_train = (X_train / 255.0)
    X_test = (X_test / 255.0)
    return ((X_train, Y_train), (X_test, Y_test))
|
def plot_acc(history, title=None):
    """Plot training/validation accuracy; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['accuracy'])
    plt.plot(history['val_accuracy'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
def plot_loss(history, title=None):
    """Plot training/validation loss; accepts a History object or a dict."""
    if (not isinstance(history, dict)):
        history = history.history
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    if (title is not None):
        plt.title(title)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Verification'], loc=0)
|
class History():
    """Container mimicking Keras History: per-epoch metric lists keyed by name."""

    _METRICS = ('accuracy', 'loss', 'val_accuracy', 'val_loss')

    def __init__(self):
        # Each metric name maps to its own fresh, independent list.
        self.history = dict((name, []) for name in self._METRICS)
|
class _ANN_models_class(models.Model):
    """Functional-API MLP classifier; leading underscore marks it unused/private."""

    def __init__(self, Nin, Nh, Nout):
        hidden = layers.Dense(Nh)
        output = layers.Dense(Nout)
        relu = layers.Activation('relu')
        softmax = layers.Activation('softmax')
        x = layers.Input(shape=(Nin,))
        h = relu(hidden(x))
        y = softmax(output(h))
        super().__init__(x, y)
        self.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
|
@tf2.function
def ep_train(xx, yy):
    """One compiled training step: forward, loss, backprop, metric updates.

    Relies on module-level model, Loss_object, Optimizer, train_loss and
    train_accuracy.
    """
    with tf2.GradientTape() as tape:
        yp = model(xx)
        loss = Loss_object(yy, yp)
    gradients = tape.gradient(loss, model.trainable_variables)
    Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(yy, yp)
|
@tf2.function
def ep_test(xx, yy):
    """One compiled evaluation step: forward pass and test metric updates.

    Relies on module-level model, Loss_object, test_loss and test_accuracy.
    """
    yp = model(xx)
    t_loss = Loss_object(yy, yp)
    test_loss(t_loss)
    test_accuracy(yy, yp)
|
class MyModel(Model):
    """Small CNN classifier: Conv2D(32) -> Flatten -> Dense(128) -> Dense(10)."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32, 3, activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
|
@tf.function
def train_step(images, labels):
    """One compiled training step: forward, loss, backprop, metric updates.

    Relies on module-level model, loss_object, optimizer, train_loss and
    train_accuracy.
    """
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, predictions)
|
@tf.function
def test_step(images, labels):
    """One compiled evaluation step: forward pass and test metric updates.

    Relies on module-level model, loss_object, test_loss and test_accuracy.
    """
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
|
def fixed_point(x, k, fraclength=None, signed=True):
    """Quantize x onto a k-bit fixed-point grid with a straight-through gradient.

    Args:
        x: input tensor.
        k: total bit width.
        fraclength: number of fractional bits. When given, x is clipped to the
            representable range before rounding; when None, x is rounded onto
            a (2**k - 1)-step grid without clipping.
        signed: whether the representable range is symmetric around zero
            (only used when fraclength is given).

    Returns:
        Tensor equal to the rounded value in the forward pass while the
        backward pass sees the identity (tf.stop_gradient trick).
    """
    if fraclength is not None:  # fix: identity comparison with None, not '!='
        f = fraclength
        n = float((2.0 ** f))  # quantization steps per unit
        mn = (- (2.0 ** ((k - f) - 1)))
        mx = ((- mn) - (2.0 ** (- f)))
        if (not signed):
            # Shift the representable range so it starts at zero.
            mx -= mn
            mn = 0
        x = tf.clip_by_value(x, mn, mx)
    else:
        n = float(((2 ** k) - 1))
    # Forward: round(x * n) / n; backward: gradient flows straight through x.
    return (x + tf.stop_gradient(((tf.floor(((x * n) + 0.5)) / n) - x)))
|
def quantize(x, bit_width, frac_bits=None, signed=None):
    """Quantize tensor x to bit_width bits with straight-through gradients.

    bit_width None: identity. 1: binarize via sign(). 2: ternarize (zero
    where x < 0.33, sign elsewhere). Otherwise: clip to [-1, 1] and round
    onto a bit_width-bit fixed-point grid via fixed_point().
    frac_bits and signed are accepted but currently unused.
    """
    if (bit_width is None):
        return x
    elif (bit_width == 1):
        # Forward: sign(x); backward: identity (straight-through estimator).
        return (x + tf.stop_gradient((tf.sign(x) - x)))
    elif (bit_width == 2):
        ones = tf.ones_like(x)
        zeros = (ones * 0)
        # Zero out entries below the 0.33 threshold, binarize the rest.
        mask = tf.where((x < 0.33), zeros, ones)
        binary = (x + tf.stop_gradient((tf.sign(x) - x)))
        ternary = (binary * mask)
        return ternary
    else:
        # Map [-1, 1] -> [0, 1], quantize there, then map back to [-1, 1].
        x = tf.clip_by_value(x, (- 1), 1)
        x = ((x * 0.5) + 0.5)
        return ((2 * fixed_point(x, bit_width)) - 1)
|
class SYQ(Conv2D):
    """Conv2D variant with an SYQ-style quantized kernel and learned scale.

    NOTE(review): build() replaces self.kernel with the quantized expression
    quantize(kernel) * scale, so the stored variable and the kernel applied
    by the parent's call() differ — verify this is the intended behavior.
    """

    def __init__(self, bit_width, *args, **kwargs):
        # Bit width consumed by quantize(); remaining args go to Conv2D.
        self.bit_width = bit_width
        super(SYQ, self).__init__(*args, **kwargs)

    def get_config(self):
        # Record bit_width so the layer can be re-created from its config.
        config = super().get_config()
        config['bit_width'] = self.bit_width
        return config

    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape.dims[channel_axis].value is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        kernel_shape = (self.kernel_size + (input_dim, self.filters))
        self.kernel = self.add_weight(name='kernel', shape=kernel_shape, initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, trainable=True, dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias', shape=(self.filters,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, trainable=True, dtype=self.dtype)
        else:
            self.bias = None
        # Learned elementwise scale applied on top of the quantized kernel.
        self.scale = self.add_weight('scale', shape=kernel_shape, initializer=keras.initializers.Ones(), dtype=self.dtype, trainable=True)
        self.kernel = (quantize(self.kernel, self.bit_width) * self.scale)
        self.input_spec = InputSpec(ndim=(self.rank + 2), axes={channel_axis: input_dim})
        if (self.padding == 'causal'):
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if (not isinstance(op_padding, (list, tuple))):
            op_padding = op_padding.upper()
        self._convolution_op = nn_ops.Convolution(input_shape, filter_shape=self.kernel.shape, dilation_rate=self.dilation_rate, strides=self.strides, padding=op_padding, data_format=conv_utils.convert_data_format(self.data_format, (self.rank + 2)))
        self.built = True
|
class SYQ_Dense(Dense):
    """Dense variant with an SYQ-style quantized kernel and a learned scalar scale.

    NOTE(review): like SYQ, build() replaces self.kernel with the quantized
    expression — verify the interaction with the parent layer's call().
    """

    def __init__(self, bit_width, *args, **kwargs):
        # Bit width consumed by quantize(); remaining args go to Dense.
        self.bit_width = bit_width
        super(SYQ_Dense, self).__init__(*args, **kwargs)

    def get_config(self):
        # Record bit_width so the layer can be re-created from its config.
        config = super().get_config()
        config['bit_width'] = self.bit_width
        return config

    def build(self, input_shape):
        dtype = dtypes.as_dtype((self.dtype or K.floatx()))
        if (not (dtype.is_floating or dtype.is_complex)):
            raise TypeError(('Unable to build `Dense` layer with non-floating point dtype %s' % (dtype,)))
        input_shape = tensor_shape.TensorShape(input_shape)
        if (tensor_shape.dimension_value(input_shape[(- 1)]) is None):
            raise ValueError('The last dimension of the inputs to `Dense` should be defined. Found `None`.')
        last_dim = tensor_shape.dimension_value(input_shape[(- 1)])
        self.input_spec = InputSpec(min_ndim=2, axes={(- 1): last_dim})
        self.kernel = self.add_weight('kernel', shape=[last_dim, self.units], initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, dtype=self.dtype, trainable=True)
        # Single learned scalar scale (unlike SYQ's per-weight scale).
        self.scale = self.add_weight('scale', shape=[1], initializer=keras.initializers.Ones(), dtype=self.dtype, trainable=True)
        self.kernel = (quantize(self.kernel, self.bit_width) * self.scale)
        if self.use_bias:
            self.bias = self.add_weight('bias', shape=[self.units], initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, dtype=self.dtype, trainable=True)
        else:
            self.bias = None
        self.built = True
|
class Model():
    """Builds, trains, and evaluates an SYQ-quantized MNIST CNN.

    NOTE(review): the data (train_images, train_labels, test_images,
    test_labels) is read from module-level globals — confirm they are
    defined before training/evaluation.
    """

    def __init__(self, bit_width=None, model_name=None, load=None):
        self.bit_width = bit_width
        self.load = load
        self.model_name = model_name
        self.model = keras.Sequential([SYQ(self.bit_width, 32, (3, 3), activation='relu', input_shape=(28, 28, 1)), SYQ(self.bit_width, 32, (3, 3), activation='relu'), Flatten(), SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu), SYQ_Dense(self.bit_width, 128, activation=tf.nn.relu), Dense(10, activation=tf.nn.softmax)])
        print(self.model.get_config())

    def train_model(self):
        """Optionally resume from self.load, train one epoch, save to '<model_name>.h5'."""
        if (self.load is not None):
            # Bug fix: use the instance attribute (as evaluate_model does),
            # not the CLI `args` global.
            self.model = load_model(self.load)
        assert (self.model_name is not None)
        self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        self.model.fit(train_images, train_labels, epochs=1)
        # Bug fix: save under the instance's model_name, not the `args` global.
        self.model.save((self.model_name + '.h5'))

    def evaluate_model(self):
        """Optionally load a saved model (with custom SYQ layers), report accuracy."""
        if (self.load is not None):
            self.model = load_model(self.load, custom_objects={'SYQ': SYQ, 'SYQ_Dense': SYQ_Dense})
        (test_loss, test_acc) = self.model.evaluate(test_images, test_labels)
        print('Test accuracy:', test_acc)
        predictions = self.model.predict(test_images)
|
def skip(app, what, name, obj, skip, options):
    """Sphinx autodoc-skip-member hook: always document __init__ methods."""
    return False if name == '__init__' else skip
|
def process_signature(app, what, name, obj, options, signature, return_annotation):
    """Sphinx hook: unwrap Mock placeholders and shorten 'tensorflow' to 'tf'."""
    if signature:
        for pattern, repl in (("<Mock name='([^']+)'.*>", '\\g<1>'),
                              ('tensorflow', 'tf')):
            signature = re.sub(pattern, repl, signature)
    return (signature, return_annotation)
|
def setup(app):
    """Sphinx extension entry point: register hooks and recommonmark config."""
    from recommonmark.transform import AutoStructify
    app.connect('autodoc-process-signature', process_signature)
    app.connect('autodoc-skip-member', skip)
    # Resolve relative markdown links against the tensorpack GitHub tree.
    app.add_config_value('recommonmark_config', {'url_resolver': (lambda url: ('https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/' + url)), 'auto_toc_tree_section': 'Contents', 'enable_math': True, 'enable_inline_math': True}, True)
    app.add_transform(AutoStructify)
|
def get_args():
    """Parse command-line options into the module-level `args` namespace.

    Forces --show on when no output image path was requested.
    """
    description = 'plot points into graph.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-i', '--input', help='input data file, use "-" for stdin. Default stdin. Input format is many rows of DELIMIETER-separated data', default='-')
    parser.add_argument('-o', '--output', help='output image', default='')
    parser.add_argument('--show', help='show the figure after rendered', action='store_true')
    parser.add_argument('-c', '--column', help="describe each column in data, for example 'x,y,y'. Default to 'y' for one column and 'x,y' for two columns. Plot attributes can be appended after 'y', like 'ythick;cr'. By default, assume all columns are y. ")
    parser.add_argument('-t', '--title', help='title of the graph', default='')
    parser.add_argument('--xlabel', help='x label', type=six.text_type)
    parser.add_argument('--ylabel', help='y label', type=six.text_type)
    parser.add_argument('--xlim', help='x lim', type=float, nargs=2)
    parser.add_argument('--ylim', help='y lim', type=float, nargs=2)
    parser.add_argument('-s', '--scale', help='scale of each y, separated by comma')
    parser.add_argument('--annotate-maximum', help='annonate maximum value in graph', action='store_true')
    parser.add_argument('--annotate-minimum', help='annonate minimum value in graph', action='store_true')
    parser.add_argument('--xkcd', help='xkcd style', action='store_true')
    parser.add_argument('--decay', help='exponential decay rate to smooth Y', type=float, default=0)
    parser.add_argument('-l', '--legend', help='legend for each y')
    parser.add_argument('-d', '--delimeter', help='column delimeter', default='\t')
    global args
    args = parser.parse_args()
    # With neither --show nor an output file, showing is the only useful mode.
    if ((not args.show) and (not args.output)):
        args.show = True
|
def filter_valid_range(points, rect):
    """Keep points inside rect = (min_x, max_x, min_y, max_y).

    Falls back to the first point when none fit, so the result is never empty.
    """
    min_x, max_x, min_y, max_y = rect
    ret = [(x, y) for (x, y) in points
           if min_x <= x <= max_x and min_y <= y <= max_y]
    if not ret:
        ret.append(points[0])
    return ret
|
def exponential_smooth(data, alpha):
    """Return an exponentially smoothed copy of data (decay rate alpha)."""
    smoothed = np.copy(data)
    prev = data[0]
    for idx, value in enumerate(data):
        # Blend the running value with the raw sample.
        smoothed[idx] = prev * alpha + value * (1 - alpha)
        prev = smoothed[idx]
    return smoothed
|
def annotate_min_max(data_x, data_y, ax):
    """Annotate the maximum and/or minimum point of a curve on axis `ax`.

    Candidate label positions around each extremum are screened with
    filter_valid_range() so the text stays inside the axis rectangle.
    Controlled by the globals args.annotate_maximum / args.annotate_minimum.
    """
    (max_x, min_x) = (max(data_x), min(data_x))
    (max_y, min_y) = (max(data_y), min(data_y))
    x_range = (max_x - min_x)
    y_range = (max_y - min_y)
    # Bug fix: x_max was initialized from data_y[0] instead of data_x[0],
    # so the maximum annotation could point at a wrong x coordinate.
    (x_max, y_max) = (data_x[0], data_y[0])
    (x_min, y_min) = (data_x[0], data_y[0])
    # Track the argmax/argmin positions along the curve.
    for i in range(1, len(data_x)):
        if (data_y[i] > y_max):
            y_max = data_y[i]
            x_max = data_x[i]
        if (data_y[i] < y_min):
            y_min = data_y[i]
            x_min = data_x[i]
    rect = ax.axis()
    if args.annotate_maximum:
        (text_x, text_y) = filter_valid_range([((x_max + (0.05 * x_range)), (y_max + (0.025 * y_range))), ((x_max - (0.05 * x_range)), (y_max + (0.025 * y_range))), ((x_max + (0.05 * x_range)), (y_max - (0.025 * y_range))), ((x_max - (0.05 * x_range)), (y_max - (0.025 * y_range)))], rect)[0]
        ax.annotate('maximum ({:d},{:.3f})'.format(int(x_max), y_max), xy=(x_max, y_max), xytext=(text_x, text_y), arrowprops=dict(arrowstyle='->'))
    if args.annotate_minimum:
        (text_x, text_y) = filter_valid_range([((x_min + (0.05 * x_range)), (y_min - (0.025 * y_range))), ((x_min - (0.05 * x_range)), (y_min - (0.025 * y_range))), ((x_min + (0.05 * x_range)), (y_min + (0.025 * y_range))), ((x_min - (0.05 * x_range)), (y_min + (0.025 * y_range)))], rect)[0]
        ax.annotate('minimum ({:d},{:.3f})'.format(int(x_min), y_min), xy=(x_min, y_min), xytext=(text_x, text_y), arrowprops=dict(arrowstyle='->'))
|
def plot_args_from_column_desc(desc):
    """Translate a ';'-separated style description ('thick', 'dash', 'c<color>')
    into matplotlib plot kwargs."""
    if not desc:
        return {}
    tokens = desc.split(';')
    ret = {}
    if 'thick' in tokens:
        ret['lw'] = 5
    if 'dash' in tokens:
        ret['ls'] = '--'
    # A token starting with 'c' names the line color, e.g. 'cr' -> red.
    for token in tokens:
        if token.startswith('c'):
            ret['color'] = token[1:]
    return ret
|
def do_plot(data_xs, data_ys):
    """Render all y-series against their x-series on a single figure.

    data_xs: list of 1d arrays, either of size 1 (shared x) or len(data_ys)
    data_ys: list of 1d arrays
    Reads plotting options from the module-level `args` namespace.
    """
    fig = plt.figure(figsize=((16.18 / 1.2), (10 / 1.2)))
    ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
    nr_y = len(data_ys)
    y_column = args.y_column
    if args.legend:
        legends = args.legend.split(',')
        assert (len(legends) == nr_y)
    else:
        legends = None
    if args.scale:
        # Bug fix: map() returns an iterator on Python 3, so len() and
        # scale[yidx] below would fail — materialize it into a list.
        scale = list(map(float, args.scale.split(',')))
        assert (len(scale) == nr_y)
    else:
        scale = ([1.0] * nr_y)
    for yidx in range(nr_y):
        # Style tokens follow the leading 'y' in the column description.
        plotargs = plot_args_from_column_desc(y_column[yidx][1:])
        now_scale = scale[yidx]
        data_y = (data_ys[yidx] * now_scale)
        leg = (legends[yidx] if legends else None)
        if (now_scale != 1):
            # Reflect the scaling factor in the legend entry.
            leg = '{}*{}'.format((now_scale if (int(now_scale) != now_scale) else int(now_scale)), leg)
        data_x = (data_xs[0] if (len(data_xs) == 1) else data_xs[yidx])
        assert (len(data_x) >= len(data_y)), 'x column is shorter than y column! {} < {}'.format(len(data_x), len(data_y))
        truncate_data_x = data_x[:len(data_y)]
        p = plt.plot(truncate_data_x, data_y, label=leg, **plotargs)
        c = p[0].get_color()
        plt.fill_between(truncate_data_x, data_y, alpha=0.1, facecolor=c)
        if (args.annotate_maximum or args.annotate_minimum):
            annotate_min_max(truncate_data_x, data_y, ax)
    if args.xlabel:
        plt.xlabel(args.xlabel, fontsize='xx-large')
    if args.ylabel:
        plt.ylabel(args.ylabel, fontsize='xx-large')
    if args.xlim:
        plt.xlim(args.xlim[0], args.xlim[1])
    if args.ylim:
        plt.ylim(args.ylim[0], args.ylim[1])
    plt.legend(loc='best', fontsize='xx-large')
    # Pad the right edge by 5% of the x span (uses the last series' x data).
    (minx, maxx) = (min(data_x), max(data_x))
    new_maxx = (maxx + ((maxx - minx) * 0.05))
    plt.xlim(minx, new_maxx)
    for label in chain.from_iterable([ax.get_xticklabels(), ax.get_yticklabels()]):
        label.set_fontproperties(fontm.FontProperties(size=15))
    ax.grid(color='gray', linestyle='dashed')
    plt.title(args.title, fontdict={'fontsize': '20'})
    if (args.output != ''):
        plt.savefig(args.output, bbox_inches='tight')
    if args.show:
        plt.show()
|
def main():
    """Read delimited columns from stdin/file, parse x/y columns, and plot them."""
    get_args()
    if (args.input == STDIN_FNAME):
        fin = sys.stdin
    else:
        fin = open(args.input)
    all_inputs = fin.readlines()
    if (args.input != STDIN_FNAME):
        fin.close()
    # Infer the column count from the first row.
    nr_column = len(all_inputs[0].rstrip('\n').split(args.delimeter))
    if (args.column is None):
        column = (['y'] * nr_column)
    else:
        column = args.column.strip().split(',')
    for k in column:
        assert (k[0] in ['x', 'y'])
    assert (nr_column == len(column)), "Column and data doesn't have same length. {}!={}".format(nr_column, len(column))
    # Split the column descriptions into x and y roles with their indices.
    args.y_column = [v for v in column if (v[0] == 'y')]
    args.y_column_idx = [idx for (idx, v) in enumerate(column) if (v[0] == 'y')]
    args.x_column = [v for v in column if (v[0] == 'x')]
    args.x_column_idx = [idx for (idx, v) in enumerate(column) if (v[0] == 'x')]
    nr_x_column = len(args.x_column)
    nr_y_column = len(args.y_column)
    if (nr_x_column > 1):
        assert (nr_x_column == nr_y_column), 'If multiple x columns are used, nr_x_column must equals to nr_y_column'
    x_column_set = set(args.x_column)
    data = [[] for _ in range(nr_column)]
    ended = defaultdict(bool)
    data_format = (- 1)
    # Parse each row; an empty cell marks the end of that column's data.
    for (lineno, line) in enumerate(all_inputs):
        line = line.rstrip('\n').split(args.delimeter)
        assert (len(line) <= nr_column), 'One row have too many columns (separated by {})!\nLine: {}'.format(repr(args.delimeter), line)
        for (idx, val) in enumerate(line):
            if (val == ''):
                ended[idx] = True
                continue
            else:
                val = float(val)
                assert (not ended[idx]), 'Column {} has hole!'.format(idx)
                data[idx].append(val)
    data_ys = [data[k] for k in args.y_column_idx]
    length_ys = [len(t) for t in data_ys]
    print('Length of each column:', length_ys)
    max_ysize = max(length_ys)
    if nr_x_column:
        data_xs = [data[k] for k in args.x_column_idx]
    else:
        # No x columns given: use 1..N as the shared x axis.
        data_xs = [list(range(1, (max_ysize + 1)))]
    for (idx, data_y) in enumerate(data_ys):
        data_ys[idx] = np.asarray(data_y)
        if (args.decay != 0):
            data_ys[idx] = exponential_smooth(data_y, args.decay)
    for (idx, data_x) in enumerate(data_xs):
        data_xs[idx] = np.asarray(data_x)
    if args.xkcd:
        with plt.xkcd():
            do_plot(data_xs, data_ys)
    else:
        do_plot(data_xs, data_ys)
|
def _global_import(name):
    """Import sibling module `name` and splice its public names into this
    module's globals (respects __all__ when the submodule defines one)."""
    p = __import__(name, globals(), locals(), level=1)
    lst = (p.__all__ if ('__all__' in dir(p)) else dir(p))
    # Drop the module object itself; only its members are re-exported.
    del globals()[name]
    for k in lst:
        globals()[k] = p.__dict__[k]
|
class PreventStuckPlayer(ProxyPlayer):
    """Prevent the player from getting stuck repeating a no-op action.

    After `nr_repeat` identical actions in a row, substitutes the trigger
    action instead — useful in games such as Atari Breakout where the agent
    needs to press 'start' to begin playing. Auto-resets its history on
    episode end, but doesn't auto-restart the underlying player.
    """

    def __init__(self, player, nr_repeat, action):
        """
        :param nr_repeat: trigger the 'action' after this many repeated actions
        :param action: the action to be triggered to get out of stuck
        """
        super(PreventStuckPlayer, self).__init__(player)
        self.act_que = deque(maxlen=nr_repeat)
        self.trigger_action = action

    def action(self, act):
        self.act_que.append(act)
        # Queue full of one repeated action -> override with the trigger.
        if (self.act_que.count(self.act_que[0]) == self.act_que.maxlen):
            act = self.trigger_action
        (r, isOver) = self.player.action(act)
        if isOver:
            self.act_que.clear()
        return (r, isOver)

    def restart_episode(self):
        super(PreventStuckPlayer, self).restart_episode()
        self.act_que.clear()
|
class LimitLengthPlayer(ProxyPlayer):
    """Limit the total number of actions in an episode.

    Auto-restarts the underlying player on timeout.
    """

    def __init__(self, player, limit):
        super(LimitLengthPlayer, self).__init__(player)
        self.limit = limit
        self.cnt = 0

    def action(self, act):
        (r, isOver) = self.player.action(act)
        self.cnt += 1
        if (self.cnt >= self.limit):
            # Action budget exhausted: force episode end and restart.
            isOver = True
            self.finish_episode()
            self.restart_episode()
        if isOver:
            self.cnt = 0
        return (r, isOver)

    def restart_episode(self):
        self.player.restart_episode()
        self.cnt = 0
|
class AutoRestartPlayer(ProxyPlayer):
    """Auto-restart the underlying player whenever an episode ends,
    in case some player wasn't designed to do so."""

    def action(self, act):
        (r, isOver) = self.player.action(act)
        if isOver:
            self.player.finish_episode()
            self.player.restart_episode()
        return (r, isOver)
|
class MapPlayerState(ProxyPlayer):
    """Proxy that applies `func` to every state returned by the wrapped player."""

    def __init__(self, player, func):
        super(MapPlayerState, self).__init__(player)
        self.func = func

    def current_state(self):
        return self.func(self.player.current_state())
|
@six.add_metaclass(ABCMeta)
class RLEnvironment(object):
    """Abstract base class for reinforcement-learning environments."""

    def __init__(self):
        self.reset_stat()

    @abstractmethod
    def current_state(self):
        """Observe, return a state representation."""

    @abstractmethod
    def action(self, act):
        """Perform an action; will automatically start a new episode if isOver==True.

        :param act: the action
        :returns: (reward, isOver)
        """

    def restart_episode(self):
        """Start a new episode, even if the current one hasn't ended."""
        raise NotImplementedError()

    def finish_episode(self):
        """Called when an episode finished."""
        pass

    def get_action_space(self):
        """Return an `ActionSpace` instance."""
        raise NotImplementedError()

    def reset_stat(self):
        """Reset all statistics counters."""
        self.stats = defaultdict(list)

    def play_one_episode(self, func, stat='score'):
        """Play one episode for evaluation.

        :param func: called with the state, returns an action
        :param stat: a key or list of keys in stats
        :returns: the stat(s) after running this episode
        """
        if (not isinstance(stat, list)):
            stat = [stat]
        while True:
            s = self.current_state()
            act = func(s)
            (r, isOver) = self.action(act)
            if isOver:
                s = [self.stats[k] for k in stat]
                self.reset_stat()
                # Unwrap a single-stat result for convenience.
                return (s if (len(s) > 1) else s[0])
|
class ActionSpace(object):
    # Base class of an action space; owns its own RNG for sampling.
    def __init__(self):
        # get_rng is a project utility returning an rng seeded per object.
        self.rng = get_rng(self)

    @abstractmethod
    def sample(self):
        # Return one randomly sampled action.
        pass

    def num_actions(self):
        # Number of discrete actions; not every space supports this.
        raise NotImplementedError()
|
class DiscreteActionSpace(ActionSpace):
    """An action space of `num` discrete actions, indexed [0, num)."""

    def __init__(self, num):
        super(DiscreteActionSpace, self).__init__()
        self.num = num

    def sample(self):
        # Uniformly pick an action index using this space's own rng.
        return self.rng.randint(self.num)

    def num_actions(self):
        return self.num

    def __repr__(self):
        return 'DiscreteActionSpace({})'.format(self.num)

    # str() and repr() render identically.
    __str__ = __repr__
|
class NaiveRLEnvironment(RLEnvironment):
    """A dummy environment, for testing only."""

    def __init__(self):
        # Deliberately skips RLEnvironment.__init__ (no stats needed).
        self.k = 0

    def current_state(self):
        self.k += 1
        return self.k

    def action(self, act):
        self.k = act
        return self.k, self.k > 10
|
class ProxyPlayer(RLEnvironment):
    """Serve as a proxy to another player: every call is forwarded verbatim."""

    def __init__(self, player):
        # Note: RLEnvironment.__init__ (reset_stat) is intentionally not
        # called; `stats` is proxied from the wrapped player instead.
        self.player = player

    def reset_stat(self):
        self.player.reset_stat()

    def current_state(self):
        return self.player.current_state()

    def action(self, act):
        return self.player.action(act)

    @property
    def stats(self):
        return self.player.stats

    def restart_episode(self):
        self.player.restart_episode()

    def finish_episode(self):
        self.player.finish_episode()

    def get_action_space(self):
        return self.player.get_action_space()
|
class GymEnv(RLEnvironment):
    """An OpenAI gym wrapper. Can optionally auto-restart.

    Only supports discrete action spaces for now.
    """

    def __init__(self, name, dumpdir=None, viz=False, auto_restart=True):
        # :param name: gym environment id, passed to gym.make
        # :param dumpdir: if set, record monitor output to this directory
        # :param viz: False to disable; otherwise a number used as the
        #     per-frame sleep time (seconds) when rendering
        # :param auto_restart: restart the episode automatically on game over
        with _ENV_LOCK:
            # Environment creation is serialized behind a module-level lock.
            self.gymenv = gym.make(name)
        if dumpdir:
            mkdir_p(dumpdir)
            # NOTE(review): `.monitor` is the legacy gym monitor API —
            # confirm the gym version pinned by this project still has it.
            self.gymenv.monitor.start(dumpdir)
        self.use_dir = dumpdir
        self.reset_stat()
        self.rwd_counter = StatCounter()
        self.restart_episode()
        self.auto_restart = auto_restart
        self.viz = viz

    def restart_episode(self):
        # Reset the reward accumulator and fetch the initial observation.
        self.rwd_counter.reset()
        self._ob = self.gymenv.reset()

    def finish_episode(self):
        if (self.use_dir is not None):
            self.gymenv.monitor.flush()
        # Record this episode's total reward under the 'score' statistic.
        self.stats['score'].append(self.rwd_counter.sum)

    def current_state(self):
        if self.viz:
            self.gymenv.render()
            time.sleep(self.viz)
        return self._ob

    def action(self, act):
        # Step the environment; gym's `info` dict is ignored.
        (self._ob, r, isOver, info) = self.gymenv.step(act)
        self.rwd_counter.feed(r)
        if isOver:
            self.finish_episode()
            if self.auto_restart:
                self.restart_episode()
        return (r, isOver)

    def get_action_space(self):
        spc = self.gymenv.action_space
        assert isinstance(spc, gym.spaces.discrete.Discrete)
        return DiscreteActionSpace(spc.n)
|
class HistoryFramePlayer(ProxyPlayer):
    """Stack history frames into the state, padding with black frames.

    Assumes the wrapped player auto-restarts on episode end.
    """

    def __init__(self, player, hist_len):
        """
        :param hist_len: total length of the state, including the current
            frame and `hist_len - 1` history frames
        """
        super(HistoryFramePlayer, self).__init__(player)
        self.history = deque(maxlen=hist_len)
        self.history.append(self.player.current_state())

    def current_state(self):
        assert len(self.history) != 0
        missing = self.history.maxlen - len(self.history)
        if missing == 0:
            return np.concatenate(self.history, axis=2)
        # Pad the front of the window with black frames until it is full.
        frames = [np.zeros_like(self.history[0]) for _ in range(missing)]
        frames.extend(self.history)
        assert len(frames) == self.history.maxlen
        return np.concatenate(frames, axis=2)

    def action(self, act):
        reward, done = self.player.action(act)
        state = self.player.current_state()
        self.history.append(state)
        if done:
            # Episode ended: keep only the first state of the new episode.
            self.history.clear()
            self.history.append(state)
        return reward, done

    def restart_episode(self):
        super(HistoryFramePlayer, self).restart_episode()
        self.history.clear()
        self.history.append(self.player.current_state())
|
class TransitionExperience(object):
    """One transition: (state, action, reward) plus arbitrary extras."""

    def __init__(self, state, action, reward, **kwargs):
        """kwargs: any other attribute worth saving with the transition."""
        self.state = state
        self.action = action
        self.reward = reward
        for key in kwargs:
            setattr(self, key, kwargs[key])
|
@six.add_metaclass(ABCMeta)
class SimulatorProcessBase(mp.Process):
    """Base class of a simulator process, named/identified by its index."""

    def __init__(self, idx):
        super(SimulatorProcessBase, self).__init__()
        self.idx = int(idx)
        self.name = u'simulator-{}'.format(self.idx)
        # zmq socket identities must be bytes.
        self.identity = self.name.encode('utf-8')

    @abstractmethod
    def _build_player(self):
        """Create and return the player this process will simulate."""
|
class SimulatorProcessStateExchange(SimulatorProcessBase):
    """A process that simulates a player and talks to the master:
    it sends states and receives the next action.
    """

    def __init__(self, idx, pipe_c2s, pipe_s2c):
        """
        :param idx: idx of this process
        :param pipe_c2s: zmq address for client-to-server (state) messages
        :param pipe_s2c: zmq address for server-to-client (action) messages
        """
        super(SimulatorProcessStateExchange, self).__init__(idx)
        self.c2s = pipe_c2s
        self.s2c = pipe_s2c

    def run(self):
        # Build the player and sockets inside the child process, not __init__.
        player = self._build_player()
        context = zmq.Context()
        c2s_socket = context.socket(zmq.PUSH)
        c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        # Small high-water mark: block rather than queue many stale states.
        c2s_socket.set_hwm(2)
        c2s_socket.connect(self.c2s)
        s2c_socket = context.socket(zmq.DEALER)
        s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
        s2c_socket.connect(self.s2c)
        state = player.current_state()
        (reward, isOver) = (0, False)
        while True:
            # Send (identity, state, reward-of-previous-action, isOver),
            # then block until the master replies with the next action.
            c2s_socket.send(dumps((self.identity, state, reward, isOver)), copy=False)
            action = loads(s2c_socket.recv(copy=False).bytes)
            (reward, isOver) = player.action(action)
            state = player.current_state()
|
class SimulatorMaster(threading.Thread):
    """A base thread communicating with all SimulatorProcessStateExchange.

    It should produce an action for each simulator, and defines callbacks
    for when a transition or an episode is finished.
    """

    class ClientState(object):
        # Per-simulator bookkeeping: a list of transition experiences.
        def __init__(self):
            self.memory = []

    def __init__(self, pipe_c2s, pipe_s2c):
        super(SimulatorMaster, self).__init__()
        self.daemon = True
        self.name = 'SimulatorMaster'
        self.context = zmq.Context()
        # PULL socket: receives states from all simulator processes.
        self.c2s_socket = self.context.socket(zmq.PULL)
        self.c2s_socket.bind(pipe_c2s)
        self.c2s_socket.set_hwm(10)
        # ROUTER socket: routes each action back to a specific client.
        self.s2c_socket = self.context.socket(zmq.ROUTER)
        self.s2c_socket.bind(pipe_s2c)
        self.s2c_socket.set_hwm(10)
        self.send_queue = queue.Queue(maxsize=100)

        def f():
            # Drain the send queue on a dedicated thread so _on_state
            # handlers never block on the socket.
            msg = self.send_queue.get()
            self.s2c_socket.send_multipart(msg, copy=False)
        self.send_thread = LoopThread(f)
        self.send_thread.daemon = True
        self.send_thread.start()

        def clean_context(soks, context):
            for s in soks:
                s.close()
            context.term()
        import atexit
        atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)

    def run(self):
        self.clients = defaultdict(self.ClientState)
        while True:
            msg = loads(self.c2s_socket.recv(copy=False).bytes)
            (ident, state, reward, isOver) = msg
            client = self.clients[ident]
            if (len(client.memory) > 0):
                # The received reward belongs to the previous transition.
                client.memory[(- 1)].reward = reward
            if isOver:
                # NOTE(review): `ident` is passed where the callbacks name
                # their parameter `client` — confirm which value subclasses
                # actually expect.
                self._on_episode_over(ident)
            else:
                self._on_datapoint(ident)
            self._on_state(state, ident)

    @abstractmethod
    def _on_state(self, state, ident):
        """Respond to a state sent by ident. Preferably an async call."""

    @abstractmethod
    def _on_episode_over(self, client):
        """Callback when the client just finished an episode.

        You may want to clear the client's memory in this callback.
        """

    def _on_datapoint(self, client):
        """Callback when the client just finished a transition."""

    def __del__(self):
        self.context.destroy(linger=0)
|
class SimulatorProcessDF(SimulatorProcessBase):
    """A simulator which contains a forward model itself, allowing
    it to produce data points directly.
    """

    def __init__(self, idx, pipe_c2s):
        super(SimulatorProcessDF, self).__init__(idx)
        self.pipe_c2s = pipe_c2s

    def run(self):
        # All heavy setup happens inside the child process.
        self.player = self._build_player()
        self.ctx = zmq.Context()
        self.c2s_socket = self.ctx.socket(zmq.PUSH)
        self.c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
        self.c2s_socket.set_hwm(5)
        self.c2s_socket.connect(self.pipe_c2s)
        self._prepare()
        for dp in self.get_data():
            self.c2s_socket.send(dumps(dp), copy=False)

    @abstractmethod
    def _prepare(self):
        # Hook for subclass setup, called after the socket is connected.
        pass

    @abstractmethod
    def get_data(self):
        # Generator yielding serializable data points to send to master.
        pass
|
class SimulatorProcessSharedWeight(SimulatorProcessDF):
    """A simulator process with an extra thread waiting for an event,
    which then takes shared weights from shm.

    Start me under some CUDA_VISIBLE_DEVICES set!
    """

    def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config):
        # :param condvar: condition variable notified when new weights exist
        # :param shared_dic: shared dict; weights appear under 'params'
        # :param pred_config: config used to build the OfflinePredictor
        super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s)
        self.condvar = condvar
        self.shared_dic = shared_dic
        self.pred_config = pred_config

    def _prepare(self):
        disable_layer_logging()
        self.predictor = OfflinePredictor(self.pred_config)
        with self.predictor.graph.as_default():
            vars_to_update = self._params_to_update()
            self.sess_updater = SessionUpdate(self.predictor.session, vars_to_update)
        # Graph is finalized: only variable values may change from now on.
        self.predictor.graph.finalize()
        self.weight_lock = threading.Lock()

        def func():
            # Wait for the trainer's notification, then pull new weights.
            self.condvar.acquire()
            while True:
                self.condvar.wait()
                self._trigger_evt()
        self.evt_th = threading.Thread(target=func)
        self.evt_th.daemon = True
        self.evt_th.start()

    def _trigger_evt(self):
        with self.weight_lock:
            self.sess_updater.update(self.shared_dic['params'])
            logger.info('Updated.')

    def _params_to_update(self):
        # Subclasses may override to sync only a subset of variables.
        return tf.trainable_variables()
|
class WeightSync(Callback):
    """Sync weights from the main process into `shared_dic` and notify waiters."""

    def __init__(self, condvar, shared_dic):
        """
        :param condvar: condition variable to notify after publishing weights
        :param shared_dic: shared dict; weights are published under 'params'
        """
        self.condvar = condvar
        self.shared_dic = shared_dic

    def _setup_graph(self):
        self.vars = self._params_to_update()

    def _params_to_update(self):
        # Subclasses may override to sync only a subset of variables.
        return tf.trainable_variables()

    def _before_train(self):
        self._sync()

    def _trigger_epoch(self):
        self._sync()

    def _sync(self):
        logger.info('Updating weights ...')
        dic = {v.name: v.eval() for v in self.vars}
        self.shared_dic['params'] = dic
        # Use the condition variable as a context manager so the lock is
        # released even if notify_all raises (the manual
        # acquire/notify/release sequence leaked the lock on error).
        with self.condvar:
            self.condvar.notify_all()
|
def _global_import(name):
    # Import sibling module `name` (package-relative, level=1) and re-export
    # its public names into this package's namespace and __all__.
    p = __import__(name, globals(), locals(), level=1)
    # Prefer the module's declared public API; fall back to everything.
    lst = (p.__all__ if ('__all__' in dir(p)) else dir(p))
    # Drop the module object itself so only the re-exported names remain.
    del globals()[name]
    for k in lst:
        globals()[k] = p.__dict__[k]
        __all__.append(k)
|
@six.add_metaclass(ABCMeta)
class Callback(object):
    """Base class for all training callbacks."""

    def before_train(self):
        """Called right before the first iteration."""
        self._before_train()

    def _before_train(self):
        pass

    def setup_graph(self, trainer):
        """Called before finalizing the graph.

        Use this callback to set up some ops used in the callback.

        :param trainer: :class:`train.Trainer` instance
        """
        self.trainer = trainer
        self.graph = tf.get_default_graph()
        # epoch_num starts one below starting_epoch; trigger_epoch increments it.
        self.epoch_num = (self.trainer.config.starting_epoch - 1)
        # Each callback builds its ops under its own name scope.
        with tf.name_scope(type(self).__name__):
            self._setup_graph()

    def _setup_graph(self):
        pass

    def after_train(self):
        """Called after training."""
        self._after_train()

    def _after_train(self):
        pass

    def trigger_step(self):
        """Triggered after every step (every backpropagation).

        Could be useful to apply some tricks on parameters
        (clipping, low-rank, etc.)
        """

    def trigger_epoch(self):
        """Triggered after every epoch.

        In this function, self.epoch_num is the number of epochs finished.
        """
        self.epoch_num += 1
        self._trigger_epoch()

    def _trigger_epoch(self):
        pass

    def __str__(self):
        return type(self).__name__
|
class ProxyCallback(Callback):
    """A base callback that forwards every hook to another callback."""

    def __init__(self, cb):
        self.cb = cb

    def _before_train(self):
        self.cb.before_train()

    def _setup_graph(self):
        self.cb.setup_graph(self.trainer)

    def _after_train(self):
        self.cb.after_train()

    def _trigger_epoch(self):
        self.cb.trigger_epoch()

    def __str__(self):
        return ('Proxy-' + str(self.cb))
|
class PeriodicCallback(ProxyCallback):
    """Trigger the wrapped callback only every `period` epochs.

    Doesn't apply to trigger_step.
    """

    def __init__(self, cb, period):
        """
        :param cb: a `Callback`
        :param period: int
        """
        super(PeriodicCallback, self).__init__(cb)
        self.period = int(period)

    def _trigger_epoch(self):
        if self.epoch_num % self.period != 0:
            return
        # Keep the wrapped callback's epoch counter in sync; its own
        # trigger_epoch will increment it back up to epoch_num.
        self.cb.epoch_num = self.epoch_num - 1
        self.cb.trigger_epoch()

    def __str__(self):
        return ('Periodic-' + str(self.cb))
|
class StartProcOrThread(Callback):
    """Start extra threads and processes before training begins."""

    def __init__(self, procs_threads):
        """
        :param procs_threads: a process/thread, or a list of them
        """
        if isinstance(procs_threads, list):
            self._procs_threads = procs_threads
        else:
            self._procs_threads = [procs_threads]

    def _before_train(self):
        names = ', '.join(k.name for k in self._procs_threads)
        logger.info('Starting ' + names)
        start_proc_mask_signal(self._procs_threads)
|
class OutputTensorDispatcer(object):
    """Deduplicate output tensor names across several entries.

    Each entry is a list of tensor names; `get_all_names` returns their
    union (in first-seen order) and `get_idx_for_each_entry` maps every
    entry to indices into that union.
    """

    def __init__(self):
        self._names = []
        self._idxs = []
        # name -> position in self._names, for O(1) dedup lookup
        # (was a linear `list.index` scan per name, O(n^2) overall).
        self._name_to_idx = {}

    def add_entry(self, names):
        v = []
        for n in names:
            tensorname = get_op_tensor_name(n)[1]
            idx = self._name_to_idx.get(tensorname)
            if idx is None:
                idx = len(self._names)
                self._names.append(tensorname)
                self._name_to_idx[tensorname] = idx
            v.append(idx)
        self._idxs.append(v)

    def get_all_names(self):
        return self._names

    def get_idx_for_each_entry(self):
        return self._idxs
|
class DumpParamAsImage(Callback):
    """Dump a variable to image file(s) under logger.LOG_DIR after every epoch."""

    def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False):
        """
        :param var_name: the name of the variable.
        :param prefix: the filename prefix for saved images. Default is the op name.
        :param map_func: map the value of the variable to an image or list of
            images of shape [h, w] or [h, w, c]. If None, will use identity.
        :param scale: a multiplier on pixel values, applied after map_func. Default 255.
        :param clip: whether to clip the result to [0, 255].
        """
        (op_name, self.var_name) = get_op_var_name(var_name)
        self.func = map_func
        if (prefix is None):
            self.prefix = op_name
        else:
            self.prefix = prefix
        self.log_dir = logger.LOG_DIR
        self.scale = scale
        self.clip = clip

    def _before_train(self):
        self.var = self.graph.get_tensor_by_name(self.var_name)

    def _trigger_epoch(self):
        val = self.trainer.sess.run(self.var)
        if (self.func is not None):
            val = self.func(val)
        if isinstance(val, list):
            for (idx, im) in enumerate(val):
                self._dump_image(im, idx)
        else:
            self._dump_image(val)

    def _dump_image(self, im, idx=None):
        assert (im.ndim in [2, 3]), str(im.ndim)
        # BUGFIX: must test `idx is not None`, not truthiness — with `if idx`
        # the first image of a list (idx == 0) lost its '-0' suffix and could
        # collide with the single-image filename.
        suffix = ('-' + str(idx)) if idx is not None else ''
        fname = os.path.join(
            self.log_dir,
            self.prefix + '-ep{:03d}{}.png'.format(self.epoch_num, suffix))
        res = im * self.scale
        if self.clip:
            res = np.clip(res, 0, 255)
        cv2.imwrite(fname, res.astype('uint8'))
|
class RunOp(Callback):
    """Run an op periodically during training."""

    def __init__(self, setup_func, run_before=True, run_epoch=True):
        """
        :param setup_func: a function that returns the op in the graph
        :param run_before: run the op once before training starts
        :param run_epoch: run the op on every epoch trigger
        """
        self.setup_func = setup_func
        self.run_before = run_before
        self.run_epoch = run_epoch

    def _setup_graph(self):
        self._op = self.setup_func()

    def _run_if(self, flag):
        # Shared by both triggers: run the op only when enabled.
        if flag:
            self._op.run()

    def _before_train(self):
        self._run_if(self.run_before)

    def _trigger_epoch(self):
        self._run_if(self.run_epoch)
|
class CallbackTimeLogger(object):
    """Accumulate per-callback wall time and log the heavy ones."""

    def __init__(self):
        self.times = []
        self.tot = 0

    def add(self, name, time):
        self.tot += time
        self.times.append((name, time))

    @contextmanager
    def timed_callback(self, name):
        # Measure the wall time spent inside the `with` body.
        start = time.time()
        (yield)
        self.add(name, time.time() - start)

    def log(self):
        """Log the time of heavy callbacks (only when total >= 3 sec)."""
        if self.tot < 3:
            return
        heavy = [
            '{}: {:.3f}sec'.format(name, t)
            for (name, t) in self.times
            if (t / self.tot) > 0.3 and t > 1
        ]
        logger.info('Callbacks took {:.3f} sec in total. {}'.format(self.tot, '; '.join(heavy)))
|
class Callbacks(Callback):
    """A container to hold all callbacks, executing them in order."""

    def __init__(self, cbs):
        """
        :param cbs: a list of `Callback` instances. Must contain a
            StatPrinter, which is moved to the end of the list (mutating
            the given list) so stats written by earlier callbacks are
            printed in the same epoch.
        """
        for cb in cbs:
            assert isinstance(cb, Callback), cb.__class__
        for cb in cbs:
            if isinstance(cb, StatPrinter):
                cbs.remove(cb)
                cbs.append(cb)
                break
        else:
            raise ValueError('Callbacks must contain StatPrinter for stat and writer to work properly!')
        self.cbs = cbs

    def _setup_graph(self):
        # Reset to the root name scope; each callback opens its own inside.
        with tf.name_scope(None):
            for cb in self.cbs:
                cb.setup_graph(self.trainer)

    def _before_train(self):
        for cb in self.cbs:
            cb.before_train()

    def _after_train(self):
        for cb in self.cbs:
            cb.after_train()

    def trigger_step(self):
        for cb in self.cbs:
            cb.trigger_step()

    def _trigger_epoch(self):
        # (Removed an unused local `test_sess_restored` left over here.)
        tm = CallbackTimeLogger()
        for cb in self.cbs:
            display_name = str(cb)
            with tm.timed_callback(display_name):
                cb.trigger_epoch()
        tm.log()

    def append(self, cb):
        assert isinstance(cb, Callback)
        self.cbs.append(cb)
|
@six.add_metaclass(ABCMeta)
class Inferencer(object):
    """Base class of inferencers: consume per-datapoint outputs, then report stats."""

    def before_inference(self):
        """Called before a new round of inference starts."""
        self._before_inference()

    def _before_inference(self):
        pass

    def datapoint(self, output):
        """Called after complete running every data point."""
        self._datapoint(output)

    @abstractmethod
    def _datapoint(self, output):
        pass

    def after_inference(self):
        """Called after a round of inference ends.

        Returns a dict of statistics which will be logged by the
        InferenceRunner. The inferencer needs to handle other kinds of
        logging by its own.
        """
        return self._after_inference()

    def _after_inference(self):
        pass

    def get_output_tensors(self):
        """Return a list of tensor names needed for this inference."""
        return self._get_output_tensors()

    @abstractmethod
    def _get_output_tensors(self):
        pass
|
class ScalarStats(Inferencer):
    """Average scalar tensors over the inference dataflow and report them.

    Each listed tensor must produce a scalar; the reported value is the
    mean over all data points, written to both stat and summary.
    """

    def __init__(self, names_to_print, prefix='validation'):
        """
        :param names_to_print: a name, or a list of names, of scalar tensors
        :param prefix: an optional prefix for logged names
        """
        if isinstance(names_to_print, list):
            self.names = names_to_print
        else:
            self.names = [names_to_print]
        self.prefix = prefix

    def _get_output_tensors(self):
        return self.names

    def _before_inference(self):
        self.stats = []

    def _datapoint(self, output):
        self.stats.append(output)

    def _after_inference(self):
        # Mean over datapoints: one averaged value per tracked tensor.
        self.stats = np.mean(self.stats, axis=0)
        assert len(self.stats) == len(self.names)
        ret = {}
        for stat, name in zip(self.stats, self.names):
            opname, _ = get_op_var_name(name)
            name = '{}_{}'.format(self.prefix, opname) if self.prefix else opname
            ret[name] = stat
        return ret
|
class ClassificationError(Inferencer):
    """Compute the true classification error from a `wrong` variable.

    The `wrong` tensor is a 0/1 integer vector marking whether each sample
    in the batch is incorrectly classified (`tf.nn.in_top_k` can produce
    it, also recording top-k error).

    Because test batches may differ in size, this accumulates the total
    number of wrong samples and the total sample count — which differs
    from averaging the per-batch error rates.
    """

    def __init__(self, wrong_var_name='incorrect_vector', summary_name='val_error'):
        """
        :param wrong_var_name: name of the `wrong` variable
        :param summary_name: the name used for logging
        """
        self.wrong_var_name = wrong_var_name
        self.summary_name = summary_name

    def _get_output_tensors(self):
        return [self.wrong_var_name]

    def _before_inference(self):
        self.err_stat = RatioCounter()

    def _datapoint(self, outputs):
        vec = outputs[0]
        if vec.ndim != 0:
            assert vec.ndim == 1, '{} is not a vector!'.format(self.wrong_var_name)
            self.err_stat.feed(np.sum(vec), len(vec))
        else:
            # A scalar means the old `nr_wrong` interface was used.
            logger.error("[DEPRECATED] use a 'wrong vector' for ClassificationError instead of nr_wrong")
            sys.exit(1)

    def _after_inference(self):
        return {self.summary_name: self.err_stat.ratio}
|
class BinaryClassificationStats(Inferencer):
    """Compute precision/recall for binary classification, given a 0/1
    prediction vector and a 0/1 label vector.
    """

    def __init__(self, pred_var_name, label_var_name, summary_prefix='val'):
        """
        :param pred_var_name: name of the 0/1 prediction tensor.
        :param label_var_name: name of the 0/1 label tensor.
        :param summary_prefix: prefix for the reported statistic names.
        """
        self.pred_var_name = pred_var_name
        self.label_var_name = label_var_name
        self.prefix = summary_prefix

    def _get_output_tensors(self):
        return [self.pred_var_name, self.label_var_name]

    def _before_inference(self):
        self.stat = BinaryStatistics()

    def _datapoint(self, outputs):
        pred, label = outputs
        self.stat.feed(pred, label)

    def _after_inference(self):
        return {
            self.prefix + '_precision': self.stat.precision,
            self.prefix + '_recall': self.stat.recall,
        }
|
def summary_inferencer(trainer, infs):
    """Collect statistics from all inferencers and write scalar summaries.

    Statistics that cannot be cast to float are skipped with a warning
    instead of being written.
    """
    for inf in infs:
        ret = inf.after_inference()
        for (k, v) in six.iteritems(ret):
            try:
                v = float(v)
            except (ValueError, TypeError):
                # Fixed: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; float() only raises these two.
                logger.warn('{} returns a non-scalar statistics!'.format(type(inf).__name__))
                continue
            trainer.write_scalar_summary(k, v)
|
class InferenceRunner(Callback):
    """A callback that runs a list of inferencers over a DataFlow on selected epochs."""

    # IOTensor.index points into the input list (isOutput=False)
    # or the output list (isOutput=True).
    IOTensor = namedtuple('IOTensor', ['index', 'isOutput'])

    def __init__(self, ds, infs, inf_epochs, input_tensors=None):
        """
        :param ds: inference dataset. a `DataFlow` instance.
        :param infs: a list of `Inferencer` instances.
        :param inf_epochs: epochs on which inference runs (array-like of ints).
        :param input_tensors: list of tensors to feed the dataflow to.
            default to all the input placeholders.
        """
        assert isinstance(ds, DataFlow), ds
        self.ds = ds
        if (not isinstance(infs, list)):
            self.infs = [infs]
        else:
            self.infs = infs
        for v in self.infs:
            assert isinstance(v, Inferencer), v
        self.input_tensors = input_tensors
        self.inf_epochs = inf_epochs

    def _setup_graph(self):
        self._find_input_tensors()
        self._find_output_tensors()
        self.pred_func = self.trainer.get_predict_func(self.input_tensors, self.output_tensors)

    def _find_input_tensors(self):
        # Default to the model's placeholders when not given by the user.
        if (self.input_tensors is None):
            input_vars = self.trainer.model.get_reuse_placehdrs()

            def get_name(x):
                # A SparseTensor has no single `.name`; use its op prefix.
                if isinstance(x, tf.SparseTensor):
                    return x.op.name.split('/')[0]
                return x.name
            self.input_tensors = [get_name(x) for x in input_vars]

    def _find_output_tensors(self):
        # Deduplicate tensor names requested by all inferencers.
        dispatcer = OutputTensorDispatcer()
        for inf in self.infs:
            dispatcer.add_entry(inf.get_output_tensors())
        all_names = dispatcer.get_all_names()
        IOTensor = InferenceRunner.IOTensor
        # Tensors that are also inputs are fed from the datapoint, not fetched.
        self.output_tensors = list(filter((lambda x: (x not in self.input_tensors)), all_names))

        def find_oid(idxs):
            ret = []
            for idx in idxs:
                name = all_names[idx]
                if (name in self.input_tensors):
                    ret.append(IOTensor(self.input_tensors.index(name), False))
                else:
                    ret.append(IOTensor(self.output_tensors.index(name), True))
            return ret
        self.inf_to_tensors = [find_oid(t) for t in dispatcer.get_idx_for_each_entry()]

    def _trigger_epoch(self):
        # Only run inference on the configured epochs.
        if np.any((self.inf_epochs[:] == self.epoch_num)):
            for inf in self.infs:
                inf.before_inference()
            # NOTE(review): `sess` below is unused — prediction goes through
            # self.pred_func; confirm whether it can be dropped.
            sess = tf.get_default_session()
            self.ds.reset_state()
            with get_tqdm(total=self.ds.size()) as pbar:
                for dp in self.ds.get_data():
                    outputs = self.pred_func(dp)
                    for (inf, tensormap) in zip(self.infs, self.inf_to_tensors):
                        # Route each inferencer its inputs (from dp) and
                        # outputs (from the prediction), in requested order.
                        inf_output = [(outputs if k.isOutput else dp)[k.index] for k in tensormap]
                        inf.datapoint(inf_output)
                    pbar.update()
            self._write_summary_after_inference()

    def _write_summary_after_inference(self):
        summary_inferencer(self.trainer, self.infs)
|
class FeedfreeInferenceRunner(Callback):
    """Like InferenceRunner, but for a FeedfreeInput (no feed_dict)."""

    # IOTensor.index points into the output list; for feedfree input every
    # requested tensor is treated as an output (isOutput=True).
    IOTensor = namedtuple('IOTensor', ['index', 'isOutput'])

    def __init__(self, input, infs, input_tensors=None):
        # :param input: a `FeedfreeInput` instance producing input tensors
        # :param infs: an `Inferencer` or list of them
        # :param input_tensors: optional subset of placeholder names to keep
        assert isinstance(input, FeedfreeInput), input
        self._input_data = input
        if (not isinstance(infs, list)):
            self.infs = [infs]
        else:
            self.infs = infs
        for v in self.infs:
            assert isinstance(v, Inferencer), v
        self.input_tensor_names = input_tensors

    def _setup_graph(self):
        self._find_input_tensors()
        self._find_output_tensors()

    def _find_input_tensors(self):
        self._input_data._setup(self.trainer)
        self._input_tensors = self._input_data.get_input_tensors()
        model_placehdrs = self.trainer.model.get_reuse_placehdrs()
        assert (len(self._input_tensors) == len(model_placehdrs)), "FeedfreeInput doesn't produce correct number of output tensors"
        if (self.input_tensor_names is not None):
            assert isinstance(self.input_tensor_names, list)
            # Keep only the produced tensors whose corresponding placeholder
            # name was requested by the user.
            self._input_tensors = [k for (idx, k) in enumerate(self._input_tensors) if (model_placehdrs[idx].name in self.input_tensor_names)]
            assert (len(self._input_tensors) == len(self.input_tensor_names)), 'names of input tensors are not defined in the Model'

    def _find_output_tensors(self):
        dispatcer = OutputTensorDispatcer()
        for inf in self.infs:
            dispatcer.add_entry(inf.get_output_tensors())
        all_names = dispatcer.get_all_names()
        IOTensor = InferenceRunner.IOTensor
        self.output_tensors = all_names

        def find_oid(idxs):
            ret = []
            for idx in idxs:
                name = all_names[idx]
                ret.append(IOTensor(self.output_tensors.index(name), True))
            return ret
        self.inf_to_tensors = [find_oid(t) for t in dispatcer.get_idx_for_each_entry()]

    def _trigger_epoch(self):
        for inf in self.infs:
            inf.before_inference()
        sess = tf.get_default_session()
        sz = self._input_data.size()
        # NOTE(review): this loop never runs the graph or feeds the
        # inferencers — it only advances the progress bar. Looks like an
        # unfinished implementation; confirm against project history.
        with get_tqdm(total=sz) as pbar:
            for _ in range(sz):
                pbar.update()
        self._write_summary_after_inference()

    def _write_summary_after_inference(self):
        summary_inferencer(self.trainer, self.infs)
|
@six.add_metaclass(ABCMeta)
class HyperParam(object):
    """Base class for a hyperparameter that can be set during training."""

    def setup_graph(self):
        """Setup the graph in the `setup_graph` callback stage, if necessary."""
        pass

    @abstractmethod
    def set_value(self, v):
        """Define how the value of the param will be set."""
        pass

    @property
    def readable_name(self):
        """A name to display."""
        # NOTE(review): relies on subclasses assigning self._readable_name.
        return self._readable_name
|
# (Stray non-code residue: the three lines below appear to be dataset-site
# boilerplate accidentally pasted into this file — "Subsets and Splits /
# No community queries yet / The top public SQL queries from the community
# will appear here once available." They are not part of this module.)