| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/examples/chainermn/seq2seq/seq2seq_mp1.py |
# encoding: utf-8
import argparse
import math
import os.path
import pickle
import re
import sys
import time
from nltk.translate import bleu_score
import numpy
import six
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import chainermn
import chainermn.functions
import chainermn.links
import europal
def cached_call(fname, func, *args):
if os.path.exists(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
else:
# not yet cached
val = func(*args)
with open(fname, 'wb') as f:
pickle.dump(val, f)
return val
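# Usage sketch (path illustrative): cached_call('cache/source.pickle',
# read_source, in_dir) computes and pickles the result on the first run
# and just unpickles it on later runs; see main() below for actual calls.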
def read_source(in_dir, cache=None):
en_path = os.path.join(in_dir, 'giga-fren.release2.fixed.en')
source_vocab = ['<eos>', '<unk>'] + europal.count_words(en_path)
source_data = europal.make_dataset(en_path, source_vocab)
return source_vocab, source_data
def read_target(in_dir, cache=None):
fr_path = os.path.join(in_dir, 'giga-fren.release2.fixed.fr')
target_vocab = ['<eos>', '<unk>'] + europal.count_words(fr_path)
target_data = europal.make_dataset(fr_path, target_vocab)
return target_vocab, target_data
def sequence_embed(embed, xs):
x_len = [len(x) for x in xs]
x_section = numpy.cumsum(x_len[:-1])
ex = embed(F.concat(xs, axis=0))
exs = F.split_axis(ex, x_section, 0, force_tuple=True)
return exs
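# Illustrative shapes (hypothetical sizes): for xs = [a, b] with
# len(a) == 3 and len(b) == 5, the embedding runs once on an (8,) id batch
# and split_axis cuts the (8, n_units) result back into (3, n_units) and
# (5, n_units) chunks, avoiding one embed call per sequence.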
class Encoder(chainer.Chain):
def __init__(
self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
super(Encoder, self).__init__(
embed_x=L.EmbedID(n_source_vocab, n_units),
# Corresponding decoder LSTM will be invoked on process 1.
mn_encoder=chainermn.links.create_multi_node_n_step_rnn(
L.NStepLSTM(n_layers, n_units, n_units, 0.1),
comm, rank_in=None, rank_out=1
),
)
self.comm = comm
self.n_layers = n_layers
self.n_units = n_units
def __call__(self, *inputs):
xs = inputs[:len(inputs) // 2]
xs = [x[::-1] for x in xs]
exs = sequence_embed(self.embed_x, xs)
# Encode input sequence and send hidden states to decoder.
_, _, _, delegate_variable = self.mn_encoder(exs)
# Last element represents delegate variable.
return delegate_variable
def translate(self, xs, max_length=100):
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
xs = [x[::-1] for x in xs]
exs = sequence_embed(self.embed_x, xs)
# Encode input sequence and send hidden states to decoder.
self.mn_encoder(exs)
# Encoder does not return anything.
# All evaluation will be done in decoder process.
return None
class Decoder(chainer.Chain):
def __init__(
self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
super(Decoder, self).__init__(
embed_y=L.EmbedID(n_target_vocab, n_units),
# Corresponding encoder LSTM will be invoked on process 0.
mn_decoder=chainermn.links.create_multi_node_n_step_rnn(
L.NStepLSTM(n_layers, n_units, n_units, 0.1),
comm, rank_in=0, rank_out=None),
W=L.Linear(n_units, n_target_vocab),
)
self.comm = comm
self.n_layers = n_layers
self.n_units = n_units
def __call__(self, *inputs):
xs = inputs[:len(inputs) // 2]
ys = inputs[len(inputs) // 2:]
xs = [x[::-1] for x in xs]
batch = len(xs)
eos = self.xp.zeros(1, self.xp.int32)
ys_in = [F.concat([eos, y], axis=0) for y in ys]
ys_out = [F.concat([y, eos], axis=0) for y in ys]
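# ys_in prepends <eos> (id 0) to form the decoder input; ys_out appends
# <eos> to form the shifted prediction target.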
eys = sequence_embed(self.embed_y, ys_in)
# Receive hidden states from encoder process and decode.
_, _, os, _ = self.mn_decoder(eys)
# It is faster to concatenate data before calculating loss
# because only one matrix multiplication is called.
concat_os = F.concat(os, axis=0)
concat_ys_out = F.concat(ys_out, axis=0)
loss = F.sum(F.softmax_cross_entropy(
self.W(concat_os), concat_ys_out, reduce='no')) / batch
reporter.report({'loss': loss.data}, self)
n_words = concat_ys_out.shape[0]
perp = self.xp.exp(loss.data * batch / n_words)
reporter.report({'perp': perp}, self)
return loss
def translate(self, xs, max_length=100):
batch = len(xs)
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
result = []
ys = self.xp.zeros(batch, self.xp.int32)
eys = self.embed_y(ys)
eys = chainer.functions.split_axis(
eys, batch, 0, force_tuple=True)
# Receive hidden states from encoder process.
h, c, ys, _ = self.mn_decoder(eys)
cys = chainer.functions.concat(ys, axis=0)
wy = self.W(cys)
ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
result.append(ys)
# Recursively decode using the previously predicted token.
for i in range(1, max_length):
eys = self.embed_y(ys)
eys = chainer.functions.split_axis(
eys, batch, 0, force_tuple=True)
# Non-MN RNN link can be accessed via `actual_rnn`.
h, c, ys = self.mn_decoder.actual_rnn(h, c, eys)
cys = chainer.functions.concat(ys, axis=0)
wy = self.W(cys)
ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
result.append(ys)
result = cuda.to_cpu(self.xp.stack(result).T)
# Remove EOS tags
outs = []
for y in result:
inds = numpy.argwhere(y == 0)
if len(inds) > 0:
y = y[:inds[0, 0]]
outs.append(y)
return outs
def convert(batch, device):
def to_device_batch(batch):
if device is None:
return batch
elif device < 0:
return [chainer.dataset.to_device(device, x) for x in batch]
else:
xp = cuda.cupy.get_array_module(*batch)
concat = xp.concatenate(batch, axis=0)
sections = numpy.cumsum(
[len(x) for x in batch[:-1]], dtype=numpy.int32)
concat_dev = chainer.dataset.to_device(device, concat)
batch_dev = cuda.cupy.split(concat_dev, sections)
return batch_dev
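# The GPU branch above does a single host-to-device transfer of the
# concatenated batch and splits it on the device, which is cheaper than
# copying each variable-length sequence separately.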
return tuple(
to_device_batch([x for x, _ in batch]) +
to_device_batch([y for _, y in batch]))
class BleuEvaluator(extensions.Evaluator):
def __init__(self, model, test_data, device=-1, batch=100,
max_length=100, comm=None):
super(BleuEvaluator, self).__init__({'main': None}, model)
self.model = model
self.test_data = test_data
self.batch = batch
self.device = device
self.max_length = max_length
self.comm = comm
def evaluate(self):
bt = time.time()
with chainer.no_backprop_mode():
references = []
hypotheses = []
observation = {}
with reporter.report_scope(observation):
for i in range(0, len(self.test_data), self.batch):
src, trg = zip(*self.test_data[i:i + self.batch])
references.extend([[t.tolist()] for t in trg])
src = [chainer.dataset.to_device(self.device, x)
for x in src]
if self.comm.rank == 0:
self.model.translate(src, self.max_length)
elif self.comm.rank == 1:
ys = [y.tolist()
for y in self.model.translate(
src, self.max_length)]
hypotheses.extend(ys)
if self.comm.rank == 1:
bleu = bleu_score.corpus_bleu(
references, hypotheses,
smoothing_function=bleu_score.SmoothingFunction().method1)
reporter.report({'bleu': bleu}, self.model)
et = time.time()
if self.comm.rank == 1:
print('BleuEvaluator(single)::evaluate(): '
'took {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
return observation
def create_optimizer(opt_arg):
"""Parse a string and get an optimizer.
The syntax is:
opt(params...)
where
opt := sgd | adam
param := [float | key=val]...
"""
m = re.match(r'(adam|sgd)\(([^)]*)\)', opt_arg, re.I)
name = m.group(1).lower()
args = m.group(2)
names_dict = {
'adadelta': chainer.optimizers.AdaDelta,
'adagrad': chainer.optimizers.AdaGrad,
'adam': chainer.optimizers.Adam,
'momentumsgd': chainer.optimizers.MomentumSGD,
'nesterovag': chainer.optimizers.NesterovAG,
'rmsprop': chainer.optimizers.RMSprop,
'rmspropgraves': chainer.optimizers.RMSpropGraves,
'sgd': chainer.optimizers.SGD,
'smorms3': chainer.optimizers.SMORMS3,
}
try:
opt = names_dict[name]
except KeyError:
raise RuntimeError('Unknown optimizer: \'{}\' in \'{}\''.format(
name, opt_arg))
# positional arguments
pos = []
# keyword arguments
kw = {}
args = args.strip()
if args:
for a in re.split(r',\s*', args):
if a.find('=') >= 0:
key, val = a.split('=')
kw[key] = float(val)
else:
pos.append(float(a))
return opt(*pos, **kw)
def _get_num_split(excp):
"""Get the preferrable number of split from a DataSizeError error"""
ps = excp.pickled_size
mx = excp.max_size
return (ps + mx - 1) // mx
def _slices(excp):
"""Get a list of slices that are expected to fit in a single send/recv."""
ds = excp.dataset_size
nsplit = _get_num_split(excp)
size = math.ceil(ds / nsplit)
return [(b, min(e, ds)) for b, e in
((i * size, (i + 1) * size) for i in range(0, nsplit))]
def main():
parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of sentence pairs in each mini-batch')
parser.add_argument('--bleu', action='store_true', default=False,
help='Report BLEU score')
parser.add_argument('--gpu', '-g', action='store_true',
help='Use GPU')
parser.add_argument('--cache', '-c', default=None,
help='Directory to cache pre-processed dataset')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1024,
help='Number of units')
parser.add_argument('--communicator', default='pure_nccl',
help='Type of communicator')
parser.add_argument('--stop', '-s', type=str, default='15e',
help='Stop trigger (ex. "500i", "15e")')
parser.add_argument('--input', '-i', type=str, default='wmt',
help='Input directory')
parser.add_argument('--optimizer', type=str, default='adam()',
help='Optimizer and its argument')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
args = parser.parse_args()
# Prepare ChainerMN communicator
if args.gpu:
comm = chainermn.create_communicator(args.communicator)
dev = comm.intra_rank
else:
comm = chainermn.create_communicator('naive')
dev = -1
if comm.size != 2:
raise ValueError(
'This example can only be executed on exactly 2 processes.')
if comm.rank == 0:
print('==========================================')
print('Num process (COMM_WORLD): {}'.format(comm.size))
if args.gpu:
print('Using GPUs')
print('Using {} communicator'.format(args.communicator))
print('Num unit: {}'.format(args.unit))
print('Num Minibatch-size: {}'.format(args.batchsize))
print('==========================================')
# Both processes prepare datasets.
if comm.rank == 0 or comm.rank == 1:
if args.cache and not os.path.exists(args.cache):
os.mkdir(args.cache)
# Read source data
bt = time.time()
if args.cache:
cache_file = os.path.join(args.cache, 'source.pickle')
source_vocab, source_data = cached_call(cache_file,
read_source,
args.input, args.cache)
else:
source_vocab, source_data = read_source(args.input, args.cache)
et = time.time()
print('RD source done. {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
# Read target data
bt = time.time()
if args.cache:
cache_file = os.path.join(args.cache, 'target.pickle')
target_vocab, target_data = cached_call(cache_file,
read_target,
args.input, args.cache)
else:
target_vocab, target_data = read_target(args.input, args.cache)
et = time.time()
print('RD target done. {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
print('Original training data size: %d' % len(source_data))
train_data = [(s, t)
for s, t in six.moves.zip(source_data, target_data)
if 0 < len(s) < 50 and 0 < len(t) < 50]
print('Filtered training data size: %d' % len(train_data))
en_path = os.path.join(args.input, 'dev', 'newstest2013.en')
source_data = europal.make_dataset(en_path, source_vocab)
fr_path = os.path.join(args.input, 'dev', 'newstest2013.fr')
target_data = europal.make_dataset(fr_path, target_vocab)
assert len(source_data) == len(target_data)
test_data = [(s, t) for s, t
in six.moves.zip(source_data, target_data)
if 0 < len(s) and 0 < len(t)]
source_ids = {word: index
for index, word in enumerate(source_vocab)}
target_ids = {word: index
for index, word in enumerate(target_vocab)}
else:
train_data, test_data = None, None
target_ids, source_ids = None, None
# Print GPU id
for i in range(0, comm.size):
if comm.rank == i:
print('Rank {} GPU: {}'.format(comm.rank, dev))
sys.stdout.flush()
comm.mpi_comm.Barrier()
# Broadcast the word -> id dictionaries from rank 0.
source_ids = comm.bcast_obj(source_ids, root=0)
target_ids = comm.bcast_obj(target_ids, root=0)
target_words = {i: w for w, i in target_ids.items()}
source_words = {i: w for w, i in source_ids.items()}
if comm.rank == 0:
print('target_words : {}'.format(len(target_words)))
print('source_words : {}'.format(len(source_words)))
n_lstm_layers = 3
if comm.rank == 0:
model = Encoder(
comm, n_lstm_layers, len(source_ids), len(target_ids), args.unit)
elif comm.rank == 1:
model = Decoder(
comm, n_lstm_layers, len(source_ids), len(target_ids), args.unit)
if dev >= 0:
chainer.cuda.get_device_from_id(dev).use()
model.to_gpu(dev)
# determine the stop trigger
m = re.match(r'^(\d+)e$', args.stop)
if m:
trigger = (int(m.group(1)), 'epoch')
else:
m = re.match(r'^(\d+)i$', args.stop)
if m:
trigger = (int(m.group(1)), 'iteration')
else:
if comm.rank == 0:
sys.stderr.write('Error: unknown stop trigger: {}'.format(
args.stop))
exit(-1)
if comm.rank == 0:
print('Trigger: {}'.format(trigger))
optimizer = create_optimizer(args.optimizer)
optimizer.setup(model)
train_iter = chainer.iterators.SerialIterator(train_data,
args.batchsize,
shuffle=False)
updater = training.StandardUpdater(
train_iter, optimizer, converter=convert, device=dev)
trainer = training.Trainer(updater,
trigger,
out=args.out)
# Do not use multi node evaluator.
# (because evaluation is done only on decoder process)
trainer.extend(BleuEvaluator(model, test_data, device=dev, comm=comm))
def translate_one(source, target):
words = europal.split_sentence(source)
print('# source : ' + ' '.join(words))
x = model.xp.array(
[source_ids.get(w, 1) for w in words], model.xp.int32)
ys = model.translate([x])[0]
words = [target_words[y] for y in ys]
print('# result : ' + ' '.join(words))
print('# expect : ' + target)
def translate(trainer):
translate_one(
'Who are we ?',
'Qui sommes-nous?')
translate_one(
'And it often costs over a hundred dollars ' +
'to obtain the required identity card .',
'Or, il en coûte souvent plus de cent dollars ' +
'pour obtenir la carte d\'identité requise.')
source, target = test_data[numpy.random.choice(len(test_data))]
source = ' '.join([source_words.get(i, '') for i in source])
target = ' '.join([target_words.get(i, '') for i in target])
translate_one(source, target)
if comm.rank == 1:
trigger = (1, 'epoch')
trainer.extend(extensions.LogReport(trigger=trigger),
trigger=trigger)
report = extensions.PrintReport(['epoch',
'iteration',
'main/loss',
'main/perp',
'validation/main/bleu',
'elapsed_time'])
trainer.extend(report, trigger=trigger)
trainer.extend(extensions.ProgressBar(update_interval=1))
comm.mpi_comm.Barrier()
if comm.rank == 0:
print('start training')
sys.stdout.flush()
trainer.run()
if __name__ == '__main__':
main()
| 19,042 | 34.005515 | 78 | py |
| chainer | chainer-master/examples/chainermn/seq2seq/seq2seq.py |
# encoding: utf-8
import argparse
import math
import os.path
import pickle
import re
import sys
import time
from nltk.translate import bleu_score
import numpy
import six
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import chainermn
import europal
def cached_call(fname, func, *args):
if os.path.exists(fname):
with open(fname, 'rb') as f:
return pickle.load(f)
else:
# not yet cached
val = func(*args)
with open(fname, 'wb') as f:
pickle.dump(val, f)
return val
def read_source(in_dir, cache=None):
en_path = os.path.join(in_dir, 'giga-fren.release2.fixed.en')
source_vocab = ['<eos>', '<unk>'] + europal.count_words(en_path)
source_data = europal.make_dataset(en_path, source_vocab)
return source_vocab, source_data
def read_target(in_dir, cache=None):
fr_path = os.path.join(in_dir, 'giga-fren.release2.fixed.fr')
target_vocab = ['<eos>', '<unk>'] + europal.count_words(fr_path)
target_data = europal.make_dataset(fr_path, target_vocab)
return target_vocab, target_data
def sequence_embed(embed, xs):
x_len = [len(x) for x in xs]
x_section = numpy.cumsum(x_len[:-1])
ex = embed(F.concat(xs, axis=0))
exs = F.split_axis(ex, x_section, 0, force_tuple=True)
return exs
class Seq2seq(chainer.Chain):
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
super(Seq2seq, self).__init__(
embed_x=L.EmbedID(n_source_vocab, n_units),
embed_y=L.EmbedID(n_target_vocab, n_units),
encoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
decoder=L.NStepLSTM(n_layers, n_units, n_units, 0.1),
W=L.Linear(n_units, n_target_vocab),
)
self.n_layers = n_layers
self.n_units = n_units
def __call__(self, *inputs):
xs = inputs[:len(inputs) // 2]
ys = inputs[len(inputs) // 2:]
xs = [x[::-1] for x in xs]
eos = self.xp.zeros(1, self.xp.int32)
ys_in = [F.concat([eos, y], axis=0) for y in ys]
ys_out = [F.concat([y, eos], axis=0) for y in ys]
# Both xs and ys_in are lists of arrays.
exs = sequence_embed(self.embed_x, xs)
eys = sequence_embed(self.embed_y, ys_in)
batch = len(xs)
# None represents a zero vector in an encoder.
hx, cx, _ = self.encoder(None, None, exs)
_, _, os = self.decoder(hx, cx, eys)
# It is faster to concatenate data before calculating loss
# because only one matrix multiplication is called.
concat_os = F.concat(os, axis=0)
concat_ys_out = F.concat(ys_out, axis=0)
loss = F.sum(F.softmax_cross_entropy(
self.W(concat_os), concat_ys_out, reduce='no')) / batch
reporter.report({'loss': loss.data}, self)
n_words = concat_ys_out.shape[0]
perp = self.xp.exp(loss.data * batch / n_words)
reporter.report({'perp': perp}, self)
return loss
def translate(self, xs, max_length=100):
batch = len(xs)
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
xs = [x[::-1] for x in xs]
exs = sequence_embed(self.embed_x, xs)
# Initial hidden variable and cell variable
# zero = self.xp.zeros((self.n_layers, batch, self.n_units), self.xp.float32) # NOQA
# h, c, _ = self.encoder(zero, zero, exs, train=False) # NOQA
h, c, _ = self.encoder(None, None, exs)
ys = self.xp.zeros(batch, self.xp.int32)
result = []
for i in range(max_length):
eys = self.embed_y(ys)
eys = chainer.functions.split_axis(
eys, batch, 0, force_tuple=True)
h, c, ys = self.decoder(h, c, eys)
cys = chainer.functions.concat(ys, axis=0)
wy = self.W(cys)
ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
result.append(ys)
result = cuda.to_cpu(self.xp.stack(result).T)
# Remove EOS tags
outs = []
for y in result:
inds = numpy.argwhere(y == 0)
if len(inds) > 0:
y = y[:inds[0, 0]]
outs.append(y)
return outs
def convert(batch, device):
def to_device_batch(batch):
if device is None:
return batch
elif device < 0:
return [chainer.dataset.to_device(device, x) for x in batch]
else:
xp = cuda.cupy.get_array_module(*batch)
concat = xp.concatenate(batch, axis=0)
sections = numpy.cumsum(
[len(x) for x in batch[:-1]], dtype=numpy.int32)
concat_dev = chainer.dataset.to_device(device, concat)
batch_dev = cuda.cupy.split(concat_dev, sections)
return batch_dev
return tuple(
to_device_batch([x for x, _ in batch]) +
to_device_batch([y for _, y in batch]))
class CalculateBleu(chainer.training.Extension):
# priority = chainer.training.PRIORITY_WRITER
def __init__(
self, model, test_data, key, batch=100, device=-1, max_length=100):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = device
self.max_length = max_length
def __call__(self, trainer):
with chainer.no_backprop_mode():
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend([[t.tolist()] for t in targets])
sources = [
chainer.dataset.to_device(self.device, x) for x in sources]
ys = [y.tolist()
for y in self.model.translate(sources, self.max_length)]
hypotheses.extend(ys)
bleu = bleu_score.corpus_bleu(
references, hypotheses,
smoothing_function=bleu_score.SmoothingFunction().method1)
reporter.report({self.key: bleu})
class BleuEvaluator(extensions.Evaluator):
def __init__(self, model, test_data, device=-1, batch=100,
max_length=100, comm=None):
super(BleuEvaluator, self).__init__({'main': None}, model)
self.model = model
self.test_data = test_data
self.batch = batch
self.device = device
self.max_length = max_length
self.comm = comm
def evaluate(self):
bt = time.time()
with chainer.no_backprop_mode():
references = []
hypotheses = []
observation = {}
with reporter.report_scope(observation):
for i in range(0, len(self.test_data), self.batch):
src, trg = zip(*self.test_data[i:i + self.batch])
references.extend([[t.tolist()] for t in trg])
src = [chainer.dataset.to_device(self.device, x)
for x in src]
ys = [y.tolist()
for y in self.model.translate(src, self.max_length)]
hypotheses.extend(ys)
bleu = bleu_score.corpus_bleu(
references, hypotheses,
smoothing_function=bleu_score.SmoothingFunction().method1)
reporter.report({'bleu': bleu}, self.model)
et = time.time()
if self.comm is not None:
# This evaluator is called via chainermn.create_multi_node_evaluator
for i in range(0, self.comm.size):
print('BleuEvaluator::evaluate(): '
'took {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
self.comm.mpi_comm.Barrier()
else:
# This evaluator is called from a conventional
# Chainer extension
print('BleuEvaluator(single)::evaluate(): '
'took {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
return observation
def create_optimizer(opt_arg):
"""Parse a string and get an optimizer.
The syntax is:
opt(params...)
where
opt := sgd | adam
param := [float | key=val]...
"""
m = re.match(r'(adam|sgd)\(([^)]*)\)', opt_arg, re.I)
name = m.group(1).lower()
args = m.group(2)
names_dict = {
'adadelta': chainer.optimizers.AdaDelta,
'adagrad': chainer.optimizers.AdaGrad,
'adam': chainer.optimizers.Adam,
'momentumsgd': chainer.optimizers.MomentumSGD,
'nesterovag': chainer.optimizers.NesterovAG,
'rmsprop': chainer.optimizers.RMSprop,
'rmspropgraves': chainer.optimizers.RMSpropGraves,
'sgd': chainer.optimizers.SGD,
'smorms3': chainer.optimizers.SMORMS3,
}
try:
opt = names_dict[name]
except KeyError:
raise RuntimeError('Unknown optimizer: \'{}\' in \'{}\''.format(
name, opt_arg))
# positional arguments
pos = []
# keyword arguments
kw = {}
args = args.strip()
if args:
for a in re.split(r',\s*', args):
if a.find('=') >= 0:
key, val = a.split('=')
kw[key] = float(val)
else:
pos.append(float(a))
return opt(*pos, **kw)
def _get_num_split(excp):
"""Get the preferrable number of split from a DataSizeError error"""
ps = excp.pickled_size
mx = excp.max_size
return (ps + mx - 1) // mx
def _slices(excp):
"""Get a list of slices that are expected to fit in a single send/recv."""
ds = excp.dataset_size
nsplit = _get_num_split(excp)
size = math.ceil(ds / nsplit)
return [(b, min(e, ds)) for b, e in
((i * size, (i + 1) * size) for i in range(0, nsplit))]
def main():
parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of sentence pairs in each mini-batch')
parser.add_argument('--bleu', action='store_true', default=False,
help='Report BLEU score')
parser.add_argument('--gpu', '-g', action='store_true',
help='Use GPU')
parser.add_argument('--cache', '-c', default=None,
help='Directory to cache pre-processed dataset')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1024,
help='Number of units')
parser.add_argument('--communicator', default='pure_nccl',
help='Type of communicator')
parser.add_argument('--stop', '-s', type=str, default='15e',
help='Stop trigger (ex. "500i", "15e")')
parser.add_argument('--input', '-i', type=str, default='wmt',
help='Input directory')
parser.add_argument('--optimizer', type=str, default='adam()',
help='Optimizer and its argument')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
args = parser.parse_args()
# Prepare ChainerMN communicator
if args.gpu:
comm = chainermn.create_communicator(args.communicator)
dev = comm.intra_rank
else:
comm = chainermn.create_communicator('naive')
dev = -1
if comm.rank == 0:
print('==========================================')
print('Num process (COMM_WORLD): {}'.format(comm.size))
if args.gpu:
print('Using GPUs')
print('Using {} communicator'.format(args.communicator))
print('Num unit: {}'.format(args.unit))
print('Num Minibatch-size: {}'.format(args.batchsize))
print('==========================================')
# Rank 0 prepares all data
if comm.rank == 0:
if args.cache and not os.path.exists(args.cache):
os.mkdir(args.cache)
# Read source data
bt = time.time()
if args.cache:
cache_file = os.path.join(args.cache, 'source.pickle')
source_vocab, source_data = cached_call(cache_file,
read_source,
args.input, args.cache)
else:
source_vocab, source_data = read_source(args.input, args.cache)
et = time.time()
print('RD source done. {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
# Read target data
bt = time.time()
if args.cache:
cache_file = os.path.join(args.cache, 'target.pickle')
target_vocab, target_data = cached_call(cache_file,
read_target,
args.input, args.cache)
else:
target_vocab, target_data = read_target(args.input, args.cache)
et = time.time()
print('RD target done. {:.3f} [s]'.format(et - bt))
sys.stdout.flush()
print('Original training data size: %d' % len(source_data))
train_data = [(s, t)
for s, t in six.moves.zip(source_data, target_data)
if 0 < len(s) < 50 and 0 < len(t) < 50]
print('Filtered training data size: %d' % len(train_data))
en_path = os.path.join(args.input, 'dev', 'newstest2013.en')
source_data = europal.make_dataset(en_path, source_vocab)
fr_path = os.path.join(args.input, 'dev', 'newstest2013.fr')
target_data = europal.make_dataset(fr_path, target_vocab)
assert len(source_data) == len(target_data)
test_data = [(s, t) for s, t
in six.moves.zip(source_data, target_data)
if 0 < len(s) and 0 < len(t)]
source_ids = {word: index
for index, word in enumerate(source_vocab)}
target_ids = {word: index
for index, word in enumerate(target_vocab)}
else:
# target_data, source_data = None, None
train_data, test_data = None, None
target_ids, source_ids = None, None
# Print GPU id
for i in range(0, comm.size):
if comm.rank == i:
print('Rank {} GPU: {}'.format(comm.rank, dev))
sys.stdout.flush()
comm.mpi_comm.Barrier()
# Broadcast the word -> id dictionaries from rank 0.
source_ids = comm.bcast_obj(source_ids, root=0)
target_ids = comm.bcast_obj(target_ids, root=0)
target_words = {i: w for w, i in target_ids.items()}
source_words = {i: w for w, i in source_ids.items()}
if comm.rank == 0:
print('target_words : {}'.format(len(target_words)))
print('source_words : {}'.format(len(source_words)))
model = Seq2seq(3, len(source_ids), len(target_ids), args.unit)
if dev >= 0:
chainer.cuda.get_device_from_id(dev).use()
model.to_gpu(dev)
# determine the stop trigger
m = re.match(r'^(\d+)e$', args.stop)
if m:
trigger = (int(m.group(1)), 'epoch')
else:
m = re.match(r'^(\d+)i$', args.stop)
if m:
trigger = (int(m.group(1)), 'iteration')
else:
if comm.rank == 0:
sys.stderr.write('Error: unknown stop trigger: {}'.format(
args.stop))
exit(-1)
if comm.rank == 0:
print('Trigger: {}'.format(trigger))
optimizer = chainermn.create_multi_node_optimizer(
create_optimizer(args.optimizer), comm)
optimizer.setup(model)
# Broadcast dataset
# Sanity check of train_data
train_data = chainermn.scatter_dataset(train_data, comm)
test_data = chainermn.scatter_dataset(test_data, comm)
train_iter = chainer.iterators.SerialIterator(train_data,
args.batchsize,
shuffle=False)
updater = training.StandardUpdater(
train_iter, optimizer, converter=convert, device=dev)
trainer = training.Trainer(updater,
trigger,
out=args.out)
trainer.extend(chainermn.create_multi_node_evaluator(
BleuEvaluator(model, test_data, device=dev, comm=comm),
comm))
def translate_one(source, target):
words = europal.split_sentence(source)
print('# source : ' + ' '.join(words))
x = model.xp.array(
[source_ids.get(w, 1) for w in words], numpy.int32)
ys = model.translate([x])[0]
words = [target_words[y] for y in ys]
print('# result : ' + ' '.join(words))
print('# expect : ' + target)
# @chainer.training.make_extension(trigger=(200, 'iteration'))
def translate(trainer):
translate_one(
'Who are we ?',
'Qui sommes-nous?')
translate_one(
'And it often costs over a hundred dollars ' +
'to obtain the required identity card .',
'Or, il en coûte souvent plus de cent dollars ' +
'pour obtenir la carte d\'identité requise.')
source, target = test_data[numpy.random.choice(len(test_data))]
source = ' '.join([source_words.get(i, '') for i in source])
target = ' '.join([target_words.get(i, '') for i in target])
translate_one(source, target)
if comm.rank == 0:
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')),
trigger=(1, 'epoch'))
report = extensions.PrintReport(['epoch',
'iteration',
'main/loss',
'main/perp',
'validation/main/bleu',
'elapsed_time'])
trainer.extend(report, trigger=(1, 'epoch'))
comm.mpi_comm.Barrier()
if comm.rank == 0:
print('start training')
sys.stdout.flush()
trainer.run()
if __name__ == '__main__':
main()
| 18,549 | 34.536398 | 101 | py |
| chainer | chainer-master/examples/chainermn/parallel_convolution/VGG.py |
from __future__ import print_function
import chainer
import chainer.functions as F
import chainer.links as L
import chainermn.functions
import numpy as np
"""
This example is ported from the official Chainer VGG16 example.
https://github.com/chainer/chainer/blob/master/examples/cifar/models/VGG.py
"""
class ParallelConvolution2D(chainer.links.Convolution2D):
def __init__(self, comm, in_channels, out_channels, *args, **kwargs):
self.comm = comm
self.in_channels = in_channels
self.out_channels = out_channels
super(ParallelConvolution2D, self).__init__(
self._in_channel_size, self._out_channel_size, *args, **kwargs)
def _channel_size(self, n_channel):
# Return the size of the corresponding channels.
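# e.g. n_channel=10 over comm.size=4 processes -> sizes 3, 3, 2, 2
# (the first n_channel % n_proc ranks take one extra channel).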
n_proc = self.comm.size
i_proc = self.comm.rank
return n_channel // n_proc + (1 if i_proc < n_channel % n_proc else 0)
@property
def _in_channel_size(self):
return self._channel_size(self.in_channels)
@property
def _out_channel_size(self):
return self._channel_size(self.out_channels)
@property
def _channel_indices(self):
# Return the indices of the corresponding channel.
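# e.g. in_channels=6, comm.size=2: the multiples of comm.size are
# [0, 2, 4], so rank 0 handles channels [0, 2, 4] and rank 1 handles
# [1, 3, 5].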
indices = np.arange(self.in_channels)
indices = indices[indices % self.comm.size == 0] + self.comm.rank
return [i for i in indices if i < self.in_channels]
def __call__(self, x):
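# Each process convolves only its own subset of input channels into its
# own slice of output channels; allgather then exchanges the partial
# outputs so every process can concatenate the full channel dimension.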
x = x[:, self._channel_indices, :, :]
y = super(ParallelConvolution2D, self).__call__(x)
ys = chainermn.functions.allgather(self.comm, y)
return F.concat(ys, axis=1)
class Block(chainer.Chain):
"""A convolution, batch norm, ReLU block.
A block in a feedforward network that performs a
convolution followed by batch normalization followed
by a ReLU activation.
For the convolution operation, a square filter size is used.
The convolution runs either single-process or model-parallel,
depending on the number of input channels.
Args:
comm: ChainerMN communicator.
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
ksize (int): The size of the filter is ksize x ksize.
pad (int): The padding to use for the convolution.
"""
def __init__(self, comm, in_channels, out_channels, ksize, pad=1):
super(Block, self).__init__()
with self.init_scope():
if comm.size <= in_channels:
self.conv = ParallelConvolution2D(comm,
in_channels,
out_channels,
ksize,
pad=pad,
nobias=True)
else:
self.conv = chainer.links.Convolution2D(in_channels,
out_channels,
ksize,
pad=pad,
nobias=True)
self.bn = L.BatchNormalization(out_channels)
def __call__(self, x):
h = self.conv(x)
h = self.bn(h)
return F.relu(h)
class VGG(chainer.Chain):
"""A VGG-style network for very small images.
This model implementation is ported from the official Chainer example:
https://github.com/chainer/chainer/blob/master/examples/cifar/models/VGG.py
Args:
comm: ChainerMN communicator.
class_labels (int): The number of class labels.
"""
def __init__(self, comm, class_labels=10):
super(VGG, self).__init__()
self.comm = comm
with self.init_scope():
self.block1_1 = Block(comm, 3, 64, 3)
self.block1_2 = Block(comm, 64, 64, 3)
self.block2_1 = Block(comm, 64, 128, 3)
self.block2_2 = Block(comm, 128, 128, 3)
self.block3_1 = Block(comm, 128, 256, 3)
self.block3_2 = Block(comm, 256, 256, 3)
self.block3_3 = Block(comm, 256, 256, 3)
self.block4_1 = Block(comm, 256, 512, 3)
self.block4_2 = Block(comm, 512, 512, 3)
self.block4_3 = Block(comm, 512, 512, 3)
self.block5_1 = Block(comm, 512, 512, 3)
self.block5_2 = Block(comm, 512, 512, 3)
self.block5_3 = Block(comm, 512, 512, 3)
self.fc1 = L.Linear(None, 512, nobias=True)
self.bn_fc1 = L.BatchNormalization(512)
self.fc2 = L.Linear(None, class_labels, nobias=True)
def __call__(self, x):
# 64 channel blocks:
h = self.block1_1(x)
h = F.dropout(h, ratio=0.3)
h = self.block1_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 128 channel blocks:
h = self.block2_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block2_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 256 channel blocks:
h = self.block3_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block4_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block5_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.dropout(h, ratio=0.5)
h = self.fc1(h)
h = self.bn_fc1(h)
h = F.relu(h)
h = F.dropout(h, ratio=0.5)
h = self.fc2(h)
return h
| 6,013 | 32.786517 | 79 | py |
| chainer | chainer-master/examples/chainermn/parallel_convolution/train.py |
from __future__ import print_function
import argparse
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
import VGG
import matplotlib
matplotlib.use('Agg')
def main():
parser = argparse.ArgumentParser(description='ChainerMN example: VGG16')
parser.add_argument('--dataset', '-d', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--gpu', '-g', action='store_true', default=False,
help='use GPU')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
args = parser.parse_args()
# Create ChainerMN communicator.
if args.gpu:
comm = chainermn.create_communicator('pure_nccl')
device = comm.rank
else:
comm = chainermn.create_communicator('naive')
device = -1
if comm.rank == 0:
print('GPU: {}'.format(args.gpu))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Load the CIFAR10 dataset
if args.dataset == 'cifar10':
class_labels = 10
train, test = chainer.datasets.get_cifar10()
elif args.dataset == 'cifar100':
class_labels = 100
train, test = chainer.datasets.get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
model = L.Classifier(VGG.VGG(comm, class_labels))
if args.gpu:
# Make a specified GPU current
chainer.cuda.get_device_from_id(device).use()
model.to_gpu() # Copy the model to the GPU
# Setup an optimizer
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
if comm.rank != 0:
train = chainermn.datasets.create_empty_dataset(train)
test = chainermn.datasets.create_empty_dataset(test)
train_iter = chainermn.iterators.create_multi_node_iterator(
chainer.iterators.SerialIterator(train, args.batchsize), comm)
test_iter = chainermn.iterators.create_multi_node_iterator(
chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False),
comm)
# Set up a trainer
updater = training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device))
if comm.rank == 0:
# Dump a computational graph from 'loss' variable
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.DumpGraph('main/loss'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 4,238 | 34.621849 | 76 | py |
| chainer | chainer-master/examples/optuna/chainer_simple.py |
"""
Optuna example that optimizes multi-layer perceptrons using Chainer.
In this example, we optimize the validation accuracy of hand-written digit
recognition using Chainer and MNIST. We optimize the neural network
architecture as well as the optimizer configuration. As it is too time
consuming to use the whole MNIST dataset, we here use a small subset of it.
"""
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from packaging import version
import optuna
from optuna.integration import ChainerPruningExtension
if version.parse(chainer.__version__) < version.parse("4.0.0"):
raise RuntimeError("Chainer>=4.0.0 is required for this example.")
N_TRAIN_EXAMPLES = 3000
N_VALID_EXAMPLES = 1000
BATCHSIZE = 128
EPOCH = 10
def create_model(trial):
# We optimize the numbers of layers and their units.
n_layers = trial.suggest_int("n_layers", 1, 3)
layers = []
for i in range(n_layers):
n_units = int(
trial.suggest_float("n_units_l{}".format(i), 4, 128, log=True)
)
layers.append(L.Linear(None, n_units))
layers.append(F.relu)
layers.append(L.Linear(None, 10))
return chainer.Sequential(*layers)
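# Illustrative sample (hypothetical trial values): n_layers=2 with
# n_units_l0=32 and n_units_l1=16 yields
# Sequential(Linear(None, 32), relu, Linear(None, 16), relu,
#            Linear(None, 10)).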
def create_optimizer(trial, model):
# We optimize the choice of optimizers as well as their parameters.
optimizer_name = trial.suggest_categorical(
"optimizer", ["Adam", "MomentumSGD"]
)
if optimizer_name == "Adam":
adam_alpha = trial.suggest_float("adam_alpha", 1e-5, 1e-1, log=True)
optimizer = chainer.optimizers.Adam(alpha=adam_alpha)
else:
momentum_sgd_lr = trial.suggest_float(
"momentum_sgd_lr", 1e-5, 1e-1, log=True
)
optimizer = chainer.optimizers.MomentumSGD(lr=momentum_sgd_lr)
weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))
return optimizer
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial):
# Model and optimizer
model = L.Classifier(create_model(trial))
optimizer = create_optimizer(trial, model)
# Dataset
rng = np.random.RandomState(0)
train, valid = chainer.datasets.get_mnist()
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
)
valid = chainer.datasets.SubDataset(
valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
)
train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE)
valid_iter = chainer.iterators.SerialIterator(
valid, BATCHSIZE, repeat=False, shuffle=False
)
# Trainer
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
trainer.extend(chainer.training.extensions.Evaluator(valid_iter, model))
log_report_extension = chainer.training.extensions.LogReport(log_name=None)
trainer.extend(
chainer.training.extensions.PrintReport(
[
"epoch",
"main/loss",
"validation/main/loss",
"main/accuracy",
"validation/main/accuracy",
]
)
)
trainer.extend(log_report_extension)
trainer.extend(
ChainerPruningExtension(
trial, "validation/main/accuracy", (1, "epoch")
)
)
# Run!
trainer.run(show_loop_exception_msg=False)
# Set the user attributes such as loss and accuracy for train and
# validation sets
log_last = log_report_extension.log[-1]
for key, value in log_last.items():
trial.set_user_attr(key, value)
# Return the validation accuracy
return log_report_extension.log[-1]["validation/main/accuracy"]
if __name__ == "__main__":
# This verbosity change is just to simplify the script output.
optuna.logging.set_verbosity(optuna.logging.WARNING)
study = optuna.create_study(
direction="maximize", pruner=optuna.pruners.MedianPruner()
)
study.optimize(objective, n_trials=100)
print("Number of finished trials: ", len(study.trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
print(" User attrs:")
for key, value in trial.user_attrs.items():
print(" {}: {}".format(key, value))
| 4,621 | 30.442177 | 84 | py |
| chainer | chainer-master/examples/optuna/chainermn_simple.py |
"""
Optuna example that optimizes multi-layer perceptrons using ChainerMN.
In this example, we optimize the validation accuracy of hand-written digit
recognition using ChainerMN and MNIST, where the architecture of the neural
network is optimized.
ChainerMN and its Optuna integration are supposed to be invoked via MPI. You
can run this example as follows:
$ STORAGE_URL=sqlite:///example.db
$ STUDY_NAME=`optuna create-study --direction maximize \
--storage $STORAGE_URL`
$ mpirun -n 2 -- python chainermn_simple.py $STUDY_NAME $STORAGE_URL
"""
import sys
import chainer
import chainer.functions as F
import chainer.links as L
import chainermn
import numpy as np
import optuna
N_TRAIN_EXAMPLES = 3000
N_VALID_EXAMPLES = 1000
BATCHSIZE = 128
EPOCH = 10
def create_model(trial):
# We optimize the numbers of layers and their units.
n_layers = trial.suggest_int("n_layers", 1, 3)
layers = []
for i in range(n_layers):
n_units = trial.suggest_int("n_units_l{}".format(i), 4, 128, log=True)
layers.append(L.Linear(None, n_units))
layers.append(F.relu)
layers.append(L.Linear(None, 10))
return chainer.Sequential(*layers)
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial, comm):
# Sample an architecture.
model = L.Classifier(create_model(trial))
# Setup optimizer.
optimizer = chainer.optimizers.MomentumSGD()
optimizer.setup(model)
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
# Setup dataset and iterator. Only worker 0 loads the whole dataset.
# The dataset of worker 0 is evenly split and distributed to all workers.
if comm.rank == 0:
train, valid = chainer.datasets.get_mnist()
rng = np.random.RandomState(0)
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
)
valid = chainer.datasets.SubDataset(
valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
)
else:
train, valid = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
valid = chainermn.scatter_dataset(valid, comm)
train_iter = chainer.iterators.SerialIterator(
train, BATCHSIZE, shuffle=True
)
valid_iter = chainer.iterators.SerialIterator(
valid, BATCHSIZE, repeat=False, shuffle=False
)
# Setup trainer.
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
if comm.rank == 0:
trainer.extend(chainer.training.extensions.ProgressBar())
# Run training.
trainer.run()
# Evaluate.
evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
report = evaluator()
return report["main/accuracy"]
if __name__ == "__main__":
# Please make sure common study and storage are shared among nodes.
study_name = sys.argv[1]
storage_url = sys.argv[2]
study = optuna.load_study(study_name, storage_url)
comm = chainermn.create_communicator("naive")
if comm.rank == 0:
print("Study name:", study_name)
print("Storage URL:", storage_url)
print("Number of nodes:", comm.size)
# Run optimization!
chainermn_study = optuna.integration.ChainerMNStudy(study, comm)
chainermn_study.optimize(objective, n_trials=25)
if comm.rank == 0:
print("Number of finished trials: ", len(study.trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
| 3,898 | 30.443548 | 84 | py |
| chainer | chainer-master/examples/optuna/chainer_integration.py |
"""
Optuna example that demonstrates a pruner for Chainer.
In this example, we optimize the hyperparameters of a neural network for
hand-written digit recognition in terms of validation loss. The network is
implemented in Chainer and evaluated on the MNIST dataset. Throughout the training
of neural networks, a pruner observes intermediate results and stops
unpromising trials.
You can run this example as follows:
$ python chainer_integration.py
"""
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from packaging import version
import optuna
if version.parse(chainer.__version__) < version.parse("4.0.0"):
raise RuntimeError("Chainer>=4.0.0 is required for this example.")
N_TRAIN_EXAMPLES = 3000
N_VALID_EXAMPLES = 1000
BATCHSIZE = 128
EPOCH = 10
PRUNER_INTERVAL = 3
def create_model(trial):
# We optimize the numbers of layers and their units.
n_layers = trial.suggest_int("n_layers", 1, 3)
layers = []
for i in range(n_layers):
n_units = trial.suggest_int("n_units_l{}".format(i), 32, 256, log=True)
layers.append(L.Linear(None, n_units))
layers.append(F.relu)
layers.append(L.Linear(None, 10))
return chainer.Sequential(*layers)
# FYI: Objective functions can take additional arguments
# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
def objective(trial):
model = L.Classifier(create_model(trial))
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
rng = np.random.RandomState(0)
train, valid = chainer.datasets.get_mnist()
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
)
valid = chainer.datasets.SubDataset(
valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
)
train_iter = chainer.iterators.SerialIterator(train, BATCHSIZE)
valid_iter = chainer.iterators.SerialIterator(
valid, BATCHSIZE, repeat=False, shuffle=False
)
# Setup trainer.
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
# Add Chainer extension for pruners.
trainer.extend(
optuna.integration.ChainerPruningExtension(
trial, "validation/main/accuracy", (PRUNER_INTERVAL, "epoch")
)
)
trainer.extend(chainer.training.extensions.Evaluator(valid_iter, model))
trainer.extend(
chainer.training.extensions.PrintReport(
[
"epoch",
"main/loss",
"validation/main/loss",
"main/accuracy",
"validation/main/accuracy",
]
)
)
log_report_extension = chainer.training.extensions.LogReport(log_name=None)
trainer.extend(log_report_extension)
# Run training.
# Please set show_loop_exception_msg False to inhibit messages about
# TrialPruned exception. ChainerPruningExtension raises TrialPruned
# exception to stop training, and trainer shows some messages every time
# it receives TrialPruned.
trainer.run(show_loop_exception_msg=False)
# Save loss and accuracy to user attributes.
log_last = log_report_extension.log[-1]
for key, value in log_last.items():
trial.set_user_attr(key, value)
return log_report_extension.log[-1]["validation/main/accuracy"]
if __name__ == "__main__":
study = optuna.create_study(
direction="maximize", pruner=optuna.pruners.MedianPruner()
)
study.optimize(objective, n_trials=100)
pruned_trials = [
t for t in study.trials if t.state == optuna.trial.TrialState.PRUNED
]
complete_trials = [
t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE
]
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
print(" User attrs:")
for key, value in trial.user_attrs.items():
print(" {}: {}".format(key, value))
| 4,372 | 30.919708 | 84 | py |
| chainer | chainer-master/examples/optuna/chainermn_integration.py |
"""
Optuna example that demonstrates a pruner for ChainerMN.
In this example, we optimize the validation accuracy of hand-written digit
recognition using ChainerMN and MNIST, where the architecture of the neural
network is optimized. Throughout the training of neural networks, a pruner observes
intermediate results and stops unpromising trials.
ChainerMN and its Optuna integration are supposed to be invoked via MPI. You
can run this example as follows:
$ STORAGE_URL=sqlite:///example.db
$ STUDY_NAME=`optuna create-study --storage $STORAGE_URL \
--direction maximize`
$ mpirun -n 2 -- python chainermn_integration.py $STUDY_NAME $STORAGE_URL
"""
import sys
import chainer
import chainer.functions as F
import chainer.links as L
import chainermn
import numpy as np
import optuna
N_TRAIN_EXAMPLES = 3000
N_VALID_EXAMPLES = 1000
BATCHSIZE = 128
EPOCH = 10
PRUNER_INTERVAL = 3
def create_model(trial):
# We optimize the numbers of layers and their units.
n_layers = trial.suggest_int("n_layers", 1, 3)
layers = []
for i in range(n_layers):
n_units = trial.suggest_int("n_units_l{}".format(i), 4, 128, log=True)
layers.append(L.Linear(None, n_units))
layers.append(F.relu)
layers.append(L.Linear(None, 10))
return chainer.Sequential(*layers)
def objective(trial, comm):
# Sample an architecture.
model = L.Classifier(create_model(trial))
# Setup optimizer.
optimizer = chainer.optimizers.MomentumSGD()
optimizer.setup(model)
optimizer = chainermn.create_multi_node_optimizer(optimizer, comm)
# Setup dataset and iterator. Only worker 0 loads the whole dataset.
# The dataset of worker 0 is evenly split and distributed to all workers.
if comm.rank == 0:
train, valid = chainer.datasets.get_mnist()
rng = np.random.RandomState(0)
train = chainer.datasets.SubDataset(
train, 0, N_TRAIN_EXAMPLES, order=rng.permutation(len(train))
)
valid = chainer.datasets.SubDataset(
valid, 0, N_VALID_EXAMPLES, order=rng.permutation(len(valid))
)
else:
train, valid = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
valid = chainermn.scatter_dataset(valid, comm)
train_iter = chainer.iterators.SerialIterator(
train, BATCHSIZE, shuffle=True
)
valid_iter = chainer.iterators.SerialIterator(
valid, BATCHSIZE, repeat=False, shuffle=False
)
# Setup trainer.
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (EPOCH, "epoch"))
# Add Chainer extension for pruners.
trainer.extend(
optuna.integration.ChainerPruningExtension(
trial, "validation/main/accuracy", (PRUNER_INTERVAL, "epoch")
)
)
evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
trainer.extend(chainermn.create_multi_node_evaluator(evaluator, comm))
log_report_extension = chainer.training.extensions.LogReport(log_name=None)
trainer.extend(log_report_extension)
if comm.rank == 0:
trainer.extend(chainer.training.extensions.ProgressBar())
# Run training.
# Please set show_loop_exception_msg False to inhibit messages about
# TrialPruned exception. ChainerPruningExtension raises TrialPruned
# exception to stop training, and trainer shows some messages every time
# it receives TrialPruned.
trainer.run(show_loop_exception_msg=False)
# Evaluate.
evaluator = chainer.training.extensions.Evaluator(valid_iter, model)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
report = evaluator()
return report["main/accuracy"]
if __name__ == "__main__":
# Please make sure common study and storage are shared among nodes.
study_name = sys.argv[1]
storage_url = sys.argv[2]
study = optuna.load_study(
study_name, storage_url, pruner=optuna.pruners.MedianPruner()
)
comm = chainermn.create_communicator("naive")
if comm.rank == 0:
print("Study name:", study_name)
print("Storage URL:", storage_url)
print("Number of nodes:", comm.size)
# Run optimization!
chainermn_study = optuna.integration.ChainerMNStudy(study, comm)
chainermn_study.optimize(objective, n_trials=25)
if comm.rank == 0:
pruned_trials = [
t
for t in study.trials
if t.state == optuna.trial.TrialState.PRUNED
]
complete_trials = [
t
for t in study.trials
if t.state == optuna.trial.TrialState.COMPLETE
]
print("Study statistics: ")
print(" Number of finished trials: ", len(study.trials))
print(" Number of pruned trials: ", len(pruned_trials))
print(" Number of complete trials: ", len(complete_trials))
print("Best trial:")
trial = study.best_trial
print(" Value: ", trial.value)
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
| 5,146 | 31.99359 | 79 | py |
| chainer | chainer-master/examples/text_classification/text_datasets.py |
import csv
import glob
import io
import os
import shutil
import sys
import tarfile
import tempfile
import numpy
import chainer
from nlp_utils import make_vocab
from nlp_utils import normalize_text
from nlp_utils import split_text
from nlp_utils import transform_to_array
URL_DBPEDIA = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz' # NOQA
URL_IMDB = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
URL_OTHER_BASE = 'https://raw.githubusercontent.com/harvardnlp/sent-conv-torch/master/data/' # NOQA
def download_dbpedia():
path = chainer.dataset.cached_download(URL_DBPEDIA)
tf = tarfile.open(path, 'r')
return tf
def read_dbpedia(tf, split, shrink=1, char_based=False):
dataset = []
f = tf.extractfile('dbpedia_csv/{}.csv'.format(split))
if sys.version_info > (3, 0):
f = io.TextIOWrapper(f, encoding='utf-8')
for i, (label, title, text) in enumerate(csv.reader(f)):
if i % shrink != 0:
continue
label = int(label) - 1 # Index begins from 1
tokens = split_text(normalize_text(text), char_based)
dataset.append((tokens, label))
return dataset
def get_dbpedia(vocab=None, shrink=1, char_based=False):
tf = download_dbpedia()
print('read dbpedia')
train = read_dbpedia(tf, 'train', shrink=shrink, char_based=char_based)
test = read_dbpedia(tf, 'test', shrink=shrink, char_based=char_based)
if vocab is None:
print('construct vocabulary based on frequency')
vocab = make_vocab(train)
train = transform_to_array(train, vocab)
test = transform_to_array(test, vocab)
return train, test, vocab
def download_imdb():
path = chainer.dataset.cached_download(URL_IMDB)
tf = tarfile.open(path, 'r')
# To read many files fast, the tarball is untarred first
path = tempfile.mkdtemp()
tf.extractall(path)
return path
def read_imdb(path, split,
shrink=1, fine_grained=False, char_based=False):
fg_label_dict = {'1': 0, '2': 0, '3': 1, '4': 1,
'7': 2, '8': 2, '9': 3, '10': 3}
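# fg_label_dict maps the star rating embedded in each file name
# (e.g. '200_8.txt' -> rating 8) to four sentiment buckets:
# 1-2 -> 0, 3-4 -> 1, 7-8 -> 2, 9-10 -> 3; ratings 5-6 do not occur
# in the aclImdb data.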
def read_and_label(posneg, label):
dataset = []
target = os.path.join(path, 'aclImdb', split, posneg, '*')
for i, f_path in enumerate(glob.glob(target)):
if i % shrink != 0:
continue
with io.open(f_path, encoding='utf-8', errors='ignore') as f:
text = f.read().strip()
tokens = split_text(normalize_text(text), char_based)
if fine_grained:
# extract from f_path. e.g. /pos/200_8.txt -> 8
label = fg_label_dict[f_path.split('_')[-1][:-4]]
dataset.append((tokens, label))
else:
dataset.append((tokens, label))
return dataset
pos_dataset = read_and_label('pos', 0)
neg_dataset = read_and_label('neg', 1)
return pos_dataset + neg_dataset
def get_imdb(vocab=None, shrink=1, fine_grained=False,
char_based=False):
tmp_path = download_imdb()
print('read imdb')
train = read_imdb(tmp_path, 'train',
shrink=shrink, fine_grained=fine_grained,
char_based=char_based)
test = read_imdb(tmp_path, 'test',
shrink=shrink, fine_grained=fine_grained,
char_based=char_based)
shutil.rmtree(tmp_path)
if vocab is None:
print('construct vocabulary based on frequency')
vocab = make_vocab(train)
train = transform_to_array(train, vocab)
test = transform_to_array(test, vocab)
return train, test, vocab
def download_other_dataset(name):
if name in ['custrev', 'mpqa', 'rt-polarity', 'subj']:
files = [name + '.all']
elif name == 'TREC':
files = [name + suff for suff in ['.train.all', '.test.all']]
else:
files = [name + suff for suff in ['.train', '.test']]
file_paths = []
for f_name in files:
url = os.path.join(URL_OTHER_BASE, f_name)
path = chainer.dataset.cached_download(url)
file_paths.append(path)
return file_paths
def read_other_dataset(path, shrink=1, char_based=False):
dataset = []
with io.open(path, encoding='utf-8', errors='ignore') as f:
for i, l in enumerate(f):
            if i % shrink != 0 or len(l.strip()) < 3:
continue
label, text = l.strip().split(None, 1)
label = int(label)
tokens = split_text(normalize_text(text), char_based)
dataset.append((tokens, label))
return dataset
def get_other_text_dataset(name, vocab=None, shrink=1,
char_based=False, seed=777):
assert(name in ['TREC', 'stsa.binary', 'stsa.fine',
'custrev', 'mpqa', 'rt-polarity', 'subj'])
datasets = download_other_dataset(name)
train = read_other_dataset(
datasets[0], shrink=shrink, char_based=char_based)
if len(datasets) == 2:
test = read_other_dataset(
datasets[1], shrink=shrink, char_based=char_based)
    else:
        # No predefined test split: shuffle and hold out the last ~10 percent
        # of the data as the test set.
        numpy.random.seed(seed)
        alldata = numpy.random.permutation(train)
        train = alldata[:-len(alldata) // 10]
        test = alldata[-len(alldata) // 10:]
if vocab is None:
        print('construct vocabulary based on frequency')
vocab = make_vocab(train)
train = transform_to_array(train, vocab)
test = transform_to_array(test, vocab)
return train, test, vocab
| 5,533
| 30.988439
| 102
|
py
|
chainer
|
chainer-master/examples/text_classification/run_text_classifier.py
|
#!/usr/bin/env python
import argparse
import json
import sys
import chainer
import numpy
import nets
import nlp_utils
def setup_model(device, model_setup):
    # Note: ``args`` is the module-level namespace parsed in ``__main__``.
    sys.stderr.write(json.dumps(args.__dict__, indent=2) + '\n')
setup = json.load(open(model_setup))
sys.stderr.write(json.dumps(setup, indent=2) + '\n')
vocab = json.load(open(setup['vocab_path']))
n_class = setup['n_class']
# Setup a model
if setup['model'] == 'rnn':
Encoder = nets.RNNEncoder
elif setup['model'] == 'cnn':
Encoder = nets.CNNEncoder
elif setup['model'] == 'bow':
Encoder = nets.BOWMLPEncoder
encoder = Encoder(n_layers=setup['layer'], n_vocab=len(vocab),
n_units=setup['unit'], dropout=setup['dropout'])
model = nets.TextClassifier(encoder, n_class)
chainer.serializers.load_npz(setup['model_path'], model)
model.to_device(device) # Copy the model to the device
return model, vocab, setup
def run_online(device):
# predict labels online
print('Enter inputs for Online Predictions')
for l in sys.stdin:
l = l.strip()
if not l:
print('# blank line')
continue
text = nlp_utils.normalize_text(l)
words = nlp_utils.split_text(text, char_based=setup['char_based'])
xs = nlp_utils.transform_to_array([words], vocab, with_label=False)
xs = nlp_utils.convert_seq(xs, device=device, with_label=False)
with chainer.using_config('train', False), chainer.no_backprop_mode():
prob = model.predict(xs, softmax=True)[0]
answer = int(model.xp.argmax(prob))
score = float(prob[answer])
print('{}\t{:.4f}\t{}'.format(answer, score, ' '.join(words)))
def run_batch(device, batchsize=64):
# predict labels by batch
def predict_batch(words_batch):
xs = nlp_utils.transform_to_array(words_batch, vocab, with_label=False)
xs = nlp_utils.convert_seq(xs, device=device, with_label=False)
with chainer.using_config('train', False), chainer.no_backprop_mode():
probs = model.predict(xs, softmax=True)
answers = model.xp.argmax(probs, axis=1)
scores = probs[model.xp.arange(answers.size), answers].tolist()
for words, answer, score in zip(words_batch, answers, scores):
print('{}\t{:.4f}\t{}'.format(answer, score, ' '.join(words)))
batch = []
print('Enter inputs for Batch Predictions')
for l in sys.stdin:
l = l.strip()
if not l:
if batch:
predict_batch(batch)
batch = []
print('# blank line')
continue
text = nlp_utils.normalize_text(l)
words = nlp_utils.split_text(text, char_based=setup['char_based'])
batch.append(words)
if len(batch) >= batchsize:
predict_batch(batch)
batch = []
if batch:
predict_batch(batch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Chainer example: Text Classification')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--model-setup', required=True,
help='Model setup dictionary.')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
model, vocab, setup = setup_model(device, args.model_setup)
    if device.xp is numpy:
        # Predict interactively, one input line at a time, on CPU.
        run_online(device)
    else:
        # Predict in mini-batches to make better use of the GPU.
        run_batch(device)
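# Example invocation (paths are illustrative): point --model-setup at the
# args.json written by train_text_classifier.py, for example
#   python run_text_classifier.py --device 0 --model-setup result/args.json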
| 4,034
| 34.707965
| 79
|
py
|
chainer
|
chainer-master/examples/text_classification/nlp_utils.py
|
import collections
import io
import numpy
import chainer
def split_text(text, char_based=False):
if char_based:
return list(text)
else:
return text.split()
def normalize_text(text):
return text.strip().lower()
def make_vocab(dataset, max_vocab_size=20000, min_freq=2):
counts = collections.defaultdict(int)
for tokens, _ in dataset:
for token in tokens:
counts[token] += 1
vocab = {'<eos>': 0, '<unk>': 1}
for w, c in sorted(counts.items(), key=lambda x: (-x[1], x[0])):
if len(vocab) >= max_vocab_size or c < min_freq:
break
vocab[w] = len(vocab)
return vocab
def read_vocab_list(path, max_vocab_size=20000):
vocab = {'<eos>': 0, '<unk>': 1}
with io.open(path, encoding='utf-8', errors='ignore') as f:
for l in f:
w = l.strip()
if w not in vocab and w:
vocab[w] = len(vocab)
if len(vocab) >= max_vocab_size:
break
return vocab
def make_array(tokens, vocab, add_eos=True):
unk_id = vocab['<unk>']
eos_id = vocab['<eos>']
ids = [vocab.get(token, unk_id) for token in tokens]
if add_eos:
ids.append(eos_id)
return numpy.array(ids, numpy.int32)
def transform_to_array(dataset, vocab, with_label=True):
if with_label:
return [(make_array(tokens, vocab), numpy.array([cls], numpy.int32))
for tokens, cls in dataset]
else:
return [make_array(tokens, vocab)
for tokens in dataset]
@chainer.dataset.converter()
def convert_seq(batch, device=None, with_label=True):
def to_device_batch(batch):
if device is None:
return batch
src_xp = chainer.backend.get_array_module(*batch)
xp = device.xp
concat = src_xp.concatenate(batch, axis=0)
sections = numpy.cumsum([len(x)
for x in batch[:-1]], dtype=numpy.int32)
concat_dev = chainer.dataset.to_device(device, concat)
batch_dev = xp.split(concat_dev, sections)
return batch_dev
if with_label:
return {'xs': to_device_batch([x for x, _ in batch]),
'ys': to_device_batch([y for _, y in batch])}
else:
return to_device_batch([x for x in batch])
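# Illustrative sketch (toy data, not part of the original utilities): shows how
# make_vocab and transform_to_array work together.
def _demo_vocab_roundtrip():
    dataset = [(['a', 'b', 'a'], 0), (['b', 'c'], 1)]
    vocab = make_vocab(dataset, min_freq=1)
    ids, label = transform_to_array(dataset, vocab)[0]
    assert ids.dtype == numpy.int32 and int(label[0]) == 0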
| 2,316
| 26.915663
| 76
|
py
|
chainer
|
chainer-master/examples/text_classification/train_text_classifier.py
|
#!/usr/bin/env python
import argparse
import datetime
import json
import os
import chainer
from chainer import training
from chainer.training import extensions
import nets
from nlp_utils import convert_seq
import text_datasets
def main():
current_datetime = '{}'.format(datetime.datetime.today())
parser = argparse.ArgumentParser(
description='Chainer example: Text Classification')
parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of examples in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=30,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=300,
help='Number of units')
parser.add_argument('--layer', '-l', type=int, default=1,
help='Number of layers of RNN or MLP following CNN')
parser.add_argument('--dropout', type=float, default=0.4,
help='Dropout rate')
parser.add_argument('--dataset', '-data', default='imdb.binary',
choices=['dbpedia', 'imdb.binary', 'imdb.fine',
'TREC', 'stsa.binary', 'stsa.fine',
'custrev', 'mpqa', 'rt-polarity', 'subj'],
help='Name of dataset.')
parser.add_argument('--model', '-model', default='cnn',
choices=['cnn', 'rnn', 'bow'],
help='Name of encoder model type.')
parser.add_argument('--char-based', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
print(json.dumps(args.__dict__, indent=2))
device = chainer.get_device(args.device)
device.use()
# Load a dataset
if args.dataset == 'dbpedia':
train, test, vocab = text_datasets.get_dbpedia(
char_based=args.char_based)
elif args.dataset.startswith('imdb.'):
train, test, vocab = text_datasets.get_imdb(
fine_grained=args.dataset.endswith('.fine'),
char_based=args.char_based)
elif args.dataset in ['TREC', 'stsa.binary', 'stsa.fine',
'custrev', 'mpqa', 'rt-polarity', 'subj']:
train, test, vocab = text_datasets.get_other_text_dataset(
args.dataset, char_based=args.char_based)
if args.test:
train = train[:100]
test = test[:100]
print('Device: {}'.format(device))
print('# train data: {}'.format(len(train)))
print('# test data: {}'.format(len(test)))
print('# vocab: {}'.format(len(vocab)))
n_class = len(set([int(d[1]) for d in train]))
print('# class: {}'.format(n_class))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Setup a model
if args.model == 'rnn':
Encoder = nets.RNNEncoder
elif args.model == 'cnn':
Encoder = nets.CNNEncoder
elif args.model == 'bow':
Encoder = nets.BOWMLPEncoder
encoder = Encoder(n_layers=args.layer, n_vocab=len(vocab),
n_units=args.unit, dropout=args.dropout)
model = nets.TextClassifier(encoder, n_class)
model.to_device(device) # Copy the model to the device
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(1e-4))
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer,
converter=convert_seq, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(
test_iter, model,
converter=convert_seq, device=device))
# Take a snapshot of Trainer every epoch
trainer.extend(
extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
trigger=(1, 'epoch'))
# Take the best snapshot of the model
best_trigger = training.triggers.MaxValueTrigger(
'validation/main/accuracy', (1, 'epoch'))
trainer.extend(extensions.snapshot_object(
model, 'best_model.npz'),
trigger=best_trigger)
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
    # Save the vocabulary and the model's settings
if not os.path.isdir(args.out):
os.mkdir(args.out)
vocab_path = os.path.join(args.out, 'vocab.json')
with open(vocab_path, 'w') as f:
json.dump(vocab, f)
model_path = os.path.join(args.out, 'best_model.npz')
model_setup = args.__dict__
model_setup['vocab_path'] = vocab_path
model_setup['model_path'] = model_path
model_setup['n_class'] = n_class
model_setup['datetime'] = current_datetime
with open(os.path.join(args.out, 'args.json'), 'w') as f:
json.dump(args.__dict__, f)
if args.resume is not None:
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
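# Example invocation (dataset, model and device values are illustrative):
#   python train_text_classifier.py --device 0 --dataset imdb.binary --model cnn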
| 6,397
| 38.9875
| 77
|
py
|
chainer
|
chainer-master/examples/text_classification/nets.py
|
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import reporter
embed_init = chainer.initializers.Uniform(.25)
def sequence_embed(embed, xs, dropout=0.):
"""Efficient embedding function for variable-length sequences
    This output is equivalent to
    "return [F.dropout(embed(x), ratio=dropout) for x in xs]".
    However, the embedding and dropout are applied in a single call over the
    concatenated sequences, which is faster.
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
xs (list of :class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): i-th element in the list is an input variable,
which is a :math:`(L_i, )`-shaped int array.
dropout (float): Dropout ratio.
Returns:
list of ~chainer.Variable: Output variables. i-th element in the
list is an output variable, which is a :math:`(L_i, N)`-shaped
float array. :math:`(N)` is the number of dimensions of word embedding.
"""
x_len = [len(x) for x in xs]
x_section = numpy.cumsum(x_len[:-1])
ex = embed(F.concat(xs, axis=0))
ex = F.dropout(ex, ratio=dropout)
exs = F.split_axis(ex, x_section, 0)
return exs
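# Illustrative sketch (toy sizes): with dropout 0, the one-shot embedding above
# matches embedding each sequence separately.
def _demo_sequence_embed():
    embed = L.EmbedID(10, 4)
    xs = [numpy.array([1, 2, 3], numpy.int32),
          numpy.array([4, 5], numpy.int32)]
    fast = sequence_embed(embed, xs, dropout=0.)
    slow = [embed(x) for x in xs]
    for a, b in zip(fast, slow):
        assert numpy.allclose(a.array, b.array)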
def block_embed(embed, x, dropout=0.):
"""Embedding function followed by convolution
Args:
embed (callable): A :func:`~chainer.functions.embed_id` function
or :class:`~chainer.links.EmbedID` link.
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable, which
is a :math:`(B, L)`-shaped int array. Its first dimension
:math:`(B)` is assumed to be the *minibatch dimension*.
The second dimension :math:`(L)` is the length of padded
sentences.
dropout (float): Dropout ratio.
Returns:
~chainer.Variable: Output variable. A float array with shape
of :math:`(B, N, L, 1)`. :math:`(N)` is the number of dimensions
of word embedding.
"""
e = embed(x)
e = F.dropout(e, ratio=dropout)
e = F.transpose(e, (0, 2, 1))
e = e[:, :, :, None]
return e
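# Illustrative sketch (assumed toy sizes): a padded (B, L) id batch becomes a
# (B, N, L, 1) float array ready for the 2-D convolutions in CNNEncoder below.
def _demo_block_embed():
    embed = L.EmbedID(10, 4, ignore_label=-1)
    x = numpy.array([[1, 2, 3], [4, 5, -1]], numpy.int32)
    assert block_embed(embed, x).shape == (2, 4, 3, 1)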
class TextClassifier(chainer.Chain):
"""A classifier using a given encoder.
This chain encodes a sentence and classifies it into classes.
Args:
encoder (Link): A callable encoder, which extracts a feature.
Input is a list of variables whose shapes are
"(sentence_length, )".
Output is a variable whose shape is "(batchsize, n_units)".
n_class (int): The number of classes to be predicted.
"""
def __init__(self, encoder, n_class, dropout=0.1):
super(TextClassifier, self).__init__()
with self.init_scope():
self.encoder = encoder
self.output = L.Linear(encoder.out_units, n_class)
self.dropout = dropout
def forward(self, xs, ys):
concat_outputs = self.predict(xs)
concat_truths = F.concat(ys, axis=0)
loss = F.softmax_cross_entropy(concat_outputs, concat_truths)
accuracy = F.accuracy(concat_outputs, concat_truths)
reporter.report({'loss': loss}, self)
reporter.report({'accuracy': accuracy}, self)
return loss
def predict(self, xs, softmax=False, argmax=False):
concat_encodings = F.dropout(self.encoder(xs), ratio=self.dropout)
concat_outputs = self.output(concat_encodings)
if softmax:
return F.softmax(concat_outputs).array
elif argmax:
return self.xp.argmax(concat_outputs.array, axis=1)
else:
return concat_outputs
class RNNEncoder(chainer.Chain):
"""A LSTM-RNN Encoder with Word Embedding.
This model encodes a sentence sequentially using LSTM.
Args:
n_layers (int): The number of LSTM layers.
n_vocab (int): The size of vocabulary.
n_units (int): The number of units of a LSTM layer and word embedding.
dropout (float): The dropout ratio.
"""
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
super(RNNEncoder, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units,
initialW=embed_init)
self.encoder = L.NStepLSTM(n_layers, n_units, n_units, dropout)
self.n_layers = n_layers
self.out_units = n_units
self.dropout = dropout
def forward(self, xs):
exs = sequence_embed(self.embed, xs, self.dropout)
last_h, last_c, ys = self.encoder(None, None, exs)
assert(last_h.shape == (self.n_layers, len(xs), self.out_units))
concat_outputs = last_h[-1]
return concat_outputs
class CNNEncoder(chainer.Chain):
"""A CNN encoder with word embedding.
This model encodes a sentence as a set of n-gram chunks
using convolutional filters.
Following the convolution, max-pooling is applied over time.
Finally, the output is fed into a multilayer perceptron.
Args:
n_layers (int): The number of layers of MLP.
n_vocab (int): The size of vocabulary.
n_units (int): The number of units of MLP and word embedding.
dropout (float): The dropout ratio.
"""
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
out_units = n_units // 3
super(CNNEncoder, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
initialW=embed_init)
self.cnn_w3 = L.Convolution2D(
n_units, out_units, ksize=(3, 1), stride=1, pad=(2, 0),
nobias=True)
self.cnn_w4 = L.Convolution2D(
n_units, out_units, ksize=(4, 1), stride=1, pad=(3, 0),
nobias=True)
self.cnn_w5 = L.Convolution2D(
n_units, out_units, ksize=(5, 1), stride=1, pad=(4, 0),
nobias=True)
self.mlp = MLP(n_layers, out_units * 3, dropout)
self.out_units = out_units * 3
self.dropout = dropout
def forward(self, xs):
x_block = chainer.dataset.convert.concat_examples(xs, padding=-1)
ex_block = block_embed(self.embed, x_block, self.dropout)
h_w3 = F.max(self.cnn_w3(ex_block), axis=2)
h_w4 = F.max(self.cnn_w4(ex_block), axis=2)
h_w5 = F.max(self.cnn_w5(ex_block), axis=2)
h = F.concat([h_w3, h_w4, h_w5], axis=1)
h = F.relu(h)
h = F.dropout(h, ratio=self.dropout)
h = self.mlp(h)
return h
class MLP(chainer.ChainList):
"""A multilayer perceptron.
Args:
        n_layers (int): The number of layers.
n_units (int): The number of units in a hidden or output layer.
dropout (float): The dropout ratio.
"""
def __init__(self, n_layers, n_units, dropout=0.1):
super(MLP, self).__init__()
for i in range(n_layers):
self.add_link(L.Linear(None, n_units))
self.dropout = dropout
self.out_units = n_units
def forward(self, x):
for i, link in enumerate(self.children()):
x = F.dropout(x, ratio=self.dropout)
x = F.relu(link(x))
return x
class BOWEncoder(chainer.Chain):
"""A BoW encoder with word embedding.
This model encodes a sentence as just a set of words by averaging.
Args:
n_vocab (int): The size of vocabulary.
n_units (int): The number of units of word embedding.
dropout (float): The dropout ratio.
"""
def __init__(self, n_vocab, n_units, dropout=0.1):
super(BOWEncoder, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
initialW=embed_init)
self.out_units = n_units
self.dropout = dropout
def forward(self, xs):
x_block = chainer.dataset.convert.concat_examples(xs, padding=-1)
ex_block = block_embed(self.embed, x_block)
x_len = self.xp.array([len(x) for x in xs], numpy.int32)[:, None, None]
h = F.sum(ex_block, axis=2) / x_len
return h
class BOWMLPEncoder(chainer.Chain):
"""A BOW encoder with word embedding and MLP.
This model encodes a sentence as just a set of words by averaging.
Additionally, its output is fed into a multilayer perceptron.
Args:
n_layers (int): The number of layers of MLP.
n_vocab (int): The size of vocabulary.
n_units (int): The number of units of MLP and word embedding.
dropout (float): The dropout ratio.
"""
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1):
super(BOWMLPEncoder, self).__init__()
with self.init_scope():
self.bow_encoder = BOWEncoder(n_vocab, n_units, dropout)
self.mlp_encoder = MLP(n_layers, n_units, dropout)
self.out_units = n_units
def forward(self, xs):
h = self.bow_encoder(xs)
h = self.mlp_encoder(h)
return h
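# Illustrative end-to-end sketch (toy sizes, not taken from the training
# script): wires a BOW+MLP encoder into TextClassifier and runs one forward
# pass computing the softmax cross-entropy loss.
def _demo_text_classifier():
    encoder = BOWMLPEncoder(n_layers=1, n_vocab=10, n_units=8)
    model = TextClassifier(encoder, n_class=2)
    xs = [numpy.array([1, 2, 3], numpy.int32),
          numpy.array([4, 5], numpy.int32)]
    ys = [numpy.array([0], numpy.int32), numpy.array([1], numpy.int32)]
    loss = model(xs, ys)
    assert loss.shape == ()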
| 9,165
| 32.330909
| 79
|
py
|
chainer
|
chainer-master/examples/imagenet/nin.py
|
import chainer
import chainer.functions as F
import chainer.initializers as I
import chainer.links as L
class NIN(chainer.Chain):
"""Network-in-Network example model."""
insize = 227
def __init__(self):
super(NIN, self).__init__()
conv_init = I.HeNormal() # MSRA scaling
with self.init_scope():
self.mlpconv1 = L.MLPConvolution2D(
None, (96, 96, 96), 11, stride=4, conv_init=conv_init)
self.mlpconv2 = L.MLPConvolution2D(
None, (256, 256, 256), 5, pad=2, conv_init=conv_init)
self.mlpconv3 = L.MLPConvolution2D(
None, (384, 384, 384), 3, pad=1, conv_init=conv_init)
self.mlpconv4 = L.MLPConvolution2D(
None, (1024, 1024, 1000), 3, pad=1, conv_init=conv_init)
def forward(self, x, t):
h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2)
h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2)
h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2)
h = self.mlpconv4(F.dropout(h))
h = F.reshape(F.average_pooling_2d(h, 6), (len(x), 1000))
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
| 1,295
| 34.027027
| 74
|
py
|
chainer
|
chainer-master/examples/imagenet/resnext50.py
|
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, stride, 1, initialW=initialW, nobias=True,
groups=groups)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
def forward(self, x):
h1 = F.relu(self.bn1(self.conv1(x)))
h1 = F.relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
def __init__(self, in_size, ch, groups=1):
super(BottleNeckB, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
groups=groups)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(in_size)
def forward(self, x):
h = F.relu(self.bn1(self.conv1(x)))
h = F.relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return F.relu(h + x)
class Block(chainer.ChainList):
def __init__(self, layer, in_size, ch, out_size, stride=2, groups=1):
super(Block, self).__init__()
self.add_link(BottleNeckA(in_size, ch, out_size, stride, groups))
for i in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch, groups))
def forward(self, x):
for f in self.children():
x = f(x)
return x
class ResNeXt50(chainer.Chain):
insize = 224
def __init__(self):
super(ResNeXt50, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
self.res2 = Block(3, 64, 128, 256, 1, groups=32)
self.res3 = Block(4, 256, 256, 512, groups=32)
self.res4 = Block(6, 512, 512, 1024, groups=32)
self.res5 = Block(3, 1024, 1024, 2048, groups=32)
self.fc = L.Linear(2048, 1000)
def forward(self, x, t):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = F.average_pooling_2d(h, 7, stride=1)
h = self.fc(h)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
| 3,601
| 32.351852
| 74
|
py
|
chainer
|
chainer-master/examples/imagenet/train_imagenet.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of the ILSVRC2012 training
and validation images, scale them to 256x256, convert them to RGB, and make
two space-separated list files whose first column is the full path to an image
and whose second column is the zero-origin label (the same format as that used
by Caffe's ImageDataLayer).
"""
import argparse
import numpy as np
import chainer
from chainer import dataset
from chainer import training
from chainer.training import extensions
import chainerx
import dali_util
from dataset_util import PreprocessedDataset
import alex
import googlenet
import googlenetbn
import nin
import resnet50
import resnext50
def main():
archs = {
'alex': alex.Alex,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
'resnet50_nhwc': resnet50.ResNet50_Nhwc,
}
dtypes = {
'float16': np.float16,
'float32': np.float32,
'float64': np.float64,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--dtype', choices=dtypes, help='Specify the dtype '
'used. If not supplied, the default dtype is used')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
parser.add_argument('--dali', action='store_true')
parser.set_defaults(dali=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
# Set the dtype if supplied.
if args.dtype is not None:
chainer.config.dtype = args.dtype
print('Device: {}'.format(device))
print('Dtype: {}'.format(chainer.config.dtype))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
model.to_device(device)
device.use()
# Load the mean file
mean = np.load(args.mean)
if args.dali:
if not dali_util._dali_available:
raise RuntimeError('DALI seems not available on your system.')
if device.xp is not chainer.backend.cuda.cupy:
raise RuntimeError('Using DALI requires GPU device. Please '
'specify it with --device option.')
n_threads = args.loaderjob
if n_threads is None or n_threads <= 0:
n_threads = 1
ch_mean = list(np.average(mean, axis=(1, 2)))
ch_std = [255.0, 255.0, 255.0]
# Setup DALI pipelines
train_pipe = dali_util.DaliPipelineTrain(
args.train, args.root, model.insize, args.batchsize,
n_threads, device.device.id, True, mean=ch_mean, std=ch_std)
val_pipe = dali_util.DaliPipelineVal(
args.val, args.root, model.insize, args.val_batchsize,
n_threads, device.device.id, False, mean=ch_mean, std=ch_std)
train_iter = chainer.iterators.DaliIterator(train_pipe)
val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)
# converter = dali_converter
converter = dali_util.DaliConverter(mean=mean, crop_size=model.insize)
else:
# Load the dataset files
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
val = PreprocessedDataset(args.val, args.root, mean, model.insize,
False)
# These iterators load the images with subprocesses running in parallel
# to the training/validation.
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize, n_processes=args.loaderjob)
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
converter = dataset.concat_examples
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=converter, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
val_interval = (100000, 'iteration')
log_interval = (1000, 'iteration')
if args.test:
val_interval = (1, 'iteration')
log_interval = (1, 'iteration')
trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,
device=device), trigger=val_interval)
# TODO(sonots): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
# (it determines when to emit log rather than when to read observations)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
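# Example invocation (file names and device are illustrative; mean.npy comes
# from compute_mean.py):
#   python train_imagenet.py train.txt val.txt --arch nin --device 0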
| 7,826
| 40.632979
| 79
|
py
|
chainer
|
chainer-master/examples/imagenet/googlenet.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class GoogLeNet(chainer.Chain):
insize = 224
def __init__(self):
super(GoogLeNet, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(None, 64, 7, stride=2, pad=3)
self.conv2_reduce = L.Convolution2D(None, 64, 1)
self.conv2 = L.Convolution2D(None, 192, 3, stride=1, pad=1)
self.inc3a = L.Inception(None, 64, 96, 128, 16, 32, 32)
self.inc3b = L.Inception(None, 128, 128, 192, 32, 96, 64)
self.inc4a = L.Inception(None, 192, 96, 208, 16, 48, 64)
self.inc4b = L.Inception(None, 160, 112, 224, 24, 64, 64)
self.inc4c = L.Inception(None, 128, 128, 256, 24, 64, 64)
self.inc4d = L.Inception(None, 112, 144, 288, 32, 64, 64)
self.inc4e = L.Inception(None, 256, 160, 320, 32, 128, 128)
self.inc5a = L.Inception(None, 256, 160, 320, 32, 128, 128)
self.inc5b = L.Inception(None, 384, 192, 384, 48, 128, 128)
self.loss3_fc = L.Linear(None, 1000)
self.loss1_conv = L.Convolution2D(None, 128, 1)
self.loss1_fc1 = L.Linear(None, 1024)
self.loss1_fc2 = L.Linear(None, 1000)
self.loss2_conv = L.Convolution2D(None, 128, 1)
self.loss2_fc1 = L.Linear(None, 1024)
self.loss2_fc2 = L.Linear(None, 1000)
def forward(self, x, t):
h = F.relu(self.conv1(x))
h = F.local_response_normalization(
F.max_pooling_2d(h, 3, stride=2), n=5)
h = F.relu(self.conv2_reduce(h))
h = F.relu(self.conv2(h))
h = F.max_pooling_2d(
F.local_response_normalization(h, n=5), 3, stride=2)
h = self.inc3a(h)
h = self.inc3b(h)
h = F.max_pooling_2d(h, 3, stride=2)
h = self.inc4a(h)
l = F.average_pooling_2d(h, 5, stride=3)
l = F.relu(self.loss1_conv(l))
l = F.relu(self.loss1_fc1(l))
l = self.loss1_fc2(l)
loss1 = F.softmax_cross_entropy(l, t)
h = self.inc4b(h)
h = self.inc4c(h)
h = self.inc4d(h)
l = F.average_pooling_2d(h, 5, stride=3)
l = F.relu(self.loss2_conv(l))
l = F.relu(self.loss2_fc1(l))
l = self.loss2_fc2(l)
loss2 = F.softmax_cross_entropy(l, t)
h = self.inc4e(h)
h = F.max_pooling_2d(h, 3, stride=2)
h = self.inc5a(h)
h = self.inc5b(h)
h = F.average_pooling_2d(h, 7, stride=1)
h = self.loss3_fc(F.dropout(h, 0.4))
loss3 = F.softmax_cross_entropy(h, t)
        # Combine the two auxiliary-classifier losses with the main loss.
        loss = 0.3 * (loss1 + loss2) + loss3
accuracy = F.accuracy(h, t)
chainer.report({
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
'accuracy': accuracy
}, self)
return loss
| 2,938
| 33.576471
| 71
|
py
|
chainer
|
chainer-master/examples/imagenet/compute_mean.py
|
#!/usr/bin/env python
import argparse
import sys
import numpy as np
import chainer
def compute_mean(dataset):
print('compute mean image')
sum_image = 0
N = len(dataset)
for i, (image, _) in enumerate(dataset):
sum_image += image
sys.stderr.write('{} / {}\r'.format(i, N))
sys.stderr.flush()
sys.stderr.write('\n')
return sum_image / N
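# The resulting mean has the same (3, H, W) shape as the dataset images and is
# saved below so that train_imagenet.py can load it through its --mean option.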
def main():
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('dataset',
help='Path to training image-label list file')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--output', '-o', default='mean.npy',
help='path to output mean array')
args = parser.parse_args()
dataset = chainer.datasets.LabeledImageDataset(args.dataset, args.root)
mean = compute_mean(dataset)
np.save(args.output, mean)
if __name__ == '__main__':
main()
| 1,037
| 25.615385
| 77
|
py
|
chainer
|
chainer-master/examples/imagenet/googlenetbn.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class GoogLeNetBN(chainer.Chain):
"""New GoogLeNet of BatchNormalization version."""
insize = 224
def __init__(self):
super(GoogLeNetBN, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
None, 64, 7, stride=2, pad=3, nobias=True)
self.norm1 = L.BatchNormalization(64)
self.conv2 = L.Convolution2D(None, 192, 3, pad=1, nobias=True)
self.norm2 = L.BatchNormalization(192)
self.inc3a = L.InceptionBN(
None, 64, 64, 64, 64, 96, 'avg', 32)
self.inc3b = L.InceptionBN(
None, 64, 64, 96, 64, 96, 'avg', 64)
self.inc3c = L.InceptionBN(
None, 0, 128, 160, 64, 96, 'max', stride=2)
self.inc4a = L.InceptionBN(
None, 224, 64, 96, 96, 128, 'avg', 128)
self.inc4b = L.InceptionBN(
None, 192, 96, 128, 96, 128, 'avg', 128)
self.inc4c = L.InceptionBN(
None, 160, 128, 160, 128, 160, 'avg', 128)
self.inc4d = L.InceptionBN(
None, 96, 128, 192, 160, 192, 'avg', 128)
self.inc4e = L.InceptionBN(
None, 0, 128, 192, 192, 256, 'max', stride=2)
self.inc5a = L.InceptionBN(
None, 352, 192, 320, 160, 224, 'avg', 128)
self.inc5b = L.InceptionBN(
None, 352, 192, 320, 192, 224, 'max', 128)
self.out = L.Linear(None, 1000)
self.conva = L.Convolution2D(None, 128, 1, nobias=True)
self.norma = L.BatchNormalization(128)
self.lina = L.Linear(None, 1024, nobias=True)
self.norma2 = L.BatchNormalization(1024)
self.outa = L.Linear(None, 1000)
self.convb = L.Convolution2D(None, 128, 1, nobias=True)
self.normb = L.BatchNormalization(128)
self.linb = L.Linear(None, 1024, nobias=True)
self.normb2 = L.BatchNormalization(1024)
self.outb = L.Linear(None, 1000)
def forward(self, x, t):
h = F.max_pooling_2d(
F.relu(self.norm1(self.conv1(x))), 3, stride=2, pad=1)
h = F.max_pooling_2d(
F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)
h = self.inc3a(h)
h = self.inc3b(h)
h = self.inc3c(h)
h = self.inc4a(h)
a = F.average_pooling_2d(h, 5, stride=3)
a = F.relu(self.norma(self.conva(a)))
a = F.relu(self.norma2(self.lina(a)))
a = self.outa(a)
loss1 = F.softmax_cross_entropy(a, t)
h = self.inc4b(h)
h = self.inc4c(h)
h = self.inc4d(h)
b = F.average_pooling_2d(h, 5, stride=3)
b = F.relu(self.normb(self.convb(b)))
b = F.relu(self.normb2(self.linb(b)))
b = self.outb(b)
loss2 = F.softmax_cross_entropy(b, t)
h = self.inc4e(h)
h = self.inc5a(h)
h = F.average_pooling_2d(self.inc5b(h), 7)
h = self.out(h)
loss3 = F.softmax_cross_entropy(h, t)
loss = 0.3 * (loss1 + loss2) + loss3
accuracy = F.accuracy(h, t)
chainer.report({
'loss': loss,
'loss1': loss1,
'loss2': loss2,
'loss3': loss3,
'accuracy': accuracy,
}, self)
return loss
| 3,429
| 34
| 74
|
py
|
chainer
|
chainer-master/examples/imagenet/resnet50.py
|
# Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
def __init__(self, in_size, ch, out_size, stride=2, groups=1):
super(BottleNeckA, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, stride, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
groups=groups)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, out_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(out_size)
self.conv4 = L.Convolution2D(
in_size, out_size, 1, stride, 0,
initialW=initialW, nobias=True)
self.bn4 = L.BatchNormalization(out_size)
def forward(self, x):
h1 = F.relu(self.bn1(self.conv1(x)))
h1 = F.relu(self.bn2(self.conv2(h1)))
h1 = self.bn3(self.conv3(h1))
h2 = self.bn4(self.conv4(x))
return F.relu(h1 + h2)
class BottleNeckB(chainer.Chain):
def __init__(self, in_size, ch, groups=1):
super(BottleNeckB, self).__init__()
initialW = initializers.HeNormal()
with self.init_scope():
self.conv1 = L.Convolution2D(
in_size, ch, 1, 1, 0, initialW=initialW, nobias=True)
self.bn1 = L.BatchNormalization(ch)
self.conv2 = L.Convolution2D(
ch, ch, 3, 1, 1, initialW=initialW, nobias=True,
groups=groups)
self.bn2 = L.BatchNormalization(ch)
self.conv3 = L.Convolution2D(
ch, in_size, 1, 1, 0, initialW=initialW, nobias=True)
self.bn3 = L.BatchNormalization(in_size)
def forward(self, x):
h = F.relu(self.bn1(self.conv1(x)))
h = F.relu(self.bn2(self.conv2(h)))
h = self.bn3(self.conv3(h))
return F.relu(h + x)
class Block(chainer.ChainList):
def __init__(self, layer, in_size, ch, out_size, stride=2, groups=1):
super(Block, self).__init__()
self.add_link(BottleNeckA(in_size, ch, out_size, stride, groups))
for i in range(layer - 1):
self.add_link(BottleNeckB(out_size, ch, groups))
def forward(self, x):
for f in self.children():
x = f(x)
return x
class ResNet50(chainer.Chain):
insize = 224
def __init__(self):
super(ResNet50, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = L.Linear(2048, 1000)
def forward(self, x, t):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = F.average_pooling_2d(h, 7, stride=1)
h = self.fc(h)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
class ResNet50_Nhwc(chainer.Chain):
insize = 224
def __init__(self):
super(ResNet50_Nhwc, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(
3, 64, 7, 2, 3, initialW=initializers.HeNormal())
self.bn1 = L.BatchNormalization(64)
with chainer.using_config('compute_mode', 'cudnn_fast'):
self.res2 = Block(3, 64, 64, 256, 1)
self.res3 = Block(4, 256, 128, 512)
self.res4 = Block(6, 512, 256, 1024)
self.res5 = Block(3, 1024, 512, 2048)
self.fc = L.Linear(2048, 1000)
def forward(self, x, t):
h = self.bn1(self.conv1(x))
h = F.max_pooling_2d(F.relu(h), 3, stride=2)
h = h.as_layout(chainer.memory_layouts.CUDNN_CHANNEL_LAST_X)
h = self.res2(h)
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = h.as_layout(None)
h = F.average_pooling_2d(h, 7, stride=1)
h = self.fc(h)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
| 4,807
| 32.158621
| 74
|
py
|
chainer
|
chainer-master/examples/imagenet/dali_util.py
|
import numpy as np
try:
from nvidia import dali
from nvidia.dali import ops
from nvidia.dali import pipeline
_dali_available = True
except ImportError:
    # Dummy stand-in so the pipeline classes below can still be defined
    # when DALI is not installed.
    class pipeline(object):
        Pipeline = object
_dali_available = False
import chainer
from chainer.backends import cuda
import ctypes
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class DaliPipelineTrain(pipeline.Pipeline):
def __init__(self, file_list, file_root, crop_size,
batch_size, n_threads, device_id,
random_shuffle=True, seed=-1, mean=None, std=None,
n_samples=None):
super(DaliPipelineTrain, self).__init__(batch_size, n_threads,
device_id, seed=seed)
crop_size = _pair(crop_size)
if mean is None:
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
if std is None:
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
if n_samples is None:
initial_fill = 4096
else:
initial_fill = min(4096, n_samples)
self.loader = ops.FileReader(file_root=file_root, file_list=file_list,
random_shuffle=random_shuffle,
initial_fill=initial_fill)
self.decode = ops.HostDecoder()
self.resize = ops.Resize(device='gpu', resize_x=256, resize_y=256)
# self.hue = ops.Hue(device="gpu")
# self.bright = ops.Brightness(device="gpu")
# self.cntrst = ops.Contrast(device="gpu")
# self.rotate = ops.Rotate(device="gpu")
# self.jitter = ops.Jitter(device="gpu")
random_area = (crop_size[0] / 256.0) * (crop_size[1] / 256.0)
random_area = _pair(random_area)
random_aspect_ratio = _pair(1.0)
self.rrcrop = ops.RandomResizedCrop(
device='gpu', size=crop_size, random_area=random_area,
random_aspect_ratio=random_aspect_ratio)
self.cmnorm = ops.CropMirrorNormalize(
device='gpu', crop=list(crop_size), mean=mean, std=std)
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
jpegs, labels = self.loader()
images = self.decode(jpegs)
images = self.resize(images.gpu())
# images = self.hue(images, hue=ops.Uniform(range=(-3.0, 3.0))())
# images = self.bright(images,
# brightness=ops.Uniform(range=(0.9, 1.1))())
# images = self.cntrst(images,
# contrast=ops.Uniform(range=(0.9, 1.1))())
# images = self.rotate(images,
# angle=ops.Uniform(range=(-5.0, 5.0))())
# images = self.jitter(images)
images = self.rrcrop(images)
images = self.cmnorm(images, mirror=self.coin())
return images, labels
class DaliPipelineVal(pipeline.Pipeline):
def __init__(self, file_list, file_root, crop_size,
batch_size, n_threads, device_id,
random_shuffle=False, seed=-1, mean=None, std=None,
n_samples=None):
super(DaliPipelineVal, self).__init__(batch_size, n_threads,
device_id, seed=seed)
crop_size = _pair(crop_size)
if mean is None:
mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
if std is None:
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
if n_samples is None:
initial_fill = 512
else:
initial_fill = min(512, n_samples)
self.loader = ops.FileReader(file_root=file_root, file_list=file_list,
random_shuffle=random_shuffle,
initial_fill=initial_fill)
self.decode = ops.HostDecoder()
self.resize = ops.Resize(device='gpu', resize_x=256, resize_y=256)
self.cmnorm = ops.CropMirrorNormalize(
device='gpu', crop=list(crop_size), mean=mean, std=std)
def define_graph(self):
jpegs, labels = self.loader()
images = self.decode(jpegs)
images = self.resize(images.gpu())
images = self.cmnorm(images)
return images, labels
class DaliConverter(object):
def __init__(self, mean, crop_size):
self.mean = mean
self.crop_size = crop_size
ch_mean = np.average(mean, axis=(1, 2))
perturbation = (mean - ch_mean.reshape(3, 1, 1)) / 255.0
perturbation = perturbation[:3, :crop_size, :crop_size].astype(
chainer.get_dtype())
self.perturbation = perturbation.reshape(1, 3, crop_size, crop_size)
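    # Note: the DALI pipelines already subtract the per-channel mean and divide
    # by 255, so subtracting this residual perturbation in __call__ reproduces
    # PreprocessedDataset's (image - full mean image) / 255 preprocessing.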
def __call__(self, inputs, device=None):
"""Convert DALI arrays to Numpy/CuPy arrays"""
xp = chainer.backend.get_array_module(self.perturbation)
if xp is not cuda.cupy:
self.perturbation = cuda.to_gpu(self.perturbation, device)
outputs = []
for i in range(len(inputs)):
x = inputs[i].as_tensor()
if (isinstance(x, dali.backend_impl.TensorCPU)):
x = np.array(x)
if x.ndim == 2 and x.shape[1] == 1:
x = x.squeeze(axis=1)
if device is not None and device >= 0:
x = cuda.to_gpu(x, device)
elif (isinstance(x, dali.backend_impl.TensorGPU)):
x_cupy = cuda.cupy.empty(shape=x.shape(), dtype=x.dtype())
# Synchronization is necessary here to avoid data corruption
# because DALI and CuPy will use different CUDA streams.
cuda.cupy.cuda.runtime.deviceSynchronize()
# copy data from DALI array to CuPy array
x.copy_to_external(ctypes.c_void_p(x_cupy.data.ptr))
cuda.cupy.cuda.runtime.deviceSynchronize()
x = x_cupy.astype(chainer.get_dtype())
if self.perturbation is not None:
x = x - self.perturbation
if device is not None and device < 0:
x = cuda.to_cpu(x)
else:
raise ValueError('Unexpected object')
outputs.append(x)
return tuple(outputs)
def dali_converter(inputs, device=None):
"""Convert DALI arrays to Numpy/CuPy arrays"""
outputs = []
for i in range(len(inputs)):
x = inputs[i].as_tensor()
if (isinstance(x, dali.backend_impl.TensorCPU)):
x = np.array(x)
if x.ndim == 2 and x.shape[1] == 1:
x = x.squeeze(axis=1)
if device is not None and device >= 0:
x = cuda.to_gpu(x, device)
elif (isinstance(x, dali.backend_impl.TensorGPU)):
x_cupy = cuda.cupy.empty(shape=x.shape(), dtype=x.dtype())
# Synchronization is necessary here to avoid data corruption
# because DALI and CuPy will use different CUDA streams.
cuda.cupy.cuda.runtime.deviceSynchronize()
# copy data from DALI array to CuPy array
x.copy_to_external(ctypes.c_void_p(x_cupy.data.ptr))
cuda.cupy.cuda.runtime.deviceSynchronize()
x = x_cupy.astype(chainer.get_dtype())
if device is not None and device < 0:
x = cuda.to_cpu(x)
else:
raise ValueError('Unexpected object')
outputs.append(x)
return tuple(outputs)
| 7,470
| 38.951872
| 78
|
py
|
chainer
|
chainer-master/examples/imagenet/dataset_util.py
|
import random
import chainer
from chainer import dataset
from chainer import datasets
class PreprocessedDataset(dataset.DatasetMixin):
def __init__(self, path, root, mean, crop_size, random=True):
self.base = datasets.LabeledImageDataset(path, root)
self.mean = mean.astype(chainer.get_dtype())
self.crop_size = crop_size
self.random = random
def __len__(self):
return len(self.base)
def get_example(self, i):
# It reads the i-th image/label pair and return a preprocessed image.
        # It applies the following preprocessing steps:
# - Cropping (random or center rectangular)
# - Random flip
# - Scaling to [0, 1] value
crop_size = self.crop_size
image, label = self.base[i]
_, h, w = image.shape
if self.random:
# Randomly crop a region and flip the image
top = random.randint(0, h - crop_size - 1)
left = random.randint(0, w - crop_size - 1)
if random.randint(0, 1):
image = image[:, :, ::-1]
else:
# Crop the center
top = (h - crop_size) // 2
left = (w - crop_size) // 2
bottom = top + crop_size
right = left + crop_size
image = image[:, top:bottom, left:right]
image -= self.mean[:, top:bottom, left:right]
image *= (1.0 / 255.0) # Scale to [0, 1]
return image, label
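# Illustrative usage (the list file, mean file and crop size are placeholders;
# the crop size should match the chosen model's insize):
#   mean = numpy.load('mean.npy')  # produced by compute_mean.py
#   train = PreprocessedDataset('train.txt', '.', mean, 227)
#   image, label = train[0]  # mean-subtracted CHW float image scaled by 1/255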
| 1,459
| 30.06383
| 77
|
py
|
chainer
|
chainer-master/examples/imagenet/alex.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class Alex(chainer.Chain):
"""Single-GPU AlexNet without partition toward the channel axis."""
insize = 227
def __init__(self):
super(Alex, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(None, 96, 11, stride=4)
self.conv2 = L.Convolution2D(None, 256, 5, pad=2)
self.conv3 = L.Convolution2D(None, 384, 3, pad=1)
self.conv4 = L.Convolution2D(None, 384, 3, pad=1)
self.conv5 = L.Convolution2D(None, 256, 3, pad=1)
self.fc6 = L.Linear(None, 4096)
self.fc7 = L.Linear(None, 4096)
self.fc8 = L.Linear(None, 1000)
def forward(self, x, t):
h = F.max_pooling_2d(F.local_response_normalization(
F.relu(self.conv1(x))), 3, stride=2)
h = F.max_pooling_2d(F.local_response_normalization(
F.relu(self.conv2(h))), 3, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv4(h))
h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
h = F.dropout(F.relu(self.fc6(h)))
h = F.dropout(F.relu(self.fc7(h)))
h = self.fc8(h)
loss = F.softmax_cross_entropy(h, t)
chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
return loss
| 1,369
| 34.128205
| 74
|
py
|
chainer
|
chainer-master/examples/imagenet/train_imagenet_data_parallel.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from LSVRC2012 dataset
with multiple GPUs using data parallelism.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images, scale them to 256x256 and convert them to RGB, and make
two lists of space-separated CSV whose first column is full path to image and
second column is zero-origin label (this format is same as that used by Caffe's
ImageDataLayer).
You need to install chainer with NCCL to run this example.
Please see https://github.com/nvidia/nccl#build--run .
"""
import argparse
import sys
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
from chainer.training import updaters
import chainerx
from dataset_util import PreprocessedDataset
import alex
import googlenet
import googlenetbn
import nin
import resnet50
import resnext50
def main():
archs = {
'alex': alex.Alex,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(),
default='nin', help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--devices', '-d', type=str, nargs='*',
default=['0', '1', '2', '3'],
help='Device specifiers. Either ChainerX device '
'specifiers or integers. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpus', '-g', dest='devices',
type=int, nargs='?', const=0,
help='GPU IDs (negative value indicates CPU)')
args = parser.parse_args()
devices = tuple([chainer.get_device(d) for d in args.devices])
if any(device.xp is chainerx for device in devices):
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
# Load the datasets and mean file
mean = np.load(args.mean)
train = PreprocessedDataset(
args.train, args.root, mean, model.insize)
val = PreprocessedDataset(
args.val, args.root, mean, model.insize, False)
# These iterators load the images with subprocesses running in parallel to
# the training/validation.
train_iters = [
chainer.iterators.MultiprocessIterator(i,
args.batchsize,
n_processes=args.loaderjob)
for i in chainer.datasets.split_dataset_n_random(train, len(devices))]
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = updaters.MultiprocessParallelUpdater(train_iters, optimizer,
devices=devices)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
if args.test:
val_interval = 5, 'epoch'
log_interval = 1, 'epoch'
else:
val_interval = 100000, 'iteration'
log_interval = 1000, 'iteration'
trainer.extend(extensions.Evaluator(val_iter, model, device=devices[0]),
trigger=val_interval)
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
# (it determines when to emit log rather than when to read observations)
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=2))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
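# Example invocation (file names and device ids are illustrative):
#   python train_imagenet_data_parallel.py train.txt val.txt -d 0 1 2 3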
| 6,197
| 40.046358
| 79
|
py
|
chainer
|
chainer-master/examples/imagenet/.testdata/replacements/train_imagenet.py
|
#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of the ILSVRC2012 training
and validation images, scale them to 256x256, convert them to RGB, and make
two space-separated list files whose first column is the full path to an image
and whose second column is the zero-origin label (the same format as that used
by Caffe's ImageDataLayer).
"""
import argparse
import numpy as np
import chainer
from chainer import dataset
from chainer import training
from chainer.training import extensions
import chainerx
import dali_util
from dataset_util import PreprocessedDataset
import alex
import googlenet
import googlenetbn
import nin
import resnet50
import resnext50
def main():
archs = {
'alex': alex.Alex,
'googlenet': googlenet.GoogLeNet,
'googlenetbn': googlenetbn.GoogLeNetBN,
'nin': nin.NIN,
'resnet50': resnet50.ResNet50,
'resnext50': resnext50.ResNeXt50,
'resnet50_nhwc': resnet50.ResNet50_Nhwc,
}
dtypes = {
'float16': np.float16,
'float32': np.float32,
'float64': np.float64,
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
help='Convnet architecture')
parser.add_argument('--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument('--dtype', choices=dtypes, help='Specify the dtype '
'used. If not supplied, the default dtype is used')
parser.add_argument('--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--initmodel',
help='Initialize the model from given file')
parser.add_argument('--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument('--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument('--resume', '-r', default='',
help='Initialize the trainer from given file')
parser.add_argument('--out', '-o', default='result',
help='Output directory')
parser.add_argument('--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.add_argument('--test', action='store_true')
parser.set_defaults(test=False)
parser.add_argument('--dali', action='store_true')
parser.set_defaults(dali=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
# Set the dtype if supplied.
if args.dtype is not None:
        chainer.config.dtype = dtypes[args.dtype]
print('Device: {}'.format(device))
print('Dtype: {}'.format(chainer.config.dtype))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Initialize the model to train
model = archs[args.arch]()
if args.initmodel:
print('Load model from {}'.format(args.initmodel))
chainer.serializers.load_npz(args.initmodel, model)
model.to_device(device)
device.use()
# Load the mean file
mean = np.load(args.mean)
if args.dali:
if not dali_util._dali_available:
            raise RuntimeError(
                'DALI does not seem to be available on your system.')
if device.xp is not chainer.backend.cuda.cupy:
raise RuntimeError('Using DALI requires GPU device. Please '
'specify it with --device option.')
n_threads = args.loaderjob
if n_threads is None or n_threads <= 0:
n_threads = 1
ch_mean = list(np.average(mean, axis=(1, 2)))
ch_std = [255.0, 255.0, 255.0]
# Setup DALI pipelines
train_pipe = dali_util.DaliPipelineTrain(
args.train, args.root, model.insize, args.batchsize,
n_threads, device.device.id, True, mean=ch_mean, std=ch_std)
val_pipe = dali_util.DaliPipelineVal(
args.val, args.root, model.insize, args.val_batchsize,
n_threads, device.device.id, False, mean=ch_mean, std=ch_std)
train_iter = chainer.iterators.DaliIterator(train_pipe)
val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)
# converter = dali_converter
converter = dali_util.DaliConverter(mean=mean, crop_size=model.insize)
else:
# Load the dataset files
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
val = PreprocessedDataset(args.val, args.root, mean, model.insize,
False)
# These iterators load the images with subprocesses running in parallel
# to the training/validation.
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize, n_processes=args.loaderjob)
val_iter = chainer.iterators.MultiprocessIterator(
val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
converter = dataset.concat_examples
# Set up an optimizer
optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=converter, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)
val_interval = (100000, 'iteration')
log_interval = (1000, 'iteration')
if args.test:
val_interval = (1, 'iteration')
log_interval = (1, 'iteration')
# BEGIN ADDITIONAL TEST CODE
val_interval = (1, 'iteration')
log_interval = (1, 'iteration')
# END ADDITIONAL TEST CODE
trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,
device=device), trigger=val_interval)
# TODO(sonots): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(extensions.snapshot_object(
model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
# Be careful to pass the interval directly to LogReport
    # (it determines when to emit a log entry rather than when to read
    # observations)
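    # For example, LogReport(trigger=(1000, 'iteration')) accumulates
    # observations every iteration but writes one aggregated entry per
    # 1000 iterations; PrintReport below reuses the same trigger, so the
    # console output stays in sync with the log.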
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'lr'
]), trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 7,962
| 40.473958
| 79
|
py
|
chainer
|
chainer-master/examples/sentiment/download.py
|
#!/usr/bin/env python
import os
import os.path
from six.moves.urllib import request
import zipfile
request.urlretrieve(
'https://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip',
'trainDevTestTrees_PTB.zip')
zf = zipfile.ZipFile('trainDevTestTrees_PTB.zip')
for name in zf.namelist():
(dirname, filename) = os.path.split(name)
if not filename == '':
zf.extract(name, '.')
| 403
| 24.25
| 67
|
py
|
chainer
|
chainer-master/examples/sentiment/thin_stack.py
|
import chainer
from chainer import backend
from chainer.utils import type_check
class ThinStackSet(chainer.Function):
"""Set values to a thin stack."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
s_type, i_type, v_type = in_types
type_check.expect(
s_type.dtype.kind == 'f',
i_type.dtype.kind == 'i',
s_type.dtype == v_type.dtype,
s_type.ndim == 3,
i_type.ndim == 1,
v_type.ndim == 2,
s_type.shape[0] >= i_type.shape[0],
i_type.shape[0] == v_type.shape[0],
s_type.shape[2] == v_type.shape[1],
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
stack, indices, values = inputs
stack[xp.arange(len(indices)), indices] = values
return stack,
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
_, indices, _ = inputs
g = grads[0]
gv = g[xp.arange(len(indices)), indices]
g[xp.arange(len(indices)), indices] = 0
return g, None, gv
def thin_stack_set(s, i, x):
return ThinStackSet()(s, i, x)
class ThinStackGet(chainer.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
s_type, i_type = in_types
type_check.expect(
s_type.dtype.kind == 'f',
i_type.dtype.kind == 'i',
s_type.ndim == 3,
i_type.ndim == 1,
s_type.shape[0] >= i_type.shape[0],
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
stack, indices = inputs
return stack[xp.arange(len(indices)), indices], stack
def backward(self, inputs, grads):
xp = backend.get_array_module(*inputs)
stack, indices = inputs
g, gs = grads
if gs is None:
gs = xp.zeros_like(stack)
if g is not None:
gs[xp.arange(len(indices)), indices] += g
return gs, None
def thin_stack_get(s, i):
return ThinStackGet()(s, i)
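# A minimal usage sketch (not part of the original example): write one row
# per batch element into a (batch, depth, units) stack, then read it back.
if __name__ == '__main__':
    import numpy
    s = chainer.Variable(numpy.zeros((2, 3, 4), numpy.float32))
    i = chainer.Variable(numpy.array([0, 2], numpy.int32))
    v = chainer.Variable(numpy.ones((2, 4), numpy.float32))
    t = thin_stack_set(s, i, v)   # writes the rows in-place, reusing s's array
    x, t = thin_stack_get(t, i)   # reads the same rows back
    assert (x.array == 1).all()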
| 2,137
| 27.506667
| 61
|
py
|
chainer
|
chainer-master/examples/sentiment/test_thin_stack.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer import cuda
from chainer import testing
from chainer.testing import attr
import thin_stack
class TestThinStackGet(unittest.TestCase):
shape = (3, 4, 5)
dtype = numpy.float32
def setUp(self):
self.s = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.i = numpy.array([0, 1, 0], numpy.int32)
x_shape = (len(self.i), self.shape[-1])
self.gx = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.gt = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
def check_forward(self, s_data, i_data):
xp = backend.get_array_module(s_data)
s_old = s_data.copy()
s = chainer.Variable(s_data)
i = chainer.Variable(i_data)
x, t = thin_stack.thin_stack_get(s, i)
expect = s_old[xp.arange(len(i_data)), i_data]
testing.assert_allclose(x.array, expect)
# Thin stack reuses the same ndarray.
self.assertIs(s_data, t.array)
def test_forward_cpu(self):
self.check_forward(self.s, self.i)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.s), cuda.to_gpu(self.i))
def check_backward(self, s_data, i_data, gx_data, gt_data):
        # We cannot use the check_backward method because a thin stack
        # reuses its ndarray.
gt_old = gt_data.copy()
s = chainer.Variable(s_data)
i = chainer.Variable(i_data)
x, t = thin_stack.thin_stack_get(s, i)
x.grad = gx_data
t.grad = gt_data
t.backward()
for j, ind in enumerate(i_data):
for k in range(self.shape[1]):
if k == ind:
testing.assert_allclose(
s.grad[j, k], gt_old[j, k] + gx_data[j])
else:
testing.assert_allclose(
s.grad[j, k], gt_old[j, k])
self.assertIsNone(i.grad)
        # Thin stack reuses the same gradient array.
self.assertIs(s.grad, gt_data)
def test_backward_cpu(self):
self.check_backward(self.s, self.i, self.gx, self.gt)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.s), cuda.to_gpu(self.i), cuda.to_gpu(self.gx),
cuda.to_gpu(self.gt))
class TestThinStackSet(unittest.TestCase):
shape = (3, 4, 5)
dtype = numpy.float32
def setUp(self):
self.s = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.i = numpy.array([0, 1, 0], numpy.int32)
x_shape = (len(self.i), self.shape[-1])
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
self.gt = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
def check_forward(self, s_data, i_data, x_data):
xp = backend.get_array_module(s_data)
s = chainer.Variable(s_data)
i = chainer.Variable(i_data)
x = chainer.Variable(x_data)
t = thin_stack.thin_stack_set(s, i, x)
testing.assert_allclose(
t.array[xp.arange(len(i_data)), i_data], x_data)
# Thin stack reuses the same ndarray.
self.assertIs(s_data, t.array)
def test_forward_cpu(self):
self.check_forward(self.s, self.i, self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.s), cuda.to_gpu(self.i), cuda.to_gpu(self.x))
def check_backward(self, s_data, i_data, x_data, gt_data):
        # We cannot use the check_backward method because a thin stack
        # reuses its ndarray.
gt_old = gt_data.copy()
s = chainer.Variable(s_data)
i = chainer.Variable(i_data)
x = chainer.Variable(x_data)
t = thin_stack.thin_stack_set(s, i, x)
t.grad = gt_data
t.backward()
for j, ind in enumerate(i_data):
testing.assert_allclose(x.grad[j], gt_old[j, ind])
for k in range(self.shape[1]):
if k == ind:
testing.assert_allclose(s.grad[j, k], 0)
else:
testing.assert_allclose(s.grad[j, k], gt_old[j, k])
self.assertIsNone(i.grad)
        # Thin stack reuses the same gradient array.
self.assertIs(s.grad, gt_data)
def test_backward_cpu(self):
self.check_backward(self.s, self.i, self.x, self.gt)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.s), cuda.to_gpu(self.i), cuda.to_gpu(self.x),
cuda.to_gpu(self.gt))
testing.run_module(__name__, __file__)
| 4,647
| 30.835616
| 77
|
py
|
chainer
|
chainer-master/examples/sentiment/train_recursive_minibatch.py
|
import argparse
import warnings
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import data
import thin_stack
def linearize_tree(vocab, root, xp=numpy):
# Left node indexes for all parent nodes
lefts = []
# Right node indexes for all parent nodes
rights = []
# Parent node indexes
dests = []
# All labels to predict for all parent nodes
labels = []
# All words of leaf nodes
words = []
# Leaf labels
leaf_labels = []
# Current leaf node index
leaf_index = [0]
def traverse_leaf(exp):
if len(exp) == 2:
label, leaf = exp
if leaf not in vocab:
vocab[leaf] = len(vocab)
words.append(vocab[leaf])
leaf_labels.append(int(label))
leaf_index[0] += 1
elif len(exp) == 3:
_, left, right = exp
traverse_leaf(left)
traverse_leaf(right)
traverse_leaf(root)
# Current internal node index
node_index = leaf_index
leaf_index = [0]
def traverse_node(exp):
if len(exp) == 2:
leaf_index[0] += 1
return leaf_index[0] - 1
elif len(exp) == 3:
label, left, right = exp
l = traverse_node(left)
r = traverse_node(right)
lefts.append(l)
rights.append(r)
dests.append(node_index[0])
labels.append(int(label))
node_index[0] += 1
return node_index[0] - 1
traverse_node(root)
assert len(lefts) == len(words) - 1
return {
'lefts': xp.array(lefts, xp.int32),
'rights': xp.array(rights, xp.int32),
'dests': xp.array(dests, xp.int32),
'words': xp.array(words, xp.int32),
'labels': xp.array(labels, xp.int32),
'leaf_labels': xp.array(leaf_labels, xp.int32),
}
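# For example (illustrative): a tree with two leaves and one root yields
# words for the two leaves, lefts = [0], rights = [1] and dests = [2];
# leaves occupy thin-stack slots 0..n-1 and internal nodes are written to
# slots n, n+1, ... in evaluation order.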
@chainer.dataset.converter()
def convert(batch, device):
return tuple(
[device.send(d['lefts']) for d in batch] +
[device.send(d['rights']) for d in batch] +
[device.send(d['dests']) for d in batch] +
[device.send(d['labels']) for d in batch] +
[device.send(d['words']) for d in batch] +
[device.send(d['leaf_labels']) for d in batch]
)
class ThinStackRecursiveNet(chainer.Chain):
def __init__(self, n_vocab, n_units, n_label):
super(ThinStackRecursiveNet, self).__init__(
embed=L.EmbedID(n_vocab, n_units),
l=L.Linear(n_units * 2, n_units),
w=L.Linear(n_units, n_label))
self.n_units = n_units
def leaf(self, x):
return self.embed(x)
def node(self, left, right):
return F.tanh(self.l(F.concat((left, right))))
def label(self, v):
return self.w(v)
def forward(self, *inputs):
batch = len(inputs) // 6
lefts = inputs[0: batch]
rights = inputs[batch: batch * 2]
dests = inputs[batch * 2: batch * 3]
labels = inputs[batch * 3: batch * 4]
sequences = inputs[batch * 4: batch * 5]
leaf_labels = inputs[batch * 5: batch * 6]
inds = numpy.argsort([-len(l) for l in lefts])
# Sort all arrays in descending order and transpose them
lefts = F.transpose_sequence([lefts[i] for i in inds])
rights = F.transpose_sequence([rights[i] for i in inds])
dests = F.transpose_sequence([dests[i] for i in inds])
labels = F.transpose_sequence([labels[i] for i in inds])
sequences = F.transpose_sequence([sequences[i] for i in inds])
leaf_labels = F.transpose_sequence(
[leaf_labels[i] for i in inds])
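        # After transpose_sequence, element t of each list gathers the t-th
        # step of every still-active example, so the loops below advance
        # all trees in lock step over the shared thin stack.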
batch = len(inds)
maxlen = len(sequences)
loss = 0
count = 0
correct = 0
dtype = chainer.get_dtype()
stack = self.xp.zeros((batch, maxlen * 2, self.n_units), dtype)
for i, (word, label) in enumerate(zip(sequences, leaf_labels)):
batch = word.shape[0]
es = self.leaf(word)
ds = self.xp.full((batch,), i, self.xp.int32)
y = self.label(es)
loss += F.softmax_cross_entropy(y, label, normalize=False) * batch
count += batch
predict = self.xp.argmax(y.array, axis=1)
correct += (predict == label.array).sum()
stack = thin_stack.thin_stack_set(stack, ds, es)
for left, right, dest, label in zip(lefts, rights, dests, labels):
l, stack = thin_stack.thin_stack_get(stack, left)
r, stack = thin_stack.thin_stack_get(stack, right)
o = self.node(l, r)
y = self.label(o)
batch = l.shape[0]
loss += F.softmax_cross_entropy(y, label, normalize=False) * batch
count += batch
predict = self.xp.argmax(y.array, axis=1)
correct += (predict == label.array).sum()
stack = thin_stack.thin_stack_set(stack, dest, o)
loss /= count
reporter.report({'loss': loss}, self)
reporter.report({'total': count}, self)
reporter.report({'correct': correct}, self)
return loss
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--epoch', '-e', default=400, type=int,
help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=30, type=int,
help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=25,
help='learning minibatch size')
parser.add_argument('--label', '-l', type=int, default=5,
help='number of labels')
parser.add_argument('--epocheval', '-p', type=int, default=5,
help='number of epochs per evaluation')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
vocab = {}
max_size = None
train_trees = data.read_corpus('trees/train.txt', max_size)
test_trees = data.read_corpus('trees/test.txt', max_size)
device = chainer.get_device(args.device)
device.use()
xp = device.xp
train_data = [linearize_tree(vocab, t, xp) for t in train_trees]
train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
test_data = [linearize_tree(vocab, t, xp) for t in test_trees]
test_iter = chainer.iterators.SerialIterator(
test_data, args.batchsize, repeat=False, shuffle=False)
model = ThinStackRecursiveNet(len(vocab), args.unit, args.label)
model.to_device(device)
optimizer = chainer.optimizers.AdaGrad(0.1)
optimizer.setup(model)
updater = training.StandardUpdater(
train_iter, optimizer, converter=convert, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'))
trainer.extend(
extensions.Evaluator(
test_iter, model, converter=convert, device=device),
trigger=(args.epocheval, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.MicroAverage(
'main/correct', 'main/total', 'main/accuracy'))
trainer.extend(extensions.MicroAverage(
'validation/main/correct', 'validation/main/total',
'validation/main/accuracy'))
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.run()
if __name__ == '__main__':
main()
| 8,430
| 32.724
| 78
|
py
|
chainer
|
chainer-master/examples/sentiment/data.py
|
import codecs
import re
class SexpParser(object):
def __init__(self, line):
self.tokens = re.findall(r'\(|\)|[^\(\) ]+', line)
self.pos = 0
def parse(self):
assert self.pos < len(self.tokens)
token = self.tokens[self.pos]
assert token != ')'
self.pos += 1
if token == '(':
children = []
while True:
assert self.pos < len(self.tokens)
if self.tokens[self.pos] == ')':
self.pos += 1
break
else:
children.append(self.parse())
return children
else:
return token
def read_corpus(path, max_size):
with codecs.open(path, encoding='utf-8') as f:
trees = []
for line in f:
line = line.strip()
tree = SexpParser(line).parse()
trees.append(tree)
if max_size and len(trees) >= max_size:
break
return trees
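# A minimal usage sketch (illustrative input, not from the dataset):
# parsing a labeled binary tree in S-expression form.
if __name__ == '__main__':
    tree = SexpParser('(3 (2 good) (3 movie))').parse()
    print(tree)  # ['3', ['2', 'good'], ['3', 'movie']]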
| 1,018
| 23.261905
| 58
|
py
|
chainer
|
chainer-master/examples/sentiment/train_sentiment.py
|
#!/usr/bin/env python
"""Sample script of recursive neural networks for sentiment analysis.
This is Socher's simple recursive model, not the RNTN:
R. Socher, C. Lin, A. Y. Ng, and C.D. Manning.
Parsing Natural Scenes and Natural Language with Recursive Neural Networks.
in ICML2011.
"""
import argparse
import collections
import warnings
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
import chainer.links as L
from chainer import optimizers
from chainer import reporter
from chainer.training import extensions
import data
def convert_tree(vocab, exp):
assert isinstance(exp, list) and (len(exp) == 2 or len(exp) == 3)
if len(exp) == 2:
label, leaf = exp
if leaf not in vocab:
vocab[leaf] = len(vocab)
return {'label': int(label), 'node': vocab[leaf]}
elif len(exp) == 3:
label, left, right = exp
node = (convert_tree(vocab, left), convert_tree(vocab, right))
return {'label': int(label), 'node': node}
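# For example (illustrative): the S-expression ['3', ['2', 'good'],
# ['3', 'movie']] becomes {'label': 3, 'node': ({'label': 2, 'node': 0},
# {'label': 3, 'node': 1})}, with vocab growing as 'good' -> 0, 'movie' -> 1.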
class RecursiveNet(chainer.Chain):
def __init__(self, n_vocab, n_units, n_label):
super(RecursiveNet, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units)
self.l = L.Linear(n_units * 2, n_units)
self.w = L.Linear(n_units, n_label)
def forward(self, x):
accum_loss = 0.0
result = collections.defaultdict(lambda: 0)
        # calculate each tree in batch ``x`` separately, because the trees
        # cannot be processed as a single batch
for tree in x:
loss, _ = self.traverse(tree, evaluate=result)
accum_loss += loss
reporter.report({'loss': accum_loss}, self)
reporter.report({'total': result['total_node']}, self)
reporter.report({'correct': result['correct_node']}, self)
return accum_loss
def leaf(self, x):
return self.embed(x)
def node(self, left, right):
return F.tanh(self.l(F.concat((left, right))))
def label(self, v):
return self.w(v)
def traverse(self, node, evaluate, root=True):
if isinstance(node['node'], int):
# leaf node
word = self.xp.array([node['node']], np.int32)
loss = 0
v = self.leaf(word)
else:
# internal node
left_node, right_node = node['node']
left_loss, left = self.traverse(
left_node, evaluate=evaluate, root=False)
right_loss, right = self.traverse(
right_node, evaluate=evaluate, root=False)
v = self.node(left, right)
loss = left_loss + right_loss
y = self.label(v)
label = self.xp.array([node['label']], np.int32)
t = chainer.Variable(label, requires_grad=False)
loss += F.softmax_cross_entropy(y, t)
predict = cuda.to_cpu(y.array.argmax(1))
if predict[0] == node['label']:
evaluate['correct_node'] += 1
evaluate['total_node'] += 1
if root:
if predict[0] == node['label']:
evaluate['correct_root'] += 1
evaluate['total_root'] += 1
return loss, v
def evaluate(model, test_trees):
result = collections.defaultdict(lambda: 0)
with chainer.using_config('train', False), chainer.no_backprop_mode():
for tree in test_trees:
model.traverse(tree, evaluate=result)
acc_node = 100.0 * result['correct_node'] / result['total_node']
acc_root = 100.0 * result['correct_root'] / result['total_root']
    print(' Node accuracy: {0:.2f} % ({1:,d}/{2:,d})'.format(
        acc_node, result['correct_node'], result['total_node']))
    print(' Root accuracy: {0:.2f} % ({1:,d}/{2:,d})'.format(
        acc_root, result['correct_root'], result['total_root']))
@chainer.dataset.converter()
def convert(batch, _):
return batch
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result', type=str,
                        help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--epoch', '-e', default=400, type=int,
help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=30, type=int,
help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=25,
help='learning minibatch size')
parser.add_argument('--label', '-l', type=int, default=5,
help='number of labels')
parser.add_argument('--epocheval', '-p', type=int, default=5,
help='number of epochs per evaluation')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == np.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
n_epoch = args.epoch # number of epochs
n_units = args.unit # number of units per layer
batchsize = args.batchsize # minibatch size
n_label = args.label # number of labels
epoch_per_eval = args.epocheval # number of epochs per evaluation
if args.test:
max_size = 10
else:
max_size = None
device = chainer.get_device(args.device)
device.use()
vocab = {}
train_data = [convert_tree(vocab, tree)
for tree in data.read_corpus('trees/train.txt', max_size)]
train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
validation_data = [convert_tree(vocab, tree)
for tree in data.read_corpus('trees/dev.txt', max_size)]
validation_iter = chainer.iterators.SerialIterator(
validation_data, batchsize, repeat=False, shuffle=False)
test_data = [convert_tree(vocab, tree)
for tree in data.read_corpus('trees/test.txt', max_size)]
model = RecursiveNet(len(vocab), n_units, n_label)
model.to_device(device)
# Setup optimizer
optimizer = optimizers.AdaGrad(lr=0.1)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001))
# Setup updater
updater = chainer.training.StandardUpdater(
train_iter, optimizer, converter=convert, device=device)
# Setup trainer and run
trainer = chainer.training.Trainer(updater, (n_epoch, 'epoch'), args.out)
trainer.extend(
extensions.Evaluator(
validation_iter, model, converter=convert, device=device),
trigger=(epoch_per_eval, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.MicroAverage(
'main/correct', 'main/total', 'main/accuracy'))
trainer.extend(extensions.MicroAverage(
'validation/main/correct', 'validation/main/total',
'validation/main/accuracy'))
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(
extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
trigger=(epoch_per_eval, 'epoch'))
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume is not None:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
print('Test evaluation')
evaluate(model, test_data)
if __name__ == '__main__':
main()
| 8,186
| 34.288793
| 79
|
py
|
chainer
|
chainer-master/examples/pos/postagging.py
|
import argparse
import collections
import warnings
import nltk
import numpy
import six
import chainer
from chainer import datasets
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
class CRF(chainer.Chain):
def __init__(self, n_vocab, n_pos):
super(CRF, self).__init__()
with self.init_scope():
self.feature = L.EmbedID(n_vocab, n_pos)
self.crf = L.CRF1d(n_pos)
def forward(self, xs, ys):
# h[i] is feature vector for each batch of words.
hs = [self.feature(x) for x in xs]
loss = self.crf(hs, ys, transpose=True)
reporter.report({'loss': loss}, self)
# To predict labels, call argmax method.
_, predict = self.crf.argmax(hs, transpose=True)
correct = 0
total = 0
for y, p in six.moves.zip(ys, predict):
            # NOTE: y is an ndarray here because it is not passed through
            # transpose_sequence
correct += self.xp.sum(y == p)
total += len(y)
reporter.report({'correct': correct}, self)
reporter.report({'total': total}, self)
return loss
def argmax(self, xs):
hs = [self.feature(x) for x in xs]
return self.crf.argmax(hs, transpose=True)
@chainer.dataset.converter()
def convert(batch, device):
sentences = [
chainer.dataset.to_device(device, sentence) for sentence, _ in batch]
poses = [chainer.dataset.to_device(device, pos) for _, pos in batch]
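    # Returning a dict makes StandardUpdater call the model as
    # model(xs=..., ys=...), matching CRF.forward's keyword arguments.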
return {'xs': sentences, 'ys': poses}
def main():
parser = argparse.ArgumentParser(
description='Chainer example: POS-tagging')
parser.add_argument('--batchsize', '-b', type=int, default=30,
help='Number of images in each mini batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
vocab = collections.defaultdict(lambda: len(vocab))
pos_vocab = collections.defaultdict(lambda: len(pos_vocab))
# Convert word sequences and pos sequences to integer sequences.
nltk.download('brown')
data = []
for sentence in nltk.corpus.brown.tagged_sents():
xs = numpy.array([vocab[lex] for lex, _ in sentence], numpy.int32)
ys = numpy.array([pos_vocab[pos] for _, pos in sentence], numpy.int32)
data.append((xs, ys))
print('# of sentences: {}'.format(len(data)))
print('# of words: {}'.format(len(vocab)))
print('# of pos: {}'.format(len(pos_vocab)))
device = chainer.get_device(args.device)
device.use()
model = CRF(len(vocab), len(pos_vocab))
model.to_device(device)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
test_data, train_data = datasets.split_dataset_random(
data, len(data) // 10, seed=0)
train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test_data, args.batchsize,
repeat=False, shuffle=False)
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=convert, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
evaluator = extensions.Evaluator(
test_iter, model, device=device, converter=convert)
    # Validate only once every 1000 iterations
trainer.extend(evaluator, trigger=(1000, 'iteration'))
trainer.extend(extensions.LogReport(trigger=(100, 'iteration')),
trigger=(100, 'iteration'))
trainer.extend(
extensions.MicroAverage(
'main/correct', 'main/total', 'main/accuracy'))
trainer.extend(
extensions.MicroAverage(
'validation/main/correct', 'validation/main/total',
'validation/main/accuracy'))
trainer.extend(
extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
trigger=(100, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=10))
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 5,391
| 34.946667
| 78
|
py
|
chainer
|
chainer-master/examples/tests/test_mnist.py
|
import os
import test_utils
EXAMPLES_ROOT = test_utils.EXAMPLES_ROOT
def test_1():
root_dir = os.path.join(EXAMPLES_ROOT, 'mnist')
output_evaluator = test_utils.TemplateOutputEvaluator(
b'''\
Device: @numpy
# unit: 10
# Minibatch-size: 100
# epoch: 1
epoch main/loss validation/main/loss main/accuracy validation/main/accuracy elapsed_time
0 {b0 } {d0 } {e0 }
1 {a1 } {b1 } {c1 } {d1 } {e1 }
''', # NOQA
b0=(float, lambda x: 0.0 < x),
d0=(float, lambda x: 0.00 <= x <= 1.00),
e0=(float, lambda x: 0. < x < 100.),
a1=(float, lambda x: 0.6 < x < 1.5),
b1=(float, lambda x: 0.3 < x < 0.6),
c1=(float, lambda x: 0.62 < x < 0.82),
d1=(float, lambda x: 0.83 < x < 0.98),
e1=(float, lambda x: 0. < x < 100.),
)
with test_utils.ExampleRunner(root_dir) as r:
r.run(
'train_mnist.py',
[
'--epoch', '1',
'--unit', '10',
],
output_evaluator=output_evaluator)
| 1,192
| 27.404762
| 99
|
py
|
chainer
|
chainer-master/examples/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/examples/tests/test_imagenet.py
|
import os
import test_utils
EXAMPLES_ROOT = test_utils.EXAMPLES_ROOT
def test_1():
root_dir = os.path.join(EXAMPLES_ROOT, 'imagenet')
image_root_dir = os.path.join(root_dir, '.testdata/images')
list_file = os.path.join(root_dir, '.testdata/data.txt')
with test_utils.ExampleRunner(root_dir) as r:
r.run(
'compute_mean.py',
[
'-R', image_root_dir,
list_file,
])
r.run(
'train_imagenet.py',
[
'-a', 'nin', '-R', image_root_dir,
'-B', '1', '-b', '1', '-E', '1',
list_file,
list_file,
])
| 692
| 21.354839
| 63
|
py
|
chainer
|
chainer-master/examples/vae/train_vae.py
|
#!/usr/bin/env python
"""Chainer example: train a VAE on MNIST
"""
import argparse
import os
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt  # NOQA
import numpy as np
import chainer
from chainer import training
from chainer.training import extensions
import chainerx
import net
def main():
parser = argparse.ArgumentParser(description='Chainer example: VAE')
parser.add_argument('--initmodel', '-m', type=str,
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', type=str,
help='Resume the optimization from snapshot')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='results',
help='Directory to output the result')
parser.add_argument('--epoch', '-e', default=100, type=int,
help='number of epochs to learn')
parser.add_argument('--dim-z', '-z', default=20, type=int,
help='dimension of encoded vector')
parser.add_argument('--dim-h', default=500, type=int,
help='dimension of hidden layer')
parser.add_argument('--beta', default=1.0, type=float,
help='Regularization coefficient for '
'the second term of ELBO bound')
parser.add_argument('--k', '-k', default=1, type=int,
help='Number of Monte Carlo samples used in '
'encoded vector')
parser.add_argument('--binary', action='store_true',
help='Use binarized MNIST')
parser.add_argument('--batch-size', '-b', type=int, default=100,
help='learning minibatch size')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == np.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
device.use()
print('Device: {}'.format(device))
print('# dim z: {}'.format(args.dim_z))
print('# Minibatch-size: {}'.format(args.batch_size))
print('# epoch: {}'.format(args.epoch))
print('')
# Prepare VAE model, defined in net.py
encoder = net.make_encoder(784, args.dim_z, args.dim_h)
decoder = net.make_decoder(784, args.dim_z, args.dim_h,
binary_check=args.binary)
prior = net.make_prior(args.dim_z)
avg_elbo_loss = net.AvgELBOLoss(encoder, decoder, prior,
beta=args.beta, k=args.k)
avg_elbo_loss.to_device(device)
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(avg_elbo_loss)
# If initial parameters are given, initialize the model with them.
if args.initmodel is not None:
chainer.serializers.load_npz(args.initmodel, avg_elbo_loss)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(withlabel=False)
if args.binary:
# Binarize dataset
train = (train >= 0.5).astype(np.float32)
test = (test >= 0.5).astype(np.float32)
if args.test:
train, _ = chainer.datasets.split_dataset(train, 100)
test, _ = chainer.datasets.split_dataset(test, 100)
train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
test_iter = chainer.iterators.SerialIterator(test, args.batch_size,
repeat=False, shuffle=False)
    # Set up an updater. StandardUpdater can take an explicit loss function
    # to use in training via the 'loss_func' option
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device, loss_func=avg_elbo_loss)
# Set up the trainer and extensions.
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(
test_iter, avg_elbo_loss, device=device))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/reconstr', 'main/kl_penalty', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
# If snapshot file is given, resume the training.
if args.resume is not None:
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
# Save images for demonstration
save_images(device, encoder, decoder, train, test, prior, args.out)
# Saves a 3x3 tiled image
def save3x3(x, filename):
numpy_device = chainer.get_device('@numpy')
fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)
for ai, xi in zip(ax.flatten(), x):
im = xi.reshape(28, 28)
im = numpy_device.send(im)
ai.imshow(im)
fig.savefig(filename)
# Saves reconstruction images using:
# - training image samples
# - test image samples
# - randomly sampled values of z
def save_images(device, encoder, decoder, train, test, prior, out_dir):
# Training samples
train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]
x = device.send(np.asarray(train[train_ind]))
with chainer.using_config('train', False):
with chainer.no_backprop_mode():
z = encoder(x).mean
y = decoder(z, inference=True).mean
y = y.array
save3x3(x, os.path.join(out_dir, 'train'))
save3x3(y, os.path.join(out_dir, 'train_reconstructed'))
# Test samples
test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]
x = device.send(np.asarray(test[test_ind]))
with chainer.using_config('train', False):
with chainer.no_backprop_mode():
z = encoder(x).mean
y = decoder(z, inference=True).mean
y = y.array
save3x3(x, os.path.join(out_dir, 'test'))
save3x3(y, os.path.join(out_dir, 'test_reconstructed'))
# Draw images from 9 randomly sampled values of z
z = prior().sample(9)
with chainer.using_config('train', False):
with chainer.no_backprop_mode():
y = decoder(z, inference=True).mean
y = y.array
save3x3(y, os.path.join(out_dir, 'sampled'))
if __name__ == '__main__':
main()
| 7,083
| 37.291892
| 79
|
py
|
chainer
|
chainer-master/examples/vae/net.py
|
import numpy as np
import chainer
import chainer.distributions as D
import chainer.functions as F
import chainer.links as L
from chainer import reporter
class AvgELBOLoss(chainer.Chain):
"""Loss function of VAE.
The loss value is equal to ELBO (Evidence Lower Bound)
multiplied by -1.
Args:
encoder (chainer.Chain): A neural network which outputs variational
posterior distribution q(z|x) of a latent variable z given
an observed variable x.
decoder (chainer.Chain): A neural network which outputs conditional
distribution p(x|z) of the observed variable x given
the latent variable z.
prior (chainer.Chain): A prior distribution over the latent variable z.
beta (float): Usually this is 1.0. Can be changed to control the
second term of ELBO bound, which works as regularization.
k (int): Number of Monte Carlo samples used in encoded vector.
"""
def __init__(self, encoder, decoder, prior, beta=1.0, k=1):
super(AvgELBOLoss, self).__init__()
self.beta = beta
self.k = k
with self.init_scope():
self.encoder = encoder
self.decoder = decoder
self.prior = prior
def __call__(self, x):
q_z = self.encoder(x)
z = q_z.sample(self.k)
p_x = self.decoder(z)
p_z = self.prior()
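        # Monte Carlo estimate of the ELBO: E_q[log p(x|z)] is approximated
        # by averaging log p(x|z_k) over the self.k samples drawn above,
        # while KL(q(z|x) || p(z)) is computed analytically below.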
reconstr = F.mean(p_x.log_prob(
F.broadcast_to(x[None, :], (self.k,) + x.shape)))
kl_penalty = F.mean(chainer.kl_divergence(q_z, p_z))
loss = - (reconstr - self.beta * kl_penalty)
reporter.report({'loss': loss}, self)
reporter.report({'reconstr': reconstr}, self)
reporter.report({'kl_penalty': kl_penalty}, self)
return loss
class Encoder(chainer.Chain):
def __init__(self, n_in, n_latent, n_h):
super(Encoder, self).__init__()
with self.init_scope():
self.linear = L.Linear(n_in, n_h)
self.mu = L.Linear(n_h, n_latent)
self.ln_sigma = L.Linear(n_h, n_latent)
def forward(self, x):
h = F.tanh(self.linear(x))
mu = self.mu(h)
ln_sigma = self.ln_sigma(h) # log(sigma)
return D.Independent(D.Normal(loc=mu, log_scale=ln_sigma))
class Decoder(chainer.Chain):
def __init__(self, n_in, n_latent, n_h, binary_check=False):
super(Decoder, self).__init__()
self.binary_check = binary_check
with self.init_scope():
self.linear = L.Linear(n_latent, n_h)
self.output = L.Linear(n_h, n_in)
def forward(self, z, inference=False):
n_batch_axes = 1 if inference else 2
h = F.tanh(self.linear(z, n_batch_axes=n_batch_axes))
h = self.output(h, n_batch_axes=n_batch_axes)
return D.Independent(
D.Bernoulli(logit=h, binary_check=self.binary_check),
reinterpreted_batch_ndims=1)
class Prior(chainer.Link):
def __init__(self, n_latent):
super(Prior, self).__init__()
dtype = chainer.get_dtype()
self.loc = np.zeros(n_latent, dtype)
self.scale = np.ones(n_latent, dtype)
self.register_persistent('loc')
self.register_persistent('scale')
def forward(self):
return D.Independent(
D.Normal(self.loc, scale=self.scale), reinterpreted_batch_ndims=1)
def make_encoder(n_in, n_latent, n_h):
return Encoder(n_in, n_latent, n_h)
def make_decoder(n_in, n_latent, n_h, binary_check=False):
return Decoder(n_in, n_latent, n_h, binary_check=binary_check)
def make_prior(n_latent):
return Prior(n_latent)
| 3,656
| 30.8
| 79
|
py
|
chainer
|
chainer-master/examples/wavenet/generate.py
|
import argparse
import chainer
import chainerx
import librosa
import numpy
import tqdm
from net import UpsampleNet
from net import WaveNet
from utils import MuLaw
from utils import Preprocess
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', required=True, help='input file')
parser.add_argument('--output', '-o', default='result.wav', help='output file')
parser.add_argument('--model', '-m', required=True,
help='snapshot of trained model')
parser.add_argument('--n_loop', type=int, default=4,
help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
help='Number of channels in the skip layers')
parser.add_argument('--use_embed_tanh', type=bool, default=True,
help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
if device.xp is chainer.backends.cuda.cupy:
chainer.global_config.autotune = True
# Preprocess
_, condition, _ = Preprocess(
sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
length=None, quantize=args.a_channels)(args.input)
x = numpy.zeros([1, args.a_channels, 1], dtype=condition.dtype)
condition = numpy.expand_dims(condition, axis=0)
# Define networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
args.n_loop, args.n_layer,
args.a_channels, args.r_channels, args.s_channels,
args.use_embed_tanh)
# Load trained parameters
chainer.serializers.load_npz(
args.model, encoder, 'updater/model:main/predictor/encoder/')
chainer.serializers.load_npz(
args.model, decoder, 'updater/model:main/predictor/decoder/')
# Non-autoregressive generate
x = device.send(x)
condition = device.send(condition)
encoder.to_device(device)
decoder.to_device(device)
x = chainer.Variable(x)
condition = chainer.Variable(condition)
conditions = encoder(condition)
decoder.initialize(1)
output = decoder.xp.zeros(conditions.shape[3])
# A workaround for ChainerX, which does not have random.choice.
# TODO(niboshi): Implement it in ChainerX
def random_choice(device, a, size, p):
if device.xp is chainerx:
return device.send(
numpy.random.choice(a, size=size, p=chainerx.to_numpy(p)))
return device.xp.random.choice(a, size=size, p=p)
# Autoregressive generate
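# The sample generated at step i is fed back in as a one-hot vector and the
# queues inside the decoder advance by one, so each iteration emits exactly
# one new audio sample.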
for i in tqdm.tqdm(range(len(output))):
with chainer.no_backprop_mode():
out = decoder.generate(x, conditions[:, :, :, i:i + 1]).array
value = random_choice(
device,
args.a_channels, size=1,
p=chainer.functions.softmax(out).array[0, :, 0])[0]
zeros = decoder.xp.zeros_like(x.array)
zeros[:, value, :] = 1
x = chainer.Variable(zeros)
output[i] = value
# Save
output = chainer.get_device('@numpy').send(output)
wave = MuLaw(args.a_channels).itransform(output)
librosa.output.write_wav(args.output, wave, 16000)
| 3,910
| 35.896226
| 79
|
py
|
chainer
|
chainer-master/examples/wavenet/modules.py
|
import chainer
import chainer.functions as F
import chainer.links as L
class ResidualBlock(chainer.Chain):
def __init__(self, filter_size, dilation,
residual_channels, dilated_channels, skip_channels):
super(ResidualBlock, self).__init__()
with self.init_scope():
self.conv = L.Convolution1D(
residual_channels, dilated_channels,
ksize=filter_size,
pad=dilation * (filter_size - 1), dilate=dilation)
self.res = L.Convolution1D(
dilated_channels // 2, residual_channels, 1)
self.skip = L.Convolution1D(
dilated_channels // 2, skip_channels, 1)
self.filter_size = filter_size
self.dilation = dilation
self.residual_channels = residual_channels
def forward(self, x, condition):
length = x.shape[2]
h = self.conv(x)
h = h[:, :, :length] # crop
h += condition
tanh_z, sig_z = F.split_axis(h, 2, axis=1)
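        # WaveNet gated activation unit: the dilated convolution above
        # produces both halves, combined as z = tanh(filter) * sigmoid(gate).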
z = F.tanh(tanh_z) * F.sigmoid(sig_z)
if x.shape[2] == z.shape[2]:
residual = self.res(z) + x
else:
residual = self.res(z) + x[:, :, -1:] # crop
        skip_connection = self.skip(z)
        return residual, skip_connection
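    # The three methods below implement fast autoregressive generation:
    # each block keeps a queue covering its receptive field so one output
    # sample per step can be computed without recomputing past activations.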
def initialize(self, n):
self.queue = chainer.Variable(self.xp.zeros((
n, self.residual_channels,
self.dilation * (self.filter_size - 1) + 1),
dtype=self.conv.W.dtype))
self.conv.pad = 0
def pop(self, condition):
return self(self.queue, condition)
def push(self, x):
self.queue = F.concat((self.queue[:, :, 1:], x), axis=2)
class ResidualNet(chainer.ChainList):
def __init__(self, n_loop, n_layer, filter_size,
residual_channels, dilated_channels, skip_channels):
super(ResidualNet, self).__init__()
dilations = [2 ** i for i in range(n_layer)] * n_loop
for dilation in dilations:
self.add_link(ResidualBlock(
filter_size, dilation,
residual_channels, dilated_channels, skip_channels))
def forward(self, x, conditions):
for i, (func, cond) in enumerate(zip(self.children(), conditions)):
x, skip = func(x, cond)
if i == 0:
skip_connections = skip
else:
skip_connections += skip
return skip_connections
def initialize(self, n):
for block in self.children():
block.initialize(n)
def generate(self, x, conditions):
for i, (func, cond) in enumerate(zip(self.children(), conditions)):
func.push(x)
x, skip = func.pop(cond)
if i == 0:
skip_connections = skip
else:
skip_connections += skip
return skip_connections
| 2,897
| 33.5
| 75
|
py
|
chainer
|
chainer-master/examples/wavenet/utils.py
|
import random
import librosa
import numpy
import chainer
class MuLaw(object):
def __init__(self, mu=256, int_type=numpy.int32, float_type=numpy.float32):
self.mu = mu
self.int_type = int_type
self.float_type = float_type
def transform(self, x):
x = x.astype(self.float_type)
y = numpy.sign(x) * numpy.log(1 + self.mu * numpy.abs(x)) / \
numpy.log(1 + self.mu)
y = numpy.digitize(y, 2 * numpy.arange(self.mu) / self.mu - 1) - 1
return y.astype(self.int_type)
def itransform(self, y):
y = y.astype(self.float_type)
y = 2 * y / self.mu - 1
x = numpy.sign(y) / self.mu * ((1 + self.mu) ** numpy.abs(y) - 1)
return x.astype(self.float_type)
class Preprocess(object):
def __init__(self, sr, n_fft, hop_length, n_mels, top_db,
length, quantize, dtype=None):
self.sr = sr
self.n_fft = n_fft
self.hop_length = hop_length
self.n_mels = n_mels
self.top_db = top_db
self.mu_law = MuLaw(quantize)
self.quantize = quantize
if length is None:
self.length = None
else:
self.length = length + 1
self.dtype = chainer.get_dtype(dtype)
def __call__(self, path):
# load data with trimming and normalizing
raw, _ = librosa.load(path, self.sr, res_type='kaiser_fast')
raw, _ = librosa.effects.trim(raw, self.top_db)
raw /= numpy.abs(raw).max()
raw = raw.astype(numpy.float32)
# mu-law transform
quantized = self.mu_law.transform(raw)
        # padding/trimming
if self.length is not None:
if len(raw) <= self.length:
# padding
pad = self.length - len(raw)
raw = numpy.concatenate(
(raw, numpy.zeros(pad, dtype=numpy.float32)))
quantized = numpy.concatenate(
(quantized, self.quantize // 2 * numpy.ones(pad)))
quantized = quantized.astype(numpy.int32)
else:
                # trimming
start = random.randint(0, len(raw) - self.length - 1)
raw = raw[start:start + self.length]
quantized = quantized[start:start + self.length]
# calculate mel-spectrogram
spectrogram = librosa.feature.melspectrogram(
raw, self.sr, n_fft=self.n_fft, hop_length=self.hop_length,
n_mels=self.n_mels)
spectrogram = librosa.power_to_db(
spectrogram, ref=numpy.max)
# normalize mel spectrogram into [-1, 1]
spectrogram += 40
spectrogram /= 40
if self.length is not None:
spectrogram = spectrogram[:, :self.length // self.hop_length]
spectrogram = spectrogram.astype(self.dtype)
# expand dimensions
one_hot = numpy.identity(
self.quantize, dtype=self.dtype)[quantized].T
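        # Teacher forcing: the one-hot input drops the last sample and the
        # target drops the first, so the net predicts sample t+1 from t.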
return one_hot[:, :-1], spectrogram, quantized[1:]
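# A minimal usage sketch (not part of the original example): 8-bit mu-law
# companding maps float waveforms in [-1, 1] to integer codes in
# [0, mu - 1] and approximately back.
if __name__ == '__main__':
    mu_law = MuLaw(mu=256)
    y = mu_law.transform(numpy.array([-1.0, 0.0, 1.0]))
    print(y)                     # [0 128 255]
    print(mu_law.itransform(y))  # roughly [-1.0, 0.0, 0.96]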
| 3,031
| 32.688889
| 79
|
py
|
chainer
|
chainer-master/examples/wavenet/net.py
|
import chainer
import chainer.functions as F
import chainer.links as L
from modules import ResidualNet
class UpsampleNet(chainer.ChainList):
def __init__(self, out_layers, r_channels,
channels=[128, 128], upscale_factors=[16, 16]):
super(UpsampleNet, self).__init__()
for channel, factor in zip(channels, upscale_factors):
self.add_link(L.Deconvolution1D(
None, channel, factor, stride=factor, pad=0))
for i in range(out_layers):
self.add_link(L.Convolution1D(None, 2 * r_channels, 1))
self.n_deconvolutions = len(channels)
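    # With the default upscale_factors=[16, 16], the deconvolutions
    # upsample the conditioning mel-spectrogram by 16 * 16 = 256, i.e. one
    # frame per audio sample at the hop_length=256 used in train.py; the
    # 1x1 convolutions then emit one condition tensor per WaveNet layer.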
def forward(self, x):
conditions = []
for i, link in enumerate(self.children()):
if i < self.n_deconvolutions:
x = F.relu(link(x))
else:
conditions.append(link(x))
return F.stack(conditions)
class WaveNet(chainer.Chain):
def __init__(self, n_loop, n_layer, a_channels, r_channels, s_channels,
use_embed_tanh):
super(WaveNet, self).__init__()
with self.init_scope():
self.embed = L.Convolution1D(
a_channels, r_channels, 2, pad=1, nobias=True)
self.resnet = ResidualNet(
n_loop, n_layer, 2, r_channels, 2 * r_channels, s_channels)
self.proj1 = L.Convolution1D(
s_channels, s_channels, 1, nobias=True)
self.proj2 = L.Convolution1D(
s_channels, a_channels, 1, nobias=True)
self.a_channels = a_channels
self.s_channels = s_channels
self.use_embed_tanh = use_embed_tanh
def forward(self, x, condition, generating=False):
length = x.shape[2]
x = self.embed(x)
x = x[:, :, :length] # crop
if self.use_embed_tanh:
x = F.tanh(x)
z = F.relu(self.resnet(x, condition))
z = F.relu(self.proj1(z))
y = self.proj2(z)
return y
def initialize(self, n):
self.resnet.initialize(n)
self.embed.pad = 0
self.embed_queue = chainer.Variable(self.xp.zeros(
(n, self.a_channels, 2), dtype=self.embed.W.dtype))
self.proj1_queue = chainer.Variable(self.xp.zeros(
(n, self.s_channels, 1), dtype=self.proj1.W.dtype))
self.proj2_queue3 = chainer.Variable(self.xp.zeros(
(n, self.s_channels, 1), dtype=self.proj2.W.dtype))
def generate(self, x, condition):
self.embed_queue = F.concat((self.embed_queue[:, :, 1:], x), axis=2)
x = self.embed(self.embed_queue)
if self.use_embed_tanh:
x = F.tanh(x)
x = F.relu(self.resnet.generate(x, condition))
self.proj1_queue = F.concat((self.proj1_queue[:, :, 1:], x), axis=2)
x = F.relu(self.proj1(self.proj1_queue))
self.proj2_queue3 = F.concat((self.proj2_queue3[:, :, 1:], x), axis=2)
x = self.proj2(self.proj2_queue3)
return x
class EncoderDecoderModel(chainer.Chain):
def __init__(self, encoder, decoder):
super(EncoderDecoderModel, self).__init__()
with self.init_scope():
self.encoder = encoder
self.decoder = decoder
def forward(self, x, condition):
encoded_condition = self.encoder(condition)
y = self.decoder(x, encoded_condition)
return y
| 3,367
| 34.083333
| 78
|
py
|
chainer
|
chainer-master/examples/wavenet/train.py
|
import argparse
import os
import pathlib
import warnings
import numpy
import chainer
from chainer.training import extensions
import chainerx
from net import EncoderDecoderModel
from net import UpsampleNet
from net import WaveNet
from utils import Preprocess
import matplotlib
matplotlib.use('Agg')
parser = argparse.ArgumentParser(description='Chainer example: WaveNet')
parser.add_argument('--batchsize', '-b', type=int, default=4,
                    help='Number of audio clips in each mini-batch')
parser.add_argument('--length', '-l', type=int, default=7680,
help='Number of samples in each audio clip')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./VCTK-Corpus',
help='Directory of dataset')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--n_loop', type=int, default=4,
help='Number of residual blocks')
parser.add_argument('--n_layer', type=int, default=10,
help='Number of layers in each residual block')
parser.add_argument('--a_channels', type=int, default=256,
help='Number of channels in the output layers')
parser.add_argument('--r_channels', type=int, default=64,
help='Number of channels in residual layers and embedding')
parser.add_argument('--s_channels', type=int, default=256,
help='Number of channels in the skip layers')
parser.add_argument('--use_embed_tanh', type=bool, default=True,
help='Use tanh after an initial 2x1 convolution')
parser.add_argument('--seed', type=int, default=0,
help='Random seed to split dataset into train and test')
parser.add_argument('--snapshot_interval', type=int, default=10000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
parser.add_argument('--process', type=int, default=1,
help='Number of parallel processes')
parser.add_argument('--prefetch', type=int, default=8,
help='Number of prefetch samples')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
if device.xp is chainer.backends.cuda.cupy:
chainer.global_config.autotune = True
# Datasets
if not os.path.isdir(args.dataset):
raise RuntimeError('Dataset directory not found: {}'.format(args.dataset))
paths = sorted([
str(path) for path in pathlib.Path(args.dataset).glob('wav48/*/*.wav')])
preprocess = Preprocess(
sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
length=args.length, quantize=args.a_channels)
dataset = chainer.datasets.TransformDataset(paths, preprocess)
train, valid = chainer.datasets.split_dataset_random(
dataset, int(len(dataset) * 0.9), args.seed)
# Networks
encoder = UpsampleNet(args.n_loop * args.n_layer, args.r_channels)
decoder = WaveNet(
args.n_loop, args.n_layer,
args.a_channels, args.r_channels, args.s_channels,
args.use_embed_tanh)
model = chainer.links.Classifier(EncoderDecoderModel(encoder, decoder))
# Optimizer
optimizer = chainer.optimizers.Adam(1e-4)
optimizer.setup(model)
# Iterators
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize,
n_processes=args.process, n_prefetch=args.prefetch)
valid_iter = chainer.iterators.MultiprocessIterator(
valid, args.batchsize, repeat=False, shuffle=False,
n_processes=args.process, n_prefetch=args.prefetch)
# Updater and Trainer
updater = chainer.training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = chainer.training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
# Extensions
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'main/accuracy',
'validation/main/loss', 'validation/main/accuracy']),
trigger=display_interval)
trainer.extend(extensions.PlotReport(
['main/loss', 'validation/main/loss'],
'iteration', file_name='loss.png', trigger=display_interval))
trainer.extend(extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'iteration', file_name='accuracy.png', trigger=display_interval))
trainer.extend(extensions.ProgressBar(update_interval=10))
# Resume
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# Run
trainer.run()
| 5,955
| 39.794521
| 79
|
py
|
chainer
|
chainer-master/examples/mnist/inference.py
|
#!/usr/bin/env python
import argparse
import chainer
from train_mnist import MLP
from train_mnist_model_parallel import ParallelMLP
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--snapshot', '-s',
default='result/snapshot_iter_12000',
help='The path to a saved snapshot (NPZ)')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('')
device.use()
    # Create the same model object as the one you used for training
if 'result_model_parallel' in args.snapshot:
        model = ParallelMLP(args.unit, 10, device, device)
else:
model = MLP(args.unit, 10)
    # Load saved parameters from a NPZ file of the Trainer object
    try:
        # Trainer snapshots keep the model parameters under the updater path.
        chainer.serializers.load_npz(
            args.snapshot, model, path='updater/model:main/predictor/')
    except Exception:
        # Fall back to a snapshot that stores the model object directly.
        chainer.serializers.load_npz(
            args.snapshot, model, path='predictor/')
model.to_device(device)
# Prepare data
train, test = chainer.datasets.get_mnist()
x, answer = test[0]
x = device.send(x)
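    # x[None, ...] below adds a batch axis of size one; the model expects
    # mini-batched input.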
with chainer.using_config('train', False):
prediction = model(x[None, ...])[0].array.argmax()
print('Prediction:', prediction)
print('Answer:', answer)
if __name__ == '__main__':
main()
| 2,171
| 32.9375
| 76
|
py
|
chainer
|
chainer-master/examples/mnist/train_mnist_custom_loop.py
|
#!/usr/bin/env python
"""Fully-connected neural network example using MNIST dataset
This code is a custom loop version of train_mnist.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
import argparse
import os
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
import train_mnist
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot using model '
'and state files in the specified directory')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train
model = L.Classifier(train_mnist.MLP(args.unit, 10))
model.to_device(device)
device.use()
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
if args.resume is not None:
# Resume from a snapshot
resume = args.resume
if os.path.exists(resume):
serializers.load_npz(os.path.join(resume, 'mlp.model'), model)
serializers.load_npz(os.path.join(resume, 'mlp.state'), optimizer)
else:
raise ValueError(
'`args.resume` ("{}") is specified,'
' but it does not exist'.format(resume)
)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
test_count = len(test)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
train_count = 0
sum_accuracy = 0
sum_loss = 0
while train_iter.epoch < args.epoch:
batch = train_iter.next()
x, t = convert.concat_examples(batch, device)
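        # optimizer.update(model, x, t) computes the loss by calling
        # model(x, t), runs backprop, and applies one Adam step.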
optimizer.update(model, x, t)
train_count += len(t)
sum_loss += float(model.loss.array) * len(t)
sum_accuracy += float(model.accuracy.array) * len(t)
if train_iter.is_new_epoch:
print('epoch: {}'.format(train_iter.epoch))
print('train mean loss: {}, accuracy: {}'.format(
sum_loss / train_count, sum_accuracy / train_count))
# evaluation
train_count = 0
sum_accuracy = 0
sum_loss = 0
# Enable evaluation mode.
with configuration.using_config('train', False):
# This is optional but can reduce computational overhead.
with chainer.using_config('enable_backprop', False):
for batch in test_iter:
x, t = convert.concat_examples(batch, device)
loss = model(x, t)
sum_loss += float(loss.array) * len(t)
sum_accuracy += float(
model.accuracy.array) * len(t)
test_iter.reset()
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_accuracy / test_count))
sum_accuracy = 0
sum_loss = 0
# Save the model and the optimizer
out = args.out
if not os.path.isdir(out):
os.makedirs(out)
print('save the model')
serializers.save_npz(os.path.join(out, 'mlp.model'), model)
print('save the optimizer')
serializers.save_npz(os.path.join(out, 'mlp.state'), optimizer)
if __name__ == '__main__':
main()
| 5,047
| 36.954887
| 78
|
py
|
chainer
|
chainer-master/examples/mnist/train_mnist_model_parallel.py
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import train_mnist
# Network definition
class ParallelMLP(chainer.Chain):
def __init__(self, n_units, n_out, device0, device1):
super(ParallelMLP, self).__init__()
self.device0 = device0
self.device1 = device1
with self.init_scope():
# the input size, 784, is inferred
self.first0 = train_mnist.MLP(n_units // 2, n_units)
self.first1 = train_mnist.MLP(n_units // 2, n_units)
self.first0.to_device(device0)
self.first1.to_device(device1)
# the input size, n_units, is inferred
self.second0 = train_mnist.MLP(n_units // 2, n_out)
self.second1 = train_mnist.MLP(n_units // 2, n_out)
self.second0.to_device(device0)
self.second1.to_device(device1)
def forward(self, x):
if self.device0 != self.device1:
# assume x is on device0
x1 = F.copy(x, self.device1)
z0 = self.first0(x)
z1 = self.first1(x1)
# synchronize
h0 = z0 + F.copy(z1, self.device0)
h1 = z1 + F.copy(z0, self.device1)
y0 = self.second0(F.relu(h0))
y1 = self.second1(F.relu(h1))
y = y0 + F.copy(y1, self.device0)
return y # output is on device0
else:
z0 = self.first0(x)
z1 = self.first1(x)
h = z0 + z1
y0 = self.second0(F.relu(h))
y1 = self.second1(F.relu(h))
y = y0 + y1
return y
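# A minimal CPU-only smoke test (ours, not part of the original example):
# both halves share the NumPy device, exercising the single-device branch
# of forward(). The helper name `_smoke_test` is hypothetical.
def _smoke_test():
    import numpy as np
    cpu = chainer.get_device('@numpy')
    model = ParallelMLP(8, 10, cpu, cpu)
    x = np.random.rand(2, 784).astype(np.float32)
    y = model(x)
    assert y.shape == (2, 10)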
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', default=20, type=int,
help='Number of sweeps over the dataset to train')
parser.add_argument('--out', '-o', default='result_model_parallel',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', default=1000, type=int,
help='Number of units')
parser.add_argument('--device0', '-d', type=str, default='0',
help='Device specifier of the first device. '
'Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--device1', '-D', type=str, default='1',
help='Device specifier of the second device. '
'Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu0', '-g', dest='device0', type=int, nargs='?',
const=0,
help='First GPU ID')
group.add_argument('--gpu1', '-G', dest='device1', type=int, nargs='?',
const=1,
help='Second GPU ID')
args = parser.parse_args()
device0 = chainer.get_device(args.device0)
device1 = chainer.get_device(args.device1)
print('Devices: {}, {}'.format(device0, device1))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# See train_mnist.py for the meaning of these lines
model = L.Classifier(ParallelMLP(args.unit, 10, device0, device1))
device0.use()
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
updater = training.updaters.StandardUpdater(
train_iter, optimizer, input_device=device0)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=device0))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device0.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 5,362
| 36.767606
| 77
|
py
|
chainer
|
chainer-master/examples/mnist/train_mnist_data_parallel_updater.py
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import sys
import train_mnist
def main():
    # This script is almost identical to train_mnist.py. The only difference
    # is that this script uses data-parallel computation on multiple devices.
    # See train_mnist.py for more details.
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=400,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--out', '-o', default='result_data_parallel',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
parser.add_argument('--devices', '-d', type=str, nargs='*',
default=['0', '1', '2', '3'],
help='Device specifiers. Either ChainerX device '
'specifiers or integers. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--ljob', '-j', type=int, default=4,
help='Number of parallel data loading processes')
args = parser.parse_args()
devices = tuple([chainer.get_device(d) for d in args.devices])
if any(device.xp is chainerx for device in devices):
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
print('Devices: {}'.format(args.devices))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
model = L.Classifier(train_mnist.MLP(args.unit, 10))
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
train, test = chainer.datasets.get_mnist()
train_iters = [
chainer.iterators.MultiprocessIterator(i,
args.batchsize,
n_processes=args.ljob)
for i in chainer.datasets.split_dataset_n_random(train, args.ljob)]
test_iter = chainer.iterators.MultiprocessIterator(
test, args.batchsize, repeat=False, n_processes=args.ljob)
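    # MultiprocessParallelUpdater launches one worker per device; each
    # worker computes gradients on its own data shard, and the gradients
    # are aggregated before the shared optimizer step.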
    updater = training.updaters.MultiprocessParallelUpdater(
        train_iters, optimizer, devices=devices)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=devices[0]))
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 3,603
| 40.425287
| 79
|
py
|
chainer
|
chainer-master/examples/mnist/train_mnist_data_parallel.py
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import train_mnist
def main():
# This script is almost identical to train_mnist.py. The only difference is
# that this script uses data-parallel computation on two GPUs.
# See train_mnist.py for more details.
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=400,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--out', '-o', default='result_data_parallel',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
parser.add_argument('--device0', '-d', type=str, default='0',
help='Device specifier of the first device. '
'Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--device1', '-D', type=str, default='1',
help='Device specifier of the second device. '
'Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu0', '-g', dest='device0', type=int, nargs='?',
const=0,
help='First GPU ID')
group.add_argument('--gpu1', '-G', dest='device1', type=int, nargs='?',
const=1,
help='Second GPU ID')
args = parser.parse_args()
device0 = chainer.get_device(args.device0)
device1 = chainer.get_device(args.device1)
print('Devices: {}, {}'.format(device0, device1))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device0.use()
model = L.Classifier(train_mnist.MLP(args.unit, 10))
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# ParallelUpdater implements the data-parallel gradient computation on
# multiple devices. It accepts "devices" argument that specifies which
# device to use.
updater = training.updaters.ParallelUpdater(
train_iter,
optimizer,
# The device of the name 'main' is used as a "master", while others are
# used as slaves. Names other than 'main' are arbitrary.
devices={'main': device0, 'second': device1},
)
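    # Each minibatch drawn from train_iter is split across the devices;
    # gradients computed on 'second' are accumulated onto 'main' before
    # the optimizer step.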
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=device0))
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device0.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 4,284
| 42.282828
| 79
|
py
|
chainer
|
chainer-master/examples/mnist/train_mnist.py
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import matplotlib
matplotlib.use('Agg')
# Network definition
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
def forward(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--autoload', action='store_true',
help='Automatically load trainer snapshots in case'
' of preemption or other temporary system failure')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
model = L.Classifier(MLP(args.unit, 10))
model.to_device(device)
device.use()
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device),
call_before_training=True)
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
# Take a snapshot each ``frequency`` epoch, delete old stale
# snapshots and automatically load from snapshot files if any
# files are already resident at result directory.
trainer.extend(extensions.snapshot(n_retains=1, autoload=args.autoload),
trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(), call_before_training=True)
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'),
call_before_training=True)
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'),
call_before_training=True)
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
call_before_training=True)
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume is not None:
# Resume from a snapshot (Note: this loaded model is to be
# overwritten by --autoload option, autoloading snapshots, if
# any snapshots exist in output directory)
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 6,024
| 39.709459
| 79
|
py
|
chainer
|
chainer-master/examples/mnist/.testdata/replacements/train_mnist.py
|
#!/usr/bin/env python
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
import matplotlib
matplotlib.use('Agg')
# Network definition
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
def forward(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--autoload', action='store_true',
help='Automatically load trainer snapshots in case'
' of preemption or other temporary system failure')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
model = L.Classifier(MLP(args.unit, 10))
model.to_device(device)
device.use()
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device),
call_before_training=True)
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
# TODO(niboshi): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
# Take a snapshot each ``frequency`` epoch, delete old stale
# snapshots and automatically load from snapshot files if any
# files are already resident at result directory.
trainer.extend(extensions.snapshot(n_retains=1, autoload=args.autoload),
trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(), call_before_training=True)
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'),
call_before_training=True)
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'),
call_before_training=True)
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']),
call_before_training=True)
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume is not None:
# Resume from a snapshot (Note: this loaded model is to be
# overwritten by --autoload option, autoloading snapshots, if
# any snapshots exist in output directory)
chainer.serializers.load_npz(args.resume, trainer)
# BEGIN ADDITIONAL TEST CODE
del trainer._extensions['ProgressBar']
# END ADDITIONAL TEST CODE
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 6,131
| 39.609272
| 79
|
py
|
chainer
|
chainer-master/examples/pix2pix/updater.py
|
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
class FacadeUpdater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.enc, self.dec, self.dis = kwargs.pop('models')
super(FacadeUpdater, self).__init__(*args, **kwargs)
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
batchsize, _, w, h = y_out.shape
loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
loss = loss_rec + loss_adv
chainer.report({'loss': loss}, enc)
return loss
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
batchsize, _, w, h = y_out.shape
loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
loss = loss_rec + loss_adv
chainer.report({'loss': loss}, dec)
return loss
def loss_dis(self, dis, y_in, y_out):
batchsize, _, w, h = y_in.shape
L1 = F.sum(F.softplus(-y_in)) / batchsize / w / h
L2 = F.sum(F.softplus(y_out)) / batchsize / w / h
loss = L1 + L2
chainer.report({'loss': loss}, dis)
return loss
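    # Note: F.softplus(-y) == -log(sigmoid(y)), so loss_dis above is the
    # standard GAN discriminator loss averaged over the batch and all
    # spatial patch outputs, and loss_adv in loss_enc/loss_dec is the
    # matching non-saturating generator term.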
def update_core(self):
enc_optimizer = self.get_optimizer('enc')
dec_optimizer = self.get_optimizer('dec')
dis_optimizer = self.get_optimizer('dis')
enc, dec, dis = self.enc, self.dec, self.dis
xp = enc.xp
batch = self.get_iterator('main').next()
batchsize = len(batch)
in_ch = batch[0][0].shape[0]
out_ch = batch[0][1].shape[0]
w_in = 256
w_out = 256
x_in = xp.zeros((batchsize, in_ch, w_in, w_in)).astype('f')
t_out = xp.zeros((batchsize, out_ch, w_out, w_out)).astype('f')
for i in range(batchsize):
x_in[i, :] = xp.asarray(batch[i][0])
t_out[i, :] = xp.asarray(batch[i][1])
z = enc(x_in)
x_out = dec(z)
y_fake = dis(x_in, x_out)
y_real = dis(x_in, t_out)
enc_optimizer.update(self.loss_enc, enc, x_out, t_out, y_fake)
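        # Cut the graph at z so the decoder update below does not
        # backpropagate into the encoder a second time.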
for z_ in z:
z_.unchain_backward()
dec_optimizer.update(self.loss_dec, dec, x_out, t_out, y_fake)
x_out.unchain_backward()
dis_optimizer.update(self.loss_dis, dis, y_real, y_fake)
| 2,430
| 31.851351
| 71
|
py
|
chainer
|
chainer-master/examples/pix2pix/facade_visualizer.py
|
#!/usr/bin/env python
import os
from PIL import Image
import chainer
import chainer.cuda
from chainer import Variable
import numpy as np
def out_image(updater, enc, dec, rows, cols, seed, dst):
@chainer.training.make_extension()
def make_image(trainer):
np.random.seed(seed)
n_images = rows * cols
xp = enc.xp
w_in = 256
w_out = 256
in_ch = 12
out_ch = 3
in_all = np.zeros((n_images, in_ch, w_in, w_in)).astype('i')
gt_all = np.zeros((n_images, out_ch, w_out, w_out)).astype('f')
gen_all = np.zeros((n_images, out_ch, w_out, w_out)).astype('f')
for it in range(n_images):
batch = updater.get_iterator('test').next()
batchsize = len(batch)
x_in = xp.zeros((batchsize, in_ch, w_in, w_in)).astype('f')
t_out = xp.zeros((batchsize, out_ch, w_out, w_out)).astype('f')
for i in range(batchsize):
x_in[i, :] = xp.asarray(batch[i][0])
t_out[i, :] = xp.asarray(batch[i][1])
x_in = Variable(x_in)
with chainer.no_backprop_mode():
with chainer.using_config('train', False):
z = enc(x_in)
x_out = dec(z)
in_all[it, :] = x_in.array.get()[0, :]
gt_all[it, :] = t_out.get()[0, :]
gen_all[it, :] = x_out.array.get()[0, :]
def save_image(x, name, mode=None):
_, C, H, W = x.shape
x = x.reshape((rows, cols, C, H, W))
x = x.transpose(0, 3, 1, 4, 2)
if C == 1:
x = x.reshape((rows*H, cols*W))
else:
x = x.reshape((rows*H, cols*W, C))
preview_dir = '{}/preview'.format(dst)
preview_path = preview_dir +\
'/image_{}_{:0>8}.png'.format(name, trainer.updater.iteration)
if not os.path.exists(preview_dir):
os.makedirs(preview_dir)
Image.fromarray(x, mode=mode).convert('RGB').save(preview_path)
x = np.asarray(np.clip(gen_all * 128 + 128,
0.0, 255.0), dtype=np.uint8)
save_image(x, 'gen')
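        # Render the 12-channel one-hot label input as an HSV image:
        # channel 0 (hue) encodes the class index as 15 * i, while
        # saturation and value stay at 255.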
x = np.ones((n_images, 3, w_in, w_in)).astype(np.uint8)*255
x[:, 0, :, :] = 0
for i in range(12):
x[:, 0, :, :] += np.uint8(15*i*in_all[:, i, :, :])
save_image(x, 'in', mode='HSV')
x = np.asarray(np.clip(gt_all * 128+128, 0.0, 255.0), dtype=np.uint8)
save_image(x, 'gt')
return make_image
| 2,567
| 31.506329
| 78
|
py
|
chainer
|
chainer-master/examples/pix2pix/facade_dataset.py
|
from PIL import Image
from chainer.dataset import dataset_mixin
import numpy as np
# download `BASE` dataset from http://cmp.felk.cvut.cz/~tylecr1/facade/
class FacadeDataset(dataset_mixin.DatasetMixin):
def __init__(self, dataDir='./facade/base', data_range=(1, 300)):
print('load dataset start')
print(' from: %s' % dataDir)
print(' range: [%d, %d)' % (data_range[0], data_range[1]))
self.dataDir = dataDir
self.dataset = []
for i in range(data_range[0], data_range[1]):
img = Image.open(dataDir+'/cmp_b%04d.jpg' % i)
label = Image.open(dataDir+'/cmp_b%04d.png' % i)
w, h = img.size
r = 286/float(min(w, h))
# resize images so that min(w, h) == 286
img = img.resize((int(r*w), int(r*h)), Image.BILINEAR)
label = label.resize((int(r*w), int(r*h)), Image.NEAREST)
img = np.asarray(img).astype('f').transpose(2, 0, 1)/128.0-1.0
label_ = np.asarray(label)-1 # [0, 12)
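            # Expand the per-pixel class map into 12 binary channels
            # (one-hot encoding over the facade label classes).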
label = np.zeros((12, img.shape[1], img.shape[2])).astype('i')
for j in range(12):
label[j, :] = label_ == j
self.dataset.append((img, label))
print('load dataset done')
def __len__(self):
return len(self.dataset)
# return (label, img)
def get_example(self, i, crop_width=256):
_, h, w = self.dataset[i][0].shape
x_l = np.random.randint(0, w-crop_width)
x_r = x_l+crop_width
y_l = np.random.randint(0, h-crop_width)
y_r = y_l+crop_width
label = self.dataset[i][1][:, y_l:y_r, x_l:x_r]
img = self.dataset[i][0][:, y_l:y_r, x_l:x_r]
return label, img
| 1,733
| 36.695652
| 74
|
py
|
chainer
|
chainer-master/examples/pix2pix/net.py
|
#!/usr/bin/env python
from __future__ import print_function
import chainer
import chainer.functions as F
import chainer.links as L
# U-net https://arxiv.org/pdf/1611.07004v1.pdf
# convolution-batchnormalization-(dropout)-relu
class ConvBNR(chainer.Chain):
def __init__(self, ch0, ch1, use_bn=True,
sample='down', activation=F.relu, dropout=False):
self.use_bn = use_bn
self.activation = activation
self.dropout = dropout
w = chainer.initializers.Normal(0.02)
super(ConvBNR, self).__init__()
with self.init_scope():
if sample == 'down':
self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
else:
self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=w)
if use_bn:
self.bn = L.BatchNormalization(ch1)
def forward(self, x):
h = self.c(x)
if self.use_bn:
h = self.bn(h)
if self.dropout:
h = F.dropout(h)
if self.activation is not None:
h = self.activation(h)
return h
class Encoder(chainer.Chain):
def __init__(self, in_ch):
w = chainer.initializers.Normal(0.02)
super(Encoder, self).__init__()
with self.init_scope():
self.c0 = L.Convolution2D(in_ch, 64, 3, 1, 1, initialW=w)
self.c1 = ConvBNR(64, 128, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c2 = ConvBNR(128, 256, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c3 = ConvBNR(256, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c4 = ConvBNR(512, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c5 = ConvBNR(512, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c6 = ConvBNR(512, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c7 = ConvBNR(512, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
def forward(self, x):
hs = [F.leaky_relu(self.c0(x))]
for i in range(1, 8):
hs.append(self['c%d' % i](hs[i-1]))
return hs
class Decoder(chainer.Chain):
def __init__(self, out_ch):
w = chainer.initializers.Normal(0.02)
super(Decoder, self).__init__()
with self.init_scope():
self.c0 = ConvBNR(512, 512, use_bn=True, sample='up',
activation=F.relu, dropout=True)
self.c1 = ConvBNR(1024, 512, use_bn=True,
sample='up', activation=F.relu, dropout=True)
self.c2 = ConvBNR(1024, 512, use_bn=True,
sample='up', activation=F.relu, dropout=True)
self.c3 = ConvBNR(1024, 512, use_bn=True,
sample='up', activation=F.relu, dropout=False)
self.c4 = ConvBNR(1024, 256, use_bn=True,
sample='up', activation=F.relu, dropout=False)
self.c5 = ConvBNR(512, 128, use_bn=True, sample='up',
activation=F.relu, dropout=False)
self.c6 = ConvBNR(256, 64, use_bn=True, sample='up',
activation=F.relu, dropout=False)
self.c7 = L.Convolution2D(128, out_ch, 3, 1, 1, initialW=w)
def forward(self, hs):
h = self.c0(hs[-1])
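        # U-Net skip connections: concatenate the mirrored encoder feature
        # map before each up-convolution, doubling the input channels.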
for i in range(1, 8):
h = F.concat([h, hs[-i-1]])
if i < 7:
h = self['c%d' % i](h)
else:
h = self.c7(h)
return h
class Discriminator(chainer.Chain):
def __init__(self, in_ch, out_ch):
w = chainer.initializers.Normal(0.02)
super(Discriminator, self).__init__()
with self.init_scope():
self.c0_0 = ConvBNR(in_ch, 32, use_bn=False, sample='down',
activation=F.leaky_relu, dropout=False)
self.c0_1 = ConvBNR(out_ch, 32, use_bn=False, sample='down',
activation=F.leaky_relu, dropout=False)
self.c1 = ConvBNR(64, 128, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c2 = ConvBNR(128, 256, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c3 = ConvBNR(256, 512, use_bn=True, sample='down',
activation=F.leaky_relu, dropout=False)
self.c4 = L.Convolution2D(512, 1, 3, 1, 1, initialW=w)
def forward(self, x_0, x_1):
h = F.concat([self.c0_0(x_0), self.c0_1(x_1)])
h = self.c1(h)
h = self.c2(h)
h = self.c3(h)
h = self.c4(h)
return h
| 5,072
| 39.584
| 76
|
py
|
chainer
|
chainer-master/examples/pix2pix/train_facade.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import warnings
import numpy
import chainer
from chainer import training
from chainer.training import extensions
import chainerx
from facade_dataset import FacadeDataset
from facade_visualizer import out_image
from net import Decoder
from net import Discriminator
from net import Encoder
from updater import FacadeUpdater
def main():
parser = argparse.ArgumentParser(
description='chainer implementation of pix2pix')
parser.add_argument('--batchsize', '-b', type=int, default=1,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=200,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--dataset', '-i', default='./facade/base',
help='Directory of image files.')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--seed', type=int, default=0,
help='Random seed')
parser.add_argument('--snapshot_interval', type=int, default=1000,
help='Interval of snapshot')
parser.add_argument('--display_interval', type=int, default=100,
help='Interval of displaying log to console')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
if device.xp is chainerx:
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train
enc = Encoder(in_ch=12)
dec = Decoder(out_ch=3)
dis = Discriminator(in_ch=12, out_ch=3)
enc.to_device(device)
dec.to_device(device)
dis.to_device(device)
# Setup an optimizer
def make_optimizer(model, alpha=0.0002, beta1=0.5):
optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.00001), 'hook_dec')
return optimizer
opt_enc = make_optimizer(enc)
opt_dec = make_optimizer(dec)
opt_dis = make_optimizer(dis)
train_d = FacadeDataset(args.dataset, data_range=(1, 300))
test_d = FacadeDataset(args.dataset, data_range=(300, 379))
train_iter = chainer.iterators.SerialIterator(train_d, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test_d, args.batchsize)
# Set up a trainer
updater = FacadeUpdater(
models=(enc, dec, dis),
iterator={
'main': train_iter,
'test': test_iter},
optimizer={
'enc': opt_enc, 'dec': opt_dec,
'dis': opt_dis},
device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
snapshot_interval = (args.snapshot_interval, 'iteration')
display_interval = (args.display_interval, 'iteration')
trainer.extend(extensions.snapshot(
filename='snapshot_iter_{.updater.iteration}.npz'),
trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
enc, 'enc_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
dec, 'dec_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(
dis, 'dis_iter_{.updater.iteration}.npz'), trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(extensions.PrintReport([
'epoch', 'iteration', 'enc/loss', 'dec/loss', 'dis/loss',
]), trigger=display_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(
out_image(
updater, enc, dec,
5, 5, args.seed, args.out),
trigger=snapshot_interval)
if args.resume is not None:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 5,144
| 35.75
| 78
|
py
|
chainer
|
chainer-master/examples/memnn/download.py
|
#!/usr/bin/env python
from six.moves.urllib import request
def main():
request.urlretrieve(
'http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz',
'tasks_1-20_v1-2.tar.gz')
if __name__ == '__main__':
main()
| 252
| 17.071429
| 78
|
py
|
chainer
|
chainer-master/examples/memnn/memnn.py
|
import collections
import json
import os
import numpy
import six
import chainer
from chainer import backend
import chainer.functions as F
from chainer import initializers
import chainer.links as L
import babi
def bow_encode(embed, sentences):
"""BoW sentence encoder.
It is defined as:
.. math::
m = \\sum_j A x_j,
where :math:`A` is an embed matrix, and :math:`x_j` is :math:`j`-th word
ID.
"""
e = embed(sentences)
s = F.sum(e, axis=-2)
return s
def position_encode(embed, sentences):
"""Position encoding.
It is defined as:
.. math::
m = \\sum_j l_j A x_j,
where :math:`A` is an embed matrix, :math:`x_j` is :math:`j`-th word ID and
.. math::
l_{kj} = (1 - j / J) - (k / d)(1 - 2j / J).
    :math:`J` is the length of a sentence and :math:`d` is the dimension of
    the embedding.
"""
xp = backend.get_array_module(sentences)
e = embed(sentences)
n_words, n_units = e.shape[-2:]
# To avoid 0/0, we use max(length, 1) here.
# Note that when the length is zero, its embedding is always zero and
# is ignored.
length = xp.maximum(
xp.sum((sentences != 0).astype(xp.float32), axis=-1), 1)
length = length.reshape((length.shape + (1, 1)))
k = xp.arange(1, n_units + 1, dtype=numpy.float32) / n_units
i = xp.arange(1, n_words + 1, dtype=numpy.float32)[:, None]
coeff = (1 - i / length) - k * (1 - 2.0 * i / length)
e = coeff * e
s = F.sum(e, axis=-2)
return s
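# A small sketch (ours, not part of the example) materializing the position
# encoding coefficients l_{kj} = (1 - j/J) - (k/d)(1 - 2j/J) from the
# docstring above for a toy sentence length J and embedding size d.
def _position_coeff_demo(J=4, d=3):
    k = numpy.arange(1, d + 1, dtype=numpy.float32) / d
    j = numpy.arange(1, J + 1, dtype=numpy.float32)[:, None]
    # Same formula as in position_encode, with the length fixed to J.
    return (1 - j / J) - k * (1 - 2.0 * j / J)  # shape (J, d)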
def make_encoder(name):
if name == 'bow':
return bow_encode
elif name == 'pe':
return position_encode
else:
        raise ValueError('Unknown encoder type: "%s"' % name)
class Memory(object):
"""Memory component in a memory network.
Args:
A (chainer.links.EmbedID): Embed matrix for input. Its shape is
``(n_vocab, n_units)``.
C (chainer.links.EmbedID): Embed matrix for output. Its shape is
``(n_vocab, n_units)``.
TA (chainer.links.EmbedID): Embed matrix for temporal encoding for
input. Its shape is ``(max_memory, n_units)``.
TC (chainer.links.EmbedID): Embed matrix for temporal encoding for
output. Its shape is ``(max_memory, n_units)``.
        encoder (callable): It encodes given sentences into embedding
            vectors.
"""
def __init__(self, A, C, TA, TC, encoder):
self.A = A
self.C = C
self.TA = TA
self.TC = TC
self.encoder = encoder
def register_all(self, sentences):
self.m = self.encoder(self.A, sentences)
self.c = self.encoder(self.C, sentences)
def query(self, u):
xp = backend.get_array_module(u)
size = self.m.shape[1]
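        # Temporal encoding indices run in reverse so that the most
        # recently written memory slot receives embedding index 0.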
inds = xp.arange(size - 1, -1, -1, dtype=numpy.int32)
tm = self.TA(inds)
tc = self.TC(inds)
tm = F.broadcast_to(tm, self.m.shape)
tc = F.broadcast_to(tc, self.c.shape)
p = F.softmax(F.matmul(self.m + tm, F.expand_dims(u, -1)))
o = F.matmul(F.swapaxes(self.c + tc, 2, 1), p)
o = F.squeeze(o, -1)
u = o + u
return u
class MemNN(chainer.Chain):
def __init__(self, n_units, n_vocab, encoder, max_memory, hops):
super(MemNN, self).__init__()
with self.init_scope():
self.embeds = chainer.ChainList()
self.temporals = chainer.ChainList()
normal = initializers.Normal()
        # Share the embedding matrices between adjacent layers
for _ in six.moves.range(hops + 1):
self.embeds.append(L.EmbedID(n_vocab, n_units, initialW=normal))
self.temporals.append(
L.EmbedID(max_memory, n_units, initialW=normal))
self.memories = [
Memory(self.embeds[i], self.embeds[i + 1],
self.temporals[i], self.temporals[i + 1], encoder)
for i in six.moves.range(hops)
]
        # The question embedding is the same as the input embedding of the
        # first layer
self.B = self.embeds[0]
        # The answer prediction matrix W is the same as the final output layer
self.W = lambda u: F.linear(u, self.embeds[-1].W)
self.encoder = encoder
self.n_units = n_units
self.max_memory = max_memory
self.hops = hops
def fix_ignore_label(self):
for embed in self.embeds:
embed.W.array[0, :] = 0
def register_all(self, sentences):
for memory in self.memories:
memory.register_all(sentences)
def query(self, question):
u = self.encoder(self.B, question)
for memory in self.memories:
u = memory.query(u)
a = self.W(u)
return a
def forward(self, sentences, question):
self.register_all(sentences)
a = self.query(question)
return a
def convert_data(train_data, max_memory):
all_data = []
sentence_len = max(max(len(s.sentence) for s in story)
for story in train_data)
for story in train_data:
mem = numpy.zeros((max_memory, sentence_len), dtype=numpy.int32)
i = 0
for sent in story:
if isinstance(sent, babi.Sentence):
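                # When the memory is full, discard the oldest sentence
                # by shifting the remaining ones up one slot (FIFO).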
if i == max_memory:
mem[0:i - 1, :] = mem[1:i, :]
i -= 1
mem[i, 0:len(sent.sentence)] = sent.sentence
i += 1
elif isinstance(sent, babi.Query):
query = numpy.zeros(sentence_len, dtype=numpy.int32)
query[0:len(sent.sentence)] = sent.sentence
all_data.append({
'sentences': mem.copy(),
'question': query,
'answer': numpy.array(sent.answer, numpy.int32),
})
return all_data
def save_model(directory, model, vocab):
"""Saves a model to a given directory.
Args:
directory (str): Path to a directory where you store a model.
model (chainer.Chain): Model to store.
        vocab (dict): Vocabulary dictionary.
"""
encoder = model.predictor.encoder
if encoder == bow_encode:
sentence_repr = 'bow'
elif encoder == position_encode:
sentence_repr = 'pe'
else:
raise ValueError('Cannot serialize encoder: %s' % str(encoder))
os.makedirs(directory, exist_ok=True)
parameters = {
'unit': model.predictor.n_units,
'hop': model.predictor.hops,
'max_memory': model.predictor.max_memory,
'sentence_repr': sentence_repr,
'vocabulary': vocab,
}
with open(os.path.join(directory, 'parameter.json'), 'w') as f:
json.dump(parameters, f)
chainer.serializers.save_npz(
os.path.join(directory, 'model.npz'), model)
def load_model(directory):
"""Loads a model saved.
Args:
directory (str): Path to a directory where you load a model.
Returns:
tuple: ``(model, vocab)`` where ``model`` is a loaded model and
``vocab`` is a ``dict`` storing its vocabulary.
"""
with open(os.path.join(directory, 'parameter.json')) as f:
parameters = json.load(f)
max_memory = parameters['max_memory']
vocab = collections.defaultdict(lambda: 0)
vocab.update(parameters['vocabulary'])
encoder = make_encoder(parameters['sentence_repr'])
network = MemNN(
parameters['unit'], len(vocab), encoder, max_memory, parameters['hop'])
model = chainer.links.Classifier(network, label_key='answer')
chainer.serializers.load_npz(
os.path.join(directory, 'model.npz'), model)
return model, vocab
| 7,653
| 28.102662
| 79
|
py
|
chainer
|
chainer-master/examples/memnn/babi.py
|
import collections
Query = collections.namedtuple('Query', ['sentence', 'answer', 'fact'])
Sentence = collections.namedtuple('Sentence', ['sentence'])
def split(sentence):
"""Splits a sentence into words.
Args:
sentence (str): A sentence to split.
Returns:
        list of str: A list of words. Punctuation is removed.
"""
return sentence.lower().replace('.', '').replace('?', '').split()
def convert(vocab, words):
"""Converts a word list into a word ID list.
Args:
vocab (collections.defaultdict): A dictionary storing word IDs.
        words (list of str): A list of words to convert.
Returns:
list of int: A list of word IDs.
"""
return [vocab[w] for w in words]
def parse_line(vocab, line):
"""Parses each line and make a named tuple.
Args:
vocab (collections.defaultdict): A dictionary storing word IDs.
line (str): A line to parse in bAbI dataset.
Returns:
Query or Sentence: Parsed tuple.
"""
if '\t' in line:
# question line
question, answer, fact_id = line.split('\t')
aid = convert(vocab, [answer])[0]
words = split(question)
wid = convert(vocab, words)
ids = list(map(int, fact_id.split(' ')))
return Query(wid, aid, ids)
else:
# sentence line
words = split(line)
wid = convert(vocab, words)
return Sentence(wid)
def read_data(vocab, path):
"""Reads a bAbI dataset.
Args:
vocab (collections.defaultdict): A dictionary storing word IDs.
path (str): Path to bAbI data file.
Returns:
        list of Query or Sentence: Parsed lines.
"""
data = []
all_data = []
with open(path) as f:
for line in f:
sid, content = line.strip().split(' ', 1)
if sid == '1':
if data:
all_data.append(data)
data = []
data.append(parse_line(vocab, content))
if data:
all_data.append(data)
return all_data
| 2,092
| 22.516854
| 71
|
py
|
chainer
|
chainer-master/examples/memnn/train_memnn.py
|
#!/usr/bin/env python
import argparse
import collections
import warnings
import chainer
from chainer.training import extensions
import numpy
import babi
import memnn
def train(train_data_path, test_data_path, args):
device = chainer.get_device(args.device)
device.use()
vocab = collections.defaultdict(lambda: len(vocab))
vocab['<unk>'] = 0
train_data = babi.read_data(vocab, train_data_path)
test_data = babi.read_data(vocab, test_data_path)
print('Training data: %s: %d' % (train_data_path, len(train_data)))
print('Test data: %s: %d' % (test_data_path, len(test_data)))
train_data = memnn.convert_data(train_data, args.max_memory)
test_data = memnn.convert_data(test_data, args.max_memory)
encoder = memnn.make_encoder(args.sentence_repr)
network = memnn.MemNN(
args.unit, len(vocab), encoder, args.max_memory, args.hop)
model = chainer.links.Classifier(network, label_key='answer')
opt = chainer.optimizers.Adam()
model.to_device(device)
opt.setup(model)
train_iter = chainer.iterators.SerialIterator(
train_data, args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test_data, args.batchsize, repeat=False, shuffle=False)
updater = chainer.training.StandardUpdater(train_iter, opt, device=device)
trainer = chainer.training.Trainer(updater, (args.epoch, 'epoch'))
@chainer.training.make_extension()
def fix_ignore_label(trainer):
network.fix_ignore_label()
trainer.extend(fix_ignore_label)
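    # Row 0 of every embedding matrix corresponds to the padding ID 0; the
    # extension above re-zeroes it after each update so that padding never
    # contributes to the sentence encodings.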
trainer.extend(extensions.Evaluator(test_iter, model, device=device))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy']))
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.run()
if args.model:
memnn.save_model(args.model, model, vocab)
def main():
parser = argparse.ArgumentParser(
description='Chainer example: End-to-end memory networks')
parser.add_argument('TRAIN_DATA',
help='Path to training data in bAbI dataset '
'(e.g. "qa1_single-supporting-fact_train.txt")')
parser.add_argument('TEST_DATA',
help='Path to test data in bAbI dataset '
'(e.g. "qa1_single-supporting-fact_test.txt")')
parser.add_argument('--model', '-m', default='model',
help='Model directory where it stores trained model')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini batch')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--unit', '-u', type=int, default=20,
help='Number of units')
parser.add_argument('--hop', '-H', type=int, default=3,
help='Number of hops')
parser.add_argument('--max-memory', type=int, default=50,
                        help='Maximum number of memory slots')
parser.add_argument('--sentence-repr',
choices=['bow', 'pe'], default='bow',
help='Sentence representation. '
'Select from BoW ("bow") or position encoding ("pe")')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
train(args.TRAIN_DATA, args.TEST_DATA, args)
if __name__ == '__main__':
main()
| 4,288
| 37.990909
| 78
|
py
|
chainer
|
chainer-master/examples/memnn/test_memnn.py
|
#!/usr/bin/env python
import argparse
import numpy
import chainer
import babi
import memnn
def main():
parser = argparse.ArgumentParser(
description='Chainer example: End-to-end memory networks')
parser.add_argument('MODEL',
help='Path to model directory specified with `-m` '
'argument in the training script')
parser.add_argument('DATA',
help='Path to test data in bAbI dataset '
'(e.g. "qa1_single-supporting-fact_test.txt")')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
xp = device.xp
device.use()
model, vocab = memnn.load_model(args.MODEL)
model.to_device(device)
network = model.predictor
max_memory = network.max_memory
id_to_vocab = {i: v for v, i in vocab.items()}
test_data = babi.read_data(vocab, args.DATA)
print('Test data: %s: %d' % (args.DATA, len(test_data)))
sentence_len = max(max(len(s.sentence) for s in story)
for story in test_data)
correct = total = 0
for story in test_data:
mem = xp.zeros((max_memory, sentence_len), dtype=numpy.int32)
i = 0
for sent in story:
if isinstance(sent, babi.Sentence):
if i == max_memory:
mem[0:i - 1, :] = mem[1:i, :]
i -= 1
mem[i, 0:len(sent.sentence)] = xp.asarray(sent.sentence)
i += 1
elif isinstance(sent, babi.Query):
query = xp.array(sent.sentence, dtype=numpy.int32)
                # the network assumes mini-batch input
score = network(mem[None], query[None])[0]
answer = int(xp.argmax(score.array))
if answer == sent.answer:
correct += 1
total += 1
print(id_to_vocab[answer], id_to_vocab[sent.answer])
accuracy = float(correct) / total
print('Accuracy: %.2f%%' % (accuracy * 100))
if __name__ == '__main__':
main()
| 2,673
| 33.282051
| 76
|
py
|
chainer
|
chainer-master/examples/cifar/train_cifar_custom_loop.py
|
#!/usr/bin/env python
"""Convnet example using CIFAR10 or CIFAR100 dataset
This code is a custom loop version of train_cifar.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
import argparse
import os
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.add_argument('--resume', '-r', type=str,
help='Directory that has `vgg.model` and `vgg.state`')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
if args.test:
train = train[:200]
test = test[:200]
test_count = len(test)
model = L.Classifier(models.VGG.VGG(class_labels))
model.to_device(device)
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
if args.resume is not None:
resume = args.resume
if os.path.exists(resume):
serializers.load_npz(os.path.join(resume, 'vgg.model'), model)
serializers.load_npz(os.path.join(resume, 'vgg.state'), optimizer)
else:
raise ValueError(
'`args.resume` ("{}") is specified,'
' but it does not exist.'.format(resume)
)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
train_count = 0
sum_acc = 0
sum_loss = 0
while train_iter.epoch < args.epoch:
batch = train_iter.next()
# Reduce learning rate by 0.5 every 25 epochs.
if train_iter.epoch % 25 == 0 and train_iter.is_new_epoch:
optimizer.lr *= 0.5
print('Reducing learning rate to: {}'.format(optimizer.lr))
x, t = convert.concat_examples(batch, device)
optimizer.update(model, x, t)
train_count += len(t)
sum_loss += float(model.loss.array) * len(t)
sum_acc += float(model.accuracy.array) * len(t)
if train_iter.is_new_epoch:
print('epoch: {}'.format(train_iter.epoch))
print('train mean loss: {}, accuracy: {}'.format(
sum_loss / train_count, sum_acc / train_count))
train_count = 0
sum_acc = 0
sum_loss = 0
# Enable evaluation mode.
with configuration.using_config('train', False):
# This is optional but can reduce computational overhead.
with chainer.using_config('enable_backprop', False):
for batch in test_iter:
x, t = convert.concat_examples(batch, device)
loss = model(x, t)
sum_loss += float(loss.array) * len(t)
sum_acc += float(model.accuracy.array) * len(t)
test_iter.reset()
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_acc / test_count))
sum_acc = 0
sum_loss = 0
# Save the model and the optimizer
out = args.out
if not os.path.exists(out):
os.makedirs(out)
print('save the model')
serializers.save_npz(os.path.join(out, 'vgg.model'), model)
print('save the optimizer')
serializers.save_npz(os.path.join(out, 'vgg.state'), optimizer)
if __name__ == '__main__':
main()
| 5,913 | 37.653595 | 78 | py |
| chainer | chainer-master/examples/cifar/train_cifar.py |
import argparse
import chainer
from chainer import backend
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--early-stopping', type=str,
help='Metric to watch for early stopping')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
model = L.Classifier(models.VGG.VGG(class_labels))
model.to_device(device)
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(5e-4))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
stop_trigger = (args.epoch, 'epoch')
# Early stopping option
if args.early_stopping:
stop_trigger = triggers.EarlyStoppingTrigger(
monitor=args.early_stopping, verbose=True,
max_trigger=(args.epoch, 'epoch'))
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, stop_trigger, out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device))
# Reduce the learning rate by half every 25 epochs.
trainer.extend(extensions.ExponentialShift('lr', 0.5),
trigger=(25, 'epoch'))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
# TODO(imanishi): Support for ChainerX
if not isinstance(device, backend.ChainerxDevice):
trainer.extend(extensions.DumpGraph('main/loss'))
# Take a snapshot at each epoch
trainer.extend(extensions.snapshot(
        filename='snapshot_epoch_{.updater.epoch}'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
if __name__ == '__main__':
main()
| 5,155 | 38.968992 | 79 | py |
| chainer | chainer-master/examples/cifar/models/VGG.py |
import chainer
import chainer.functions as F
import chainer.links as L
class Block(chainer.Chain):
"""A convolution, batch norm, ReLU block.
A block in a feedforward network that performs a
convolution followed by batch normalization followed
by a ReLU activation.
For the convolution operation, a square filter size is used.
Args:
out_channels (int): The number of output channels.
ksize (int): The size of the filter is ksize x ksize.
pad (int): The padding to use for the convolution.
"""
def __init__(self, out_channels, ksize, pad=1):
super(Block, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(None, out_channels, ksize, pad=pad,
nobias=True)
self.bn = L.BatchNormalization(out_channels)
def forward(self, x):
h = self.conv(x)
h = self.bn(h)
return F.relu(h)
class VGG(chainer.Chain):
"""A VGG-style network for very small images.
This model is based on the VGG-style model from
http://torch.ch/blog/2015/07/30/cifar.html
which is based on the network architecture from the paper:
https://arxiv.org/pdf/1409.1556v6.pdf
This model is intended to be used with either RGB or greyscale input
images that are of size 32x32 pixels, such as those in the CIFAR10
and CIFAR100 datasets.
On CIFAR10, it achieves approximately 89% accuracy on the test set with
no data augmentation.
On CIFAR100, it achieves approximately 63% accuracy on the test set with
no data augmentation.
Args:
class_labels (int): The number of class labels.
"""
def __init__(self, class_labels=10):
super(VGG, self).__init__()
with self.init_scope():
self.block1_1 = Block(64, 3)
self.block1_2 = Block(64, 3)
self.block2_1 = Block(128, 3)
self.block2_2 = Block(128, 3)
self.block3_1 = Block(256, 3)
self.block3_2 = Block(256, 3)
self.block3_3 = Block(256, 3)
self.block4_1 = Block(512, 3)
self.block4_2 = Block(512, 3)
self.block4_3 = Block(512, 3)
self.block5_1 = Block(512, 3)
self.block5_2 = Block(512, 3)
self.block5_3 = Block(512, 3)
self.fc1 = L.Linear(None, 512, nobias=True)
self.bn_fc1 = L.BatchNormalization(512)
self.fc2 = L.Linear(None, class_labels, nobias=True)
def forward(self, x):
# 64 channel blocks:
h = self.block1_1(x)
h = F.dropout(h, ratio=0.3)
h = self.block1_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 128 channel blocks:
h = self.block2_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block2_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 256 channel blocks:
h = self.block3_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block4_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block5_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.dropout(h, ratio=0.5)
h = self.fc1(h)
h = self.bn_fc1(h)
h = F.relu(h)
h = F.dropout(h, ratio=0.5)
return self.fc2(h)
| 3,799 | 29.894309 | 76 | py |
| chainer | chainer-master/examples/cifar/models/__init__.py | | 0 | 0 | 0 | py |
| chainer | chainer-master/examples/seq2seq/seq2seq.py |
#!/usr/bin/env python
import argparse
import datetime
import io
from nltk.translate import bleu_score
import numpy
import progressbar
import six
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainerx
UNK = 0
EOS = 1
def sequence_embed(embed, xs):
x_len = [len(x) for x in xs]
x_section = numpy.cumsum(x_len[:-1])
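    # e.g. lengths [3, 2, 4] give sections [3, 5]; the concatenated
    # (9, n_units) embedding below is split back into per-sequence chunks
    # of shapes (3, n_units), (2, n_units) and (4, n_units).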
ex = embed(F.concat(xs, axis=0))
exs = F.split_axis(ex, x_section, 0)
return exs
class Seq2seq(chainer.Chain):
def __init__(self, n_layers, n_source_vocab, n_target_vocab, n_units):
super(Seq2seq, self).__init__()
with self.init_scope():
self.embed_x = L.EmbedID(n_source_vocab, n_units)
self.embed_y = L.EmbedID(n_target_vocab, n_units)
self.encoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
self.decoder = L.NStepLSTM(n_layers, n_units, n_units, 0.1)
self.W = L.Linear(n_units, n_target_vocab)
self.n_layers = n_layers
self.n_units = n_units
def forward(self, xs, ys):
xs = [x[::-1] for x in xs]
eos = self.xp.array([EOS], numpy.int32)
ys_in = [F.concat([eos, y], axis=0) for y in ys]
ys_out = [F.concat([y, eos], axis=0) for y in ys]
# Both xs and ys_in are lists of arrays.
exs = sequence_embed(self.embed_x, xs)
eys = sequence_embed(self.embed_y, ys_in)
batch = len(xs)
# None represents a zero vector in an encoder.
hx, cx, _ = self.encoder(None, None, exs)
_, _, os = self.decoder(hx, cx, eys)
# It is faster to concatenate data before calculating loss
# because only one matrix multiplication is called.
concat_os = F.concat(os, axis=0)
concat_ys_out = F.concat(ys_out, axis=0)
loss = F.sum(F.softmax_cross_entropy(
self.W(concat_os), concat_ys_out, reduce='no')) / batch
chainer.report({'loss': loss}, self)
n_words = concat_ys_out.shape[0]
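        # `loss` is the per-sentence average, so `loss * batch` recovers the
        # summed NLL; dividing by the number of target words gives the
        # per-word NLL, whose exponential is the reported perplexity.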
perp = self.xp.exp(loss.array * batch / n_words)
chainer.report({'perp': perp}, self)
return loss
def translate(self, xs, max_length=100):
batch = len(xs)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
xs = [x[::-1] for x in xs]
exs = sequence_embed(self.embed_x, xs)
h, c, _ = self.encoder(None, None, exs)
ys = self.xp.full(batch, EOS, numpy.int32)
result = []
for i in range(max_length):
eys = self.embed_y(ys)
eys = F.split_axis(eys, batch, 0)
h, c, ys = self.decoder(h, c, eys)
cys = F.concat(ys, axis=0)
wy = self.W(cys)
ys = self.xp.argmax(wy.array, axis=1).astype(numpy.int32)
result.append(ys)
# Using `xp.concatenate(...)` instead of `xp.stack(result)` here to
# support NumPy 1.9.
result = chainer.get_device('@numpy').send(
self.xp.concatenate([x[None, :] for x in result]).T)
        # Remove EOS tags
outs = []
for y in result:
inds = numpy.argwhere(y == EOS)
if len(inds) > 0:
y = y[:inds[0, 0]]
outs.append(y)
return outs
@chainer.dataset.converter()
def convert(batch, device):
def to_device_batch(batch):
if device is None:
return batch
src_xp = chainer.backend.get_array_module(*batch)
xp = device.xp
concat = src_xp.concatenate(batch, axis=0)
sections = list(numpy.cumsum(
[len(x) for x in batch[:-1]], dtype=numpy.int32))
concat_dst = device.send(concat)
batch_dst = xp.split(concat_dst, sections)
return batch_dst
return {'xs': to_device_batch([x for x, _ in batch]),
'ys': to_device_batch([y for _, y in batch])}
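# Sketch of the converter's output for a hypothetical batch of two pairs:
#     convert([(x0, y0), (x1, y1)], device)
#     -> {'xs': [x0, x1], 'ys': [y0, y1]}
# Each array ends up on `device`; one concatenated transfer followed by a
# split is cheaper than sending every variable-length array separately.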
class CalculateBleu(chainer.training.Extension):
trigger = 1, 'epoch'
priority = chainer.training.PRIORITY_WRITER
def __init__(
self, model, test_data, key, device, batch=100, max_length=100):
self.model = model
self.test_data = test_data
self.key = key
self.batch = batch
self.device = device
self.max_length = max_length
def __call__(self, trainer):
device = self.device
with chainer.no_backprop_mode():
references = []
hypotheses = []
for i in range(0, len(self.test_data), self.batch):
sources, targets = zip(*self.test_data[i:i + self.batch])
references.extend([[t.tolist()] for t in targets])
sources = [device.send(x) for x in sources]
ys = [y.tolist()
for y in self.model.translate(sources, self.max_length)]
hypotheses.extend(ys)
bleu = bleu_score.corpus_bleu(
references, hypotheses,
smoothing_function=bleu_score.SmoothingFunction().method1)
chainer.report({self.key: bleu})
def count_lines(path):
with io.open(path, encoding='utf-8') as f:
return sum([1 for _ in f])
def load_vocabulary(path):
with io.open(path, encoding='utf-8') as f:
# +2 for UNK and EOS
word_ids = {line.strip(): i + 2 for i, line in enumerate(f)}
word_ids['<UNK>'] = 0
word_ids['<EOS>'] = 1
return word_ids
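# Example: a vocabulary file with the two lines "the" and "cat" yields
# {'the': 2, 'cat': 3, '<UNK>': 0, '<EOS>': 1}.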
def load_data(vocabulary, path):
n_lines = count_lines(path)
bar = progressbar.ProgressBar()
data = []
print('loading...: %s' % path)
with io.open(path, encoding='utf-8') as f:
for line in bar(f, max_value=n_lines):
words = line.strip().split()
array = numpy.array([vocabulary.get(w, UNK)
for w in words], numpy.int32)
data.append(array)
return data
def load_data_using_dataset_api(
src_vocab, src_path, target_vocab, target_path, filter_func):
def _transform_line(vocabulary, line):
words = line.strip().split()
return numpy.array(
[vocabulary.get(w, UNK) for w in words], numpy.int32)
def _transform(example):
source, target = example
return (
_transform_line(src_vocab, source),
_transform_line(target_vocab, target)
)
return chainer.datasets.TransformDataset(
chainer.datasets.TextDataset(
[src_path, target_path],
encoding='utf-8',
filter_func=filter_func
), _transform)
def calculate_unknown_ratio(data):
unknown = sum((s == UNK).sum() for s in data)
total = sum(s.size for s in data)
return unknown / total
def main():
parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
parser.add_argument('SOURCE', help='source sentence list')
parser.add_argument('TARGET', help='target sentence list')
parser.add_argument('SOURCE_VOCAB', help='source vocabulary file')
parser.add_argument('TARGET_VOCAB', help='target vocabulary file')
parser.add_argument('--validation-source',
help='source sentence list for validation')
parser.add_argument('--validation-target',
help='target sentence list for validation')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='number of sentence pairs in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='number of sweeps over the dataset to train')
parser.add_argument('--resume', '-r', type=str,
help='resume the training from snapshot')
parser.add_argument('--save', '-s', type=str,
help='save a snapshot of the training')
parser.add_argument('--unit', '-u', type=int, default=1024,
help='number of units')
parser.add_argument('--layer', '-l', type=int, default=3,
help='number of layers')
parser.add_argument('--use-dataset-api', default=False,
action='store_true',
help='use TextDataset API to reduce CPU memory usage')
parser.add_argument('--min-source-sentence', type=int, default=1,
                        help='minimum length of source sentence')
parser.add_argument('--max-source-sentence', type=int, default=50,
help='maximum length of source sentence')
parser.add_argument('--min-target-sentence', type=int, default=1,
                        help='minimum length of target sentence')
parser.add_argument('--max-target-sentence', type=int, default=50,
help='maximum length of target sentence')
parser.add_argument('--log-interval', type=int, default=200,
                        help='number of iterations to show log')
parser.add_argument('--validation-interval', type=int, default=4000,
                        help='number of iterations to evaluate the model '
'with validation dataset')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='directory to output the result')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# If the device is a ChainerX CUDA device, use the shared device memory
# pool between ChainerX and CuPy.
if device.xp is chainerx and device.device.backend.name == 'cuda':
# TODO(niboshi): The API is provisional.
chainerx._cuda.cupy_share_allocator()
# Load pre-processed dataset
print('[{}] Loading dataset... (this may take several minutes)'.format(
datetime.datetime.now()))
source_ids = load_vocabulary(args.SOURCE_VOCAB)
target_ids = load_vocabulary(args.TARGET_VOCAB)
if args.use_dataset_api:
# By using TextDataset, you can avoid loading whole dataset on memory.
# This significantly reduces the host memory usage.
def _filter_func(s, t):
sl = len(s.strip().split()) # number of words in source line
tl = len(t.strip().split()) # number of words in target line
return (
args.min_source_sentence <= sl <= args.max_source_sentence and
args.min_target_sentence <= tl <= args.max_target_sentence)
train_data = load_data_using_dataset_api(
source_ids, args.SOURCE,
target_ids, args.TARGET,
_filter_func,
)
else:
# Load all records on memory.
train_source = load_data(source_ids, args.SOURCE)
train_target = load_data(target_ids, args.TARGET)
assert len(train_source) == len(train_target)
train_data = [
(s, t)
for s, t in six.moves.zip(train_source, train_target)
if (args.min_source_sentence <= len(s) <= args.max_source_sentence
and
args.min_target_sentence <= len(t) <= args.max_target_sentence)
]
print('[{}] Dataset loaded.'.format(datetime.datetime.now()))
if not args.use_dataset_api:
# Skip printing statistics when using TextDataset API, as it is slow.
train_source_unknown = calculate_unknown_ratio(
[s for s, _ in train_data])
train_target_unknown = calculate_unknown_ratio(
[t for _, t in train_data])
print('Source vocabulary size: %d' % len(source_ids))
print('Target vocabulary size: %d' % len(target_ids))
print('Train data size: %d' % len(train_data))
print('Train source unknown ratio: %.2f%%' % (
train_source_unknown * 100))
print('Train target unknown ratio: %.2f%%' % (
train_target_unknown * 100))
target_words = {i: w for w, i in target_ids.items()}
source_words = {i: w for w, i in source_ids.items()}
# Set the current device
device.use()
# Setup model
model = Seq2seq(args.layer, len(source_ids), len(target_ids), args.unit)
model.to_device(device)
# Setup optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Setup iterator
train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
# Setup updater and trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, converter=convert, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.LogReport(
trigger=(args.log_interval, 'iteration')))
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'main/loss', 'main/perp',
'validation/main/bleu', 'elapsed_time']),
trigger=(args.log_interval, 'iteration'))
trainer.extend(
extensions.snapshot(filename='snapshot_epoch_{.updater.iteration}'),
trigger=(args.validation_interval, 'iteration'))
if args.validation_source and args.validation_target:
test_source = load_data(source_ids, args.validation_source)
test_target = load_data(target_ids, args.validation_target)
assert len(test_source) == len(test_target)
test_data = list(six.moves.zip(test_source, test_target))
test_data = [(s, t) for s, t in test_data if 0 < len(s) and 0 < len(t)]
test_source_unknown = calculate_unknown_ratio(
[s for s, _ in test_data])
test_target_unknown = calculate_unknown_ratio(
[t for _, t in test_data])
print('Validation data: %d' % len(test_data))
print('Validation source unknown ratio: %.2f%%' %
(test_source_unknown * 100))
print('Validation target unknown ratio: %.2f%%' %
(test_target_unknown * 100))
@chainer.training.make_extension()
def translate(trainer):
source, target = test_data[numpy.random.choice(len(test_data))]
result = model.translate([model.xp.array(source)])[0]
source_sentence = ' '.join([source_words[x] for x in source])
target_sentence = ' '.join([target_words[y] for y in target])
result_sentence = ' '.join([target_words[y] for y in result])
print('# source : ' + source_sentence)
print('# result : ' + result_sentence)
print('# expect : ' + target_sentence)
trainer.extend(
translate, trigger=(args.validation_interval, 'iteration'))
trainer.extend(
CalculateBleu(
model, test_data, 'validation/main/bleu', device),
trigger=(args.validation_interval, 'iteration'))
if args.resume is not None:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
print('start training')
trainer.run()
if args.save is not None:
# Save a snapshot
chainer.serializers.save_npz(args.save, trainer)
if __name__ == '__main__':
main()
| 15,768 | 37.089372 | 79 | py |
| chainer | chainer-master/examples/seq2seq/wmt_preprocess.py |
#!/usr/bin/env python
from __future__ import unicode_literals
import argparse
import collections
import io
import re
import progressbar
split_pattern = re.compile(r'([.,!?"\':;)(])')
digit_pattern = re.compile(r'\d')
def split_sentence(s, use_lower):
if use_lower:
s = s.lower()
s = s.replace('\u2019', '\'')
s = digit_pattern.sub('0', s)
words = []
for word in s.strip().split():
words.extend(split_pattern.split(word))
words = [w for w in words if w]
return words
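# Example: split_sentence('He said "Hello 42."', use_lower=True)
# -> ['he', 'said', '"', 'hello', '00', '.', '"']
# (digits are normalized to '0' and punctuation becomes separate tokens).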
def count_lines(path):
with io.open(path, encoding='utf-8', errors='ignore') as f:
return sum([1 for _ in f])
def read_file(path, use_lower):
n_lines = count_lines(path)
bar = progressbar.ProgressBar()
with io.open(path, encoding='utf-8', errors='ignore') as f:
for line in bar(f, max_value=n_lines):
words = split_sentence(line, use_lower)
yield words
def proc_dataset(
path, outpath, vocab_path=None, vocab_size=None, use_lower=False):
token_count = 0
counts = collections.Counter()
with io.open(outpath, 'w', encoding='utf-8') as f:
for words in read_file(path, use_lower):
line = ' '.join(words)
f.write(line)
f.write('\n')
if vocab_path:
for word in words:
counts[word] += 1
token_count += len(words)
print('number of tokens: %d' % token_count)
if vocab_path and vocab_size:
vocab = [word for (word, _) in counts.most_common(vocab_size)]
with io.open(vocab_path, 'w', encoding='utf-8') as f:
for word in vocab:
f.write(word)
f.write('\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'INPUT', help='input sentence data')
parser.add_argument(
'OUTPUT', help='output sentence data')
parser.add_argument(
'--vocab-file', help='vocabulary file to save')
parser.add_argument(
'--vocab-size', type=int, default=40000,
help='size of vocabulary file')
parser.add_argument(
'--lower', action='store_true', help='use lower case')
args = parser.parse_args()
proc_dataset(
args.INPUT, args.OUTPUT, vocab_path=args.vocab_file,
vocab_size=args.vocab_size, use_lower=args.lower)
if __name__ == '__main__':
main()
| 2,403 | 26.318182 | 74 | py |
| chainer | chainer-master/examples/image_captioning/download.py |
#!/usr/bin/env python
import argparse
import os
import zipfile
import progressbar
from six.moves.urllib import request
"""Download the MSCOCO dataset (images and captions)."""
urls = [
'http://images.cocodataset.org/zips/train2014.zip',
'http://images.cocodataset.org/zips/val2014.zip',
'http://images.cocodataset.org/annotations/annotations_trainval2014.zip'
]
def download(url, dst_file_path):
# Download a file, showing progress
bar_wrap = [None]
def reporthook(count, block_size, total_size):
bar = bar_wrap[0]
if bar is None:
bar = progressbar.ProgressBar(
maxval=total_size,
widgets=[
progressbar.Percentage(),
' ',
progressbar.Bar(),
' ',
progressbar.FileTransferSpeed(),
' | ',
progressbar.ETA(),
])
bar.start()
bar_wrap[0] = bar
bar.update(min(count * block_size, total_size))
request.urlretrieve(url, dst_file_path, reporthook=reporthook)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str, default='data',
                        help='Target MSCOCO dataset root directory')
args = parser.parse_args()
try:
os.makedirs(args.out)
except OSError:
raise OSError(
'\'{}\' already exists, delete it and try again'.format(args.out))
for url in urls:
print('Downloading {}...'.format(url))
# Download the zip file
file_name = os.path.basename(url)
dst_file_path = os.path.join(args.out, file_name)
download(url, dst_file_path)
# Unzip the file
zf = zipfile.ZipFile(dst_file_path)
for name in zf.namelist():
dirname, filename = os.path.split(name)
if not filename == '':
zf.extract(name, args.out)
# Remove the zip file since it has been extracted
os.remove(dst_file_path)
| 2,087 | 27.60274 | 78 | py |
| chainer | chainer-master/examples/image_captioning/model.py |
import numpy as np
import chainer
from chainer import functions as F
from chainer import initializers
from chainer import links as L
from chainer import reporter
from chainer import Variable
class ImageCaptionModel(chainer.Chain):
"""Image captioning model."""
def __init__(self, vocab_size, hidden_size=512, rnn='lstm',
dropout_ratio=0.5, finetune_feat_extractor=False,
ignore_label=-1):
super(ImageCaptionModel, self).__init__()
if rnn == 'lstm':
LanguageModel = LSTMLanguageModel
elif rnn == 'nsteplstm':
LanguageModel = NStepLSTMLanguageModel
else:
raise ValueError('Invalid RNN type.')
with self.init_scope():
self.feat_extractor = VGG16FeatureExtractor()
self.lang_model = LanguageModel(
vocab_size, hidden_size, dropout_ratio=dropout_ratio,
ignore_label=ignore_label)
self.finetune_feat_extractor = finetune_feat_extractor
def prepare(self, img):
"""Single image to resized and normalized image."""
return self.feat_extractor.prepare(img)
def forward(self, imgs, captions):
"""Batch of images to a single loss."""
imgs = Variable(imgs)
if self.finetune_feat_extractor:
img_feats = self.feat_extractor(imgs)
else:
# Extract features with the `train` configuration set to `False` in
# order to basically skip the dropout regularizations. This is how
# dropout is used during standard inference. Also, since we are not
# going to optimize the feature extractor, we explicitly set the
            # backpropagation mode to not construct any computational graphs.
with chainer.using_config('train', False), \
chainer.no_backprop_mode():
img_feats = self.feat_extractor(imgs)
loss = self.lang_model(img_feats, captions)
# Report the loss so that it can be printed, logged and plotted by
# other trainer extensions
reporter.report({'loss': loss}, self)
return loss
def predict(self, imgs, bos, eos, max_caption_length):
"""Batch of images to captions."""
imgs = Variable(imgs)
img_feats = self.feat_extractor(imgs)
captions = self.lang_model.predict(
img_feats, bos=bos, eos=eos, max_caption_length=max_caption_length)
return captions
class VGG16FeatureExtractor(chainer.Chain):
"""VGG16 image feature extractor."""
def __init__(self):
super(VGG16FeatureExtractor, self).__init__()
with self.init_scope():
self.cnn = L.VGG16Layers()
self.cnn_layer_name = 'fc7'
def prepare(self, img):
"""Single image to resized and normalized image.
        The input image should have the shape (channel, height, width) where
channels are aligned RGB. The returned image has the same shape but
channels in BGR order.
"""
return L.model.vision.vgg.prepare(img)
def forward(self, imgs):
"""Batch of images to image features."""
img_feats = self.cnn(imgs, [self.cnn_layer_name])[self.cnn_layer_name]
return img_feats
class LSTMLanguageModel(chainer.Chain):
"""Recurrent LSTM language model.
Generate captions given features extracted from images.
"""
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
super(LSTMLanguageModel, self).__init__()
with self.init_scope():
self.embed_word = L.EmbedID(
vocab_size,
hidden_size,
initialW=initializers.Normal(1.0),
ignore_label=ignore_label
)
self.embed_img = L.Linear(
hidden_size,
initialW=initializers.Normal(0.01)
)
self.lstm = L.LSTM(hidden_size, hidden_size)
self.out_word = L.Linear(
hidden_size,
vocab_size,
initialW=initializers.Normal(0.01)
)
self.dropout_ratio = dropout_ratio
def forward(self, img_feats, captions):
"""Batch of image features and image captions to a singe loss.
Compute the softmax cross-entropy captioning loss.
"""
self.reset(img_feats)
loss = 0
size = 0
caption_length = captions.shape[1]
for i in range(caption_length - 1):
# Compute the loss based on the prediction of the next token in the
# sequence
x = Variable(self.xp.asarray(captions[:, i]))
t = Variable(self.xp.asarray(captions[:, i + 1]))
if (t.array == self.embed_word.ignore_label).all():
# Preprocessed captions are padded to reach a maximum length.
# Often, you want to set the `ignore_label` to this padding.
# If all targets are simply paddings, predictions are no longer
# required.
break
y = self.step(x)
loss += F.softmax_cross_entropy(
y, t, ignore_label=self.embed_word.ignore_label)
size += 1
return loss / max(size, 1)
def predict(self, img_feats, bos, eos, max_caption_length):
"""Batch of image features to captions."""
self.reset(img_feats)
captions = self.xp.full((img_feats.shape[0], 1), bos, dtype=np.int32)
for _ in range(max_caption_length):
x = Variable(captions[:, -1]) # Previous word token as input
y = self.step(x)
pred = y.array.argmax(axis=1).astype(np.int32)
captions = self.xp.hstack((captions, pred[:, None]))
if (pred == eos).all():
break
return captions
def reset(self, img_feats):
"""Batch of image features to hidden representations.
Also, reset and then update the internal state of the LSTM.
"""
self.lstm.reset_state()
h = self.embed_img(img_feats)
h = self.lstm(F.dropout(h, ratio=self.dropout_ratio))
return h
def step(self, x):
"""Batch of word tokens to word tokens.
Predict the next set of tokens given previous tokens.
"""
h = self.embed_word(x)
h = self.lstm(F.dropout(h, ratio=self.dropout_ratio))
h = self.out_word(F.dropout(h, ratio=self.dropout_ratio))
return h
class NStepLSTMLanguageModel(chainer.Chain):
"""Recurrent NStepLSTM language model.
Generate captions given features extracted from images.
"""
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
super(NStepLSTMLanguageModel, self).__init__()
with self.init_scope():
self.embed_word = L.EmbedID(
vocab_size,
hidden_size,
initialW=initializers.Normal(1.0),
ignore_label=ignore_label
)
self.embed_img = L.Linear(
hidden_size,
initialW=initializers.Normal(0.01)
)
self.lstm = L.NStepLSTM(1, hidden_size, hidden_size, dropout_ratio)
self.decode_caption = L.Linear(
hidden_size,
vocab_size,
initialW=initializers.Normal(0.01)
)
self.dropout_ratio = dropout_ratio
def forward(self, img_feats, captions):
"""Batch of image features and image captions to a singe loss.
Compute the softmax cross-entropy captioning loss in a single pass
without iterating over the sequences.
"""
hx, cx, _ = self.reset(img_feats)
# Extract all inputs and targets for all captions in the batch
xs = [c[:-1] for c in captions] # del eos
ts = [c[1:] for c in captions] # del bos
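        # e.g. caption [<bos>, a, cat, <eos>] becomes the input
        # [<bos>, a, cat] and the target [a, cat, <eos>].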
# Get the predictions `ys`
_, _, ys = self.step(hx, cx, xs)
# Since `ys` is concatenated, we also concatenate the target tokens
# before computing the loss
ts = F.concat(ts, axis=0)
loss = F.softmax_cross_entropy(ys, ts)
return loss
def predict(self, img_feats, bos, eos, max_caption_length):
"""Batch of image features to captions."""
hx, cx, _ = self.reset(img_feats)
with chainer.using_device(self.device):
xp = self.xp
captions = xp.full(
(img_feats.shape[0], 1), bos, dtype=np.int32)
for i in range(max_caption_length):
# Create a list of the previous tokens to treat as inputs
xs = [xp.atleast_1d(c[-1]) for c in captions]
# Get the predictions `ys`
hx, cx, ys = self.step(hx, cx, xs)
# From `ys`, get the indices for the highest confidence.
# These indices correspond to the predicted tokens
#
                # Note that this is a greedy approach and that it can be
# replaced by e.g. beam search
pred = ys.array.argmax(axis=1).astype(np.int32)
captions = xp.hstack((captions, pred[:, None]))
if (pred == eos).all():
break
return captions
def reset(self, img_feats):
"""Batch of image features to LSTM states and hidden representations.
"""
h = self.embed_img(img_feats)
h = F.split_axis(h, h.shape[0], axis=0)
hx, cx, ys = self.lstm(None, None, h)
return hx, cx, ys
def step(self, hx, cx, xs):
"""Batch of word tokens to word tokens and hidden LSTM states.
Predict the next set of tokens given previous tokens.
"""
# Concatenate all input captions and pass them through the model in a
# single pass
caption_lens = [len(x) for x in xs]
caption_sections = np.cumsum(caption_lens[:-1])
xs = F.concat(xs, axis=0)
xs = self.embed_word(xs)
xs = F.split_axis(xs, caption_sections, axis=0)
hx, cx, ys = self.lstm(hx, cx, xs)
ys = F.concat(ys, axis=0)
ys = F.dropout(ys, self.dropout_ratio)
ys = self.decode_caption(ys)
return hx, cx, ys
| 10,308 | 34.304795 | 79 | py |
| chainer | chainer-master/examples/image_captioning/datasets.py |
from collections import defaultdict
import os
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
from chainer import dataset
from chainer.dataset.convert import to_device
# Vocabulary tokens of BOS (beginning of sentence), EOS (end of sentence),
# UNK (unknown word) and token labels to be ignored in the loss computation by
# the LSTM layers
_bos = 0
_eos = 1
_unk = 2
_ignore = -1
def split(sentence):
return sentence.lower().replace('.', ' .').replace(',', ' ,').split()
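# Example: split('A cat.') -> ['a', 'cat', '.']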
class MsCocoDataset(dataset.DatasetMixin):
"""Wraps the MSCOCO datasets and is used by the iterator to fetch data."""
def __init__(self, root_dir, data_dir, anno_file):
coco = COCO(os.path.join(root_dir, anno_file))
anns = coco.loadAnns(coco.getAnnIds())
self.coco = coco
self.anns = anns
self.vocab = None # Later set from outside
self.coco_root = root_dir
self.coco_data = data_dir
def __len__(self):
return len(self.anns)
def get_example(self, i):
"""Called by the iterator to fetch a data sample.
A data sample from MSCOCO consists of an image and its corresponding
caption.
The returned image has the shape (channel, height, width).
"""
ann = self.anns[i]
# Load the image
img_id = ann['image_id']
img_file_name = self.coco.loadImgs([img_id])[0]['file_name']
img = Image.open(
os.path.join(self.coco_root, self.coco_data, img_file_name))
if img.mode == 'RGB':
img = np.asarray(img, np.float32).transpose(2, 0, 1)
elif img.mode == 'L':
img = np.asarray(img, np.float32)
img = np.broadcast_to(img, (3,) + img.shape)
else:
raise ValueError('Invalid image mode {}'.format(img.mode))
# Load the caption, i.e. sequence of tokens
tokens = [self.vocab.get(w, _unk) for w in
['<bos>'] + split(ann['caption']) + ['<eos>']]
tokens = np.array(tokens, np.int32)
return img, tokens
def get_mscoco(
root_dir,
train_dir='train2014',
train_anno='annotations/captions_train2014.json',
val_dir='val2014',
val_anno='annotations/captions_val2014.json',
unk_threshold=5):
"""Return the training and validation datasets for MSCOCO.
The datasets can be used by the iterator during training.
A vocabulary is dynamically created based on all captions and is
returned as members of the training and validation dataset objects.
"""
train = MsCocoDataset(root_dir, train_dir, train_anno)
val = MsCocoDataset(root_dir, val_dir, val_anno)
# Create a vocabulary based on the captions from the training set only
# (excluding the validation sets). This is common practice.
captions = [ann['caption'] for ann in train.anns]
# Filter out rare words as UNK
word_counts = defaultdict(int)
for c in captions:
for w in split(c):
word_counts[w] += 1
# This vocabulary is needed in order to convert the words in the captions
# to integer tokens. When generating captions during testing, these tokens
# are mapped back to their corresponding words. Note that this vocabulary
    # is sorted alphanumerically.
vocab = {'<bos>': _bos, '<eos>': _eos, '<unk>': _unk}
for w, count in sorted(word_counts.items()):
if w not in vocab and count >= unk_threshold:
vocab[w] = len(vocab)
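    # With the default unk_threshold=5, a word seen fewer than five times in
    # the training captions is left out and later maps to <unk> (id 2).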
train.vocab = vocab
val.vocab = vocab
return train, val
def converter(batch, device, max_caption_length=None):
"""Optional preprocessing of the batch before forward pass."""
pad = max_caption_length is not None
imgs = []
captions = []
for img, caption in batch:
        # Preprocess the caption by either fixing the length by padding (LSTM)
# or by simply wrapping each caption in an ndarray (NStepLSTM)
if pad:
arr = np.full(max_caption_length, _ignore, dtype=np.int32)
# Clip to max length if necessary
arr[:len(caption)] = caption[:max_caption_length]
caption = arr
else:
caption = to_device(device, np.asarray(caption, dtype=np.int32))
imgs.append(img)
captions.append(caption)
if pad:
captions = to_device(device, np.stack(captions))
imgs = to_device(device, np.stack(imgs))
return imgs, captions
| 4,479 | 30.77305 | 78 | py |
| chainer | chainer-master/examples/image_captioning/predict.py |
#!/usr/bin/env python
import argparse
import glob
import os
import sys
import numpy as np
from PIL import Image
import chainer
from chainer import serializers
import chainerx
import datasets
from model import ImageCaptionModel
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--img', type=str,
help='Image path')
parser.add_argument('--img-dir', type=str,
help='Image directory path, instead of a single image')
parser.add_argument('--model', type=str, default='result/model_1000',
help='Trained model path')
parser.add_argument('--mscoco-root', type=str, default='data',
                        help='MSCOCO dataset root directory')
parser.add_argument('--rnn', type=str, default='nsteplstm',
choices=['nsteplstm', 'lstm'],
help='Language model layer type')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--max-caption-length', type=int, default=30,
help='Maximum caption length generated')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
if device.xp is chainerx:
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
print('Device: {}'.format(device))
print()
# Load the dataset to obtain the vocabulary, which is needed to convert
# predicted tokens into actual words
train, _ = datasets.get_mscoco(args.mscoco_root)
vocab = train.vocab
ivocab = {v: k for k, v in vocab.items()}
model = ImageCaptionModel(len(train.vocab), rnn=args.rnn)
serializers.load_npz(args.model, model)
model.to_device(device)
if args.img_dir: # Read all images in directory
img_paths = [
i for i in glob.glob(os.path.join(args.img_dir, '*')) if
i.endswith(('png', 'jpg'))]
img_paths = sorted(img_paths)
else: # Load a single image
img_paths = [args.img]
if not img_paths:
raise IOError('No images found for the given path')
imgs = []
for img_path in img_paths:
img = Image.open(img_path)
img = model.prepare(img)
imgs.append(img)
imgs = np.asarray(imgs)
imgs = device.send(imgs)
bos = vocab['<bos>']
eos = vocab['<eos>']
with chainer.using_config('train', False), \
chainer.no_backprop_mode():
captions = model.predict(
imgs, bos=bos, eos=eos, max_caption_length=args.max_caption_length)
captions = chainer.get_device('@numpy').send(captions)
# Print the predicted captions
file_names = [os.path.basename(path) for path in img_paths]
max_length = max(len(name) for name in file_names)
for file_name, caption in zip(file_names, captions):
caption = ' '.join(ivocab[token] for token in caption)
caption = caption.replace('<bos>', '').replace('<eos>', '').strip()
print(('{0:' + str(max_length) + '} {1}').format(file_name, caption))
if __name__ == '__main__':
main()
| 3,640 | 35.049505 | 79 | py |
| chainer | chainer-master/examples/image_captioning/train.py |
#!/usr/bin/env python
import argparse
import chainer
from chainer.datasets import TransformDataset
from chainer import iterators
from chainer import optimizers
from chainer import training
from chainer.training import extensions
import datasets
from model import ImageCaptionModel
import matplotlib
matplotlib.use('Agg')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--out', type=str, default='result',
help='Output directory')
parser.add_argument('--resume', '-r', type=str,
help='Resume the training from snapshot')
parser.add_argument('--mscoco-root', type=str, default='data',
                        help='MSCOCO dataset root directory')
parser.add_argument('--max-iters', type=int, default=50000,
help='Maximum number of iterations to train')
parser.add_argument('--batch-size', type=int, default=128,
help='Minibatch size')
parser.add_argument('--dropout-ratio', type=float, default=0.5,
help='Language model dropout ratio')
parser.add_argument('--val-keep-quantity', type=int, default=100,
help='Keep every N-th validation image')
parser.add_argument('--val-iter', type=int, default=100,
help='Run validation every N-th iteration')
parser.add_argument('--log-iter', type=int, default=1,
help='Log every N-th iteration')
parser.add_argument('--snapshot-iter', type=int, default=1000,
help='Model snapshot every N-th iteration')
parser.add_argument('--rnn', type=str, default='nsteplstm',
choices=['nsteplstm', 'lstm'],
help='Language model layer type')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--max-caption-length', type=int, default=30,
                        help='Maximum caption length when using LSTM layer')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print()
# Load the MSCOCO dataset. Assumes that the dataset has been downloaded
# already using e.g. the `download.py` script
train, val = datasets.get_mscoco(args.mscoco_root)
# Validation samples are used to address overfitting and see how well your
    # model generalizes to yet unseen data. However, since the number of these
    # samples in MSCOCO is quite large (~200k) and evaluating them all takes
    # time, you may choose to use only a fraction of the available samples
val = val[::args.val_keep_quantity]
# Number of unique words that are found in the dataset
vocab_size = len(train.vocab)
# Instantiate the model to be trained either with LSTM layers or with
# NStepLSTM layers
model = ImageCaptionModel(
vocab_size, dropout_ratio=args.dropout_ratio, rnn=args.rnn)
model.to_device(device)
def transform(in_data):
# Called for each sample and applies necessary preprocessing to the
# image such as resizing and normalizing
img, caption = in_data
img = model.prepare(img)
return img, caption
# We need to preprocess the images since their sizes may vary (and the
# model requires that they have the exact same fixed size)
train = TransformDataset(train, transform)
val = TransformDataset(val, transform)
train_iter = iterators.MultiprocessIterator(
train, args.batch_size, shared_mem=700000)
val_iter = chainer.iterators.MultiprocessIterator(
val, args.batch_size, repeat=False, shuffle=False, shared_mem=700000)
optimizer = optimizers.Adam()
optimizer.setup(model)
@chainer.dataset.converter()
def converter(batch, device):
        # The converter receives a batch of input samples and may modify it if
        # necessary. In our case, we need to align the captions depending on
        # whether we are using LSTM layers or NStepLSTM layers in the model.
if args.rnn == 'lstm':
max_caption_length = args.max_caption_length
elif args.rnn == 'nsteplstm':
max_caption_length = None
else:
raise ValueError('Invalid RNN type.')
return datasets.converter(
batch, device, max_caption_length=max_caption_length)
updater = training.updater.StandardUpdater(
train_iter, optimizer=optimizer, device=device, converter=converter)
trainer = training.Trainer(
updater, out=args.out, stop_trigger=(args.max_iters, 'iteration'))
trainer.extend(
extensions.Evaluator(
val_iter,
target=model,
converter=converter,
device=device,
),
trigger=(args.val_iter, 'iteration')
)
trainer.extend(
extensions.LogReport(
['main/loss', 'validation/main/loss'],
trigger=(args.log_iter, 'iteration')
)
)
trainer.extend(
extensions.PlotReport(
['main/loss', 'validation/main/loss'],
trigger=(args.log_iter, 'iteration')
)
)
trainer.extend(
extensions.PrintReport(
['elapsed_time', 'epoch', 'iteration', 'main/loss',
'validation/main/loss']
),
trigger=(args.log_iter, 'iteration')
)
# Save model snapshots so that later on, we can load them and generate new
# captions for any image. This can be done in the `predict.py` script
trainer.extend(
extensions.snapshot(filename='snapshot_{.updater.iteration}'),
trigger=(args.snapshot_iter, 'iteration')
)
trainer.extend(
extensions.snapshot_object(model, 'model_{.updater.iteration}'),
trigger=(args.snapshot_iter, 'iteration')
)
trainer.extend(extensions.ProgressBar())
if args.resume is not None:
chainer.serializers.load_npz(args.resume, trainer)
trainer.run()
if __name__ == '__main__':
main()
| 6,564 | 38.311377 | 79 | py |
| chainer | chainer-master/examples/modelzoo/download_model.py |
#!/usr/bin/env python
import argparse
import zipfile
import six
parser = argparse.ArgumentParser(
description='Download a Caffe reference model')
parser.add_argument('model_type',
choices=('alexnet', 'caffenet', 'googlenet', 'resnet'),
                    help='Model type (alexnet, caffenet, googlenet, resnet)')
args = parser.parse_args()
if args.model_type == 'alexnet':
url = 'http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel'
name = 'bvlc_alexnet.caffemodel'
elif args.model_type == 'caffenet':
url = 'http://dl.caffe.berkeleyvision.org/' \
'bvlc_reference_caffenet.caffemodel'
name = 'bvlc_reference_caffenet.caffemodel'
elif args.model_type == 'googlenet':
url = 'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel'
name = 'bvlc_googlenet.caffemodel'
elif args.model_type == 'resnet':
raise RuntimeError('The resnet model file cannot be downloaded '
'automatically. Please download manually: '
'https://github.com/KaimingHe/deep-residual-networks'
'#models')
else:
raise RuntimeError('Invalid model type. Choose from '
'alexnet, caffenet and googlenet.')
print('Downloading model file...')
six.moves.urllib.request.urlretrieve(url, name)
if args.model_type == 'resnet':
with zipfile.ZipFile(name, 'r') as zf:
zf.extractall('.')
print('Done')
| 1,438 | 33.261905 | 76 | py |
| chainer | chainer-master/examples/modelzoo/download_mean_file.py |
#!/usr/bin/env python
import six
print('Downloading ILSVRC12 mean file for NumPy...')
six.moves.urllib.request.urlretrieve(
'https://github.com/BVLC/caffe/raw/master/python/caffe/imagenet/'
'ilsvrc_2012_mean.npy',
'ilsvrc_2012_mean.npy')
print('Done')
| 266 | 23.272727 | 69 | py |
| chainer | chainer-master/examples/modelzoo/evaluate_caffe_net.py |
#!/usr/bin/env python
"""Example code of evaluating a Caffe reference model for ILSVRC2012 task.
Prerequisite: To run this example, crop the center of ILSVRC2012 validation
images and scale them to 256x256, and make a space-separated list file whose
first column contains the full path to an image and whose second column
contains a zero-origin label (this format is the same as that used by Caffe's
ImageDataLayer).
"""
import argparse
import os
import sys
import numpy as np
from PIL import Image
import chainer
import chainer.functions as F
from chainer.links import caffe
parser = argparse.ArgumentParser(
description='Evaluate a Caffe reference model on ILSVRC2012 dataset')
parser.add_argument('dataset', help='Path to validation image-label list file')
parser.add_argument('model_type',
choices=('alexnet', 'caffenet', 'googlenet', 'resnet'),
help='Model type (alexnet, caffenet, googlenet, resnet)')
parser.add_argument('model', help='Path to the pretrained Caffe model')
parser.add_argument('--basepath', '-b', default='/',
help='Base path for images in the dataset')
parser.add_argument('--mean', '-m', default='ilsvrc_2012_mean.npy',
help='Path to the mean file')
parser.add_argument('--batchsize', '-B', type=int, default=100,
help='Minibatch size')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.set_defaults(test=False)
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=-1,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
device.use()
xp = device.xp
assert args.batchsize > 0
chainer.config.train = False # All the codes will run in test mode
dataset = []
with open(args.dataset) as list_file:
for line in list_file:
pair = line.strip().split()
path = os.path.join(args.basepath, pair[0])
dataset.append((path, np.int32(pair[1])))
assert len(dataset) % args.batchsize == 0
print('Loading Caffe model file %s...' % args.model)
func = caffe.CaffeFunction(args.model)
print('Loaded')
func.to_device(device)
if args.model_type == 'alexnet' or args.model_type == 'caffenet':
in_size = 227
mean_image = np.load(args.mean)
def forward(x, t):
y, = func(inputs={'data': x}, outputs=['fc8'])
return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
elif args.model_type == 'googlenet':
in_size = 224
# Constant mean over spatial pixels
mean_image = np.ndarray((3, 256, 256), dtype=np.float32)
mean_image[0] = 104
mean_image[1] = 117
mean_image[2] = 123
def forward(x, t):
y, = func(inputs={'data': x}, outputs=['loss3/classifier'],
disable=['loss1/ave_pool', 'loss2/ave_pool'])
return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
elif args.model_type == 'resnet':
in_size = 224
mean_image = np.load(args.mean)
def forward(x, t):
y, = func(inputs={'data': x}, outputs=['prob'])
return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
cropwidth = 256 - in_size
start = cropwidth // 2
stop = start + in_size
mean_image = mean_image[:, start:stop, start:stop].copy()
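# e.g. for AlexNet/CaffeNet (in_size=227): cropwidth=29, start=14, stop=241,
# i.e. a centered 227x227 crop of both the 256x256 input and the mean image.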
x_batch = np.ndarray((args.batchsize, 3, in_size, in_size), dtype=np.float32)
y_batch = np.ndarray((args.batchsize,), dtype=np.int32)
i = 0
count = 0
accum_loss = 0
accum_accuracy = 0
for path, label in dataset:
image = np.asarray(Image.open(path)).transpose(2, 0, 1)[::-1]
image = image[:, start:stop, start:stop].astype(np.float32)
image -= mean_image
x_batch[i] = image
y_batch[i] = label
i += 1
if i == args.batchsize:
x = xp.asarray(x_batch)
y = xp.asarray(y_batch)
with chainer.no_backprop_mode():
loss, accuracy = forward(x, y)
accum_loss += float(loss.array) * args.batchsize
accum_accuracy += float(accuracy.array) * args.batchsize
del x, y, loss, accuracy
count += args.batchsize
sys.stdout.write('{} / {}\r'.format(count, len(dataset)))
sys.stdout.flush()
i = 0
print('mean loss: {}'.format(accum_loss / count))
print('mean accuracy: {}'.format(accum_accuracy / count))
| 4,652 | 31.767606 | 79 | py |
| chainer | chainer-master/examples/static_graph_optimizations/ptb/train_ptb_custom_loop.py |
"""Recurrent neural network language model with static graph optimizations.
This is a modified version of the standard Chainer Penn Tree Bank (ptb)
example that
includes static subgraph optimizations. It is mostly unchanged
from the original model except that the RNN is unrolled for `bproplen`
slices inside of a static chain.
This was required because the `LSTM` link used by the ptb example
is not fully compatible with the static subgraph
optimizations feature. Specifically, it does not support
multiple calls in the same iteration unless it is called from
inside a single static chain.
This code is ported from the following implementation written in Torch.
https://github.com/tomsercu/lstm
This code is a custom loop version of train_ptb.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
from __future__ import print_function
import argparse
import numpy as np
import random
import sys
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.functions as F
from chainer.functions.loss import softmax_cross_entropy
import chainer.links as L
from chainer import serializers
from chainer import static_graph
import chainerx
# Definition of a recurrent net for language modeling
class RNNForLMSlice(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLMSlice, self).__init__()
with self.init_scope():
self.embed = L.EmbedID(n_vocab, n_units)
self.l1 = L.LSTM(n_units, n_units)
self.l2 = L.LSTM(n_units, n_units)
self.l3 = L.Linear(n_units, n_vocab)
for param in self.params():
param.array[...] = np.random.uniform(-0.1, 0.1, param.shape)
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
def __call__(self, x):
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0))
h2 = self.l2(F.dropout(h1))
y = self.l3(F.dropout(h2))
return y
class RNNForLMUnrolled(chainer.Chain):
def __init__(self, n_vocab, n_units):
super(RNNForLMUnrolled, self).__init__()
with self.init_scope():
self.rnn = RNNForLMSlice(n_vocab, n_units)
@static_graph(verbosity_level=1)
def __call__(self, words):
"""Perform a forward pass on the supplied list of words.
The RNN is unrolled for a number of time slices equal to the
length of the supplied word sequence.
Args:
            words (list of Variable): The list of input words to the
                unrolled neural network.
        Returns the corresponding list of output variables of the same
        length as the input sequence.
"""
outputs = []
for ind in range(len(words)):
word = words[ind]
y = self.rnn(word)
outputs.append(y)
return outputs
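# Note: since RNNForLMUnrolled.__call__ is decorated with @static_graph, the
# Python loop above only executes while the static schedule is first traced;
# later iterations with the same unroll length replay the cached schedule.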
# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is a part of sequences starting from the different offsets
# equally spaced within the whole sequence.
class ParallelSequentialIterator(chainer.dataset.Iterator):
def __init__(self, dataset, batch_size, repeat=True):
self.dataset = dataset
self.batch_size = batch_size # batch size
# Number of completed sweeps over the dataset. In this case, it is
# incremented if every word is visited at least once after the last
# increment.
self.epoch = 0
# True if the epoch is incremented at the last iteration.
self.is_new_epoch = False
self.repeat = repeat
length = len(dataset)
# Offsets maintain the position of each sequence in the mini-batch.
self.offsets = [i * length // batch_size for i in range(batch_size)]
# NOTE: this is not a count of parameter updates. It is just a count of
# calls of ``__next__``.
self.iteration = 0
# use -1 instead of None internally
self._previous_epoch_detail = -1.
def __next__(self):
# This iterator returns a list representing a mini-batch. Each item
# indicates a different position in the original sequence. Each item is
        # represented by a pair of word IDs. The first word is at the
        # "current" position, while the second word is at the next position.
# At each iteration, the iteration count is incremented, which pushes
# forward the "current" position.
length = len(self.dataset)
if not self.repeat and self.iteration * self.batch_size >= length:
# If not self.repeat, this iterator stops at the end of the first
# epoch (i.e., when all words are visited once).
raise StopIteration
cur_words = self.get_words()
self._previous_epoch_detail = self.epoch_detail
self.iteration += 1
next_words = self.get_words()
epoch = self.iteration * self.batch_size // length
self.is_new_epoch = self.epoch < epoch
if self.is_new_epoch:
self.epoch = epoch
return list(zip(cur_words, next_words))
@property
def epoch_detail(self):
# Floating point version of epoch.
return self.iteration * self.batch_size / len(self.dataset)
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def get_words(self):
# It returns a list of current words.
return [self.dataset[(offset + self.iteration) % len(self.dataset)]
for offset in self.offsets]
def serialize(self, serializer):
# It is important to serialize the state to be recovered on resume.
self.iteration = serializer('iteration', self.iteration)
self.epoch = serializer('epoch', self.epoch)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
            # This class keeps no ``current_position`` attribute; the
            # current word position is ``iteration * batch_size``.
            self._previous_epoch_detail = self.epoch + \
                (self.iteration * self.batch_size - self.batch_size) \
                / len(self.dataset)
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
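# A small sketch (not used by ``main``; the toy dataset is an assumption)
# of what the iterator yields: each call to ``__next__`` returns
# ``batch_size`` pairs of (current word, next word) taken from equally
# spaced offsets in the corpus.
def _demo_parallel_sequential_iterator():
    dataset = np.arange(10, dtype=np.int32)
    it = ParallelSequentialIterator(dataset, batch_size=2)
    batch = it.__next__()
    # With length 10 and batch size 2 the offsets are [0, 5], so the first
    # batch pairs word 0 with word 1 and word 5 with word 6.
    assert [(int(c), int(n)) for c, n in batch] == [(0, 1), (5, 6)]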
def main():
np.random.seed(0)
random.seed(1)
parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', '-b', type=int, default=20,
help='Number of examples in each mini-batch')
parser.add_argument('--bproplen', '-l', type=int, default=25,
help='Number of words in each mini-batch '
'(= length of truncated BPTT)')
parser.add_argument('--epoch', '-e', type=int, default=39,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='0',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--gradclip', '-c', type=float, default=5,
help='Gradient norm threshold to clip')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.set_defaults(test=False)
parser.add_argument('--unit', '-u', type=int, default=650,
help='Number of LSTM units in each layer')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
device = chainer.get_device(args.device)
if device.xp is chainerx:
sys.stderr.write('This example does not support ChainerX devices.\n')
sys.exit(1)
device.use()
def evaluate(model, iter):
# Evaluation routine to be used for validation and test.
evaluator = model.copy() # to use different state
evaluator.rnn.reset_state() # initialize state
sum_perp = 0
data_count = 0
words = []
labels = []
lossfun = softmax_cross_entropy.softmax_cross_entropy
with configuration.using_config('train', False):
iter.reset()
for batch in iter:
word, label = convert.concat_examples(batch, device)
words.append(word)
labels.append(label)
data_count += 1
outputs = evaluator(words)
for ind in range(len(outputs)):
y = outputs[ind]
label = labels[ind]
loss = lossfun(y, label)
sum_perp += loss.array
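        # Perplexity is the exponential of the mean cross-entropy over the
        # evaluated slices.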
return np.exp(float(sum_perp) / data_count)
# Load the Penn Tree Bank long word sequence dataset
train, val, test = chainer.datasets.get_ptb_words()
n_vocab = max(train) + 1 # train is just an array of integers
print('#vocab =', n_vocab)
if args.test:
train = train[:100]
val = val[:100]
test = test[:100]
# Create the dataset iterators
train_iter = ParallelSequentialIterator(train, args.batchsize)
val_iter = ParallelSequentialIterator(val, 1, repeat=False)
test_iter = ParallelSequentialIterator(test, 1, repeat=False)
# Prepare an RNNLM model
model = RNNForLMUnrolled(n_vocab, args.unit)
lossfun = softmax_cross_entropy.softmax_cross_entropy
model.to_device(device)
# Set up an optimizer
optimizer = chainer.optimizers.SGD(lr=1.0)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.GradientClipping(args.gradclip))
sum_perp = 0
count = 0
iteration = 0
while train_iter.epoch < args.epoch:
iteration += 1
words = []
labels = []
# Progress the dataset iterator for bprop_len words at each iteration.
for i in range(args.bproplen):
# Get the next batch (a list of tuples of two word IDs)
batch = train_iter.__next__()
# Concatenate the word IDs to matrices and send them to the device
# self.converter does this job
# (it is chainer.dataset.concat_examples by default)
word, label = convert.concat_examples(batch, device)
words.append(word)
labels.append(label)
count += 1
outputs = model(words)
loss = 0
for ind in range(len(outputs)):
y = outputs[ind]
label = labels[ind]
loss += lossfun(y, label)
sum_perp += loss.array
optimizer.target.cleargrads() # Clear the parameter gradients
loss.backward() # Backprop
loss.unchain_backward() # Truncate the graph
optimizer.update() # Update the parameters
if iteration % 20 == 0:
print('iteration: ', iteration)
print('training perplexity: ', np.exp(float(sum_perp) / count))
sum_perp = 0
count = 0
if train_iter.is_new_epoch:
print('Evaluating model on validation set...')
print('epoch: ', train_iter.epoch)
print('validation perplexity: ', evaluate(model, val_iter))
# Evaluate on test dataset
print('test')
test_perp = evaluate(model, test_iter)
print('test perplexity:', test_perp)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('rnnlm.model', model)
print('save the optimizer')
serializers.save_npz('rnnlm.state', optimizer)
if __name__ == '__main__':
main()
| 12,422
| 36.759878
| 79
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/mnist/train_mnist_custom_loop.py
|
"""MNIST example with static subgraph optimizations.
This is a version of the Chainer MNIST example that has been modified
to support the static subgraph optimizations feature. Note that
the code is mostly unchanged except for the addition of the
`@static_graph` decorator to the model chain's `__call__()` method.
This code is a custom loop version of train_mnist.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
from __future__ import print_function
import argparse
import warnings
import numpy
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
import chainerx
import train_mnist
def run_train_loop(
optimizer, train_iter, test_iter, test_count, epoch, device):
model = optimizer.target
train_count = 0
sum_accuracy = 0
sum_loss = 0
while train_iter.epoch < epoch:
batch = train_iter.next()
x_array, t_array = convert.concat_examples(batch, device)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array, requires_grad=False)
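        # The ``Classifier`` chain is passed as the loss function here:
        # ``update`` computes ``model(x, t)``, clears gradients, runs
        # backward, and applies the update rule in one call.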
optimizer.update(model, x, t)
train_count += len(t)
sum_loss += float(model.loss.array) * len(t)
sum_accuracy += float(model.accuracy.array) * len(t)
if train_iter.is_new_epoch:
print('epoch: ', train_iter.epoch)
print('train mean loss: {}, accuracy: {}'.format(
sum_loss / train_count, sum_accuracy / train_count))
# evaluation
train_count = 0
sum_accuracy = 0
sum_loss = 0
# It is good practice to turn off train mode during evaluation.
with configuration.using_config('train', False):
for batch in test_iter:
x_array, t_array = convert.concat_examples(
batch, device)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array, requires_grad=False)
loss = model(x, t)
sum_loss += float(loss.array) * len(t)
sum_accuracy += float(model.accuracy.array) * len(t)
test_iter.reset()
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_accuracy / test_count))
sum_accuracy = 0
sum_loss = 0
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--model', '-m', default='MLP',
help='Choose the model: MLP or MLPSideEffect')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train
if args.model == 'MLP':
model = L.Classifier(train_mnist.MLP(args.unit, 10))
elif args.model == 'MLPSideEffect':
model = L.Classifier(train_mnist.MLPSideEffect(args.unit, 10))
model.to_device(device)
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
test_count = len(test)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test, args.batchsize, repeat=False, shuffle=False)
if device.xp is not chainerx:
run_train_loop(
optimizer, train_iter, test_iter, test_count, args.epoch, device)
else:
warnings.warn(
'Static subgraph optimization does not support ChainerX and will'
' be disabled.', UserWarning)
with chainer.using_config('use_static_graph', False):
run_train_loop(
optimizer, train_iter, test_iter, test_count, args.epoch,
device)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('mlp.model', model)
print('save the optimizer')
serializers.save_npz('mlp.state', optimizer)
if __name__ == '__main__':
main()
| 5,773
| 36.738562
| 77
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/mnist/train_mnist.py
|
"""MNIST example with static subgraph optimizations.
This is a version of the Chainer MNIST example that has been modified
to support the static subgraph optimizations feature. Note that
the code is mostly unchanged except for the addition of the
`@static_graph` decorator to the model chain's `__call__()` method.
Note for contributors:
This example code is referred to from the documentation.
If this file is to be modified, please also update the line numbers in
`docs/source/reference/static_graph.rst` accordingly.
"""
from __future__ import print_function
import argparse
import warnings
import numpy
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import static_code
from chainer import static_graph
from chainer import training
from chainer.training import extensions
import chainerx
import matplotlib
matplotlib.use('Agg')
# Network definition
class MLP(chainer.Chain):
"""A fully-connected neural network for digit classification.
"""
def __init__(self, n_units, n_out):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
@static_graph
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
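# A minimal usage sketch (illustrative only; not used elsewhere in this
# file). The first call to the ``@static_graph``-decorated method traces
# and optimizes the graph; later calls with the same input shapes reuse
# the cached schedule.
def _demo_static_mlp():
    x = numpy.random.rand(8, 784).astype(numpy.float32)
    model = MLP(n_units=100, n_out=10)
    y = model(x)  # traced and optimized on the first call
    y = model(x)  # replays the static schedule
    assert y.shape == (8, 10)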
class MLPSideEffect(chainer.Chain):
"""An example of a model with side-effects.
This uses the same network as ``MLP`` except that it includes an
example of side-effect code.
"""
def __init__(self, n_units, n_out):
super(MLPSideEffect, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
self.side_effect_counter = 0
@static_code
def example_side_effect(self):
# Any code that needs to run each iteration inside the `__call__()`
# method should be wrapped inside of a method/function like this
# using the `@static_code` decorator.
self.side_effect_counter += 1
if self.side_effect_counter % 1000 == 0:
print('Side effect counter: ', self.side_effect_counter)
@static_graph
def __call__(self, x):
# Example code with side effects:
self.example_side_effect()
# Define-by-run code:
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--frequency', '-f', type=int, default=-1,
help='Frequency of taking a snapshot')
parser.add_argument('--device', '-d', type=str, default='-1',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--model', '-m', default='MLP',
help='Choose the model: MLP or MLPSideEffect')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.model == 'MLP':
model = L.Classifier(MLP(args.unit, 10))
elif args.model == 'MLPSideEffect':
model = L.Classifier(MLPSideEffect(args.unit, 10))
model.to_device(device)
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist()
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
# TODO(hvy): Temporarily disabled for chainerx. Fix it.
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
# Take a snapshot for each specified epoch
frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
if device.xp is not chainerx:
trainer.run()
else:
warnings.warn(
'Static subgraph optimization does not support ChainerX and will'
' be disabled.', UserWarning)
with chainer.using_config('use_static_graph', False):
trainer.run()
if __name__ == '__main__':
main()
| 7,909
| 36.13615
| 79
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/cifar/train_cifar_custom_loop.py
|
"""CIFAR example with static subgraph optimizations.
This is a version of the Chainer CIFAR example that has been modified
to support the static subgraph optimizations feature. Note that
the code is mostly unchanged except for the addition of the
`@static_graph` decorator to the model chain's `__call__()` method.
This code is a custom loop version of train_cifar.py. That is, we train
models without using the Trainer class in chainer and instead write a
training loop that manually computes the loss of minibatches and
applies an optimizer to update the model.
"""
import argparse
import warnings
import numpy
import chainer
from chainer import configuration
from chainer.dataset import convert
import chainer.links as L
from chainer import serializers
import chainerx
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
def run_train_loop(
optimizer, train_iter, test_iter, test_count, epoch,
device):
model = optimizer.target
train_count = 0
sum_accuracy = 0
sum_loss = 0
while train_iter.epoch < epoch:
batch = train_iter.next()
# Reduce learning rate by 0.5 every 25 epochs.
if train_iter.epoch % 25 == 0 and train_iter.is_new_epoch:
optimizer.lr *= 0.5
print('Reducing learning rate to: {}'.format(optimizer.lr))
x_array, t_array = convert.concat_examples(batch, device)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array, requires_grad=False)
optimizer.update(model, x, t)
train_count += len(t)
sum_loss += float(model.loss.array) * len(t)
sum_accuracy += float(model.accuracy.array) * len(t)
if train_iter.is_new_epoch:
print('epoch: {}'.format(train_iter.epoch))
print('train mean loss: {}, accuracy: {}'.format(
sum_loss / train_count, sum_accuracy / train_count))
# evaluation
train_count = 0
sum_accuracy = 0
sum_loss = 0
model.predictor.train = False
# It is good practice to turn off train mode during evaluation.
with configuration.using_config('train', False):
for batch in test_iter:
x_array, t_array = convert.concat_examples(
batch, device)
x = chainer.Variable(x_array)
t = chainer.Variable(t_array, requires_grad=False)
loss = model(x, t)
sum_loss += float(loss.array) * len(t)
sum_accuracy += float(model.accuracy.array) * len(t)
test_iter.reset()
model.predictor.train = True
print('test mean loss: {}, accuracy: {}'.format(
sum_loss / test_count, sum_accuracy / test_count))
sum_accuracy = 0
sum_loss = 0
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='0',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
if args.test:
train = train[:200]
test = test[:200]
test_count = len(test)
model = L.Classifier(models.VGG.VGG(class_labels))
model.to_device(device)
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(5e-4))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test, args.batchsize, repeat=False, shuffle=False)
if device.xp is not chainerx:
run_train_loop(
optimizer, train_iter, test_iter, test_count, args.epoch, device)
else:
warnings.warn(
'Static subgraph optimization does not support ChainerX and will'
' be disabled.', UserWarning)
with chainer.using_config('use_static_graph', False):
run_train_loop(
optimizer, train_iter, test_iter, test_count, args.epoch,
device)
# Save the model and the optimizer
print('save the model')
serializers.save_npz('mlp.model', model)
print('save the optimizer')
serializers.save_npz('mlp.state', optimizer)
if __name__ == '__main__':
main()
| 6,633
| 37.126437
| 77
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/cifar/train_cifar.py
|
"""CIFAR example with static subgraph optimizations.
This is a version of the Chainer CIFAR example that has been modified
to support the static subgraph optimizations feature. Note that
the code is mostly unchanged except for the addition of the
`@static_graph` decorator to the model chain's `__call__()` method.
"""
import argparse
import warnings
import numpy
import chainer
import chainer.links as L
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
import chainerx
from chainer.datasets import get_cifar10
from chainer.datasets import get_cifar100
import models.VGG
def main():
parser = argparse.ArgumentParser(description='Chainer CIFAR example:')
parser.add_argument('--dataset', default='cifar10',
help='The dataset to use: cifar10 or cifar100')
parser.add_argument('--batchsize', '-b', type=int, default=64,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.05,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=300,
help='Number of sweeps over the dataset to train')
parser.add_argument('--device', '-d', type=str, default='0',
help='Device specifier. Either ChainerX device '
'specifier or an integer. If non-negative integer, '
'CuPy arrays with specified device id are used. If '
'negative integer, NumPy arrays are used')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
parser.add_argument('--early-stopping', type=str,
help='Metric to watch for early stopping')
group = parser.add_argument_group('deprecated arguments')
group.add_argument('--gpu', '-g', dest='device',
type=int, nargs='?', const=0,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
if chainer.get_dtype() == numpy.float16:
warnings.warn(
'This example may cause NaN in FP16 mode.', RuntimeWarning)
device = chainer.get_device(args.device)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
device.use()
# Set up a neural network to train.
# Classifier reports softmax cross entropy loss and accuracy at every
# iteration, which will be used by the PrintReport extension below.
if args.dataset == 'cifar10':
print('Using CIFAR10 dataset.')
class_labels = 10
train, test = get_cifar10()
elif args.dataset == 'cifar100':
print('Using CIFAR100 dataset.')
class_labels = 100
train, test = get_cifar100()
else:
raise RuntimeError('Invalid dataset choice.')
model = L.Classifier(models.VGG.VGG(class_labels))
model.to_device(device)
optimizer = chainer.optimizers.MomentumSGD(args.learnrate)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(5e-4))
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
stop_trigger = (args.epoch, 'epoch')
# Early stopping option
if args.early_stopping:
stop_trigger = triggers.EarlyStoppingTrigger(
monitor=args.early_stopping, verbose=True,
max_trigger=(args.epoch, 'epoch'))
# Set up a trainer
updater = training.updaters.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(updater, stop_trigger, out=args.out)
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=device))
# Reduce the learning rate by half every 25 epochs.
trainer.extend(extensions.ExponentialShift('lr', 0.5),
trigger=(25, 'epoch'))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
# TODO(hvy): Support ChainerX
if device.xp is not chainerx:
trainer.extend(extensions.DumpGraph('main/loss'))
# Take a snapshot at each epoch
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Print selected entries of the log to stdout
# Here "main" refers to the target link of the "main" optimizer again, and
# "validation" refers to the default name of the Evaluator extension.
# Entries other than 'epoch' are reported by the Classifier link, called by
# either the updater or the evaluator.
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Print a progress bar to stdout
trainer.extend(extensions.ProgressBar())
if args.resume:
# Resume from a snapshot
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
if device.xp is not chainerx:
trainer.run()
else:
warnings.warn(
'Static subgraph optimization does not support ChainerX and will'
' be disabled.', UserWarning)
with chainer.using_config('use_static_graph', False):
trainer.run()
if __name__ == '__main__':
main()
| 5,864
| 38.1
| 79
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/cifar/models/VGG.py
|
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import static_graph
class Block(chainer.Chain):
"""A convolution, batch norm, ReLU block.
A block in a feedforward network that performs a
convolution followed by batch normalization followed
by a ReLU activation.
For the convolution operation, a square filter size is used.
Args:
out_channels (int): The number of output channels.
ksize (int): The size of the filter is ksize x ksize.
pad (int): The padding to use for the convolution.
"""
def __init__(self, out_channels, ksize, pad=1):
super(Block, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(None, out_channels, ksize, pad=pad,
nobias=True)
self.bn = L.BatchNormalization(out_channels)
def __call__(self, x):
h = self.conv(x)
h = self.bn(h)
return F.relu(h)
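# A small shape sketch for ``Block`` (the input size is an assumption; not
# used by the model below): a 3x3 convolution with pad=1 keeps the 32x32
# spatial size.
def _demo_block():
    import numpy
    x = numpy.zeros((1, 3, 32, 32), dtype=numpy.float32)
    block = Block(out_channels=64, ksize=3)
    assert block(x).shape == (1, 64, 32, 32)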
class VGG(chainer.Chain):
"""A VGG-style network for very small images.
This model is based on the VGG-style model from
http://torch.ch/blog/2015/07/30/cifar.html
which is based on the network architecture from the paper:
https://arxiv.org/pdf/1409.1556v6.pdf
This model is intended to be used with either RGB or greyscale input
images that are of size 32x32 pixels, such as those in the CIFAR10
and CIFAR100 datasets.
On CIFAR10, it achieves approximately 89% accuracy on the test set with
no data augmentation.
On CIFAR100, it achieves approximately 63% accuracy on the test set with
no data augmentation.
Args:
class_labels (int): The number of class labels.
"""
def __init__(self, class_labels=10):
super(VGG, self).__init__()
with self.init_scope():
self.block1_1 = Block(64, 3)
self.block1_2 = Block(64, 3)
self.block2_1 = Block(128, 3)
self.block2_2 = Block(128, 3)
self.block3_1 = Block(256, 3)
self.block3_2 = Block(256, 3)
self.block3_3 = Block(256, 3)
self.block4_1 = Block(512, 3)
self.block4_2 = Block(512, 3)
self.block4_3 = Block(512, 3)
self.block5_1 = Block(512, 3)
self.block5_2 = Block(512, 3)
self.block5_3 = Block(512, 3)
self.fc1 = L.Linear(None, 512, nobias=True)
self.bn_fc1 = L.BatchNormalization(512)
self.fc2 = L.Linear(None, class_labels, nobias=True)
@static_graph
def __call__(self, x):
# 64 channel blocks:
h = self.block1_1(x)
h = F.dropout(h, ratio=0.3)
h = self.block1_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 128 channel blocks:
h = self.block2_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block2_2(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 256 channel blocks:
h = self.block3_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block3_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block4_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block4_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
# 512 channel blocks:
h = self.block5_1(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_2(h)
h = F.dropout(h, ratio=0.4)
h = self.block5_3(h)
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.dropout(h, ratio=0.5)
h = self.fc1(h)
h = self.bn_fc1(h)
h = F.relu(h)
h = F.dropout(h, ratio=0.5)
return self.fc2(h)
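# An illustrative forward pass (the batch size is an assumption; not part
# of the model definition). The five 2x2 max poolings reduce 32x32 inputs
# to 1x1 before the fully connected layers.
def _demo_vgg():
    import numpy
    x = numpy.zeros((2, 3, 32, 32), dtype=numpy.float32)
    model = VGG(class_labels=10)
    assert model(x).shape == (2, 10)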
| 3,852
| 29.824
| 76
|
py
|
chainer
|
chainer-master/examples/static_graph_optimizations/cifar/models/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/examples/glance/glance.py
|
# Note for contributors:
# This example code is referred to from "Chainer at a Glance" tutorial.
# If this file is to be modified, please also update the line numbers in
# `docs/source/glance.rst` accordingly.
import chainer as ch
from chainer import datasets
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import numpy as np
import matplotlib
matplotlib.use('Agg')
mushroomsfile = 'mushrooms.csv'
data_array = np.genfromtxt(
mushroomsfile, delimiter=',', dtype=str, skip_header=1)
for col in range(data_array.shape[1]):
data_array[:, col] = np.unique(data_array[:, col], return_inverse=True)[1]
X = data_array[:, 1:].astype(np.float32)
Y = data_array[:, 0].astype(np.int32)[:, None]
train, test = datasets.split_dataset_random(
datasets.TupleDataset(X, Y), int(data_array.shape[0] * .7))
train_iter = ch.iterators.SerialIterator(train, 100)
test_iter = ch.iterators.SerialIterator(
test, 100, repeat=False, shuffle=False)
# Network definition
def MLP(n_units, n_out):
layer = ch.Sequential(L.Linear(n_units), F.relu)
model = layer.repeat(2)
model.append(L.Linear(n_out))
return model
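# ``layer.repeat(2)`` stacks two freshly initialized copies of the
# Linear -> relu stage, so ``MLP(44, 1)`` is structurally equivalent to
# the explicit chain sketched below (illustrative only; the script uses
# the version above):
#
#     ch.Sequential(
#         L.Linear(44), F.relu,
#         L.Linear(44), F.relu,
#         L.Linear(1),
#     )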
model = L.Classifier(
MLP(44, 1), lossfun=F.sigmoid_cross_entropy, accfun=F.binary_accuracy)
# Setup an optimizer
optimizer = ch.optimizers.SGD().setup(model)
# Create the updater, using the optimizer
updater = training.StandardUpdater(train_iter, optimizer, device=-1)
# Set up a trainer
trainer = training.Trainer(updater, (50, 'epoch'), out='result')
# Evaluate the model with the test dataset for each epoch
trainer.extend(extensions.Evaluator(test_iter, model, device=-1))
# Dump a computational graph from 'loss' variable at the first iteration
# The "main" refers to the target link of the "main" optimizer.
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(20, 'epoch'))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport())
# Save two plot images to the result dir
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.extend(
extensions.PlotReport(
['main/accuracy', 'validation/main/accuracy'],
'epoch', file_name='accuracy.png'))
# Print selected entries of the log to stdout
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
# Run the training
trainer.run()
x, t = test[np.random.randint(len(test))]
predict = model.predictor(x[None]).array
predict = predict[0][0]
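# ``predict`` is a raw logit because the model was trained with
# ``sigmoid_cross_entropy``: a logit >= 0 corresponds to a predicted
# probability >= 0.5 for the positive ("poisonous") class.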
if predict >= 0:
print('Predicted Poisonous, Actual ' + ['Edible', 'Poisonous'][t[0]])
else:
print('Predicted Edible, Actual ' + ['Edible', 'Poisonous'][t[0]])
| 2,876
| 30.271739
| 78
|
py
|
chainer
|
chainer-master/examples/serialization/model.py
|
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
class MLP(chainer.Chain):
def __init__(self, n_in=784, n_units=100, n_out=10):
super(MLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(n_in, n_units) # n_in -> n_units
self.l2 = L.Linear(n_units, n_units) # n_units -> n_units
self.l3 = L.Linear(n_units, n_out) # n_units -> n_out
self.add_persistent('persistent', np.random.rand(10, 10))
def forward(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
| 701
| 29.521739
| 70
|
py
|
chainer
|
chainer-master/examples/serialization/save.py
|
import chainer
import h5py
import numpy as np
import model
# Create a model object first
model = model.MLP()
def save_parameters_as_npz(model, filename='model.npz'):
# Save the model parameters into a NPZ file
chainer.serializers.save_npz(filename, model)
print('{} saved!\n'.format(filename))
# Load the saved npz from NumPy and show the parameter shapes
    print('--- The list of saved params in {} ---'.format(filename))
    saved_params = np.load(filename)
for param_key, param in saved_params.items():
print(param_key, '\t:', param.shape)
print('---------------------------------------------\n')
def save_parameters_as_hdf5(model, filename='model.h5'):
# Save the model parameters into a HDF5 archive
chainer.serializers.save_hdf5(filename, model)
    print('{} saved!\n'.format(filename))
    # Load the saved HDF5 using h5py
    print('--- The list of saved params in {} ---'.format(filename))
    f = h5py.File(filename, 'r')
for param_key, param in f.items():
msg = '{}:'.format(param_key)
if isinstance(param, h5py.Dataset):
msg += ' {}'.format(param.shape)
print(msg)
if isinstance(param, h5py.Group):
for child_key, child in param.items():
print(' {}:{}'.format(child_key, child.shape))
print('---------------------------------------------\n')
save_parameters_as_npz(model)
save_parameters_as_hdf5(model)
| 1,422
| 30.622222
| 65
|
py
|
chainer
|
chainer-master/examples/serialization/load.py
|
import chainer
import numpy as np
import model
def load_npz_file_to_model(npz_filename='model.npz'):
# Create model object first
model1 = model.MLP()
# Load the saved parameters into the model object
chainer.serializers.load_npz(npz_filename, model1)
print('{} loaded!'.format(npz_filename))
return model1
def load_hdf5_file_to_model(hdf5_filename='model.h5'):
# Create another model object first
model2 = model.MLP()
# Load the saved parameters into the model object
chainer.serializers.load_hdf5(hdf5_filename, model2)
print('{} loaded!'.format(hdf5_filename))
return model2
model1 = load_npz_file_to_model()
model2 = load_hdf5_file_to_model()
# Check that the loaded parameters are same
model2_params = {name: param for name, param in model2.namedparams()}
for name, npz_param in model1.namedparams():
h5_param = model2_params[name]
np.testing.assert_array_equal(npz_param.array, h5_param.array)
print(name, npz_param.shape)
| 1,000
| 25.342105
| 69
|
py
|
chainer
|
chainer-master/chainermn/global_except_hook.py
|
import os
import sys
import warnings
_orig_except_hook = None
def _global_except_hook(exctype, value, traceback):
"""Catches an unhandled exception and call MPI_Abort()."""
try:
if _orig_except_hook:
_orig_except_hook(exctype, value, traceback)
else:
sys.__excepthook__(exctype, value, traceback)
finally:
import mpi4py.MPI
rank = mpi4py.MPI.COMM_WORLD.Get_rank()
sys.stderr.write('\n')
sys.stderr.write('******************************************\n')
sys.stderr.write('ChainerMN:\n')
sys.stderr.write(' Uncaught exception on rank {}.\n'.format(rank))
sys.stderr.write(' Calling MPI_Abort() to shut down MPI...\n')
sys.stderr.write('******************************************\n')
sys.stderr.write('\n\n')
sys.stderr.flush()
try:
import mpi4py.MPI
mpi4py.MPI.COMM_WORLD.Abort(1)
except Exception as e:
# Something is completely broken...
# There's nothing we can do any more
sys.stderr.write(
'Sorry, failed to stop MPI and the process may hang.\n')
sys.stderr.flush()
raise e
def _add_hook_if_enabled():
# An MPI runtime is expected to kill all of its child processes
# if one of them exits abnormally or without calling `MPI_Finalize()`.
    # However, when a Python program runs on `mpi4py`, the MPI runtime
# often fails to detect a process failure, and the rest of the processes
# hang infinitely.
# It is problematic especially when you run ChainerMN programs on a cloud
# environment, on which you are charged on time basis.
# See https://github.com/chainer/chainermn/issues/236 for more discussion.
#
# To activate this handler, set CHAINERMN_FORCE_ABORT_ON_EXCEPTION
# to a non-empty value.
# Note that you need to pass an argument to mpiexec (-x for Open MPI)
# to activate the handler in all processes.
var = os.environ.get('CHAINERMN_FORCE_ABORT_ON_EXCEPTION')
if var is not None and len(var) > 0:
add_hook()
def add_hook():
"""Add a global hook function that captures all unhandled exceptions.
    The function calls MPI_Abort() to force all processes to abort.
It is useful when you run your training script on a cloud platform.
"""
global _orig_except_hook
if _orig_except_hook is not None:
warnings.warn('chainermn.global_except_hook.add_hook() '
'seems to be called multiple times. Ignoring.',
stacklevel=2)
return
_orig_except_hook = sys.excepthook
sys.excepthook = _global_except_hook
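# A minimal usage sketch: the hook can also be installed explicitly from a
# training script instead of relying on the environment variable:
#
#     import chainermn.global_except_hook
#     chainermn.global_except_hook.add_hook()
#
# With Open MPI, ``mpiexec -x CHAINERMN_FORCE_ABORT_ON_EXCEPTION=1``
# installs it in every process at import time instead.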
| 2,716
| 34.75
| 78
|
py
|
chainer
|
chainer-master/chainermn/nccl.py
|
try:
from cupy.cuda.nccl import get_build_version # NOQA
from cupy.cuda.nccl import get_unique_id # NOQA
from cupy.cuda.nccl import get_version # NOQA
from cupy.cuda.nccl import NCCL_FLOAT # NOQA
from cupy.cuda.nccl import NCCL_FLOAT16 # NOQA
from cupy.cuda.nccl import NCCL_FLOAT32 # NOQA
from cupy.cuda.nccl import NCCL_FLOAT64 # NOQA
from cupy.cuda.nccl import NCCL_SUM # NOQA
from cupy.cuda.nccl import NcclCommunicator # NOQA
from cupy.cuda.nccl import NcclError # NOQA
_available = True
except Exception:
_available = False
| 588
| 38.266667
| 56
|
py
|
chainer
|
chainer-master/chainermn/optimizers.py
|
import chainer
import copy
class _MultiNodeOptimizer(object):
def __init__(self, actual_optimizer, communicator, zero_fill):
super(_MultiNodeOptimizer, self).__setattr__(
'communicator', communicator)
super(_MultiNodeOptimizer, self).__setattr__(
'actual_optimizer', actual_optimizer)
super(_MultiNodeOptimizer, self).__setattr__(
'target_params', [])
super(_MultiNodeOptimizer, self).__setattr__(
'zero_fill', zero_fill)
def update(self, lossfun=None, *args, **kwds):
target = self.target
if lossfun is not None:
use_cleargrads = getattr(self, '_use_cleargrads', True)
loss = lossfun(*args, **kwds)
if use_cleargrads:
target.cleargrads()
else:
target.zerograds()
loss.backward(loss_scale=self.actual_optimizer._loss_scale)
del loss
if self.is_changed(target):
self.communicator.bcast_data(target)
else:
self.communicator.multi_node_mean_grad(target, self.zero_fill)
self.actual_optimizer.update(None, *args, **kwds)
def is_changed(self, target):
previous_params = self.target_params
super(_MultiNodeOptimizer, self).__setattr__(
'target_params', [(name, param.data is not None)
for name, param in sorted(target.namedparams())])
if len(previous_params) != len(self.target_params):
return True
for param1, param2 in zip(self.target_params, previous_params):
if (param1[0] != param2[0]) or param1[1] != param2[1]:
return True
return False
def setup(self, link):
self.actual_optimizer.setup(link)
return self
def __getattr__(self, attr_name):
return getattr(self.actual_optimizer, attr_name)
def __setattr__(self, attr_name, value):
setattr(self.actual_optimizer, attr_name, value)
class _DoubleBufferingOptimizer(object):
def __init__(self, actual_optimizer, communicator, zero_fill):
super(_DoubleBufferingOptimizer, self).__setattr__(
'communicator', communicator)
super(_DoubleBufferingOptimizer, self).__setattr__(
'actual_optimizer', actual_optimizer)
super(_DoubleBufferingOptimizer, self).__setattr__(
'needs_update', False)
super(_DoubleBufferingOptimizer, self).__setattr__(
'communicated_target', None)
super(_DoubleBufferingOptimizer, self).__setattr__(
'target_params_list', [[], []])
super(_DoubleBufferingOptimizer, self).__setattr__(
'allreduce_grad_stream', chainer.cuda.Stream(non_blocking=True))
super(_DoubleBufferingOptimizer, self).__setattr__(
'zero_fill', zero_fill)
def update(self, lossfun=None, *args, **kwds):
target = self.target
if lossfun is not None:
use_cleargrads = getattr(self, '_use_cleargrads', True)
loss = lossfun(*args, **kwds)
if use_cleargrads:
target.cleargrads()
else:
target.zerograds()
loss.backward(loss_scale=self.actual_optimizer._loss_scale)
del loss
if self.is_changed(target, self.target_params_list[0]):
self.wait()
self.communicator.bcast_data(target)
super(_DoubleBufferingOptimizer, self).__setattr__(
'communicated_target', copy.deepcopy(target))
super(_DoubleBufferingOptimizer, self).__setattr__(
'target_params_list', [
list(sorted(self.target.namedparams())),
list(sorted(self.communicated_target.namedparams()))])
super(_DoubleBufferingOptimizer, self).__setattr__(
'needs_update', False)
else:
self.wait()
self.swap_grad(self.target_params_list[0],
self.target_params_list[1])
self.multi_node_mean_grad_async()
if self.needs_update:
self.actual_optimizer.update(None, *args, **kwds)
else:
super(_DoubleBufferingOptimizer, self).__setattr__(
'needs_update', True)
def multi_node_mean_grad_async(self):
self.communicator._multi_node_mean_grad_async(
self.communicated_target, self.zero_fill,
self.allreduce_grad_stream)
def is_changed(self, target, previous_params):
target_params = list(sorted(target.namedparams()))
if len(previous_params) != len(target_params):
return True
for param1, param2 in zip(target_params, previous_params):
name1, var1 = param1
name2, var2 = param2
if (name1 != name2) or (var1.data is None) != (var2.data is None):
return True
return False
def swap_grad(self, target1_params, target2_params):
for param1, param2 in zip(target1_params, target2_params):
_, var1 = param1
_, var2 = param2
var1.grad, var2.grad = var2.grad, var1.grad
def wait(self):
self.allreduce_grad_stream.synchronize()
chainer.cuda.Stream.null.synchronize()
def setup(self, link):
self.actual_optimizer.setup(link)
return self
def __getattr__(self, attr_name):
return getattr(self.actual_optimizer, attr_name)
def __setattr__(self, attr_name, value):
setattr(self.actual_optimizer, attr_name, value)
def create_multi_node_optimizer(actual_optimizer, communicator,
double_buffering=False, zero_fill=True):
"""Create a multi node optimizer from a Chainer optimizer.
Args:
actual_optimizer: Chainer optimizer
(e.g., ``chainer.optimizers.Adam``).
communicator: ChainerMN communicator.
double_buffering: If ``True``, all-reduce and other
processing (such as forward and backward) are
overlapped using double buffering.
There are cases where accuracy is affected because
the gradients of the previous iteration are used
for update. This flag is supported by
``PureNcclCommunicator`` only.
        zero_fill: A knob to control whether to fill the gradients of
            initialized but unused Links (which are ``None`` internally)
            with zero-valued arrays. All-reduce requires every gradient to
            be an array on every process, while a gradient may be an array
            or ``None`` after backward computation. Gradients of
            uninitialized Links are always skipped. If ``False``, gradients
            of unused Links are skipped as well.
Returns:
The multi node optimizer based on ``actual_optimizer``.
"""
if double_buffering:
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
if not isinstance(communicator, PureNcclCommunicator):
raise ValueError(
'This communicator does not support double buffering.')
return _DoubleBufferingOptimizer(actual_optimizer, communicator,
zero_fill)
return _MultiNodeOptimizer(actual_optimizer, communicator,
zero_fill)
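# A usage sketch for ``create_multi_node_optimizer`` (assumes a working
# MPI launch; the communicator name below is an assumption, any supported
# communicator works):
def _example_usage(model):
    import chainermn
    comm = chainermn.create_communicator('pure_nccl')
    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.Adam(), comm)
    optimizer.setup(model)
    return optimizer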
| 7,436
| 39.639344
| 79
|
py
|
chainer
|
chainer-master/chainermn/__init__.py
|
import chainer
from chainermn import communicators # NOQA
from chainermn import datasets # NOQA
from chainermn import extensions # NOQA
from chainermn import functions # NOQA
from chainermn import global_except_hook # NOQA
from chainermn import iterators # NOQA
from chainermn import links # NOQA
from chainermn import optimizers # NOQA
from chainermn.communicators import CommunicatorBase # NOQA
from chainermn.communicators import create_communicator # NOQA
from chainermn.datasets import DataSizeError # NOQA
from chainermn.datasets import scatter_index # NOQA
from chainermn.datasets import scatter_dataset # NOQA
from chainermn.extensions import create_multi_node_checkpointer # NOQA
from chainermn.extensions import create_multi_node_evaluator # NOQA
from chainermn.links import MultiNodeChainList # NOQA
from chainermn.optimizers import create_multi_node_optimizer # NOQA
global_except_hook._add_hook_if_enabled()
__version__ = chainer.__version__
| 976
| 38.08
| 71
|
py
|
chainer
|
chainer-master/chainermn/functions/pseudo_connect.py
|
import chainer
from chainer import backend
import chainer.utils
class PseudoConnect(chainer.FunctionNode):
"""Connect a variable to a delegating variable."""
def forward(self, inputs):
self.retain_inputs((0,))
# delegate_variable = inputs[0]
actual_variables = inputs[1:]
return actual_variables
def backward(self, target_input_indexes, grad_outputs):
delegate_variable, = self.get_retained_inputs()
# actual_variables = inputs[1:]
xp = backend.get_array_module(delegate_variable)
# delegate_variable do not need backward gradients, instead sending
# back dummy grads in order to take consistency of shapes of grads.
grad_delegate_variable = xp.zeros_like(delegate_variable.array)
# grad_outputs corresponds to grads of actual_variables.
return (chainer.Variable(grad_delegate_variable),) + grad_outputs
def pseudo_connect(delegate_variable, *actual_variables):
"""Connect independent connected graph component.
This function is implemented to return received arguments directly,
except the first ``delegate_variable``.
In backward computation, it returns received gradients directly,
adding a zero grad corresponding to ``delegate_variable``.
The detail of ``delegate_variable`` is described in the following notes.
.. note::
In model-parallel framework, models on each process might have many
non-connected components. Here we call a given graph non-connected
when multiple inter-process communications are needed for its
computation. For example, consider the following example::
class ConnectedGraph(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(ConnectedGraph, self).__init__(comm)
self.add_link(ConnectedGraphSub(), rank_in=3, rank_out=1)
This model receives inputs from rank=3 process and sends its outputs
to rank=1 process. The entire graph can be seen as one connected
        component ``ConnectedGraphSub``. Please refer to the documentation
        of ``MultiNodeChainList`` for details.
On the other hand, see the next example::
class NonConnectedGraph(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(NonConnectedGraph, self).__init__(comm)
self.add_link(NonConnectedGraphSubA(), \
rank_in=3, rank_out=1)
self.add_link(NonConnectedGraphSubB(), \
rank_in=1, rank_out=2)
This model consists of two components: at first,
``NonConnectedGraphSubA`` receives inputs from rank=3 process and
sends its outputs to rank=1 process, and then
``NonConnectedGraphSubB`` receives inputs from rank=1 process and
sends its outputs to rank=2 process. Here multiple inter-process
communications are invoked between ``NonConnectedGraphSubA`` and
``NonConnectedGraphSubB``, so it is regarded as non-connected.
Such kind of non-connected models can be problematic in backward
computation. Chainer traces back the computational graph from the
        output variable; however, a naive implementation of
        ``chainermn.functions.recv`` would take no inputs and instead
        receive data via ``MPI_Recv``, so the backward path vanishes.
        To prevent this, dummy variables, which we call ``delegate_variable``,
are used. In principle, ``chainermn.functions.send`` does not return
any outputs because it sends data to the other process by ``MPI_Send``.
However, ``chainermn.functions.send`` returns a dummy / empty variable
in our implementation, which is called ``delegate_variable``. This
variable does not hold any data, just used for retaining backward
computation path. We can guarantee the backward computation just by
putting ``delegate_variable`` to the next ``chainermn.functions.recv``
(``chainermn.functions.recv`` has an optional argument to receive
``delegate_variable``).
.. note::
In some cases the intermediate graph component returns model outputs.
See the next example::
class NonConnectedGraph2(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(NonConnectedGraph2, self).__init__(comm)
self.add_link(NonConnectedGraphSubA(), \
rank_in=1, rank_out=None)
self.add_link(NonConnectedGraphSubB(), \
rank_in=None, rank_out=1)
        This model first receives inputs from the rank=1 process and makes
        model outputs (specified by ``rank_out=None``) in
        ``NonConnectedGraphSubA``.
Then using model inputs (specified by ``rank_in=None``),
``NonConnectedGraphSubB`` sends its outputs to rank=1 process. Since
``MultiNodeChainList.__call__`` returns outputs of the last component
(in this case, outputs of ``NonConnectedGraphSubB``), naive
implementation cannot output the returned value of
``NonConnectedGraphSubA`` as the model outputs. In this case,
``pseudo_connect`` should be used.
``pseudo_connect`` takes two arguments. The first one
``delegate_variable`` is what we explained in above note. In this
case, returned value of ``NonConnectedGraphSubB`` corresponds to
``delegate_variable``. The second one ``actual_variables`` is
"what we want ``delegate_variable`` to imitate". In
``NonConnectedGraph2``, we obtain returned value of
``NonConnectedGraphSubB`` as the model outputs, but what we actually
want is returned value of ``NonConnectedGraphSubA``. At the same time
we want to trace back this resulted variable in backward computation.
Using ``pseudo_connect``, we can make a variable whose data is the
same as the returned value of ``NonConnectedGraphSubA``, and which
traces back ``NonConnectedGraphSubB`` first.
``pseudo_connect`` should also be used in some pathological cases,
        for example, where multiple ``chainermn.functions.send`` calls
        occur sequentially.
Args:
delegate_variable (chainer.Variable):
Pointer to the previous non-connected graph component.
actual_variables (tuple of chainer.Variable):
Actual values which ``delegate_variable`` imitate.
Returns:
tuple of chainer.Variable:
            Variables holding the given values, combined with the
            delegating variable.
"""
chainer.utils.experimental('chainermn.functions.pseudo_connect')
if delegate_variable is None:
xp = backend.get_array_module(*actual_variables)
delegate_variable = xp.empty((0,), xp.float32)
return PseudoConnect().apply(
(delegate_variable,) + actual_variables)
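# A minimal call sketch (variable names are illustrative): after a
# ``send``, ``pseudo_connect`` ties the returned delegate variable to the
# values the model actually outputs, so backward still traces through the
# send.
#
#     delegate = chainermn.functions.send(y_b, comm, rank=1)
#     y_out, = chainermn.functions.pseudo_connect(delegate, y_a)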
| 6,895
| 46.232877
| 79
|
py
|
chainer
|
chainer-master/chainermn/functions/collective_communication.py
|
import chainer
from chainer import backend
import numpy
class AllGather(chainer.Function):
"""Collective all-gather communication."""
def __init__(self, comm):
chainer.utils.experimental('chainermn.functions.AllGather')
self.comm = comm
def forward(self, inputs):
x, = inputs
x_dtype = x.dtype
# convert to float32 for communication
if numpy.float16 == x_dtype:
x = x.astype(numpy.float32)
ret = self.comm.allgather(x)
# convert back
if numpy.float16 == x_dtype:
ret = tuple([item.astype(x_dtype) for item in ret])
return ret
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
grad_dtype = grad_outputs[0].dtype
# convert to float32 for communication
if numpy.float16 == grad_dtype:
grad_outputs = tuple([item.astype(numpy.float32)
for item in grad_outputs])
gxs = self.comm.alltoall(grad_outputs)
gx = xp.stack(gxs).sum(axis=0)
# convert back
if numpy.float16 == grad_dtype:
gx = gx.astype(grad_dtype)
return gx,
class AllToAll(chainer.Function):
"""Collective all-to-all communication."""
def __init__(self, comm):
chainer.utils.experimental('chainermn.functions.AllToAll')
self.comm = comm
def forward(self, inputs):
if len(inputs) != self.comm.size:
raise ValueError(
'The length of inputs must be same as communicator size.')
xs_dtype = inputs[0].dtype
# convert to float32 for communication
if numpy.float16 == xs_dtype:
xs = tuple([x.astype(numpy.float32) for x in inputs])
else:
xs = tuple([x for x in inputs])
ret = self.comm.alltoall(xs)
# convert back
if numpy.float16 == xs_dtype:
ret = tuple([item.astype(xs_dtype) for item in ret])
return ret
def backward(self, inputs, grad_outputs):
assert self.comm.size == len(grad_outputs)
xs_dtype = inputs[0].dtype
# convert to float32 for communication
if numpy.float16 == xs_dtype:
gys = tuple([gy.astype(numpy.float32) for gy in grad_outputs])
else:
gys = tuple([gy for gy in grad_outputs])
ret = self.comm.alltoall(gys)
# convert back
if numpy.float16 == xs_dtype:
ret = tuple([item.astype(xs_dtype) for item in ret])
return ret
class Bcast(chainer.Function):
"""Collective broadcast communication."""
def __init__(self, comm, root):
chainer.utils.experimental('chainermn.functions.Bcast')
self.comm = comm
self.root = root
def __call__(self, *inputs):
xp = backend.get_array_module(*inputs)
if inputs == ():
# Without dummy variable, this function does not "require_grad",
# thus back propagation will not be invoked.
dummy_var = chainer.Variable(
xp.array([], dtype=chainer.config.dtype))
dummy_var.name = 'dummy_var'
return super(Bcast, self).__call__(dummy_var)
else:
return super(Bcast, self).__call__(*inputs)
def forward(self, inputs):
x_dtype = inputs[0].dtype
if self.comm.rank == self.root:
x, = inputs
# convert to float32 for communication
if numpy.float16 == x_dtype:
x = x.astype(numpy.float32)
else:
x = None
x = self.comm.bcast(x, self.root),
# convert back
if numpy.float16 == x_dtype:
x = tuple([item.astype(x_dtype) for item in x])
return x
def backward(self, inputs, grad_outputs):
gx, = grad_outputs
gx_dtype = gx.dtype
# convert to float32 for communication
if numpy.float16 == gx_dtype:
gx = gx.astype(numpy.float32)
gxs = self.comm.gather(gx, self.root)
if self.comm.rank == self.root:
xp = backend.get_array_module(*gxs)
gxs = xp.stack(gxs)
_sum = gxs.sum(axis=0),
# convert back
if numpy.float16 == gx_dtype:
_sum = tuple([item.astype(gx_dtype) for item in _sum])
return _sum
else:
return None,
class Gather(chainer.Function):
"""Collective gather communication."""
def __init__(self, comm, root):
chainer.utils.experimental('chainermn.functions.Gather')
self.comm = comm
self.root = root
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
x, = inputs
# convert to float32 for communication
x_dtype = x.dtype
if numpy.float16 == x_dtype:
x = x.astype(numpy.float32)
ys = self.comm.gather(x, self.root)
if self.comm.rank == self.root:
# convert back
if numpy.float16 == x_dtype:
ys = tuple([item.astype(x_dtype) for item in ys])
return ys
else:
# Return an empty variable, which serves as "delegate_variable."
return xp.array([], dtype=x_dtype),
def backward(self, inputs, grad_outputs):
# convert to float32 for communication
input_dtype = inputs[0].dtype
if self.comm.rank == self.root and numpy.float16 == input_dtype:
grad_outputs = tuple([item.astype(numpy.float32)
for item in grad_outputs])
ret = self.comm.scatter(grad_outputs, self.root),
# convert back
if numpy.float16 == input_dtype:
ret = tuple([item.astype(input_dtype) for item in ret])
return ret
class Scatter(chainer.Function):
"""Collective scatter communication."""
def __init__(self, comm, root):
chainer.utils.experimental('chainermn.functions.Scatter')
self.comm = comm
self.root = root
def __call__(self, *inputs):
xp = backend.get_array_module(*inputs)
if inputs == ():
            # Without a dummy variable, this function does not "require_grad",
            # thus backpropagation will not be invoked.
dummy_var = chainer.Variable(
xp.array([], dtype=chainer.config.dtype))
dummy_var.name = 'dummy_var'
return super(Scatter, self).__call__(dummy_var)
else:
return super(Scatter, self).__call__(*inputs)
def forward(self, inputs):
input_dtype = inputs[0].dtype
if self.comm.rank == self.root:
# convert to float32 for communication
if numpy.float16 == input_dtype:
inputs = tuple([item.astype(numpy.float32) for item in inputs])
y = self.comm.scatter(inputs, self.root)
else:
y = self.comm.scatter(None, self.root)
# convert back
if numpy.float16 == input_dtype:
y = y.astype(input_dtype)
return y,
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
gy, = grad_outputs
gy_dtype = gy.dtype
# convert to float32 for communication
if numpy.float16 == gy_dtype:
gy = gy.astype(numpy.float32)
gxs = self.comm.gather(gy, self.root)
if self.comm.rank == self.root:
# convert back
if numpy.float16 == gy_dtype:
gxs = tuple([item.astype(gy_dtype) for item in gxs])
return gxs
else:
# Slave processes need to maintain input/output shapes.
if inputs == ():
dummy_var = tuple([xp.array([], dtype=xp.float32)])
else:
dummy_var = tuple([xp.zeros_like(x) for x in inputs])
return dummy_var
def allgather(comm, x):
"""Differentiable all-gather communication between workers.
    This function invokes all-gather communications among processes specified
    by the communicator. Backward will be invoked as in ordinary Chainer
    functions, where gradients are reduced back to each process.
    The received array will be on the current CUDA device on the invoking
    process if ``x`` is on GPU. Please be aware that the current CUDA device
    is the intended one.
    (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
Args:
        comm: ChainerMN communicator.
        x (chainer.Variable): Variable to send.
    Returns:
        ys (list of chainer.Variable): Received variables.
"""
chainer.utils.experimental('chainermn.functions.all_gather')
return AllGather(comm)(x)
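# A minimal usage sketch, assuming an MPI launch (e.g. ``mpiexec -n 4``);
# ``_allgather_sketch`` is a hypothetical helper name.
def _allgather_sketch(comm):
    x = chainer.Variable(numpy.full(3, comm.rank, dtype=numpy.float32))
    ys = allgather(comm, x)  # one received variable per process
    loss = chainer.functions.sum(chainer.functions.stack(ys))
    loss.backward()  # gradients are reduced back to each process
    return x.grad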
def alltoall(comm, xs):
"""Differentiable all-to-all communication between workers.
    This function invokes all-to-all communications among processes specified
    by the communicator. Backward will be invoked as in ordinary Chainer
    functions, just passing input gradients back.
    Unlike point-to-point communication such as ``chainermn.functions.send``
    and ``chainermn.functions.recv``, users need not care about
    delegate variables, since ``backward()`` will not be invoked until
    all gradients from the output side arrive.
    Please refer to ``chainermn.functions.pseudo_connect`` for details
    about delegate variables.
    The received array will be on the current CUDA device on the invoking
    process if ``xs`` is on GPU. Please be aware that the current CUDA device
    is the intended one.
    (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
Args:
        comm: ChainerMN communicator.
        xs (list of chainer.Variable): Variables to send.
    Returns:
        ys (list of chainer.Variable): Received variables.
"""
chainer.utils.experimental('chainermn.functions.all_to_all')
if len(xs) != comm.size:
        raise ValueError(
            'The length of xs must be the same as the communicator size.')
return AllToAll(comm)(*xs)
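# A minimal usage sketch, assuming an MPI launch; ``_alltoall_sketch`` is a
# hypothetical helper name. Process i sends ``xs[j]`` to rank j and
# receives ``ys[j]`` from rank j.
def _alltoall_sketch(comm):
    xs = [chainer.Variable(numpy.full(2, i, dtype=numpy.float32))
          for i in range(comm.size)]
    ys = alltoall(comm, xs)
    chainer.functions.sum(chainer.functions.stack(ys)).backward()
    return [x.grad for x in xs]  # gradients passed back from each rank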
def bcast(comm, x, root=0):
"""Differentiable broadcast communication between workers.
    This function invokes broadcast communications among processes specified
    by the communicator. Backward will be invoked as in ordinary Chainer
    functions, where gradients are gathered to the root process
    and summed up.
    The received array will be on the current CUDA device if ``x`` on the
    invoking process is on GPU. Please be aware that the current CUDA device
    is the intended one.
    (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
Args:
comm: ChainerMN communicator.
        x (chainer.Variable): Variable to be sent.
        root (int): Rank of the root process of the broadcast communication.
Returns:
y (chainer.Variable): Broadcasted variable.
"""
chainer.utils.experimental('chainermn.functions.bcast')
if comm.rank == root:
return Bcast(comm, root)(x)
else:
return Bcast(comm, root)()
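# A minimal usage sketch, assuming an MPI launch; ``_bcast_sketch`` is a
# hypothetical helper name. Only the root provides a variable.
def _bcast_sketch(comm):
    if comm.rank == 0:
        x = chainer.Variable(numpy.arange(4, dtype=numpy.float32))
    else:
        x = None  # ignored on non-root ranks
    y = bcast(comm, x, root=0)
    # Backward gathers the gradients of ``y`` to the root and sums them up.
    chainer.functions.sum(y).backward()
    return y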
def gather(comm, x, root=0):
"""Differentiable gather communication between workers.
    This function invokes gather communications among processes specified
    by the communicator. Backward will be invoked as in ordinary Chainer
    functions, where gradients are scattered from the root process
    to each slave.
    The received array will be on the current CUDA device if ``x`` on the
    root process is on GPU. Please be aware that the current CUDA device
    is the intended one.
    (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
Args:
comm: ChainerMN communicator.
        x (chainer.Variable): Variable to be sent.
        root (int): Rank of the root process of the gather communication.
Returns:
        ys (tuple of chainer.Variable):
            Gathered variables on the root process. A delegate variable,
            which is an empty variable, is returned on slave processes.
"""
chainer.utils.experimental('chainermn.functions.gather')
return Gather(comm, root)(x)
def scatter(comm, xs, root=0):
"""Differentiable scatter communication between workers.
    This function invokes scatter communications among processes specified
    by the communicator. Backward will be invoked as in ordinary Chainer
    functions, where gradients are gathered to the root process.
    The received array will be on the current CUDA device if ``xs`` on the
    root process is on GPU. Please be aware that the current CUDA device
    is the intended one.
    (``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
Args:
comm: ChainerMN communicator.
        xs (list of chainer.Variable):
            Variables to be scattered; required on the root process.
            ``None`` for slave processes.
        root (int): Rank of the root process of the scatter communication.
Returns:
y (chainer.Variable): Scattered variable.
"""
chainer.utils.experimental('chainermn.functions.scatter')
if comm.rank == root:
return Scatter(comm, root)(*xs)
else:
return Scatter(comm, root)()
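# A minimal usage sketch combining ``scatter`` and ``gather``, assuming an
# MPI launch; ``_scatter_gather_sketch`` is a hypothetical helper name.
def _scatter_gather_sketch(comm):
    if comm.rank == 0:
        xs = [chainer.Variable(numpy.full(2, r, dtype=numpy.float32))
              for r in range(comm.size)]
    else:
        xs = None  # only the root provides inputs
    y = scatter(comm, xs, root=0)
    # Each process transforms its own share, then the root collects them.
    ys = gather(comm, 2 * y, root=0)
    return ys  # tuple on the root, delegate variable elsewhere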
| 12,921 | 31.224439 | 84 | py | chainer | chainer-master/chainermn/functions/point_to_point_communication.py |
import chainer
from chainer import backend
import chainer.utils
class Send(chainer.Function):
"""Send elements to target process."""
def __init__(self, comm, peer_rank, peer_tag):
chainer.utils.experimental('chainermn.functions.Send')
self.comm = comm
self.peer_rank = peer_rank
self.peer_tag = peer_tag
@property
def label(self):
return '{} (peer_rank: {})'.format(
self.__class__.__name__,
self.peer_rank)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
        # The last input is a dummy variable, to retain gradient computation
# of this function.
xs = inputs[:-1]
if len(xs) == 1:
xs = xs[0]
self.comm.send(xs, self.peer_rank, self.peer_tag)
# Return an empty variable, which serves as "delegate_variable."
return xp.array([], dtype=xp.float32),
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
dummy_grad = xp.array([], dtype=xp.float32)
grad = self.comm.recv(self.peer_rank, self.peer_tag)
if isinstance(grad, tuple):
return tuple([xp.array(gy) for gy in grad] + [dummy_grad])
else:
return xp.array(grad), dummy_grad
class Recv(chainer.Function):
"""Receive elements from target process."""
def __init__(self, comm, peer_rank, peer_tag):
chainer.utils.experimental('chainermn.functions.Recv')
self.comm = comm
self.peer_rank = peer_rank
self.peer_tag = peer_tag
def __call__(self, *inputs):
xp = backend.get_array_module(*inputs)
if inputs == ():
# Expected to be invoked without any args in usual case.
dummy_var = chainer.Variable(xp.array([], dtype=xp.float32))
dummy_var.name = 'dummy_var'
return super(Recv, self).__call__(dummy_var)
else:
# Used for retaining computational graph.
return super(Recv, self).__call__(*inputs)
@property
def label(self):
return '{} (peer_rank: {})'.format(
self.__class__.__name__,
self.peer_rank)
def forward(self, inputs):
data = self.comm.recv(self.peer_rank, self.peer_tag)
if not isinstance(data, tuple):
data = tuple([data])
return data
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
self.comm.send(grad_outputs, self.peer_rank, self.peer_tag)
# dummy_var is needed to maintain Chainer's constraint.
if inputs == ():
dummy_var = tuple([xp.array([], dtype=xp.float32)])
else:
dummy_var = tuple([xp.zeros_like(x)
for x in inputs])
return dummy_var
def send(x, communicator, rank, tag=0):
"""Send elements to target process.
This function returns a dummy variable only holding the computational
    graph. If ``backward()`` is invoked on this dummy variable, it will
try to receive gradients from the target process and send them back
to the parent nodes.
Args:
x (~chainer.Variable): Variable holding a matrix which you would like
to send.
communicator (chainer.communicators.CommunicatorBase):
ChainerMN communicator.
rank (int): Target process specifier.
tag (int): Optional message ID (MPI feature).
Returns:
~chainer.Variable:
A dummy variable with no actual data, only holding the
            computational graph. Please refer to
            ``chainermn.functions.pseudo_connect`` for details.
"""
chainer.utils.experimental('chainermn.functions.send')
if rank == communicator.rank:
raise ValueError(
'rank must be different from communicator rank, '
'otherwise deadlock occurs')
xp = backend.get_array_module(*x)
    # Dummy variable to retain gradient computation of send,
    # otherwise the corresponding recv will cause a deadlock in backward
    # in the case where none of the inputs to this function requires grad.
dummy_var = chainer.Variable(xp.array([], dtype=xp.float32))
if isinstance(x, list) or isinstance(x, tuple):
inputs = x + type(x)([dummy_var])
delegate_variable = Send(
communicator, peer_rank=rank, peer_tag=tag)(*inputs)
else:
delegate_variable = Send(
communicator, peer_rank=rank, peer_tag=tag)(x, dummy_var)
delegate_variable.name = 'delegate_variable'
return delegate_variable
def recv(communicator, rank, delegate_variable=None, tag=0, force_tuple=False):
"""Receive elements from target process.
    This function returns data received from the target process. If
    ``backward()`` is invoked, it will try to send gradients to the target
    process.
The received array will be on the current CUDA device if the corresponding
``send()`` is invoked with arrays on GPU.
    Please be aware that the current CUDA device is the intended one.
(``https://docs-cupy.chainer.org/en/stable/tutorial/basic.html#current-device``)
.. note::
        If you define a non-connected computational graph on one process,
        you have to use ``delegate_variable`` to specify the output of
        the previous computational graph component.
        Otherwise ``backward()`` does not work properly.
        Please refer to ``chainermn.functions.pseudo_connect`` for details.
Args:
communicator (chainer.communicators.CommunicatorBase):
ChainerMN communicator.
rank (int): Target process specifier.
delegate_variable (chainer.Variable):
Pointer to the other non-connected component.
tag (int): Optional message ID (MPI feature).
force_tuple (bool): If ``False`` (the default) a Variable will be
returned when the number of outputs is one. Otherwise, this
method returns a tuple even when the number of outputs is one.
Returns:
~chainer.Variable:
            Data received from the target process. If ``backward()`` is
            invoked on this variable, it will send gradients to the target
            process.
"""
chainer.utils.experimental('chainermn.functions.recv')
if rank == communicator.rank:
raise ValueError(
'rank must be different from communicator rank, '
'otherwise deadlock occurs')
if delegate_variable is None:
res = Recv(
communicator,
peer_rank=rank,
peer_tag=tag)()
else:
delegate_variable.name = 'delegate_variable'
res = Recv(
communicator,
peer_rank=rank,
peer_tag=tag)(delegate_variable)
if force_tuple and not isinstance(res, tuple):
return tuple([res])
else:
return res
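# A minimal usage sketch, assuming exactly two processes; ``model_a`` and
# ``model_b`` are hypothetical chains forming a two-stage pipeline.
def _send_recv_sketch(communicator, model_a, model_b, x):
    if communicator.rank == 0:
        h = model_a(x)
        phi = send(h, communicator, rank=1)
        return phi  # phi.backward() receives gradients back from rank 1
    else:
        h = recv(communicator, rank=0)
        return model_b(h)  # backward() sends gradients to rank 0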
| 6,903 | 33.178218 | 84 | py | chainer | chainer-master/chainermn/functions/__init__.py |
from chainermn.functions.collective_communication import allgather # NOQA
from chainermn.functions.collective_communication import alltoall # NOQA
from chainermn.functions.collective_communication import bcast # NOQA
from chainermn.functions.collective_communication import gather # NOQA
from chainermn.functions.collective_communication import scatter # NOQA
from chainermn.functions.point_to_point_communication import recv # NOQA
from chainermn.functions.point_to_point_communication import send # NOQA
from chainermn.functions.pseudo_connect import pseudo_connect # NOQA
| 585 | 52.272727 | 74 | py | chainer | chainer-master/chainermn/functions/batch_normalization.py |
import chainer
from chainer.backends import cuda
from chainer.functions.normalization import batch_normalization
import chainer.utils
class _MpiImpl(batch_normalization.GeneralBatchNormalizationImpl):
def __init__(self, comm):
self.comm = comm
def get_mean_and_var(self, axis, gamma, x, xp, interm_dtype):
tmp = xp.empty(gamma.size * 2, dtype=gamma.dtype)
x.mean(axis=axis, out=tmp[:gamma.size], dtype=gamma.dtype)
xp.square(x).mean(axis=axis, out=tmp[gamma.size:], dtype=gamma.dtype)
if xp is cuda.cupy:
chainer.cuda.Stream.null.synchronize()
self.comm._multi_node_mean(None, tmp)
mean = tmp[:gamma.size]
sqmean = tmp[gamma.size:]
var = sqmean - xp.square(mean)
return mean, var
def get_ggamma_and_gbeta(self, axis, gamma, gy, x_hat, xp):
tmp = xp.empty(gamma.size * 2, dtype=gamma.dtype)
gy.sum(axis=axis, out=tmp[:gamma.size], dtype=gamma.dtype)
(gy * x_hat).sum(axis=axis, out=tmp[gamma.size:], dtype=gamma.dtype)
if xp is cuda.cupy:
chainer.cuda.Stream.null.synchronize()
self.comm._multi_node_mean(None, tmp)
gbeta = tmp[:gamma.size]
ggamma = tmp[gamma.size:]
return gbeta, ggamma
class _NcclImpl(batch_normalization.GeneralBatchNormalizationImpl):
def __init__(self, comm):
self.comm = comm
        # We need to delay importing MPI4py (and modules that import MPI4py)
import chainermn.communicators._memory_utility as memory_utility_module
self.memory_utility_module = memory_utility_module
def get_mean_and_var(self, axis, gamma, x, xp, interm_dtype):
gpu_buffer_n_elems = gamma.size * 2
gpu_buffer_size = gamma.dtype.itemsize * gpu_buffer_n_elems
gpu_buffer_a = self.memory_utility_module.DeviceMemory()
gpu_buffer_b = self.memory_utility_module.DeviceMemory()
gpu_buffer_a.assign(gpu_buffer_size)
gpu_buffer_b.assign(gpu_buffer_size)
gpu_buffer_a_array = gpu_buffer_a.array(
gpu_buffer_n_elems, dtype=gamma.dtype)
x.mean(axis=axis, out=gpu_buffer_a_array[:gamma.size],
dtype=gamma.dtype)
xp.square(x).mean(axis=axis, out=gpu_buffer_a_array[gamma.size:],
dtype=gamma.dtype)
self.comm._multi_node_mean_nccl(gpu_buffer_a,
gpu_buffer_b,
gpu_buffer_n_elems,
gamma.dtype)
gpu_buffer_a_array = gpu_buffer_b.array(
gpu_buffer_n_elems,
dtype=gamma.dtype)
mean = gpu_buffer_a_array[:gamma.size]
sqmean = gpu_buffer_a_array[gamma.size:]
var = sqmean - xp.square(mean)
return mean, var
def get_ggamma_and_gbeta(self, axis, gamma, gy, x_hat, xp):
gpu_buffer_n_elems = gamma.size * 2
gpu_buffer_size = gamma.dtype.itemsize * gpu_buffer_n_elems
gpu_buffer_a = self.memory_utility_module.DeviceMemory()
gpu_buffer_b = self.memory_utility_module.DeviceMemory()
gpu_buffer_a.assign(gpu_buffer_size)
gpu_buffer_b.assign(gpu_buffer_size)
gpu_buffer_a_array = gpu_buffer_a.array(
gpu_buffer_n_elems, dtype=gamma.dtype)
gy.sum(axis=axis, out=gpu_buffer_a_array[:gamma.size],
dtype=gamma.dtype)
(gy * x_hat).sum(axis=axis, out=gpu_buffer_a_array[gamma.size:],
dtype=gamma.dtype)
self.comm._multi_node_mean_nccl(gpu_buffer_a,
gpu_buffer_b,
gpu_buffer_n_elems,
gamma.dtype)
gpu_buffer_a_array = gpu_buffer_b.array(
gpu_buffer_n_elems,
dtype=gamma.dtype)
gbeta = gpu_buffer_a_array[:gamma.size]
ggamma = gpu_buffer_a_array[gamma.size:]
return gbeta, ggamma
def get_communication_backend(comm, communication_backend='auto'):
if communication_backend not in ['mpi', 'nccl', 'auto']:
raise ValueError('MultiNodeBatchNormalization does not support '
'{}.'.format(communication_backend))
from chainermn.communicators.pure_nccl_communicator \
import PureNcclCommunicator
if communication_backend != 'auto':
if 'nccl' == communication_backend:
if not isinstance(comm, PureNcclCommunicator):
raise ValueError('{} is not supported in '
'MultiNodeBatchNormalization when using '
'{}.'.format(communication_backend,
type(comm)))
selected_communication_backend = communication_backend
else:
if isinstance(comm, PureNcclCommunicator):
selected_communication_backend = 'nccl'
else:
selected_communication_backend = 'mpi'
return selected_communication_backend
class MultiNodeBNImplSelector:
def __init__(self, comm, communication_backend_name):
self.comm = comm
self.communication_backend_name = communication_backend_name
def __call__(self, batch_norm_func, inputs):
if self.communication_backend_name == 'nccl':
return _NcclImpl(self.comm)
else:
return _MpiImpl(self.comm)
| 5,424 | 41.382813 | 79 | py | chainer | chainer-master/chainermn/testing/__init__.py |
from chainermn.testing.device import get_device # NOQA
| 56 | 27.5 | 55 | py | chainer | chainer-master/chainermn/testing/device.py |
import chainer
def get_device(device_id=None, use_chainerx=False):
"""Get device object
Currently in Chainer, there are 3 officially-supported backends
(numpy, cupy, and chainerx) and 2 devices (CPU and NVIDIA GPUs).
Also, ChainerX has its own backend system, so there are 4 combinations
(numpy, cupy, chainerx+native, chainerx+cuda). This utility function
    is boilerplate for getting a device object in ChainerMN contexts.
"""
if device_id is not None:
if use_chainerx:
device = 'cuda:{}'.format(device_id)
else:
device = '@cupy:{}'.format(device_id)
else:
if use_chainerx:
device = 'native:0'
else:
device = '@numpy'
return chainer.get_device(device)
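# A minimal usage sketch, assuming one GPU per process; ``_device_sketch``
# is a hypothetical helper name.
def _device_sketch(comm, use_chainerx=False):
    device = get_device(comm.intra_rank, use_chainerx)
    device.use()  # make this process's GPU the current device
    return device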
| 771 | 29.88 | 74 | py | chainer | chainer-master/chainermn/links/create_mnbn_model.py |
import copy
import chainer
import chainermn
def create_mnbn_model(link, comm, communication_backend='auto'):
"""Create a link object with MultiNodeBatchNormalization.
Returns a copy of `link`, where BatchNormalization is replaced
by MultiNodeBatchNormalization.
Args:
link: Link object
comm: ChainerMN communicator
communication_backend (str): ``mpi``, ``nccl`` or ``auto``. It is used
to determine communication backend of MultiNodeBatchNormalization.
If ``auto``, use the best communication backend for each
communicator.
Returns:
Link object where BatchNormalization is replaced
by MultiNodeBatchNormalization.
"""
if isinstance(link, chainer.links.BatchNormalization):
mnbn = chainermn.links.MultiNodeBatchNormalization(
size=link.avg_mean.shape,
comm=comm,
decay=link.decay,
eps=link.eps,
dtype=link.avg_mean.dtype,
use_gamma=hasattr(link, 'gamma'),
use_beta=hasattr(link, 'beta'),
communication_backend=communication_backend,
)
mnbn.copyparams(link)
for name in link._persistent:
mnbn.__dict__[name] = copy.deepcopy(link.__dict__[name])
return mnbn
elif isinstance(link, chainer.Chain):
new_children = [
(child_name, create_mnbn_model(link.__dict__[child_name], comm,
communication_backend))
for child_name in link._children
]
new_link = copy.deepcopy(link)
for name, new_child in new_children:
new_link.__dict__[name] = new_child
return new_link
elif isinstance(link, chainer.Sequential):
new_link = copy.deepcopy(link)
for i, l in enumerate(link):
new_l = create_mnbn_model(l, comm, communication_backend)
new_link[i] = new_l
return new_link
elif isinstance(link, chainer.ChainList):
new_children = [
create_mnbn_model(l, comm, communication_backend) for l in link]
new_link = copy.deepcopy(link)
for i, new_child in enumerate(new_children):
new_link._children[i] = new_child
return new_link
else:
return copy.deepcopy(link)
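# A minimal usage sketch, assuming a ChainerMN communicator; the small
# ``base`` chain is a hypothetical stand-in for any model containing
# BatchNormalization links.
def _create_mnbn_sketch(comm):
    base = chainer.Sequential(
        chainer.links.Convolution2D(None, 8, ksize=3),
        chainer.links.BatchNormalization(8),
        chainer.functions.relu,
    )
    # Every BatchNormalization inside ``base`` is replaced by
    # MultiNodeBatchNormalization; all other links are deep-copied as-is.
    return create_mnbn_model(base, comm)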
| 2,340 | 33.940299 | 78 | py | chainer | chainer-master/chainermn/links/multi_node_chain_list.py |
from six.moves import queue
import chainer
import chainermn
import chainermn.communicators
import chainermn.functions
class MultiNodeChainList(chainer.ChainList):
"""Combining multiple non-connected components of computational graph.
This class combines each ``chainer.Chain``, which represents one of the
non-connected component in compuational graph. In ``__call__()``,
the returned object of ``chainer.Chain`` (which represents pointer)
are passed to the next ``chainer.Chain``, in order to retain the
computational graph connected and make backprop work properly.
Users add each ``chainer.Chain`` by ``add_link()`` method. Each chain
is invoked in forward computation according to the order they are added,
and in backward computation according to the reversed order.
.. admonition:: Example (basic usage)
This is a simple example of the model which sends its outputs to
rank=1 machine::
            import chainer
            import chainer.functions as F
            import chainer.links as L
            import chainermn
class SimpleModelSub(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(SimpleModelSub, self).__init__(
l1=L.Linear(n_in, n_hidden),
l2=L.Linear(n_hidden, n_out))
def __call__(self, x):
h1 = F.relu(self.l1(x))
return self.l2(h1)
class SimpleModel(chainermn.MultiNodeChainList):
def __init__(self, comm, n_in, n_hidden, n_out):
super(SimpleModel, self).__init__(comm)
self.add_link(
SimpleModelSub(n_in, n_hidden, n_out),
rank_in=None,
rank_out=1)
.. admonition:: Example (split MLP on 2 processes)
        This is another example of two models interacting with each other::
            import chainer
            import chainer.functions as F
            import chainer.links as L
            import chainermn
class MLP(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(MLP, self).__init__(
l1=L.Linear(n_in, n_hidden),
l2=L.Linear(n_hidden, n_hidden),
l3=L.Linear(n_hidden, n_out))
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
class Model0(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(Model0, self).__init__(comm)
self.add_link(
MLP(10000, 5000, 2000),
rank_in=None,
rank_out=1)
self.add_link(
MLP(100, 50, 10),
rank_in=1,
rank_out=None)
class Model1(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(Model1, self).__init__(comm)
self.add_link(MLP(2000, 500, 100), rank_in=0, rank_out=0)
``Model0`` is expected to be on rank=0, and ``Model1`` is expected to
be on rank=1. The first ``MLP`` in ``Model0`` will send its outputs
to ``Model1``, then ``MLP`` in ``Model1`` will receive it and send
its outputs to the second ``MLP`` in ``Model0``.
.. admonition:: Example (sending tuples)
This is the example for sending a tuple::
import chainer
import chainer.functions as F
import chainermn
class NN0(chainer.Chain):
def __call__(self, x):
                    y0 = some_calculation_nn0_0(x)
                    y1 = some_calculation_nn0_1(x)
return y0, y1
class NN1(chainer.Chain):
def __call__(self, y):
y0, y1 = y # unpack tuple from NN0
return some_calculation_nn1(y0, y1)
class Model_on_Process_0(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(Model_on_Process_0, self).__init__(comm=comm)
self.add_link(NN0(), rank_in=None, rank_out=1)
class Model_on_Process_1(chainermn.MultiNodeChainList):
def __init__(self, comm):
super(Model_on_Process_1, self).__init__(comm=comm)
self.add_link(NN1(), rank_in=0, rank_out=None)
        In this example, ``Model_on_Process_0`` sends a two-element tuple
        ``(y0, y1)`` (returned by ``NN0.__call__``) to ``Model_on_Process_1``,
        where it is unpacked as shown in ``NN1.__call__``.
Args:
comm (chainermn.communicators._base.CommunicatorBase):
ChainerMN communicator.
"""
def __init__(self, comm):
chainer.utils.experimental('chainermn.MultiNodeChainList')
super(MultiNodeChainList, self).__init__()
self._comm = comm
self._rank_inouts = []
def add_link(self, link, rank_in=None, rank_out=None):
"""Register one connected link with its inout rank.
Args:
link (chainer.Link): The link object to be registered.
rank_in (int, list, or None):
                Ranks from which it receives data. If ``None`` is specified,
                the model does not receive data from any machine.
            rank_out (int, list, or None):
                Ranks to which it sends data. If ``None`` is specified,
                the model does not send data to any machine.
"""
super(MultiNodeChainList, self).add_link(link)
if isinstance(rank_in, int):
rank_in = [rank_in]
if isinstance(rank_out, int):
rank_out = [rank_out]
if rank_out is None:
for _, _rank_out in self._rank_inouts:
if _rank_out is None:
raise ValueError(
                        'MultiNodeChainList cannot have more than one '
                        'computational graph component whose rank_out is None')
self._rank_inouts.append((rank_in, rank_out))
def __call__(self, *inputs):
comm_queue = queue.Queue()
y = None
delegate_variable = None
for i_comp, (f, (rank_in, rank_out)) in \
enumerate(zip(self._children, self._rank_inouts)):
x = None
if rank_in is None: # Use inputs.
if i_comp == 0:
x = f(*inputs)
else:
# If the graph component is not the first one,
# backprop to the previous graph component must be
# guaranteed.
x, = chainermn.functions.pseudo_connect(
delegate_variable,
*inputs)
x = f(x)
else: # Receive inputs from the other machines.
# Preprocess: receiving inputs from the other machines.
xs = []
for _rank_in in rank_in:
if _rank_in == self._comm.rank:
# Receive inputs from itself.
if delegate_variable is None:
_x = comm_queue.get()
else:
_x, = chainermn.functions.pseudo_connect(
delegate_variable,
comm_queue.get())
else:
_x = chainermn.functions.recv(
self._comm,
rank=_rank_in,
delegate_variable=delegate_variable)
xs.append(_x)
# Guarantee the backward path to the previous graph
# component to be executed in the last to avoid dead-lock.
delegate_variable = _x
# Guarantee backprop on the same edge exactly once.
delegate_variable = None
# Actual forward.
x = f(*tuple(xs))
if rank_out is None: # Return outputs.
assert y is None, (
                    'MultiNodeChainList cannot have more than one '
                    'computational graph component whose rank_out is None')
y = x # model output
delegate_variable = y
else: # Send outputs to the other machines.
for i_comp, _rank_out in enumerate(rank_out):
if _rank_out == self._comm.rank:
# Send outputs to itself.
if delegate_variable is not None:
x, = chainermn.functions.pseudo_connect(
delegate_variable,
x)
comm_queue.put(x)
delegate_variable = x
elif i_comp == 0:
delegate_variable = chainermn.functions.send(
x, self._comm,
rank=_rank_out)
else:
# If the model has multiple targets for send,
# we must guarantee backwards of each send to be
# called in the reversed order.
if delegate_variable is not None:
x, = chainermn.functions.pseudo_connect(
delegate_variable,
x)
delegate_variable = chainermn.functions.send(
x, self._comm,
rank=_rank_out)
if not comm_queue.empty():
raise ValueError(
'Communication queue is not empty at the end of forward. '
                'Make sure that all rank_in and rank_out values correspond '
                'to each other.')
# Return.
if y is delegate_variable:
# The last computational graph component returns model output.
return y
elif y is not None:
# The intermediate graph component returns model output.
y, = chainermn.functions.pseudo_connect(delegate_variable, y)
return y
else:
# Do not have any model output.
return delegate_variable
| 10,526 | 37.56044 | 79 | py | chainer | chainer-master/chainermn/links/__init__.py |
from chainermn.links.batch_normalization import MultiNodeBatchNormalization # NOQA
from chainermn.links.create_mnbn_model import create_mnbn_model # NOQA
from chainermn.links.multi_node_chain_list import MultiNodeChainList # NOQA
from chainermn.links.n_step_rnn import create_multi_node_n_step_rnn # NOQA
| 309 | 61 | 83 | py | chainer | chainer-master/chainermn/links/n_step_rnn.py |
import chainer
import chainer.links.rnn as rnn
import chainermn.functions
class _MultiNodeNStepRNN(chainer.Chain):
def __init__(self, link, communicator, rank_in, rank_out):
super(_MultiNodeNStepRNN, self).__init__(actual_rnn=link)
self.communicator = communicator
self.rank_in = rank_in
self.rank_out = rank_out
check_lstm = isinstance(link, rnn.n_step_rnn.NStepRNNBase)
if not check_lstm:
            raise ValueError('link must be an NStepRNN or its derived link')
else:
self.n_cells = link.n_cells
def __call__(self, *inputs):
cells = [None for _ in range(self.n_cells)]
if self.rank_in is not None:
cells = [chainermn.functions.recv(
self.communicator,
rank=self.rank_in)
for _ in range(self.n_cells)]
outputs = self.actual_rnn(*(tuple(cells) + inputs))
cells = outputs[:-1]
delegate_variable = None
if self.rank_out is not None:
cell = cells[0]
for i in range(self.n_cells):
delegate_variable = chainermn.functions.send(
cell, self.communicator, rank=self.rank_out)
if i < self.n_cells - 1:
cell, = chainermn.functions.pseudo_connect(
delegate_variable, cells[i + 1])
return outputs + tuple([delegate_variable])
def create_multi_node_n_step_rnn(
actual_link, communicator, rank_in=None, rank_out=None):
"""Create a multi node stacked RNN link from a Chainer stacked RNN link.
    A multi node stacked RNN link is used for model parallelism.
    The created link will receive initial hidden states from the process
    specified by ``rank_in`` (or receive none if it is ``None``), execute
    the original RNN computation, and then send the resulting hidden states
    to the process specified by ``rank_out``.
    Compared with the Chainer stacked RNN link, the multi node stacked RNN
    link returns an extra object called ``delegate_variable``.
    If ``rank_out`` is not ``None``, backward computation is expected
    to begin from ``delegate_variable``.
    For details, please refer to ``chainermn.functions.pseudo_connect``.
The following RNN links can be passed to this function:
- ``chainer.links.NStepBiGRU``
- ``chainer.links.NStepBiLSTM``
- ``chainer.links.NStepBiRNNReLU``
- ``chainer.links.NStepBiRNNTanh``
- ``chainer.links.NStepGRU``
- ``chainer.links.NStepLSTM``
- ``chainer.links.NStepRNNReLU``
- ``chainer.links.NStepRNNTanh``
Args:
        actual_link (chainer.Link): Chainer stacked RNN link.
communicator: ChainerMN communicator
rank_in (int, or None):
Rank of the process which sends hidden RNN states to this process.
rank_out (int, or None):
            Rank of the process to which this process sends hidden RNN states.
Returns:
The multi node stacked RNN link based on ``actual_link``.
"""
chainer.utils.experimental('chainermn.links.create_multi_node_n_step_rnn')
return _MultiNodeNStepRNN(actual_link, communicator, rank_in, rank_out)
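# A minimal usage sketch, assuming exactly two processes: rank 0 runs a
# stacked GRU and forwards its final hidden states to rank 1, which
# continues the computation from them.
def _multi_node_rnn_sketch(comm):
    import chainer.links as L
    if comm.rank == 0:
        return create_multi_node_n_step_rnn(
            L.NStepGRU(2, 32, 32, 0.1), comm, rank_in=None, rank_out=1)
    else:
        return create_multi_node_n_step_rnn(
            L.NStepGRU(2, 32, 32, 0.1), comm, rank_in=0, rank_out=None)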
| 3,181 | 36 | 78 | py | chainer | chainer-master/chainermn/links/batch_normalization.py |
import chainer
from chainer.backends import cuda
from chainer.functions.normalization import batch_normalization
from chainer import initializers
from chainer import link
import chainer.utils
from chainer import variable
from chainermn.functions import batch_normalization as \
chainermn_batch_normalization
import numpy
import copy
class MultiNodeBatchNormalization(link.Link):
"""Batch normalization layer that can use the whole batch stats.
    When using ``chainer.links.BatchNormalization``, batch mean and std are
    computed independently for the local batch in each worker. When the
    local batch size is too small, training becomes unstable due to
    unreliable batch stats.
In contrast, when using this MultiNodeBatchNormalization, workers
communicate to conduct 'correct' batch normalization (e.g., obtaining
mean and std for the whole global batch).
This link works only with Chainer >= 2.0.0.
Args:
size (int or tuple of ints): Size (or shape) of channel
dimensions.
comm (ChainerMN communicator): communicator to share
the batch stats.
decay (float): Decay rate of moving average. It is used on training.
eps (float): Epsilon value for numerical stability.
dtype (numpy.dtype): Type to use in computing.
use_gamma (bool): If ``True``, use scaling parameter. Otherwise, use
unit(1) which makes no effect.
use_beta (bool): If ``True``, use shifting parameter. Otherwise, use
unit(0) which makes no effect.
communication_backend (str): ``mpi``, ``nccl`` or ``auto``. It is used
to determine communication backend. If ``auto``, use the best
communication backend for each communicator.
"""
def __init__(self, size, comm, decay=0.9, eps=2e-5, dtype=None,
use_gamma=True, use_beta=True,
initial_gamma=None, initial_beta=None,
communication_backend='auto'):
chainer.utils.experimental(
'chainermn.links.MultiNodeBatchNormalization')
super(MultiNodeBatchNormalization, self).__init__()
self._highprec_dtype = chainer.get_dtype(
dtype, map_mixed16=numpy.float32)
self.comm = comm
self.avg_mean = numpy.zeros(size, dtype=self._highprec_dtype)
self.register_persistent('avg_mean')
self.avg_var = numpy.zeros(size, dtype=self._highprec_dtype)
self.register_persistent('avg_var')
self.N = 0
self.register_persistent('N')
self.decay = decay
self.eps = eps
self._communication_backend = \
chainermn_batch_normalization.get_communication_backend(
comm, communication_backend)
with self.init_scope():
if use_gamma:
if initial_gamma is None:
initial_gamma = 1
initial_gamma = initializers._get_initializer(initial_gamma)
initial_gamma.dtype = self._highprec_dtype
self.gamma = variable.Parameter(initial_gamma, size)
if use_beta:
if initial_beta is None:
initial_beta = 0
initial_beta = initializers._get_initializer(initial_beta)
initial_beta.dtype = self._highprec_dtype
self.beta = variable.Parameter(initial_beta, size)
def __call__(self, x, finetune=False):
if hasattr(self, 'gamma'):
gamma = self.gamma
else:
with cuda.get_device_from_id(self._device_id):
gamma = variable.Variable(self.xp.ones(
self.avg_mean.shape, dtype=self._highprec_dtype))
if hasattr(self, 'beta'):
beta = self.beta
else:
with cuda.get_device_from_id(self._device_id):
beta = variable.Variable(self.xp.zeros(
self.avg_mean.shape, dtype=self._highprec_dtype))
if chainer.configuration.config.train:
if finetune:
self.N += 1
decay = 1. - 1. / self.N
else:
decay = self.decay
func = batch_normalization.BatchNormalization(
self.eps, self.avg_mean, self.avg_var, decay,
impl_selector=(
chainermn_batch_normalization.MultiNodeBNImplSelector(
self.comm, self._communication_backend)))
ret = func.apply((x, gamma, beta))[0]
self.avg_mean[:] = func.running_mean
self.avg_var[:] = func.running_var
else:
# Use running average statistics or fine-tuned statistics.
mean = variable.Variable(self.avg_mean)
var = variable.Variable(self.avg_var)
ret = batch_normalization.fixed_batch_normalization(
x, gamma, beta, mean, var, self.eps)
return ret
def start_finetuning(self):
"""Resets the population count for collecting population statistics.
This method can be skipped if it is the first time to use the
fine-tuning mode. Otherwise, this method should be called before
starting the fine-tuning mode again.
"""
self.N = 0
def __deepcopy__(self, memo):
to_be_preserved = ['comm']
preserved = {}
for name in to_be_preserved:
preserved[name] = getattr(self, name)
setattr(self, name, None)
ret = copy.deepcopy(super(MultiNodeBatchNormalization, self))
for name in to_be_preserved:
setattr(self, name, preserved[name])
setattr(ret, name, preserved[name])
return ret
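# A minimal usage sketch, assuming every worker calls it with its own local
# batch; ``_multi_node_bn_sketch`` is a hypothetical helper name.
def _multi_node_bn_sketch(comm):
    bn = MultiNodeBatchNormalization(16, comm)
    x = numpy.random.rand(8, 16).astype(numpy.float32)
    # Mean and variance are computed over the whole multi-process batch.
    return bn(x)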
| 5,709 | 37.581081 | 78 | py |