| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| from __future__ import print_function |
|
|
| import argparse |
| import os |
|
|
| import chainer |
| from chainer import serializers, training |
| from chainer.datasets import tuple_dataset |
| import chainer.functions as F |
| import chainer.links as L |
| from chainer.training import extensions |
| import numpy as np |
|
|
| import sagemaker_containers |
|
|
|
|
class MLP(chainer.Chain):
    """Three-layer fully-connected network with ReLU activations.

    The first dimension of every Linear link is ``None``, so each layer's
    input size is inferred lazily on the first forward pass; the network
    therefore accepts any flattened input width.
    """

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            # Two hidden layers of equal width, then a projection to n_out.
            self.l1 = L.Linear(None, n_units)
            self.l2 = L.Linear(None, n_units)
            self.l3 = L.Linear(None, n_out)

    def __call__(self, x):
        hidden = F.relu(self.l1(x))
        hidden = F.relu(self.l2(hidden))
        # No final activation: callers (e.g. L.Classifier) apply the loss.
        return self.l3(hidden)
|
|
|
|
| def _preprocess_mnist(raw, withlabel, ndim, scale, image_dtype, label_dtype, rgb_format): |
| images = raw["x"][-100:] |
| if ndim == 2: |
| images = images.reshape(-1, 28, 28) |
| elif ndim == 3: |
| images = images.reshape(-1, 1, 28, 28) |
| if rgb_format: |
| images = np.broadcast_to(images, (len(images), 3) + images.shape[2:]) |
| elif ndim != 1: |
| raise ValueError("invalid ndim for MNIST dataset") |
| images = images.astype(image_dtype) |
| images *= scale / 255.0 |
|
|
| if withlabel: |
| labels = raw["y"][-100:].astype(label_dtype) |
| return tuple_dataset.TupleDataset(images, labels) |
| else: |
| return images |
|
|
|
|
if __name__ == "__main__":
    # SageMaker injects hyperparameters and data-channel paths into the
    # container; training_env() exposes them as attributes.
    env = sagemaker_containers.training_env()

    parser = argparse.ArgumentParser()

    # Hyperparameters: hidden width, epochs, snapshot frequency (epochs),
    # minibatch size, and Adam's step-size coefficient (alpha).
    parser.add_argument("--units", type=int, default=1000)
    parser.add_argument("--epochs", type=int, default=20)
    parser.add_argument("--frequency", type=int, default=20)
    parser.add_argument("--batch-size", type=int, default=100)
    parser.add_argument("--alpha", type=float, default=0.001)
    parser.add_argument("--model-dir", type=str, default=env.model_dir)

    # Directories where SageMaker mounted the "train" and "test" channels.
    parser.add_argument("--train", type=str, default=env.channel_input_dirs["train"])
    parser.add_argument("--test", type=str, default=env.channel_input_dirs["test"])

    parser.add_argument("--num-gpus", type=int, default=env.num_gpus)

    args = parser.parse_args()

    # Each channel must contain one .npz archive with "x" (images) and
    # "y" (labels) arrays — the layout _preprocess_mnist expects.
    train_file = np.load(os.path.join(args.train, "train.npz"))
    test_file = np.load(os.path.join(args.test, "test.npz"))

    # Flat float32 images scaled to [0, 1], int32 labels, grayscale.
    preprocess_mnist_options = {
        "withlabel": True,
        "ndim": 1,
        "scale": 1.0,
        "image_dtype": np.float32,
        "label_dtype": np.int32,
        "rgb_format": False,
    }

    train = _preprocess_mnist(train_file, **preprocess_mnist_options)
    test = _preprocess_mnist(test_file, **preprocess_mnist_options)

    # Classifier wraps the MLP, adding the loss/accuracy reporting used by
    # the trainer extensions below.
    model = L.Classifier(MLP(args.units, 10))

    if chainer.cuda.available:
        # Make GPU 0 the current device before parameters are allocated.
        chainer.cuda.get_device_from_id(0).use()

    optimizer = chainer.optimizers.Adam(alpha=args.alpha)
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train, args.batch_size)
    # Evaluation passes over the test set exactly once, in order.
    test_iter = chainer.iterators.SerialIterator(test, args.batch_size, repeat=False, shuffle=False)

    # Chainer convention: device -1 means CPU, 0 is the first GPU.
    device = 0 if chainer.cuda.available else -1
    if chainer.cuda.available:

        # ParallelUpdater expects the first device keyed as "main"; the
        # remaining GPUs keep their intra-rank index as the key.
        def device_name(device_intra_rank):
            return "main" if device_intra_rank == 0 else str(device_intra_rank)

        devices = {device_name(device): device for device in range(args.num_gpus)}
        updater = training.updater.ParallelUpdater(
            train_iter,
            optimizer,
            # Data-parallel training across all GPUs reported by SageMaker.
            devices=devices,
        )
    else:
        updater = training.updater.StandardUpdater(train_iter, optimizer, device=device)

    # All trainer artifacts (logs, plots, snapshots) go to the SageMaker
    # output-data directory so they are uploaded with the job results.
    trainer = training.Trainer(updater, (args.epochs, "epoch"), out=env.output_data_dir)

    # Evaluate on the test set each epoch; reported as validation/main/*.
    trainer.extend(extensions.Evaluator(test_iter, model, device=device))

    # Dump the computational graph of the loss for inspection.
    trainer.extend(extensions.dump_graph("main/loss"))

    # Snapshot full trainer state every `frequency` epochs (default 20,
    # i.e. once at the end with the default epoch count).
    trainer.extend(extensions.snapshot(), trigger=(args.frequency, "epoch"))

    trainer.extend(extensions.LogReport())

    # PlotReport is only available when matplotlib is installed.
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ["main/loss", "validation/main/loss"], "epoch", file_name="loss.png"
            )
        )
        trainer.extend(
            extensions.PlotReport(
                ["main/accuracy", "validation/main/accuracy"], "epoch", file_name="accuracy.png"
            )
        )

    # Per-epoch console summary of the logged metrics.
    trainer.extend(
        extensions.PrintReport(
            [
                "epoch",
                "main/loss",
                "validation/main/loss",
                "main/accuracy",
                "validation/main/accuracy",
                "elapsed_time",
            ]
        )
    )

    trainer.extend(extensions.ProgressBar())

    trainer.run()

    # Persist the trained classifier to model_dir; model_fn reloads it
    # for SageMaker hosting.
    serializers.save_npz(os.path.join(args.model_dir, "model.npz"), model)
|
|
|
|
def model_fn(model_dir):
    """Load a trained model for SageMaker hosting.

    :param model_dir: directory containing the saved ``model.npz``.
    :return: the bare MLP predictor (without the Classifier loss wrapper).
    """
    # NOTE(review): 1000 units / 10 classes must mirror the training-time
    # architecture (the --units default) — confirm if defaults change.
    classifier = L.Classifier(MLP(1000, 10))
    weights_path = os.path.join(model_dir, "model.npz")
    serializers.load_npz(weights_path, classifier)
    return classifier.predictor
|
|