repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
kraken | kraken-main/kraken/ketos/recognition.py | #
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.ketos.recognition
~~~~~~~~~~~~~~~~~~~~~~~~
Command line driver for recognition training and evaluation.
"""
import click
import logging
import pathlib
from typing import List
from kraken.lib.progress import KrakenProgressBar
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.default_specs import RECOGNITION_HYPER_PARAMS, RECOGNITION_SPEC
from .util import _validate_manifests, _expand_gt, message, to_ptl_device
logging.captureWarnings(True)
logger = logging.getLogger('kraken')
@click.command('train')
@click.pass_context
@click.option('-B', '--batch-size', show_default=True, type=click.INT,
              default=RECOGNITION_HYPER_PARAMS['batch_size'], help='batch sample size')
@click.option('--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC,
              help='VGSL spec of the network to train. CTC layer will be added automatically.')
@click.option('-a', '--append', show_default=True, default=None, type=click.INT,
              help='Removes layers before argument and then appends spec. Only works when loading an existing model')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True,
              readable=True), help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=RECOGNITION_HYPER_PARAMS['freq'], type=click.FLOAT,
              help='Model saving and report generation frequency in epochs '
                   'during training. If frequency is >1 it must be an integer, '
                   'i.e. running validation every n-th epoch.')
@click.option('-q',
              '--quit',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['quit'],
              type=click.Choice(['early',
                                 'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')
@click.option('-N',
              '--epochs',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['epochs'],
              help='Number of epochs to train for')
@click.option('--min-epochs',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['min_epochs'],
              help='Minimal number of epochs to train for when using early stopping.')
@click.option('--lag',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['lag'],
              help='Number of evaluations (--report frequence) to wait before stopping training without improvement')
@click.option('--min-delta',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['min_delta'],
              type=click.FLOAT,
              help='Minimum improvement between epochs to reset early stopping. Default scales the delta by the best loss')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--precision',
              show_default=True,
              default='32',
              type=click.Choice(['64', '32', 'bf16', '16']),
              help='Numerical precision to use for training. Default is 32-bit single-point precision.')
@click.option('--optimizer',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['optimizer'],
              type=click.Choice(['Adam',
                                 'SGD',
                                 'RMSprop']),
              help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=RECOGNITION_HYPER_PARAMS['lrate'], help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=RECOGNITION_HYPER_PARAMS['momentum'], help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, type=float,
              default=RECOGNITION_HYPER_PARAMS['weight_decay'], help='Weight decay')
@click.option('--warmup', show_default=True, type=int,
              default=RECOGNITION_HYPER_PARAMS['warmup'], help='Number of steps to ramp up to `lrate` initial learning rate.')
@click.option('--freeze-backbone', show_default=True, type=int,
              default=RECOGNITION_HYPER_PARAMS['freeze_backbone'], help='Number of samples to keep the backbone (everything but last layer) frozen.')
@click.option('--schedule',
              show_default=True,
              type=click.Choice(['constant',
                                 '1cycle',
                                 'exponential',
                                 'cosine',
                                 'step',
                                 'reduceonplateau']),
              default=RECOGNITION_HYPER_PARAMS['schedule'],
              help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epochs` option.')
@click.option('-g',
              '--gamma',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['gamma'],
              help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')
@click.option('-ss',
              '--step-size',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['step_size'],
              help='Number of validation runs between learning rate decay for exponential and step LR schedules')
@click.option('--sched-patience',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['rop_patience'],
              help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')
@click.option('--cos-max',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['cos_t_max'],
              help='Epoch of minimal learning rate for cosine LR scheduler.')
@click.option('-p', '--partition', show_default=True, default=0.9,
              help='Ground truth data partition ratio between train/validation set')
@click.option('--fixed-splits/--ignore-fixed-split', show_default=True, default=False,
              help='Whether to honor fixed splits in binary datasets.')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=RECOGNITION_HYPER_PARAMS['normalization'], help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace', show_default=True,
              default=RECOGNITION_HYPER_PARAMS['normalize_whitespace'], help='Normalizes unicode whitespace')
@click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),
              help='Load a codec JSON definition (invalid if loading existing model)')
@click.option('--resize', show_default=True, default='fail',
              type=click.Choice([
                  'add', 'union',  # Deprecation: `add` is deprecated, `union` is the new value
                  'both', 'new',  # Deprecation: `both` is deprecated, `new` is the new value
                  'fail'
              ]),
              help='Codec/output layer resizing option. If set to `union` code '
                   'points will be added, `new` will set the layer to match exactly '
                   'the training data, `fail` will abort if training data and model '
                   'codec do not match.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('--base-dir', show_default=True, default='auto',
              type=click.Choice(['L', 'R', 'auto']), help='Set base text '
              'direction. This should be set to the direction used during the '
              'creation of the training data. If set to `auto` it will be '
              'overridden by any explicit value given in the input files.')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
              help='When loading an existing model, retrieve hyperparameters from the model')
@click.option('--repolygonize/--no-repolygonize', show_default=True,
              default=False, help='Repolygonizes line data in ALTO/PageXML '
              'files. This ensures that the trained model is compatible with the '
              'segmenter in kraken even if the original image files either do '
              'not contain anything but transcriptions and baseline information '
              'or the polygon data was created using a different method. Will '
              'be ignored in `path` mode. Note that this option will be slow '
              'and will not scale input images to the same size as the segmenter '
              'does.')
@click.option('--force-binarization/--no-binarization', show_default=True,
              default=False, help='Forces input images to be binary, otherwise '
              'the appropriate color format will be auto-determined through the '
              'network specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path',
              help='Sets the training data format. In ALTO and PageXML mode all '
              'data is extracted from xml files containing both line definitions and a '
              'link to source images. In `path` mode arguments are image files '
              'sharing a prefix up to the last extension with `.gt.txt` text files '
              'containing the transcription. In binary mode files are datasets '
              'files containing pre-extracted text lines.')
@click.option('--augment/--no-augment',
              show_default=True,
              default=RECOGNITION_HYPER_PARAMS['augment'],
              help='Enable image augmentation')
@click.option('--logger', 'pl_logger', show_default=True, type=click.Choice(['tensorboard']), default=None,
              help='Logger used by PyTorch Lightning to track metrics such as loss and accuracy.')
@click.option('--log-dir', show_default=True, type=click.Path(exists=True, dir_okay=True, writable=True),
              help='Path to directory where the logger will store the logs. If not set, a directory will be created in the current working directory.')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, batch_size, pad, output, spec, append, load, freq, quit, epochs,
          min_epochs, lag, min_delta, device, precision, optimizer, lrate, momentum,
          weight_decay, warmup, freeze_backbone, schedule, gamma, step_size,
          sched_patience, cos_max, partition, fixed_splits, normalization,
          normalize_whitespace, codec, resize, reorder, base_dir,
          training_files, evaluation_files, workers, load_hyper_parameters,
          repolygonize, force_binarization, format_type, augment,
          pl_logger, log_dir, ground_truth):
    """
    Trains a text recognition model from image-text pairs.

    Validates the option combination, assembles the hyperparameter
    dictionary, builds a `RecognitionModel` from the collected training
    data, and runs a `KrakenTrainer` fit. On early stopping the best
    checkpoint is copied to `<output>_best.mlmodel`.
    """
    # --append and --resize only make sense when continuing from an existing model.
    if not load and append:
        raise click.BadOptionUsage('append', 'append option requires loading an existing model')
    if resize != 'fail' and not load:
        raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')
    # freq is either a fraction of an epoch in [0, 1] or a whole number of
    # epochs. Negative values are rejected explicitly; previously a negative
    # integer slipped through the `freq % 1.0 != 0` test.
    if freq < 0 or (freq > 1 and freq % 1.0 != 0):
        raise click.BadOptionUsage('freq', 'freq needs to be either in the interval [0,1.0] or a positive integer.')
    # optional dependencies are probed up front so failures happen before any
    # expensive dataset construction.
    if augment:
        try:
            import albumentations  # NOQA
        except ImportError:
            raise click.BadOptionUsage('augment', 'augmentation needs the `albumentations` package installed.')
    if pl_logger == 'tensorboard':
        try:
            import tensorboard  # NOQA
        except ImportError:
            raise click.BadOptionUsage('logger', 'tensorboard logger needs the `tensorboard` package installed.')
    if log_dir is None:
        log_dir = pathlib.Path.cwd()

    # deferred imports keep CLI startup fast when this subcommand is not used
    import json
    import shutil
    from kraken.lib.train import RecognitionModel, KrakenTrainer

    hyper_params = RECOGNITION_HYPER_PARAMS.copy()
    hyper_params.update({'freq': freq,
                         'pad': pad,
                         'batch_size': batch_size,
                         'quit': quit,
                         'epochs': epochs,
                         'min_epochs': min_epochs,
                         'lag': lag,
                         'min_delta': min_delta,
                         'optimizer': optimizer,
                         'lrate': lrate,
                         'momentum': momentum,
                         'weight_decay': weight_decay,
                         'warmup': warmup,
                         'freeze_backbone': freeze_backbone,
                         'schedule': schedule,
                         'gamma': gamma,
                         'step_size': step_size,
                         'rop_patience': sched_patience,
                         'cos_t_max': cos_max,
                         'normalization': normalization,
                         'normalize_whitespace': normalize_whitespace,
                         'augment': augment,
                         })

    # disable automatic partition when given evaluation set explicitly
    if evaluation_files:
        partition = 1
    ground_truth = list(ground_truth)

    # merge training_files into ground_truth list
    if training_files:
        ground_truth.extend(training_files)

    if len(ground_truth) == 0:
        raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')

    # an explicit base direction overrides plain display-order reordering
    if reorder and base_dir != 'auto':
        reorder = base_dir

    if codec:
        logger.debug(f'Loading codec file from {codec}')
        codec = json.load(codec)

    try:
        accelerator, device = to_ptl_device(device)
    except Exception as e:
        raise click.BadOptionUsage('device', str(e))

    # fractional freq -> validate within an epoch; integral freq -> every n epochs
    if hyper_params['freq'] > 1:
        val_check_interval = {'check_val_every_n_epoch': int(hyper_params['freq'])}
    else:
        val_check_interval = {'val_check_interval': hyper_params['freq']}

    model = RecognitionModel(hyper_params=hyper_params,
                             output=output,
                             spec=spec,
                             append=append,
                             model=load,
                             reorder=reorder,
                             training_data=ground_truth,
                             evaluation_data=evaluation_files,
                             partition=partition,
                             binary_dataset_split=fixed_splits,
                             num_workers=workers,
                             load_hyper_parameters=load_hyper_parameters,
                             repolygonize=repolygonize,
                             force_binarization=force_binarization,
                             format_type=format_type,
                             codec=codec,
                             resize=resize)

    trainer = KrakenTrainer(accelerator=accelerator,
                            devices=device,
                            precision=precision,
                            max_epochs=hyper_params['epochs'] if hyper_params['quit'] == 'dumb' else -1,
                            min_epochs=hyper_params['min_epochs'],
                            freeze_backbone=hyper_params['freeze_backbone'],
                            enable_progress_bar=not ctx.meta['verbose'],
                            deterministic=ctx.meta['deterministic'],
                            pl_logger=pl_logger,
                            log_dir=log_dir,
                            **val_check_interval)

    try:
        trainer.fit(model)
    except KrakenInputException as e:
        # translate codec mismatch errors into actionable CLI advice;
        # recommend the non-deprecated resize modes (`union`/`new`).
        if e.args[0].startswith('Training data and model codec alphabets mismatch') and resize == 'fail':
            raise click.BadOptionUsage('resize', 'Mismatched training data for loaded model. Set option `--resize` to `union` or `new`')
        else:
            raise e

    if model.best_epoch == -1:
        logger.warning('Model did not improve during training.')
        ctx.exit(1)

    if quit == 'early':
        message(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel')
        logger.info(f'Moving best model {model.best_model} ({model.best_metric}) to {output}_best.mlmodel')
        shutil.copy(f'{model.best_model}', f'{output}_best.mlmodel')
@click.command('test')
@click.pass_context
@click.option('-B', '--batch-size', show_default=True, type=click.INT,
              default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size')
@click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),
              multiple=True, help='Model(s) to evaluate')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with paths to evaluation data.')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('--workers', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('--base-dir', show_default=True, default='auto',
              type=click.Choice(['L', 'R', 'auto']), help='Set base text '
              'direction. This should be set to the direction used during the '
              'creation of the training data. If set to `auto` it will be '
              'overridden by any explicit value given in the input files.')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
              default=None, help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace',
              show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('--repolygonize/--no-repolygonize', show_default=True,
              default=False, help='Repolygonizes line data in ALTO/PageXML '
              'files. This ensures that the trained model is compatible with the '
              'segmenter in kraken even if the original image files either do '
              'not contain anything but transcriptions and baseline information '
              'or the polygon data was created using a different method. Will '
              'be ignored in `path` mode. Note, that this option will be slow '
              'and will not scale input images to the same size as the segmenter '
              'does.')
@click.option('--force-binarization/--no-binarization', show_default=True,
              default=False, help='Forces input images to be binary, otherwise '
              'the appropriate color format will be auto-determined through the '
              'network specification. Will be ignored in `path` mode.')
@click.option('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page', 'binary']), default='path',
              help='Sets the training data format. In ALTO and PageXML mode all '
              'data is extracted from xml files containing both baselines and a '
              'link to source images. In `path` mode arguments are image files '
              'sharing a prefix up to the last extension with JSON `.path` files '
              'containing the baseline information. In `binary` mode files are '
              'collections of pre-extracted text line images.')
@click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def test(ctx, batch_size, model, evaluation_files, device, pad, workers,
         reorder, base_dir, normalization, normalize_whitespace, repolygonize,
         force_binarization, format_type, test_set):
    """
    Evaluate one or more recognition models on a test set.

    Builds a dataset matching `--format-type`, decodes every line with each
    model, aligns predictions against ground truth, and prints a per-model
    error report plus the accuracy mean/stddev over all models.
    """
    if not model:
        raise click.UsageError('No model to evaluate given.')

    # deferred imports keep CLI startup fast when this subcommand is not used
    import numpy as np
    from torch.utils.data import DataLoader
    from kraken.serialization import render_report
    from kraken.lib import models
    from kraken.lib.xml import preparse_xml_data
    from kraken.lib.dataset import (global_align, compute_confusions,
                                    PolygonGTDataset, GroundTruthDataset,
                                    ImageInputTransforms,
                                    ArrowIPCRecognitionDataset,
                                    collate_sequences)

    logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files)))

    nn = {}
    for p in model:
        message('Loading model {}\t'.format(p), nl=False)
        nn[p] = models.load_any(p)
        message('\u2713', fg='green')

    test_set = list(test_set)

    # set number of OpenMP threads
    # NOTE(review): this hard-codes 1 thread regardless of `--workers`;
    # `workers` is only used for the DataLoader below — confirm intended.
    next(iter(nn.values())).nn.set_num_threads(1)

    if evaluation_files:
        test_set.extend(evaluation_files)

    if len(test_set) == 0:
        raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')

    # pick a dataset class and normalize entries into kwargs for `ds.add()`
    if format_type in ['xml', 'page', 'alto']:
        if repolygonize:
            message('Repolygonizing data')
        test_set = preparse_xml_data(test_set, format_type, repolygonize)
        valid_norm = False
        DatasetClass = PolygonGTDataset
    elif format_type == 'binary':
        DatasetClass = ArrowIPCRecognitionDataset
        if repolygonize:
            logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.')
        test_set = [{'file': file} for file in test_set]
        valid_norm = False
    else:
        DatasetClass = GroundTruthDataset
        if force_binarization:
            logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')
            force_binarization = False
        if repolygonize:
            logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')
        test_set = [{'image': img} for img in test_set]
        valid_norm = True

    # re-check: XML preparsing may have produced an empty line list
    if len(test_set) == 0:
        raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')

    # an explicit base direction overrides plain display-order reordering
    if reorder and base_dir != 'auto':
        reorder = base_dir

    acc_list = []
    for p, net in nn.items():
        algn_gt: List[str] = []
        algn_pred: List[str] = []
        chars = 0
        error = 0
        message('Evaluating {}'.format(p))
        logger.info('Evaluating {}'.format(p))
        batch, channels, height, width = net.nn.input
        ts = ImageInputTransforms(batch, height, width, channels, (pad, 0), valid_norm, force_binarization)
        ds = DatasetClass(normalization=normalization,
                          whitespace_normalization=normalize_whitespace,
                          reorder=reorder,
                          im_transforms=ts)
        for line in test_set:
            try:
                ds.add(**line)
            except KrakenInputException as e:
                logger.info(e)
        # don't encode validation set as the alphabets may not match causing encoding failures
        ds.no_encode()
        ds_loader = DataLoader(ds,
                               batch_size=batch_size,
                               num_workers=workers,
                               pin_memory=True,
                               collate_fn=collate_sequences)

        with KrakenProgressBar() as progress:
            batches = len(ds_loader)
            pred_task = progress.add_task('Evaluating', total=batches, visible=not ctx.meta['verbose'])
            for batch in ds_loader:
                im = batch['image']
                text = batch['target']
                lens = batch['seq_lens']
                try:
                    pred = net.predict_string(im, lens)
                    for x, y in zip(pred, text):
                        chars += len(y)
                        c, algn1, algn2 = global_align(y, x)
                        algn_gt.extend(algn1)
                        algn_pred.extend(algn2)
                        error += c
                except FileNotFoundError as e:
                    # shrink the expected batch count so the bar still completes
                    batches -= 1
                    progress.update(pred_task, total=batches)
                    logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename))
                except KrakenInputException as e:
                    batches -= 1
                    progress.update(pred_task, total=batches)
                    logger.warning(str(e))
                progress.update(pred_task, advance=1)

        # guard against ZeroDivisionError when no line could be decoded
        acc_list.append((chars - error) / chars if chars else 0.0)
        confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred)
        rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs)
        logger.info(rep)
        message(rep)

    logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
    message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
| 26,702 | 52.087475 | 151 | py |
kraken | kraken-main/kraken/contrib/heatmap_overlay.py | #! /usr/bin/env python
"""
Produces semi-transparent neural segmenter output overlays
"""
import click
@click.command()
@click.option('-i', '--model', default=None, show_default=True, type=click.Path(exists=True),
              help='Baseline detection model to use.')
@click.argument('files', nargs=-1)
def cli(model, files):
    """
    Applies a BLLA baseline segmentation model and outputs the raw heatmaps of
    the first baseline class.
    """
    import torch
    from PIL import Image
    from kraken.lib import vgsl, dataset
    import torch.nn.functional as F
    from os.path import splitext
    import torchvision.transforms as tf

    net = vgsl.TorchVGSLModel.load_model(model)
    net.eval()
    batch, channels, height, width = net.input
    transforms = dataset.ImageInputTransforms(batch, height, width, channels, 0, valid_norm=False)
    torch.set_num_threads(1)

    # (output channel, RGBA fill colour) in compositing order; channel 2 is
    # additionally dumped as a raw heatmap.
    layers = ((2, (0, 130, 200, 255)),
              (1, (230, 25, 75, 255)),
              (0, (60, 180, 75, 255)))

    for img in files:
        print(img)
        im = Image.open(img)
        xs = transforms(im)
        with torch.no_grad():
            o, _ = net.nn(xs.unsqueeze(0))
            o = F.interpolate(o, size=xs.shape[1:])
            o = o.squeeze().numpy()
        scal_im = tf.ToPILImage()(1-xs)
        composite = scal_im.convert('RGBA')
        for chan, colour in layers:
            heat = Image.fromarray((o[chan]*255).astype('uint8'))
            if chan == 2:
                heat.save(splitext(img)[0] + '.heat.png')
            composite = Image.composite(Image.new('RGBA', scal_im.size, colour), composite, heat)
        composite.save(splitext(img)[0] + '.overlay.png')
        del o
        del im


if __name__ == '__main__':
    cli()
| 1,903 | 31.271186 | 98 | py |
kraken | kraken-main/kraken/contrib/baselineset_overlay.py | #! /usr/bin/env python
"""
Produces semi-transparent neural segmenter output overlays
"""
import click
@click.command()
@click.argument('files', nargs=-1)
def cli(files):
    """
    Renders BaselineSet training targets as raw heatmaps and
    semi-transparent colour overlays next to the source images.
    """
    import torch
    from PIL import Image
    from os.path import splitext
    import torchvision.transforms as tf
    from kraken.lib import dataset

    batch, channels, height, width = 1, 3, 1200, 0
    transforms = dataset.ImageInputTransforms(batch, height, width, channels, 0, valid_norm=False)
    torch.set_num_threads(1)
    ds = dataset.BaselineSet(files, im_transforms=transforms, mode='xml')

    for idx, sample in enumerate(ds):
        img = ds.imgs[idx]
        print(img)
        im = Image.open(img)
        # only resize/tensor-conversion steps of the transform pipeline
        res_tf = tf.Compose(transforms.transforms[:2])
        scal_im = res_tf(im)
        o = sample['target'].numpy()

        # (target class index, RGBA fill colour) in compositing order; the
        # first layer is additionally dumped as a raw heatmap.
        layers = ((ds.class_mapping['baselines']['default'], (0, 130, 200, 255)),
                  (ds.class_mapping['aux']['_start_separator'], (230, 25, 75, 255)),
                  (ds.class_mapping['aux']['_end_separator'], (60, 180, 75, 255)))

        composite = scal_im.convert('RGBA')
        save_heat = True
        for cls, colour in layers:
            heat = Image.fromarray((o[cls]*255).astype('uint8'))
            if save_heat:
                heat.save(splitext(img)[0] + '.heat.png')
                save_heat = False
            composite = Image.composite(Image.new('RGBA', scal_im.size, colour), composite, heat)
        composite.save(splitext(img)[0] + '.overlay.png')
        del o
        del im


if __name__ == '__main__':
    cli()
| 1,640 | 33.1875 | 100 | py |
kraken | kraken-main/kraken/contrib/hyperparameters/tune_pretraining.py | #!/usr/bin/env python
"""
A script for a grid search over pretraining hyperparameters.
"""
import click
from functools import partial
from ray import tune
from ray.tune.integration.pytorch_lightning import TuneReportCallback
from kraken.lib.default_specs import RECOGNITION_PRETRAIN_HYPER_PARAMS, RECOGNITION_SPEC
from kraken.lib.pretrain.model import PretrainDataModule, RecognitionPretrainModel
from kraken.ketos.util import _validate_manifests
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
# hyperparameter search space sampled by Ray Tune for each trial
config = {'lrate': tune.loguniform(1e-8, 1e-2),
          'num_negatives': tune.qrandint(1, 4, 1),
          'mask_prob': tune.loguniform(0.01, 0.2),
          'mask_width': tune.qrandint(2, 8, 2)}
# per-trial resource reservation; `cpu` is also reused below to size the
# dataloader worker pool
resources_per_trial = {"cpu": 8, "gpu": 0.5}
def train_tune(config, training_data=None, epochs=100, spec=RECOGNITION_SPEC):
    """
    Runs a single Ray Tune pretraining trial.

    Args:
        config: sampled hyperparameter values, merged over the defaults.
        training_data: paths to the binary training datasets.
        epochs: maximum number of epochs for this trial.
        spec: VGSL specification of the network to pretrain.
    """
    params = RECOGNITION_PRETRAIN_HYPER_PARAMS.copy()
    params.update(config)
    # the model is built first; the data module is sized from its geometry
    model = RecognitionPretrainModel(hyper_params=params,
                                     output='./model',
                                     spec=spec)
    data_module = PretrainDataModule(batch_size=params.pop('batch_size'),
                                     pad=params.pop('pad'),
                                     augment=params.pop('augment'),
                                     training_data=training_data,
                                     num_workers=resources_per_trial['cpu'],
                                     height=model.height,
                                     width=model.width,
                                     channels=model.channels,
                                     format_type='binary')
    # reports the model's `CE` metric back to Tune under the name `loss`
    reporter = TuneReportCallback({'loss': 'CE'}, on='validation_end')
    trainer = pl.Trainer(max_epochs=epochs,
                         accelerator='gpu',
                         devices=1,
                         callbacks=[reporter],
                         enable_progress_bar=False)
    trainer.fit(model, datamodule=data_module)
@click.command()
@click.option('-v', '--verbose', default=0, count=True)
@click.option('--seed', default=42, type=click.INT,
              help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
                   'ensure reproducible random splits of data')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='pretrain_hyper', help='output directory')
@click.option('-n', '--num-samples', show_default=True, type=int, default=100, help='Number of samples to train')
@click.option('-N', '--epochs', show_default=True, type=int, default=10, help='Maximum number of epochs to train per sample')
@click.option('-s', '--spec', show_default=True, default=RECOGNITION_SPEC, help='VGSL spec of the network to train.')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
              callback=_validate_manifests, type=click.File(mode='r', lazy=True),
              help='File(s) with additional paths to training data')
@click.argument('files', nargs=-1)
def cli(verbose, seed, output, num_samples, epochs, spec, training_files, files):
    """
    Runs a random search over pretraining hyperparameters and prints the
    best configuration found.

    Note: the short flag `-s` previously clashed between `--seed` and
    `--spec`; it now belongs to `--spec` only.
    """
    files = list(files)
    if training_files:
        files.extend(training_files)

    if not files:
        raise click.UsageError('No training data was provided to the search command. Use `-t` or the `files` argument.')

    seed_everything(seed, workers=True)

    analysis = tune.run(partial(train_tune,
                                training_data=files,
                                epochs=epochs,
                                spec=spec),
                        local_dir=output,
                        num_samples=num_samples,
                        resources_per_trial=resources_per_trial,
                        config=config)
    # The TuneReportCallback in train_tune only ever reports a `loss` metric
    # (mapped from the model's CE), so the best trial is selected on minimal
    # loss; there is no `accuracy` metric to query.
    best = analysis.get_best_config(metric='loss', mode='min')
    # click.echo's second positional parameter is the output *file*; the
    # config has to be part of the message itself to be printed.
    click.echo(f'Best hyperparameters found were: {best}')


if __name__ == '__main__':
    cli()
| 3,872 | 43.011364 | 142 | py |
kraken | kraken-main/kraken/contrib/hyperparameters/tune_training.py | #!/usr/bin/env python
"""
A script for a grid search over pretraining hyperparameters.
"""
import sys
from functools import partial
from ray import tune
from ray.tune.integration.pytorch_lightning import TuneReportCallback
from kraken.lib.default_spec import RECOGNITION_PRETRAIN_HYPER_PARAMS, RECOGNITION_SPEC
from kraken.lib.pretrain.model import PretrainDataModule, RecognitionPretrainModel
from ray.tune.schedulers import ASHAScheduler
import pytorch_lightning as pl
config = {'lrate': tune.loguniform(1e-8, 1e-2),
'num_negatives': tune.qrandint(2, 100, 8),
'mask_prob': tune.loguniform(0.01, 0.2),
'mask_width': tune.qrandint(2, 8, 2)}
resources_per_trial = {"cpu": 8, "gpu": 0.5}
def train_tune(config, training_data=None, epochs=100):
hyper_params = RECOGNITION_PRETRAIN_HYPER_PARAMS.copy()
hyper_params.update(config)
model = RecognitionPretrainModel(hyper_params=hyper_params,
output='model',
spec=RECOGNITION_SPEC)
data_module = PretrainDataModule(batch_size=hyper_params.pop('batch_size'),
pad=hyper_params.pop('pad'),
augment=hyper_params.pop('augment'),
training_data=training_data,
num_workers=resources_per_trial['cpu'],
height=model.height,
width=model.width,
channels=model.channels,
format_type='binary')
callback = TuneReportCallback({'loss': 'CE'}, on='validation_end')
trainer = pl.Trainer(max_epochs=epochs,
gpus=1,
callbacks=[callback],
enable_progress_bar=False)
trainer.fit(model)
analysis = tune.run(partial(train_tune, training_data=sys.argv[2:]), local_dir=sys.argv[1], num_samples=100, resources_per_trial=resources_per_trial, config=config)
print("Best hyperparameters found were: ", analysis.get_best_config(metric='accuracy', mode='max'))
| 2,190 | 37.438596 | 164 | py |
kraken | kraken-main/kraken/lib/codec.py | #
# Copyright 2017 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Pytorch compatible codec with many-to-many mapping between labels and
graphemes.
"""
import logging
import numpy as np
from collections import Counter
from typing import List, Tuple, Set, Union, Dict, Sequence
from torch import IntTensor
from kraken.lib.exceptions import KrakenEncodeException, KrakenCodecException
__all__ = ['PytorchCodec']
logger = logging.getLogger(__name__)
class PytorchCodec(object):
    """
    Builds a codec converting between graphemes/code points and integer
    label sequences.

    charset may either be a string, a list or a dict. In the first case
    each code point will be assigned a label, in the second case each
    string in the list will be assigned a label, and in the final case each
    key string will be mapped to the value sequence of integers. In the
    first two cases labels will be assigned automatically. When a mapping
    is manually provided the label codes need to be a prefix-free code.

    As 0 is the blank label in a CTC output layer, output labels and input
    dictionaries are/should be 1-indexed.

    Args:
        charset: Input character set.
        strict: Flag indicating if encoding/decoding errors should be ignored
                or cause an exception.

    Raises:
        KrakenCodecException: If the character set contains duplicate
                              entries or the mapping is non-singular or
                              non-prefix-free.
    """
    def __init__(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str], strict=False):
        if isinstance(charset, dict):
            self.c2l = charset
        else:
            # reject duplicate code points/strings before auto-assigning labels
            cc = Counter(charset)
            if len(cc) < len(charset):
                raise KrakenCodecException(f'Duplicate entry in codec definition string: {cc}')
            self.c2l = {k: [v] for v, k in enumerate(sorted(charset), start=1)}
        # longest-match-first ordering used by the greedy encoder
        self.c_sorted = sorted(self.c2l.keys(), key=len, reverse=True)
        self.l2c = {tuple(v): k for k, v in self.c2l.items()}  # type: Dict[Tuple[int], str]
        # fast path for decoding single-label codes
        self.l2c_single = {k[0]: v for k, v in self.l2c.items() if len(k) == 1}
        self.strict = strict
        if not self.is_valid:
            raise KrakenCodecException('Codec is not valid (non-singular/non-prefix free).')
    def __len__(self) -> int:
        """
        Total number of input labels the codec can decode.
        """
        return len(self.l2c.keys())
    @property
    def is_valid(self) -> bool:
        """
        Returns True if the codec is prefix-free (in label space) and
        non-singular (in both directions).
        """
        # quick test for non-singularity
        if len(self.l2c.keys()) != len(self.c2l.keys()):
            return False
        # prefix-freeness: no label sequence may be a prefix of another
        for i, code_1 in enumerate(sorted(self.l2c.keys())):
            for j, code_2 in enumerate(sorted(self.l2c.keys())):
                if i != j and code_1[:len(code_2)] == code_2:
                    return False
        return True
    @property
    def max_label(self) -> int:
        """
        Returns the maximum label value.
        """
        return max(label for labels in self.c2l.values() for label in labels)
    def encode(self, s: str) -> IntTensor:
        """
        Encodes a string into a sequence of labels.

        If the code is non-singular we greedily encode the longest sequence first.

        Args:
            s: Input unicode string

        Returns:
            Encoded label sequence

        Raises:
            KrakenEncodeException: if a subsequence is not encodable and the
                                   codec is set to strict mode.
        """
        labels = []  # type: List[int]
        idx = 0
        while idx < len(s):
            encodable_suffix = False
            # try multi-character codes longest-first; c_sorted is ordered by
            # descending length so we can stop at the first length-1 entry and
            # fall through to the direct dict lookup below.
            for code in self.c_sorted:
                if len(code) == 1:
                    break
                if s[idx:].startswith(code):
                    labels.extend(self.c2l[code])
                    idx += len(code)
                    encodable_suffix = True
                    break
            if not encodable_suffix and s[idx] in self.c2l:
                labels.extend(self.c2l[s[idx]])
                idx += 1
                encodable_suffix = True
            if not encodable_suffix:
                if self.strict:
                    raise KrakenEncodeException(f'Non-encodable sequence {s[idx:idx+5]}... encountered.')
                logger.warning(f'Non-encodable sequence {s[idx:idx+5]}... encountered. Advancing one code point.')
                idx += 1
        return IntTensor(labels)
    def decode(self, labels: Sequence[Tuple[int, int, int, float]]) -> List[Tuple[str, int, int, float]]:
        """
        Decodes a labelling.

        Given a labelling with cuts and confidences returns a string with the
        cuts and confidences aggregated across label-code point
        correspondences. When decoding multilabels to code points the resulting
        cuts are min/max, confidences are averaged.

        Args:
            labels: Input containing tuples (label, start, end,
                    confidence).

        Returns:
            A list of tuples (code point, start, end, confidence)
        """
        start = [x for _, x, _, _ in labels]
        end = [x for _, _, x, _ in labels]
        con = [x for _, _, _, x in labels]
        labels = tuple(x for x, _, _, _ in labels)
        decoded = []
        idx = 0
        while idx < len(labels):
            decodable_suffix = False
            if int(labels[idx]) in self.l2c_single:
                # single-label decoding: all code points of the decoding share
                # the cut/confidence of that label.
                code = self.l2c_single[int(labels[idx])]
                decoded.extend([(c, s, e, u) for c, s, e, u in zip(code,
                                                                   len(code) * [start[idx]],
                                                                   len(code) * [end[idx]],
                                                                   len(code) * [con[idx]])])
                idx += 1
                decodable_suffix = True
            else:
                # multi-label decoding: cuts span the whole label sequence,
                # confidences are averaged over it.
                for code in self.l2c.keys():
                    if code == labels[idx:idx+len(code)]:
                        decoded.extend([(c, s, e, u) for c, s, e, u in zip(self.l2c[code],
                                                                           len(self.l2c[code]) * [start[idx]],
                                                                           len(self.l2c[code]) * [end[idx + len(code) - 1]],
                                                                           len(self.l2c[code]) * [np.mean(con[idx:idx + len(code)])])])
                        idx += len(code)
                        decodable_suffix = True
                        break
            if not decodable_suffix:
                if self.strict:
                    raise KrakenEncodeException(f'Non-decodable sequence {labels[idx:idx+5]}... encountered.')
                logger.debug(f'Non-decodable sequence {labels[idx:idx+5]}... encountered. Advancing one label.')
                idx += 1
        return decoded
    def merge(self, codec: 'PytorchCodec') -> Tuple['PytorchCodec', Set]:
        """
        Transforms this codec (c1) into another (c2) reusing as many labels as
        possible.

        The resulting codec is able to encode the same code point sequences
        while not necessarily having the same labels for them as c2.
        Retains matching character -> label mappings from both codecs, removes
        mappings not c2, and adds mappings not in c1. Compound labels in c2 for
        code point sequences not in c1 containing labels also in use in c1 are
        added as separate labels.

        Args:
            codec: PytorchCodec to merge with

        Returns:
            A merged codec and a list of labels that were removed from the
            original codec.
        """
        # find character sequences not encodable (exact match) by new codec.
        # get labels for these sequences as deletion candidates
        rm_candidates = {cseq: enc for cseq, enc in self.c2l.items() if cseq not in codec.c2l}
        c2l_cand = self.c2l.copy()
        for x in rm_candidates.keys():
            c2l_cand.pop(x)
        # remove labels from candidate list that are in use for other decodings.
        # NB: iterate over a snapshot of rm_labels; removing from the list
        # while iterating it directly skips the element following each
        # removal, leaving in-use labels marked for deletion.
        rm_labels = [label for v in rm_candidates.values() for label in v]
        for v in c2l_cand.values():
            for label in list(rm_labels):
                if label in v:
                    rm_labels.remove(label)
        # iteratively remove labels, decrementing subsequent labels to close
        # (new) holes in the codec.
        offset_rm_labels = [v-idx for idx, v in enumerate(sorted(set(rm_labels)))]
        for rlabel in offset_rm_labels:
            c2l_cand = {k: [label-1 if label > rlabel else label for label in v] for k, v in c2l_cand.items()}
        # add mappings not in original codec
        add_list = {cseq: enc for cseq, enc in codec.c2l.items() if cseq not in self.c2l}
        # renumber
        start_idx = max((0,) + tuple(label for v in c2l_cand.values() for label in v)) + 1
        add_labels = {k: v for v, k in enumerate(sorted(set(label for v in add_list.values() for label in v)), start_idx)}
        for k, v in add_list.items():
            c2l_cand[k] = [add_labels[label] for label in v]
        return PytorchCodec(c2l_cand, self.strict), set(rm_labels)
    def add_labels(self, charset: Union[Dict[str, Sequence[int]], Sequence[str], str]) -> 'PytorchCodec':
        """
        Adds additional characters/labels to the codec.

        charset may either be a string, a list or a dict. In the first case
        each code point will be assigned a label, in the second case each
        string in the list will be assigned a label, and in the final case each
        key string will be mapped to the value sequence of integers. In the
        first two cases labels will be assigned automatically.

        As 0 is the blank label in a CTC output layer, output labels and input
        dictionaries are/should be 1-indexed.

        Args:
            charset: Input character set.
        """
        if isinstance(charset, dict):
            c2l = self.c2l.copy()
            c2l.update(charset)
        else:
            # new auto-assigned labels start above the current maximum
            c2l = self.c2l.copy()
            c2l.update({k: [v] for v, k in enumerate(sorted(charset), start=self.max_label+1)})
        return PytorchCodec(c2l, self.strict)
    def __repr__(self):
        return f'PytorchCodec({self.c2l})'
| 11,105 | 40.909434 | 135 | py |
kraken | kraken-main/kraken/lib/progress.py | # Copyright Benjamin Kiessling
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handlers for rich-based progress bars.
"""
from typing import Any, Dict, Optional, Union
from numbers import Number
from dataclasses import dataclass
import pytorch_lightning as pl
from pytorch_lightning.callbacks.progress.rich_progress import CustomProgress, RichProgressBar, MetricsTextColumn
from rich import get_console, reconfigure
from rich.console import Console, RenderableType
from rich.progress import BarColumn, Progress, ProgressColumn, Task, TextColumn, TimeRemainingColumn, TimeElapsedColumn, DownloadColumn
from rich.text import Text
from rich.style import Style
from rich.default_styles import DEFAULT_STYLES
__all__ = ['KrakenProgressBar', 'KrakenDownloadProgressBar', 'KrakenTrainProgressBar']
class BatchesProcessedColumn(ProgressColumn):
    """Progress column rendering the completed/total batch count in magenta."""
    def __init__(self):
        super().__init__()
    def render(self, task) -> RenderableType:
        # an unbounded task (total == inf) renders its denominator as `--`
        if task.total == float("inf"):
            denominator = "--"
        else:
            denominator = task.total
        return Text(f"{int(task.completed)}/{denominator}", style='magenta')
class EarlyStoppingColumn(ProgressColumn):
    """
    Column displaying the state of the trainer's early stopping callback:
    wait count, patience, and the best score so far.
    """
    def __init__(self, trainer):
        self._trainer = trainer
        super().__init__()
    def render(self, task) -> Text:
        cb = self._trainer.early_stopping_callback
        message = f'early_stopping: {cb.wait_count}/{cb.patience} {cb.best_score:.5f}'
        return Text(message, justify="left")
class KrakenProgressBar(Progress):
    """
    Adaptation of the default rich progress bar to fit with kraken/ketos output.
    """
    def __init__(self, *args, **kwargs):
        # force a low refresh rate so log output is not flooded
        kwargs['refresh_per_second'] = 1
        super().__init__(TextColumn("[progress.description]{task.description}"),
                         BarColumn(),
                         TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
                         BatchesProcessedColumn(),
                         TimeRemainingColumn(),
                         TimeElapsedColumn(),
                         *args, **kwargs)
class KrakenDownloadProgressBar(Progress):
    """
    Adaptation of the default rich progress bar to fit with kraken/ketos download output.
    """
    def __init__(self, *args, **kwargs):
        # force a low refresh rate so log output is not flooded
        kwargs['refresh_per_second'] = 1
        super().__init__(TextColumn("[progress.description]{task.description}"),
                         BarColumn(),
                         TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
                         DownloadColumn(),
                         TimeRemainingColumn(),
                         TimeElapsedColumn(),
                         *args, **kwargs)
class KrakenTrainProgressBar(RichProgressBar):
    """
    Adaptation of the default ptl rich progress bar to fit with kraken (segtrain, train) output.

    Args:
        refresh_rate: Determines at which rate (in number of batches) the progress bars get updated.
            Set it to ``0`` to disable the display.
        leave: Leaves the finished progress bar in the terminal at the end of the epoch. Default: False
        console_kwargs: Args for constructing a `Console`
    """
    def __init__(self,
                 *args,
                 **kwargs):
        # always apply kraken's theme on top of the upstream progress bar
        super().__init__(*args, **kwargs, theme=RichProgressBarTheme())
    def _init_progress(self, trainer):
        # (Re)builds the rich Progress object. Mirrors the upstream PTL
        # implementation but appends an EarlyStoppingColumn when the trainer
        # has an early stopping callback configured.
        if self.is_enabled and (self.progress is None or self._progress_stopped):
            self._reset_progress_bar_ids()
            reconfigure(**self._console_kwargs)
            self._console = get_console()
            self._console.clear_live()
            self._metric_component = MetricsTextColumn(trainer, self.theme.metrics)
            columns = self.configure_columns(trainer)
            columns.append(self._metric_component)
            if trainer.early_stopping_callback:
                self._early_stopping_component = EarlyStoppingColumn(trainer)
                columns.append(self._early_stopping_component)
            self.progress = CustomProgress(
                *columns,
                auto_refresh=False,
                disable=self.is_disabled,
                console=self._console,
            )
            self.progress.start()
            # progress has started
            self._progress_stopped = False
    def _get_train_description(self, current_epoch: int) -> str:
        # show '∞' instead of max_epochs when training runs until early
        # stopping rather than for a fixed number of epochs.
        return f"stage {current_epoch}/" \
               f"{self.trainer.max_epochs if self.trainer.model.hparams['quit'] == 'fixed' else '∞'}"
@dataclass
class RichProgressBarTheme:
    """Styles to associate to different base components.

    Args:
        description: Style for the progress bar description. For eg., Epoch x, Testing, etc.
        progress_bar: Style for the bar in progress.
        progress_bar_finished: Style for the finished progress bar.
        progress_bar_pulse: Style for the progress bar when `IterableDataset` is being processed.
        batch_progress: Style for the progress tracker (i.e 10/50 batches completed).
        time: Style for the processed time and estimate time remaining.
        processing_speed: Style for the speed of the batches being processed.
        metrics: Style for the metrics

    https://rich.readthedocs.io/en/stable/style.html
    """
    # all defaults mirror rich's stock styles (DEFAULT_STYLES) so kraken
    # output matches standard rich rendering instead of PTL's custom colors.
    description: Union[str, Style] = DEFAULT_STYLES['progress.description']
    progress_bar: Union[str, Style] = DEFAULT_STYLES['bar.complete']
    progress_bar_finished: Union[str, Style] = DEFAULT_STYLES['bar.finished']
    progress_bar_pulse: Union[str, Style] = DEFAULT_STYLES['bar.pulse']
    batch_progress: Union[str, Style] = DEFAULT_STYLES['progress.description']
    time: Union[str, Style] = DEFAULT_STYLES['progress.elapsed']
    processing_speed: Union[str, Style] = DEFAULT_STYLES['progress.data.speed']
    metrics: Union[str, Style] = DEFAULT_STYLES['progress.description']
| 6,470 | 39.698113 | 135 | py |
kraken | kraken-main/kraken/lib/ctc_decoder.py | #
# Copyright 2017 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Decoders for softmax outputs of CTC trained networks.
Decoders extract label sequences out of the raw output matrix of the line
recognition network. There are multiple different approaches implemented here,
from a simple greedy decoder, to the legacy ocropy thresholding decoder, and a
more complex beam search decoder.
Extracted label sequences are converted into the code point domain using kraken.lib.codec.PytorchCodec.
"""
import collections
import numpy as np
from typing import List, Tuple
from scipy.special import logsumexp
from scipy.ndimage import measurements
from itertools import groupby
__all__ = ['beam_decoder', 'greedy_decoder', 'blank_threshold_decoder']
def beam_decoder(outputs: np.ndarray, beam_size: int = 3) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence using
    same-prefix-merge beam search decoding as described in [0].

    [0] Hannun, Awni Y., et al. "First-pass large vocabulary continuous speech
    recognition using bi-directional recurrent DNNs." arXiv preprint
    arXiv:1408.2873 (2014).

    Args:
        outputs: (C, W) shaped softmax output tensor
        beam_size: Size of the beam

    Returns:
        A list with tuples (class, start, end, prob). prob is the maximum value
        of the softmax layer in the region.
    """
    c, w = outputs.shape
    # work in log space for numerical stability; probabilities are combined
    # with logsumexp below.
    probs = np.log(outputs)
    # beam entries are (prefix, (p_blank, p_non_blank)) where a prefix is a
    # tuple of (class, start, end) triples and both probabilities are logs.
    beam = [(tuple(), (0.0, float('-inf')))] # type: List[Tuple[Tuple, Tuple[float, float]]]
    # loop over each time step
    for t in range(w):
        next_beam = collections.defaultdict(lambda: 2*(float('-inf'),)) # type: dict
        # p_b -> prob for prefix ending in blank
        # p_nb -> prob for prefix not ending in blank
        for prefix, (p_b, p_nb) in beam:
            # only update ending-in-blank-prefix probability for blank
            n_p_b, n_p_nb = next_beam[prefix]
            n_p_b = logsumexp((n_p_b, p_b + probs[0, t], p_nb + probs[0, t]))
            next_beam[prefix] = (n_p_b, n_p_nb)
            # loop over non-blank classes
            for s in range(1, c):
                # only update the not-ending-in-blank-prefix probability for prefix+s
                l_end = prefix[-1][0] if prefix else None
                n_prefix = prefix + ((s, t, t),)
                n_p_b, n_p_nb = next_beam[n_prefix]
                if s == l_end:
                    # substitute the previous non-blank-ending-prefix
                    # probability for repeated labels
                    n_p_nb = logsumexp((n_p_nb, p_b + probs[s, t]))
                else:
                    n_p_nb = logsumexp((n_p_nb, p_b + probs[s, t], p_nb + probs[s, t]))
                next_beam[n_prefix] = (n_p_b, n_p_nb)
                # If s is repeated at the end we also update the unchanged
                # prefix. This is the merging case.
                if s == l_end:
                    n_p_b, n_p_nb = next_beam[prefix]
                    n_p_nb = logsumexp((n_p_nb, p_nb + probs[s, t]))
                    # rewrite both new and old prefix positions
                    next_beam[prefix[:-1] + ((prefix[-1][0], prefix[-1][1], t),)] = (n_p_b, n_p_nb)
                    next_beam[n_prefix[:-1] + ((n_prefix[-1][0], n_prefix[-1][1], t),)] = next_beam.pop(n_prefix)
        # Sort and trim the beam before moving on to the
        # next time-step.
        beam = sorted(next_beam.items(),
                      key=lambda x: logsumexp(x[1]),
                      reverse=True)
        beam = beam[:beam_size]
    # return the best prefix; per-label confidence is the raw softmax maximum
    # over the label's time span.
    return [(c, start, end, max(outputs[c, start:end+1])) for (c, start, end) in beam[0][0]]
def greedy_decoder(outputs: np.ndarray) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence using greedy/best
    path decoding as described in [0].

    [0] Graves, Alex, et al. "Connectionist temporal classification: labelling
    unsegmented sequence data with recurrent neural networks." Proceedings of
    the 23rd international conference on Machine learning. ACM, 2006.

    Args:
        outputs: (C, W) shaped softmax output tensor

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    labels = np.argmax(outputs, 0)
    seq_len = outputs.shape[1]
    # probability of the argmax class at each time step. The previous
    # boolean-mask implementation (outputs[np.eye(C, dtype=bool)[labels].T])
    # returned the selected values in row-major (class-then-time) order,
    # misaligning confidences with time steps whenever the label sequence was
    # not monotonically non-decreasing. Integer fancy indexing preserves time
    # order.
    probs = outputs[labels, np.arange(seq_len)]
    classes = []
    # collapse runs of identical labels into (class, start, end, max_conf)
    # tuples, dropping blank (class 0) runs.
    for label, group in groupby(zip(np.arange(seq_len), labels, probs), key=lambda x: x[1]):
        lgroup = list(group)
        if label != 0:
            classes.append((label, lgroup[0][0], lgroup[-1][0], max(x[2] for x in lgroup)))
    return classes
def blank_threshold_decoder(outputs: np.ndarray, threshold: float = 0.5) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence as the original
    ocropy/clstm.

    Thresholds on class 0, then assigns the maximum (non-zero) class to each
    region.

    Args:
        outputs: (C, W) shaped softmax output tensor
        threshold: Threshold for 0 class when determining possible label
                   locations.

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    # the `scipy.ndimage.measurements` namespace is deprecated; use the
    # top-level scipy.ndimage functions instead.
    from scipy.ndimage import label as _label
    from scipy.ndimage import maximum_position as _maximum_position
    outputs = outputs.T
    # connected runs of time steps where the blank probability drops below the
    # threshold are candidate character regions.
    labels, _ = _label(outputs[:, 0] < threshold)
    mask = np.tile(labels.reshape(-1, 1), (1, outputs.shape[1]))
    # position of the overall maximum activation within each region
    maxima = _maximum_position(outputs, mask, np.arange(1, np.amax(mask)+1))
    p = 0
    start = None
    x = []
    for idx, val in enumerate(labels):
        if val != 0 and start is None:
            start = idx
            p += 1
        if val == 0 and start is not None:
            if maxima[p-1][1] == 0:
                # maximum activation in the region is the blank class: discard
                start = None
            else:
                x.append((maxima[p-1][1], start, idx, outputs[maxima[p-1]]))
                start = None
    # append last non-zero region to list if no zero region occurs after it
    if start:
        x.append((maxima[p-1][1], start, len(outputs), outputs[maxima[p-1]]))
    # drop blank-class regions; the trailing region appended above is not
    # checked before insertion. (The previous `x[0] != 0` tested the first
    # tuple in the list — always truthy — so nothing was ever filtered.)
    return [y for y in x if y[0] != 0]
| 6,701 | 39.131737 | 113 | py |
kraken | kraken-main/kraken/lib/vgsl.py | """
VGSL plumbing
"""
import re
import json
import torch
import logging
import warnings
from torch import nn
from os import PathLike
from typing import Sequence, List, Tuple, Union, Optional, Iterable, Callable, Dict, Any
from kraken.lib import layers
from kraken.lib.codec import PytorchCodec
from kraken.lib.exceptions import KrakenInvalidModelException
# filter out coreml warnings coming from their conversion routines (which we don't use).
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='has not been tested with coremltools')
warnings.filterwarnings(action='ignore', message='is not supported')
from coremltools.models import MLModel
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder
from google.protobuf.message import DecodeError
# all tensors are ordered NCHW, the "feature" dimension is C, so the output of
# an LSTM will be put into C same as the filters of a CNN.
__all__ = ['TorchVGSLModel']
logger = logging.getLogger(__name__)
class VGSLBlock(object):
    """
    Parsed representation of a single named VGSL spec block.

    Stores the block's spec string with an explicit ``{name}`` inserted
    between the layer definition letters and its numeric arguments. When no
    explicit name is given one of the form ``<layer>_<idx>`` is generated.
    """
    def __init__(self, block: str, layer: str, name: str, idx: int):
        # strip the surrounding braces from an explicit {name}, or build an
        # autogenerated one from the sanitized layer type and running index.
        if name:
            name = name[1:-1]
        else:
            sanitized = re.sub(r'\W+', '_', layer)
            name = f'{sanitized}_{idx}'
        # drop any name already embedded in the block, then re-insert the
        # canonical one just before the first numeric argument.
        stripped = re.sub(r'\{.+\}', '', block)
        parts = re.split(r'(^[^\d]+)', stripped)
        parts.insert(-1, f'{{{name}}}')
        self._block = ''.join(parts)
        self._name = name
        self._layer = layer
    def __str__(self):
        return self._block
    @property
    def name(self):
        return self._name
    @property
    def layer(self):
        return self._layer
class TorchVGSLModel(object):
"""
Class building a torch module from a VSGL spec.
The initialized class will contain a variable number of layers and a loss
function. Inputs and outputs are always 4D tensors in order (batch,
channels, height, width) with channels always being the feature dimension.
Importantly this means that a recurrent network will be fed the channel
vector at each step along its time axis, i.e. either put the non-time-axis
dimension into the channels dimension or use a summarizing RNN squashing
the time axis to 1 and putting the output into the channels dimension
respectively.
Attributes:
input: Expected input tensor as a 4-tuple.
nn: Stack of layers parsed from the spec.
criterion: Fully parametrized loss function.
user_metadata: dict with user defined metadata. Is flushed into
model file during saving/overwritten by loading
operations.
one_channel_mode: Field indicating the image type used during
training of one-channel images. Is '1' for
models trained on binarized images, 'L' for
grayscale, and None otherwise.
"""
    def __init__(self, spec: str) -> None:
        """
        Constructs a torch module from a (subset of) VSGL spec.

        Args:
            spec: Model definition similar to tesseract as follows:
                  ============ FUNCTIONAL OPS ============
                  C[T](s|t|r|l|rl|m)[{name}]<y>,<x>,<d>[,<y_stride>,<x_stride>][,<y_dilation>,<x_dilation>]
                    Convolves using a y,x window, with no shrinkage, SAME
                    infill, d outputs, with s|t|r|l|m non-linear layer,
                    T for transposed convolution.
                    (s|t|r|l|m) specifies the type of non-linearity:
                    s = sigmoid
                    t = tanh
                    r = relu
                    lr = leaky relu
                    l = linear (i.e., None)
                    m = softmax
                  L(f|r|b)(x|y)[s][{name}]<n> LSTM cell with n outputs.
                    f runs the LSTM forward only.
                    r runs the LSTM reversed only.
                    b runs the LSTM bidirectionally.
                    x runs the LSTM in the x-dimension (on data with or without the
                       y-dimension).
                    y runs the LSTM in the y-dimension (data must have a y dimension).
                    s (optional) summarizes the output in the requested dimension,
                       outputting only the final step, collapsing the dimension to a
                       single element.
                    Examples:
                      Lfx128 runs a forward-only LSTM in the x-dimension with 128
                             outputs, treating any y dimension independently.
                      Lfys64 runs a forward-only LSTM in the y-dimension with 64 outputs
                             and collapses the y-dimension to 1 element.
                  Do[{name}][<p>,<d>] Insert a dropout layer operating in
                     <d> dimensions with probability
                     <p>. Defaults to 1D with 0.5
                     probability.
                  Gn[{name}]<n> A group normalization layer with n groups
                  ============ PLUMBING OPS ============
                  [...] Execute ... networks in series (layers).
                  (...) Execute ... networks in parallel.
                  I[{name}] Identity function to build residual connections in parallel layers.
                  Mp[{name}]<y>,<x>[<y_stride>,<x_stride>] Maxpool the input, reducing the (y,x) rectangle to a
                     single vector value.
                  S[{name}]<d>(<a>x<b>)<e>,<f> Splits one dimension, moves one part to another
                     dimension.
        """
        self.spec = spec
        self.named_spec = [] # type: List[str]
        # layer constructors tried in order during parsing; the first one
        # whose regex matches a block builds the layer.
        self.ops = [self.build_addition, self.build_identity, self.build_rnn,
                    self.build_dropout, self.build_maxpool, self.build_conv,
                    self.build_output, self.build_reshape, self.build_wav2vec2,
                    self.build_groupnorm, self.build_series,
                    self.build_parallel]
        self.codec = None # type: Optional[PytorchCodec]
        self.criterion = None # type: Any
        self.nn = layers.MultiParamSequential()
        self.user_metadata = {'accuracy': [],
                              'metrics': [],
                              'seg_type': None,
                              'one_channel_mode': None,
                              'model_type': None,
                              'hyper_params': {}} # type: dict[str, Any]
        self._aux_layers = nn.ModuleDict()
        # running index used for autogenerated layer names
        self.idx = -1
        spec = spec.strip()
        if spec[0] != '[' or spec[-1] != ']':
            raise ValueError('Non-sequential models not supported')
        spec = spec[1:-1]
        blocks = spec.split(' ')
        self.named_spec.append(blocks[0])
        # the first block is the input definition: batch,height,width,channels
        pattern = re.compile(r'(\d+),(\d+),(\d+),(\d+)')
        m = pattern.match(blocks.pop(0))
        if not m:
            raise ValueError('Invalid input spec.')
        batch, height, width, channels = [int(x) for x in m.groups()]
        # internal tensor order is NCHW
        self.input = (batch, channels, height, width)
        named_spec, self.nn, self.output = self._parse(self.input, blocks)
        self.named_spec.extend(str(x) for x in named_spec)
        self.init_weights()
    def _parse(self, input: Tuple[int, int, int, int], blocks: Sequence[str], parallel=False, target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Tuple[List, torch.nn.Module, Tuple[int, int, int, int]]:
        """
        Parses VGSL spec and appends layers to nn.

        Args:
            input: Input shape (batch, channels, height, width).
            blocks: Remaining VGSL spec blocks to parse.
            parallel: Build a parallel container instead of a sequential one.
            target_output_shape: Output shape constraint propagated into
                                 parallel branches and the final layer.

        Returns:
            A tuple of (list of VGSLBlock names, the built container module,
            the container's output shape).
        """
        logger.debug('layer\t\ttype\tparams')
        named_spec = []
        if not parallel:
            nn = layers.MultiParamSequential()
        else:
            nn = layers.MultiParamParallel()
        prev_oshape = None
        channels = 0
        idx = 0
        while idx < len(blocks):
            oshape = None
            layer = None
            # try each layer constructor in turn; the first whose regex
            # matches the current block claims it and returns an output shape.
            for op in self.ops:
                oshape, name, layer = op(input, blocks, idx, target_output_shape=target_output_shape if parallel or idx == len(blocks) - 1 else None)
                if oshape:
                    break
            if oshape:
                if not parallel:
                    input = oshape
                else:
                    # all branches of a parallel block must agree on their
                    # spatial output dimensions; channel counts accumulate.
                    if prev_oshape and prev_oshape[2:] != oshape[2:]:
                        raise ValueError('Output shape in parallel block not equal!')
                    else:
                        prev_oshape = oshape
                        target_output_shape = oshape
                        channels += oshape[1]
                named_spec.extend(name) # type: ignore
                # a constructor may consume multiple blocks (series/parallel)
                idx += len(name)
                nn.add_module(' '.join(n.name for n in name), layer)
            else:
                raise ValueError('{} invalid layer definition'.format(blocks[idx]))
        if parallel:
            return named_spec, nn, (oshape[0], channels, *oshape[2:])
        else:
            return named_spec, nn, oshape
    def append(self, idx: int, spec: str) -> None:
        """
        Splits a model at layer `idx` and append layers `spec`.

        New layers are initialized using the init_weights method.

        Args:
            idx (int): Index of layer to append spec to starting with 1. To
                       select the whole layer stack set idx to None.
            spec (str): VGSL spec without input block to append to model.
        """
        # truncate the existing layer stack before appending
        self.nn = self.nn[:idx]
        self.idx = idx-1
        spec = spec[1:-1]
        blocks = spec.split(' ')
        # named_spec carries the input block at position 0, hence the +1 offset
        self.named_spec = self.named_spec[:idx+1]
        named_spec, nn, self.output = self._parse(self.nn[-1].output_shape, blocks)
        self.named_spec.extend(str(x) for x in named_spec)
        for module in nn.named_children():
            self.nn.add_module(*module)
        self.spec = '[' + ' '.join(self.named_spec) + ']'
        # only the freshly appended layers are (re-)initialized
        self.init_weights(slice(idx, -1))
def to(self, device: Union[str, torch.device]) -> None:
self.nn = self.nn.to(device)
if self.criterion:
self.criterion = self.criterion.to(device)
def eval(self) -> None:
"""
Sets the model to evaluation/inference mode, disabling dropout and
gradient calculation.
"""
self.nn.eval()
torch.set_grad_enabled(False)
def train(self) -> None:
"""
Sets the model to training mode (enables dropout layers and disables
softmax on CTC layers).
"""
self.nn.train()
# set last layer back to eval mode if not CTC output layer
# (log_softmax/softmax switch).
if not self.criterion:
self.nn[-1].eval()
torch.set_grad_enabled(True)
    def set_num_threads(self, num: int) -> None:
        """
        Sets number of OpenMP threads to use.

        Args:
            num: Number of intra-op threads handed to torch.
        """
        # NB: affects the global torch thread pool, not just this model.
        torch.set_num_threads(num)
    @classmethod
    def load_model(cls, path: Union[str, PathLike]):
        """
        Deserializes a VGSL model from a CoreML file.

        Args:
            path: CoreML file

        Returns:
            A TorchVGSLModel instance.

        Raises:
            KrakenInvalidModelException if the model data is invalid (not a
            string, protobuf file, or without appropriate metadata).
            FileNotFoundError if the path doesn't point to a file.
        """
        if isinstance(path, PathLike):
            path = path.as_posix()
        try:
            mlmodel = MLModel(path)
        except TypeError as e:
            raise KrakenInvalidModelException(str(e)) from e
        except DecodeError as e:
            raise KrakenInvalidModelException('Failure parsing model protobuf: {}'.format(str(e))) from e
        if 'vgsl' not in mlmodel.user_defined_metadata:
            raise KrakenInvalidModelException('No VGSL spec in model metadata')
        vgsl_spec = mlmodel.user_defined_metadata['vgsl']
        # rebuild the network topology from the spec, then fill in weights
        nn = cls(vgsl_spec)
        def _deserialize_layers(name, layer):
            # walks container layers recursively; leaf layers pull their own
            # weights out of the protobuf spec.
            logger.debug(f'Deserializing layer {name} with type {type(layer)}')
            if type(layer) in (layers.MultiParamParallel, layers.MultiParamSequential):
                for name, l in layer.named_children():
                    _deserialize_layers(name, l)
            else:
                layer.deserialize(name, mlmodel.get_spec())
        try:
            _deserialize_layers('', nn.nn)
        except Exception as exc:
            raise KrakenInvalidModelException('Failed parsing out layers from model weights') from exc
        if 'aux_layers' in mlmodel.user_defined_metadata:
            logger.info('Deserializing auxiliary layers.')
            nn.aux_layers = {k: cls(v).nn.get_submodule(k) for k, v in json.loads(mlmodel.user_defined_metadata['aux_layers']).items()}
        if 'codec' in mlmodel.user_defined_metadata:
            nn.add_codec(PytorchCodec(json.loads(mlmodel.user_defined_metadata['codec'])))
        # defaults for models saved before these metadata fields existed;
        # overwritten below when the file carries 'kraken_meta'.
        nn.user_metadata = {'accuracy': [],
                            'metrics': [],
                            'seg_type': 'bbox',
                            'one_channel_mode': '1',
                            'model_type': None,
                            'hyper_params': {}} # type: dict[str, str]
        if 'kraken_meta' in mlmodel.user_defined_metadata:
            nn.user_metadata.update(json.loads(mlmodel.user_defined_metadata['kraken_meta']))
        return nn
    @property
    def one_channel_mode(self):
        # '1' for models trained on binarized input, 'L' for grayscale,
        # None if unknown/not applicable.
        return self.user_metadata['one_channel_mode']
    @one_channel_mode.setter
    def one_channel_mode(self, val: str):
        if val not in ['1', 'L', None]:
            raise ValueError('one_channel_mode {} is not one of [1, L, None]'.format(val))
        self.user_metadata['one_channel_mode'] = val
    @property
    def model_type(self):
        # 'recognition' or 'segmentation' (or None when unset)
        return self.user_metadata['model_type']
    @model_type.setter
    def model_type(self, val: str):
        if val not in ['recognition', 'segmentation']:
            raise ValueError('model_type {} is not one of [recognition, segmentation]'.format(val))
        self.user_metadata['model_type'] = val
    @property
    def seg_type(self):
        # 'bbox' or 'baselines' segmentation (or None when unset)
        return self.user_metadata['seg_type']
    @seg_type.setter
    def seg_type(self, val: str):
        if val not in ['bbox', 'baselines', None]:
            raise ValueError('segmentation type {} is not one of [bbox, baselines, None]'.format(val))
        self.user_metadata['seg_type'] = val
    @property
    def hyper_params(self, **kwargs):
        # NOTE(review): **kwargs on a property getter can never be populated
        # (attribute access passes no arguments) — harmless but misleading.
        return self.user_metadata['hyper_params']
    @hyper_params.setter
    def hyper_params(self, val: Dict[str, Any]):
        # merges into the existing dict rather than replacing it
        self.user_metadata['hyper_params'].update(val)
    @property
    def aux_layers(self, **kwargs):
        # NOTE(review): **kwargs on a property getter can never be populated.
        return self._aux_layers
    @aux_layers.setter
    def aux_layers(self, val: Dict[str, torch.nn.Module]):
        # merges into the existing ModuleDict rather than replacing it
        self._aux_layers.update(val)
    def save_model(self, path: str):
        """
        Serializes the model into path.

        Args:
            path: Target destination
        """
        inputs = [('input', datatypes.Array(*self.input))]
        outputs = [('output', datatypes.Array(*self.output))]
        net_builder = NeuralNetworkBuilder(inputs, outputs)
        input = 'input'
        # CoreML serialization needs CPU tensors; the original device is
        # restored afterwards.
        prev_device = next(self.nn.parameters()).device
        try:
            self.nn.to('cpu')
            def _serialize_layer(net, input, net_builder):
                # recurse into container layers; leaf layers serialize themselves
                for name, l in net.named_children():
                    logger.debug(f'Serializing layer {name} with type {type(l)}')
                    if type(l) in (layers.MultiParamParallel, layers.MultiParamSequential):
                        _serialize_layer(l, input, net_builder)
                    else:
                        l.serialize(name, input, net_builder)
            _serialize_layer(self.nn, input, net_builder)
            if self.aux_layers:
                prev_aux_device = next(self.aux_layers.parameters()).device
                try:
                    logger.debug(f'Serializing {len(self.aux_layers)} auxiliary layers')
                    self.aux_layers.to('cpu')
                    _serialize_layer(self.aux_layers, input, net_builder)
                finally:
                    self.aux_layers.to(prev_aux_device)
            mlmodel = MLModel(net_builder.spec)
            mlmodel.short_description = 'kraken model'
            # spec, codec and kraken metadata travel in user-defined metadata
            mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']'
            if self.codec:
                mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l)
            if self.user_metadata:
                mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata)
            if self.aux_layers:
                mlmodel.user_defined_metadata['aux_layers'] = json.dumps({k: v.get_spec(k) for k, v in self.aux_layers.items()})
            mlmodel.save(path)
        finally:
            self.nn.to(prev_device)
    def add_codec(self, codec: PytorchCodec) -> None:
        """
        Adds a PytorchCodec to the model.

        Args:
            codec: Codec mapping between labels and code points; replaces any
                   previously attached codec.
        """
        self.codec = codec
    def init_weights(self, idx: slice = slice(0, None)) -> None:
        """
        Initializes weights for all or a subset of layers in the graph.

        LSTM/GRU layers are orthogonally initialized, convolutional layers
        uniformly from (-0.1,0.1).

        Args:
            idx (slice): A slice object representing the indices of layers to
                         initialize.
        """
        def _wi(m):
            if isinstance(m, torch.nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight.data)
                torch.nn.init.constant_(m.bias.data, 0)
            elif isinstance(m, torch.nn.LSTM):
                for p in m.parameters():
                    # weights
                    if p.data.dim() == 2:
                        torch.nn.init.orthogonal_(p.data)
                    # initialize biases to 1 (jozefowicz 2015)
                    # the [len/4, len/2) slice covers the forget-gate section
                    # of torch's (i, f, g, o) bias layout
                    else:
                        torch.nn.init.constant_(p.data[len(p)//4:len(p)//2], 1.0)
            elif isinstance(m, torch.nn.GRU):
                for p in m.parameters():
                    torch.nn.init.orthogonal_(p.data)
            elif isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.ConvTranspose2d):
                for p in m.parameters():
                    torch.nn.init.uniform_(p.data, -0.1, 0.1)
        self.nn[idx].apply(_wi)
def resize_output(self, output_size: int, del_indices: Optional[Iterable] = None) -> None:
"""
Resizes an output layer.
Args:
output_size (int): New size/output channels of last layer
del_indices (list): list of outputs to delete from layer
"""
if type(self.nn[-1]) not in [layers.ActConv2D, layers.LinSoftmax]:
raise ValueError('last layer is neither linear nor convolutional layer')
logger.debug('Resizing output layer to {}'.format(output_size))
self.nn[-1].resize(output_size, del_indices)
pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)')
m = pattern.match(self.named_spec[-1])
if not m:
raise ValueError('Output specification is not parsable')
aug = m.group('aug') if m.group('aug') else ''
self.named_spec[-1] = 'O{}{}{}{}{}'.format(m.group('name'), m.group('dim'), m.group('type'), aug, output_size)
self.spec = '[' + ' '.join(self.named_spec) + ']'
def build_rnn(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
"""
Builds an LSTM/GRU layer returning number of outputs and layer.
"""
pattern = re.compile(r'(?P<type>L|G)(?P<dir>f|r|b)(?P<dim>x|y)(?P<sum>s)?(?P<legacy>c|o)?(?P<name>{\w+})?(?P<out>\d+)')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
type = m.group('type')
direction = m.group('dir')
dim = m.group('dim') == 'y'
summarize = m.group('sum') == 's'
legacy = None
if m.group('legacy') == 'c':
legacy = 'clstm'
elif m.group('legacy') == 'o':
legacy = 'ocropy'
hidden = int(m.group(7))
fn = layers.TransposedSummarizingRNN(input[1], hidden, direction, dim, summarize, legacy)
self.idx += 1
logger.debug(f'{self.idx}\t\trnn\tdirection {direction} transposed {dim} '
f'summarize {summarize} out {hidden} legacy {legacy}')
return fn.get_shape(input), [VGSLBlock(blocks[idx], type, m.group('name'), self.idx)], fn
def build_dropout(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
pattern = re.compile(r'(?P<type>Do)(?P<name>{\w+})?(?P<p>(\d+(\.\d*)?|\.\d+))?(,(?P<dim>\d+))?')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
prob = float(m.group('p')) if m.group('p') else 0.5
dim = int(m.group('dim')) if m.group('dim') else 1
fn = layers.Dropout(prob, dim)
self.idx += 1
logger.debug('{}\t\tdropout\tprobability {} dims {}'.format(self.idx, prob, dim))
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_addition(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
pattern = re.compile(r'(?P<type>A)(?P<name>{\w+})?(?P<dim>\d+),(?P<chunk_size>\d+)')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
dim_map = {0: 0, 1: 2, 2: 3, 3: 1}
dim = int(m.group('dim'))
chunk_size = int(m.group('chunk_size'))
if dim > 3:
raise ValueError(f'Invalid dimension {dim} in addition block')
dim = dim_map[dim]
fn = layers.Addition(dim=dim, chunk_size=chunk_size)
self.idx += 1
logger.debug(f'{self.idx}\t\taddition dim: {dim} chunk: {chunk_size}')
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_identity(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
pattern = re.compile(r'(?P<type>I)(?P<name>{\w+})?')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
fn = layers.Identity()
self.idx += 1
logger.debug(f'{self.idx}\t\tidentity')
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_groupnorm(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
pattern = re.compile(r'(?P<type>Gn)(?P<name>{\w+})?(?P<groups>\d+)')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
groups = int(m.group('groups'))
fn = layers.GroupNorm(input[1], groups)
self.idx += 1
logger.debug('{}\t\tgroupnorm\tgroups {}'.format(self.idx, groups))
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_wav2vec2(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
"""
Builds a Wav2Vec2 masking layer.
"""
pattern = re.compile(r'(?P<type>W)(?P<name>{\w+})(?P<final_dim>\d+),(?P<mask_width>\d+),(?P<mask_prob>(\d+(\.\d*)?|\.\d+)),(?P<num_negatives>\d+)')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
final_dim = int(m.group('final_dim'))
mask_width = int(m.group('mask_width'))
mask_prob = float(m.group('mask_prob'))
num_negatives = int(m.group('num_negatives'))
from kraken.lib import pretrain
fn = pretrain.layers.Wav2Vec2Mask(input[1], final_dim, mask_width, mask_prob, num_negatives)
self.idx += 1
logger.debug(f'{self.idx}\t\twav2vec2\tmask width {mask_width}, prob '
f'{mask_prob}, negative samples {num_negatives}')
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_conv(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
"""
Builds a 2D convolution layer.
"""
pattern = re.compile(r'(?P<type>C)(?P<trans>T)?(?P<nl>s|t|r|l|lr|m)(?P<name>{\w+})?(\d+),'
r'(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?(,(?P<dilation_y>\d+),(?P<dilation_x>\d+))?')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
transposed = m.group('trans') is not None
kernel_size = (int(m.group(5)), int(m.group(6)))
filters = int(m.group('out'))
stride = (int(m.group('stride_y')), int(m.group('stride_x'))) if m.group('stride_x') else (1, 1)
dilation = (int(m.group('dilation_y')), int(m.group('dilation_x'))) if m.group('dilation_x') else (1, 1)
nl = m.group('nl')
fn = layers.ActConv2D(input[1], filters, kernel_size, stride, nl, dilation, transposed)
self.idx += 1
logger.debug(f'{self.idx}\t\t{"transposed " if transposed else ""}conv\tkernel {kernel_size[0]} x {kernel_size[1]} '
f'filters {filters} stride {stride} dilation {dilation} activation {nl}')
return fn.get_shape(input, target_output_shape), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_maxpool(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
"""
Builds a maxpool layer.
"""
pattern = re.compile(r'(?P<type>Mp)(?P<name>{\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
kernel_size = (int(m.group(3)), int(m.group(4)))
stride = (kernel_size[0] if not m.group(5) else int(m.group(5)),
kernel_size[1] if not m.group(6) else int(m.group(6)))
fn = layers.MaxPool(kernel_size, stride)
self.idx += 1
logger.debug(f'{self.idx}\t\tmaxpool\tkernel {kernel_size[0]} x {kernel_size[1]} stride {stride[0]} x {stride[1]}')
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
def build_reshape(self,
input: Tuple[int, int, int, int],
blocks: List[str],
idx: int,
target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
"""
Builds a reshape layer
"""
pattern = re.compile(r'(?P<type>S)(?P<name>{\w+})?(?P<dim>\d+)\((?P<part_a>\d+)x'
r'(?P<part_b>\d+)\)(?P<high>\d+),(?P<low>\d+)')
m = pattern.match(blocks[idx])
if not m:
return None, None, None
src_dim = int(m.group('dim'))
part_a = int(m.group('part_a'))
part_b = int(m.group('part_b'))
high = int(m.group('high'))
low = int(m.group('low'))
dim_map = {0: 0, 1: 2, 2: 3, 3: 1}
if part_a == 0:
part_a = -1
elif part_b == 0:
part_b = -1
if src_dim != high and src_dim != low:
raise ValueError('Either high ({}) or low ({}) must be source dimension ({})'.format(high, low, src_dim))
if part_a == 0 or part_b == 0:
raise ValueError('Expected non-zero size for part_a ({}) or part_b ({})'.format(part_a, part_b))
if part_a == -1 and part_b == -1:
raise ValueError('Only one size may be -1')
self.idx += 1
logger.debug('{}\t\treshape from {} {} x {} to {}/{}'.format(self.idx, src_dim, part_a, part_b, high, low))
src_dim = dim_map[src_dim]
high = dim_map[high]
low = dim_map[low]
fn = layers.Reshape(src_dim, part_a, part_b, high, low)
return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
    def build_output(self,
                     input: Tuple[int, int, int, int],
                     blocks: List[str],
                     idx: int,
                     target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
        """
        Builds an output layer.

        Side effect: sets ``self.criterion`` to the loss matching the output
        type (``BCEWithLogitsLoss`` for ``l``/``s`` outputs, ``CTCLoss`` for
        ``c`` outputs).

        Returns:
            ``(output_shape, [VGSLBlock], layer)`` or ``(None, None, None)``
            if the block is not an output spec.
        """
        pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)')
        m = pattern.match(blocks[idx])
        if not m:
            return None, None, None
        dim = int(m.group('dim'))
        nl = m.group('type')
        outdim = int(m.group('out'))
        if dim == 0:
            raise ValueError('categorical output not supported, yet.')
        if nl == 'c' and dim == 2:
            raise ValueError('CTC not supported for heatmap output')
        if nl in ['l', 's'] and int(m.group('out')) >= 1:
            self.criterion = nn.BCEWithLogitsLoss()
        elif nl == 'c':
            self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)
        else:
            raise ValueError('unsupported output specification')
        # heatmap output
        if dim == 2:
            # 2D output: 1x1 convolution producing a per-pixel map; sigmoid
            # activation for 'l' outputs, softmax otherwise
            act = 's' if nl == 'l' else 'm'
            fn = layers.ActConv2D(input[1], outdim, (1, 1), (1, 1), act)
            self.idx += 1
            logger.debug('{}\t\tconv\tkernel 1 x 1 filters {} stride 1 activation {}'.format(self.idx, outdim, nl))
            # NOTE(review): this branch passes m.group('type') (the
            # nonlinearity letter) as the block type while the linear branch
            # below passes m.group(1) ('O') — confirm the asymmetry is
            # intended.
            return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn
        else:
            # 1D output: linear projection (optionally with the augmentation
            # flag forwarded to LinSoftmax)
            aug = True if m.group('aug') else False
            lin = layers.LinSoftmax(input[1], int(m.group('out')), aug)
            self.idx += 1
            logger.debug('{}\t\tlinear\taugmented {} out {}'.format(self.idx, aug, m.group('out')))
            return lin.get_shape(input), [VGSLBlock(blocks[idx], m.group(1), m.group('name'), self.idx)], lin
def _bracket_count(self, block: str) -> int:
rst = 0
for c in block:
if c == "[":
rst += 1
elif c != "(":
break
for c in block[::-1]:
if c == "]":
rst -= 1
elif c != ")":
break
return rst
def _parenthesis_count(self, block: str) -> int:
rst = 0
for c in block:
if c == "(":
rst += 1
elif c != "[":
break
for c in block[::-1]:
if c == ")":
rst -= 1
elif c != "]":
break
return rst
    def build_series(self,
                     input: Tuple[int, int, int, int],
                     blocks: List[str],
                     idx: int,
                     target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
        """
        Builds a serial block of layers.

        Locates the spec block closing the '[' opened at `idx`, strips the
        outer brackets, recursively parses the enclosed blocks, and then
        reattaches the bracket characters to the first/last parsed block so
        the spec string round-trips through serialization.
        """
        if not blocks[idx] or blocks[idx][0] != '[':
            return None, None, None
        # single layer in serial block
        if blocks[idx][0] == '[' and blocks[idx][-1] == ']':
            named_spec, nn, oshape = self._parse(input, [blocks[idx][1:-1]])
            named_spec[0]._block = '[' + named_spec[0]._block + ']'
            return oshape, named_spec, nn
        # multiple layers in serial block
        block_depth = 0
        # scan forward for the block that brings the bracket depth back to 0
        for bl_idx, block in enumerate(blocks[idx:]):
            block_depth += self._bracket_count(block)
            if block_depth == 0:
                break
        if block_depth:
            raise ValueError('Unbalanced parentheses in VGSL spec')
        # strip the outer '['/']' from the first/last sub-block and recurse
        named_spec, nn, oshape = self._parse(input, [blocks[idx][1:]] + blocks[idx+1:idx+bl_idx] + [blocks[idx+bl_idx][:-1]], target_output_shape=target_output_shape)
        named_spec[0]._block = '[' + named_spec[0]._block
        named_spec[-1]._block = named_spec[-1]._block + ']'
        return oshape, named_spec, nn
    def build_parallel(self,
                       input: Tuple[int, int, int, int],
                       blocks: List[str],
                       idx: int,
                       target_output_shape: Optional[Tuple[int, int, int, int]] = None) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
        """
        Builds a block of parallel layers.

        Mirrors :meth:`build_series` for '(' ... ')' groups: finds the
        closing block, strips the outer parentheses, parses the enclosed
        blocks with ``parallel=True``, and reattaches the parenthesis
        characters for spec round-tripping.
        """
        if not blocks[idx] or blocks[idx][0] != '(':
            return None, None, None
        # single layer in parallel block
        if blocks[idx][0] == '(' and blocks[idx][-1] == ')':
            named_spec, nn, oshape = self._parse(input, [blocks[idx][1:-1]], parallel=True)
            named_spec[0]._block = '(' + named_spec[0]._block + ')'
            return oshape, named_spec, nn
        block_depth = 0
        # scan forward for the block that brings the parenthesis depth to 0
        for bl_idx, block in enumerate(blocks[idx:]):
            block_depth += self._parenthesis_count(block)
            if block_depth == 0:
                break
        if block_depth:
            raise ValueError('Unbalanced parentheses in VGSL spec')
        # strip the outer '('/')' from the first/last sub-block and recurse
        named_spec, nn, oshape = self._parse(input, [blocks[idx][1:]] + blocks[idx+1:idx+bl_idx] + [blocks[idx+bl_idx][:-1]], parallel=True, target_output_shape=target_output_shape)
        named_spec[0]._block = '(' + named_spec[0]._block
        named_spec[-1]._block = named_spec[-1]._block + ')'
        return oshape, named_spec, nn
| 35,478 | 43.740227 | 181 | py |
kraken | kraken-main/kraken/lib/layers.py | """
Layers for VGSL models
"""
import torch
import numpy as np
from typing import List, Tuple, Optional, Iterable
from torch.nn import Module, Sequential
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from coremltools.proto import NeuralNetwork_pb2
# all tensors are ordered NCHW, the "feature" dimension is C, so the output of
# an LSTM will be put into C same as the filters of a CNN.
__all__ = ['Addition', 'MaxPool', 'Reshape', 'Dropout', 'TransposedSummarizingRNN', 'LinSoftmax', 'ActConv2D']
class MultiParamSequential(Sequential):
    """
    Sequential container whose children may accept and return multiple
    values.

    A tuple flowing through the chain is splatted into the next module;
    any other value is passed as a single argument. Only the final module
    receives the requested `output_shape`; intermediate modules get `None`.
    """
    def forward(self, *inputs, output_shape: Optional[Tuple[int, int]] = None):
        children = list(self._modules.values())
        last = len(children) - 1
        for pos, module in enumerate(children):
            shape = output_shape if pos == last else None
            if type(inputs) is tuple:
                inputs = module(*inputs, output_shape=shape)
            else:
                inputs = module(inputs, output_shape=shape)
        return inputs
class MultiParamParallel(Module):
    """
    Parallel container applying every child module to the same input and
    concatenating the results along the channel dimension.

    The first child's output shape seeds `output_shape` for the remaining
    children when none was requested.
    """
    def forward(self, *inputs, output_shape: Optional[Tuple[int, int]] = None):
        results = []
        seq_lens = None
        for module in self._modules.values():
            if type(inputs) is tuple:
                out, seq_lens = module(*inputs, output_shape=output_shape)
                results.append(out)
            else:
                results.append(module(inputs, output_shape=output_shape))
            if output_shape is None:
                output_shape = results[-1].shape[2:]
        return torch.cat(results, dim=1), seq_lens
def PeepholeLSTMCell(input: torch.Tensor,
                     hidden: Tuple[torch.Tensor, torch.Tensor],
                     w_ih: torch.Tensor,
                     w_hh: torch.Tensor,
                     w_ip: torch.Tensor,
                     w_fp: torch.Tensor,
                     w_op: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    An LSTM cell with peephole connections without biases.

    Mostly ripped from the pytorch autograd lstm implementation.

    Args:
        input: input at the current time step.
        hidden: `(h, c)` state from the previous step.
        w_ih: input-hidden weight matrix (4 gates stacked: i, f, g, o).
        w_hh: hidden-hidden weight matrix (same stacking).
        w_ip: peephole weight vector for the input gate.
        w_fp: peephole weight vector for the forget gate.
        w_op: peephole weight vector for the output gate.

    Returns:
        The new `(h, c)` state.
    """
    hx, cx = hidden
    gates = F.linear(input, w_ih) + F.linear(hx, w_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    peep_i = w_ip.unsqueeze(0).expand_as(cx) * cx
    ingate = ingate + peep_i
    peep_f = w_fp.unsqueeze(0).expand_as(cx) * cx
    forgetgate = forgetgate + peep_f
    # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh
    ingate = torch.sigmoid(ingate)
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    cy = (forgetgate * cx) + (ingate * cellgate)
    peep_o = w_op.unsqueeze(0).expand_as(cy) * cy
    outgate = outgate + peep_o
    # NOTE(review): no sigmoid is applied to outgate here — this matches the
    # original implementation (and any legacy weights trained with it), even
    # though a textbook peephole LSTM would squash the output gate; kept
    # unchanged for model compatibility.
    hy = outgate * torch.tanh(cy)
    return hy, cy
def StackedRNN(inners, num_layers: int, num_directions: int):
    """
    Returns a closure running the per-direction cell closures in `inners`
    over `num_layers` stacked layers, concatenating the direction outputs
    along the feature dimension after each layer.

    NOTE(review): the `num_directions` argument is immediately overwritten
    by `len(inners)` and effectively ignored; kept for signature
    compatibility.
    """
    num_directions = len(inners)
    total_layers = num_layers * num_directions
    def forward(input, hidden, weight):
        next_hidden = []
        for i in range(num_layers):
            all_output = []
            for j, inner in enumerate(inners):
                # flat index of (layer, direction) into hidden/weight lists
                length = i * num_directions + j
                hy, output = inner(input, hidden[length], weight[length])
                next_hidden.append(hy)
                all_output.append(output)
            # concatenated direction outputs feed the next layer
            input = torch.cat(all_output, input.dim() - 1)
        # regroup per-layer (h, c) pairs into stacked h and c tensors
        next_h, next_c = zip(*next_hidden)
        next_hidden = [
            torch.cat(next_h, 0).view(total_layers, *next_h[0].size()),
            torch.cat(next_c, 0).view(total_layers, *next_c[0].size())
        ]
        return next_hidden, input
    return forward
def Recurrent(inner, reverse: bool = False):
    """
    Wraps a cell function into a closure unrolling it over the first
    (time) dimension of its input.

    Args:
        inner: cell taking ``(input_t, hidden, *weights)`` and returning
            the next hidden state (an ``(h, c)`` tuple for LSTM cells).
        reverse: process the sequence back-to-front.
    """
    def forward(input, hidden, weight):
        recorded = []
        time_steps = range(input.size(0))
        if reverse:
            time_steps = reversed(time_steps)
        for t in time_steps:
            hidden = inner(input[t], hidden, *weight)
            # LSTM cells return (h, c); only h is recorded as output
            recorded.append(hidden[0] if isinstance(hidden, tuple) else hidden)
        if reverse:
            recorded.reverse()
        stacked = torch.cat(recorded, 0).view(input.size(0), *recorded[0].size())
        return hidden, stacked
    return forward
class PeepholeBidiLSTM(Module):
    """
    A bidirectional, single-layer LSTM with peephole connections and no
    biases, implemented on top of :func:`PeepholeLSTMCell`. Used for
    legacy ('ocropy') model compatibility.
    """
    def __init__(self, input_size: int, hidden_size: int) -> None:
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self._all_weights = []  # type: List[List[str]]
        # gates are stacked (i, f, g, o), hence 4 * hidden_size rows
        gate_size = 4 * hidden_size
        for direction in range(2):
            w_ih = torch.nn.Parameter(torch.Tensor(gate_size, input_size))
            w_hh = torch.nn.Parameter(torch.Tensor(gate_size, hidden_size))
            # peephole weight vectors (input/forget/output gates)
            w_ci = torch.nn.Parameter(torch.Tensor(hidden_size))
            w_cf = torch.nn.Parameter(torch.Tensor(hidden_size))
            w_co = torch.nn.Parameter(torch.Tensor(hidden_size))
            layer_params = (w_ih, w_hh, w_ci, w_cf, w_co)
            suffix = '_reverse' if direction == 1 else ''
            param_names = ['weight_ih_l0{}', 'weight_hh_l0{}', 'weight_ip_l0{}', 'weight_fp_l0{}', 'weight_op_l0{}']
            param_names = [x.format(suffix) for x in param_names]
            # register parameters under pytorch-LSTM-style attribute names
            for name, param in zip(param_names, layer_params):
                setattr(self, name, param)
            self._all_weights.append(param_names)
    def forward(self, input: torch.Tensor, output_shape: Optional[List[int]] = None) -> torch.Tensor:
        # one Recurrent closure per direction, stacked into a single layer
        layer = (Recurrent(PeepholeLSTMCell), Recurrent(PeepholeLSTMCell, reverse=True))
        func = StackedRNN(layer, 1, 2)
        # batch-first -> time-first for the recurrence
        input = input.transpose(0, 1)
        hidden = (torch.zeros(2, input.shape[1], self.hidden_size).to(input.device),
                  torch.zeros(2, input.shape[1], self.hidden_size).to(input.device))
        hidden, output = func(input, hidden, self.all_weights)
        output = output.transpose(0, 1)
        return output, hidden
    @property
    def all_weights(self):
        # resolve the registered parameter names back to tensors
        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
class Addition(Module):
    """
    Chunkwise addition module.

    Splits dimension `dim` into consecutive chunks of `chunk_size` and
    sums them elementwise, reducing that dimension to `chunk_size`.
    """
    def __init__(self, dim: int, chunk_size: int) -> None:
        """
        Args:
            dim: dimension to fold and sum over.
            chunk_size: size of the chunks that are summed elementwise.

        Shape:
            - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels,
              `H` height, and `W` width.
            - Outputs: :math:`(N, C, H, W)` with dimension `dim` reduced to
              `chunk_size`.
        """
        super().__init__()
        self.dim = dim
        self.chunk_size = chunk_size

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[List[int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # fold dim into windows of chunk_size, sum the windows, then move
        # the window axis back into dim's position
        chunks = inputs.unfold(self.dim, self.chunk_size, self.chunk_size)
        summed = chunks.sum(self.dim, keepdim=True)
        summed = summed.transpose(-1, self.dim).squeeze(-1)
        return summed, seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        """
        Calculates the output shape from input 4D tuple NCHW.
        """
        shape = list(input)
        shape[self.dim] = self.chunk_size
        self.output_shape = tuple(shape)
        return self.output_shape

    def deserialize(self, name, spec):
        """
        Deserialization is a no-op: the layer has no weights.
        """
        pass

    def serialize(self, name, input, builder):
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = 'addition'
        params.description = 'An addition layer'
        params.parameters['dim'].intValue = self.dim
        params.parameters['chunk_size'].intValue = self.chunk_size
        builder.add_custom(name,
                           input_names=[input],
                           output_names=[name],
                           custom_proto_spec=params)
        return name
class Identity(Module):
    """
    A placeholder identity operator, mostly used for residual connections
    and similar pass-through wiring.
    """
    def __init__(self) -> None:
        """
        Shape:
            - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels,
              `H` height, and `W` width.
            - Outputs: the inputs, unchanged.
        """
        super().__init__()

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        return inputs, seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        # identity: shape passes through unchanged
        self.output_shape = input
        return input

    def deserialize(self, name, spec):
        """
        Deserialization is a no-op: the layer has no weights.
        """
        pass

    def serialize(self, name, input, builder):
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = 'identity'
        params.description = 'An identity layer'
        builder.add_custom(name,
                           input_names=[input],
                           output_names=[name],
                           custom_proto_spec=params)
        return name
class Reshape(Module):
    """
    Reshapes input and moves it into other dimensions.
    """
    def __init__(self, src_dim: int, part_a: int, part_b: int, high: int, low: int) -> None:
        """
        A wrapper around reshape with serialization and layer arithmetic.
        Args:
            src_dim (int): Source dimension
            part_a (int): Size of split dim to move to `high`
            part_b (int): Size of split dim to move to `low`
            high (int): Target dimension 1
            low (int): Target dimension 2
        Shape:
            - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H`
              height, and `W` width.
            - Outputs output :math:`(N, C, H, W)`
        """
        super().__init__()
        self.src_dim = src_dim
        self.part_a = part_a
        self.part_b = part_b
        self.high = high
        self.low = low
    def forward(self, input: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        initial_len = input.shape[3]
        # split dimension src_dim into part_a x part_b
        input = input.reshape(input.shape[:self.src_dim] + (self.part_a, self.part_b) + input.shape[self.src_dim + 1:])
        # determine which half of the split has to travel: if `high` is a
        # different dimension, part_a moves there; otherwise part_b (now at
        # src_dim + 1) moves to `low`
        dest = self.low
        src_dim = self.src_dim
        if self.high != src_dim:
            dest = self.high
        else:
            src_dim += 1
        # rotate dimension permutation list
        perm = list(range(len(input.shape)))
        step = 1 if dest > src_dim else -1
        for x in range(src_dim, dest, step):
            perm[x], perm[x + step] = perm[x + step], perm[x]
        input = input.permute(perm)
        # merge the moved part into its destination dimension
        o = input.reshape(input.shape[:dest] + (input.shape[dest] * input.shape[dest + 1],) + input.shape[dest + 2:])
        if seq_len is not None:
            # rescale sequence lengths by the change of the width dimension
            seq_len = (seq_len * (float(initial_len)/o.shape[3])).int()
        return o, seq_len
    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        # shape arithmetic by actually running forward on a dummy tensor
        # (zero-sized dims are replaced by 1)
        input_shape = torch.zeros([x if x else 1 for x in input])
        with torch.no_grad():
            o, _ = self.forward(input_shape)
        self.output_shape = tuple(o.shape)
        return self.output_shape  # type: ignore
    def deserialize(self, name, spec):
        """
        Noop for deserialization
        """
        pass
    def serialize(self, name: str, input: str, builder) -> str:
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = 'reshape'
        params.description = 'A generalized reshape layer'
        params.parameters['src_dim'].intValue = self.src_dim
        params.parameters['part_a'].intValue = self.part_a
        params.parameters['part_b'].intValue = self.part_b
        params.parameters['high'].intValue = self.high
        params.parameters['low'].intValue = self.low
        builder.add_custom(name,
                           input_names=[input],
                           output_names=[name],
                           custom_proto_spec=params)
        return name
class MaxPool(Module):
    """
    2D max pooling with shape arithmetic and CoreML serialization.
    """
    def __init__(self, kernel_size: Tuple[int, int], stride: Tuple[int, int]) -> None:
        """
        Args:
            kernel_size: (height, width) of the pooling window.
            stride: (height, width) step of the pooling window.
        """
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.layer = torch.nn.MaxPool2d(kernel_size, stride)

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        pooled = self.layer(inputs)
        if seq_len is not None:
            # standard pooling output-length formula (no padding, dilation 1)
            seq_len = torch.floor((seq_len - (self.kernel_size[1] - 1) - 1).float() / self.stride[1] + 1).int()
        return pooled, seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        def _pooled_extent(size, kernel, stride):
            # zero-sized (unknown) dimensions stay zero
            return int(np.floor((size - (kernel - 1) - 1) / stride + 1) if size != 0 else 0)
        self.output_shape = (input[0],
                             input[1],
                             _pooled_extent(input[2], self.kernel_size[0], self.stride[0]),
                             _pooled_extent(input[3], self.kernel_size[1], self.stride[1]))
        return self.output_shape

    def deserialize(self, name, spec) -> None:
        """
        Deserialization is a no-op: pooling has no weights.
        """
        pass

    def serialize(self, name: str, input: str, builder) -> str:
        builder.add_pooling(name,
                            self.kernel_size[0],
                            self.kernel_size[1],
                            self.stride[0],
                            self.stride[1],
                            layer_type='MAX',
                            padding_type='SAME',
                            input_name=input,
                            output_name=name)
        return name
class Dropout(Module):
    """
    Dropout wrapper supporting element-wise (dim=1) and channel-wise
    (dim=2) dropout with serialization and layer arithmetic.
    """
    def __init__(self, p: float, dim: int) -> None:
        """
        Args:
            p: dropout probability.
            dim: 1 for element-wise dropout, 2 for channel (2D) dropout.
        """
        super().__init__()
        self.p = p
        self.dim = dim
        if dim == 1:
            self.layer = torch.nn.Dropout(p)
        elif dim == 2:
            self.layer = torch.nn.Dropout2d(p)

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        return self.layer(inputs), seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        # dropout never alters the tensor shape
        self.output_shape = input
        return input

    def deserialize(self, name, spec):
        """
        Deserialization is a no-op: dropout has no weights.
        """
        pass

    def serialize(self, name, input, builder):
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = 'dropout'
        params.description = 'An n-dimensional dropout layer'
        params.parameters['dim'].intValue = self.dim
        params.parameters['p'].doubleValue = self.p
        builder.add_custom(name,
                           input_names=[input],
                           output_names=[name],
                           custom_proto_spec=params)
        return name
class TransposedSummarizingRNN(Module):
    """
    An RNN wrapper allowing time axis transpositions and other
    """
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 direction: str = 'b',
                 transpose: bool = True,
                 summarize: bool = True,
                 legacy: Optional[str] = None) -> None:
        """
        A wrapper around torch.nn.LSTM optionally transposing inputs and
        returning only the last column of output.
        Args:
            input_size:
            hidden_size:
            direction (str):
            transpose (bool): Transpose width/height dimension
            summarize (bool): Only return the last time step.
            legacy (str): Set to `clstm` for clstm rnns and `ocropy` for ocropus models.
        Shape:
            - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H`
              height, and `W` width.
            - Outputs output :math:`(N, hidden_size * num_directions, H, S)`
              S (or H) being 1 if summarize (and transpose) are true
        """
        super().__init__()
        self.transpose = transpose
        self.summarize = summarize
        self.legacy = legacy
        self.input_size = input_size
        # legacy models carry an extra constant-one input feature
        if self.legacy is not None:
            self.input_size += 1
        self.hidden_size = hidden_size
        self.bidi = direction == 'b'
        self.output_size = hidden_size if not self.bidi else 2*hidden_size
        if legacy == 'ocropy':
            self.layer = PeepholeBidiLSTM(self.input_size, hidden_size)
        else:
            # legacy (clstm) models have no biases
            self.layer = torch.nn.LSTM(self.input_size,
                                       hidden_size,
                                       bidirectional=self.bidi,
                                       batch_first=True,
                                       bias=False if legacy else True)
    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None, output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        # NCHW -> HNWC
        inputs = inputs.permute(2, 0, 3, 1)
        if self.transpose:
            # HNWC -> WNHC
            inputs = inputs.transpose(0, 2)
        if self.legacy is not None:
            # prepend the constant-one feature expected by legacy weights
            # NOTE(review): created on the default device — probably needs
            # `device=inputs.device` to work on GPU inputs; confirm.
            ones = torch.ones(inputs.shape[:3] + (1,))
            inputs = torch.cat([ones, inputs], dim=3)
        # HNWC -> (H*N)WC
        siz = inputs.size()
        inputs = inputs.contiguous().view(-1, siz[2], siz[3])
        if not self.transpose and seq_len is not None:
            # pack variable-length sequences; only valid for height-1 input
            if inputs.shape[0] != len(seq_len):
                raise Exception(f'Height has to be 1 (not f{inputs.shape[0]} for batching/multi-sequences.')
            seq_len = seq_len.cpu()
            inputs = pack_padded_sequence(inputs, seq_len, batch_first=True, enforce_sorted=False)
        # (H*N)WO
        o, _ = self.layer(inputs)
        if not self.transpose and seq_len is not None:
            o, seq_len = pad_packed_sequence(o, batch_first=True)
        # resize to HNWO
        o = o.view(siz[0], siz[1], siz[2], self.output_size)
        if self.summarize:
            # HN1O
            o = o[:, :, -1, :].unsqueeze(2)
        if self.transpose:
            o = o.transpose(0, 2)
        # HNWO -> NOHW
        if seq_len is not None and seq_len.max() > o.shape[2]:
            raise Exception('Do not use summarizing layer in x-axis with batching/sequences')
        return o.permute(1, 3, 0, 2), seq_len
    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        """
        Calculates the output shape from input 4D tuple (batch, channel, input_size, seq_len).
        """
        if self.summarize:
            if self.transpose:
                layer = (1, input[3])
            else:
                layer = (input[2], 1)
        else:
            layer = (input[2], input[3])
        self.output_shape = (input[0], self.output_size) + layer
        return self.output_shape  # type: ignore
    def deserialize(self, name: str, spec) -> None:
        """
        Sets the weights of an initialized layer from a coreml spec.
        """
        nn = [x for x in spec.neuralNetwork.layers if x.name == name][0]
        arch = nn.WhichOneof('layer')
        layer = getattr(nn, arch)
        if arch == 'permute':
            # serialized transposing models prepend a permute layer; the
            # actual LSTM is the layer consuming its output
            nn = [x for x in spec.neuralNetwork.layers if x.input == nn.output][0]
            arch = nn.WhichOneof('layer')
            layer = getattr(nn, arch)
        def _deserialize_weights(params, layer, direction):
            # ih_matrix
            weight_ih = torch.Tensor([params.inputGateWeightMatrix.floatValue,  # wi
                                      params.forgetGateWeightMatrix.floatValue,  # wf
                                      params.blockInputWeightMatrix.floatValue,  # wz/wg
                                      params.outputGateWeightMatrix.floatValue])  # wo
            # hh_matrix
            weight_hh = torch.Tensor([params.inputGateRecursionMatrix.floatValue,  # wi
                                      params.forgetGateRecursionMatrix.floatValue,  # wf
                                      params.blockInputRecursionMatrix.floatValue,  # wz/wg
                                      params.outputGateRecursionMatrix.floatValue])  # wo
            if direction == 'fwd':
                layer.weight_ih_l0 = torch.nn.Parameter(weight_ih.resize_as_(layer.weight_ih_l0.data))
                layer.weight_hh_l0 = torch.nn.Parameter(weight_hh.resize_as_(layer.weight_hh_l0.data))
            elif direction == 'bwd':
                layer.weight_ih_l0_reverse = torch.nn.Parameter(weight_ih.resize_as_(layer.weight_ih_l0.data))
                layer.weight_hh_l0_reverse = torch.nn.Parameter(weight_hh.resize_as_(layer.weight_hh_l0.data))
        def _deserialize_biases(params, layer, direction):
            # ih biases
            biases = torch.Tensor([params.inputGateBiasVector.floatValue,  # bi
                                   params.forgetGateBiasVector.floatValue,  # bf
                                   params.blockInputBiasVector.floatValue,  # bz/bg
                                   params.outputGateBiasVector.floatValue])  # bo
            if direction == 'fwd':
                layer.bias_hh_l0 = torch.nn.Parameter(biases.resize_as_(layer.bias_hh_l0.data))
                # no ih_biases
                layer.bias_ih_l0 = torch.nn.Parameter(torch.zeros(layer.bias_ih_l0.size()))
            elif direction == 'bwd':
                layer.bias_hh_l0_reverse = torch.nn.Parameter(biases.resize_as_(layer.bias_hh_l0.data))
                # no ih_biases
                layer.bias_ih_l0_reverse = torch.nn.Parameter(torch.zeros(layer.bias_ih_l0.size()))
        fwd_params = layer.weightParams if arch == 'uniDirectionalLSTM' else layer.weightParams[0]
        _deserialize_weights(fwd_params, self.layer, 'fwd')
        if not self.legacy:
            _deserialize_biases(fwd_params, self.layer, 'fwd')
        # get backward weights
        if arch == 'biDirectionalLSTM':
            bwd_params = layer.weightParams[1]
            _deserialize_weights(bwd_params, self.layer, 'bwd')
            if not self.legacy:
                _deserialize_biases(bwd_params, self.layer, 'bwd')
    def serialize(self, name: str, input: str, builder) -> str:
        """
        Serializes the module using a NeuralNetworkBuilder.
        """
        # coreml weight order is IFOG while pytorch uses IFGO
        # it also uses a single bias while pytorch splits them for some reason
        def _reorder_indim(tensor, splits=4, idx=[0, 1, 3, 2]):
            """
            Splits the first dimension into `splits` chunks, reorders them
            according to idx, and convert them to a numpy array.
            """
            s = tensor.chunk(splits)
            return [s[i].data.numpy() for i in idx]
        if self.transpose:
            # emit an explicit permute layer ahead of the LSTM mirroring the
            # transpose performed in forward()
            ninput = '{}_transposed'.format(name)
            builder.add_permute(name=name,
                                dim=[0, 1, 3, 2],
                                input_name=input,
                                output_name=ninput)
            input = ninput
            name = ninput
        if self.bidi:
            builder.add_bidirlstm(name=name,
                                  W_h=_reorder_indim(self.layer.weight_hh_l0),
                                  W_x=_reorder_indim(self.layer.weight_ih_l0),
                                  b=_reorder_indim((self.layer.bias_ih_l0 + self.layer.bias_hh_l0)
                                                   ) if not self.legacy else None,
                                  W_h_back=_reorder_indim(self.layer.weight_hh_l0_reverse),
                                  W_x_back=_reorder_indim(self.layer.weight_ih_l0_reverse),
                                  b_back=_reorder_indim((self.layer.bias_ih_l0_reverse +
                                                         self.layer.bias_hh_l0_reverse)) if not self.legacy else None,
                                  hidden_size=self.hidden_size,
                                  input_size=self.input_size,
                                  input_names=[input],
                                  output_names=[name],
                                  peep=[self.layer.weight_ip_l0.data.numpy(),
                                        self.layer.weight_fp_l0.data.numpy(),
                                        self.layer.weight_op_l0.data.numpy()] if self.legacy == 'ocropy' else None,
                                  peep_back=[self.layer.weight_ip_l0_reverse.data.numpy(),
                                             self.layer.weight_fp_l0_reverse.data.numpy(),
                                             self.layer.weight_op_l0_reverse.data.numpy()] if self.legacy == 'ocropy' else None,
                                  output_all=not self.summarize)
        else:
            builder.add_unilstm(name=name,
                                W_h=_reorder_indim(self.layer.weight_hh_l0),
                                W_x=_reorder_indim(self.layer.weight_ih_l0),
                                b=_reorder_indim((self.layer.bias_ih_l0 + self.layer.bias_hh_l0)) if not self.legacy else None,
                                hidden_size=self.hidden_size,
                                input_size=self.input_size,
                                input_names=[input],
                                output_names=[name],
                                peep=[self.layer.weight_ip_l0.data.numpy(),
                                      self.layer.weight_fp_l0.data.numpy(),
                                      self.layer.weight_op_l0.data.numpy()] if self.legacy == 'ocropy' else None,
                                output_all=not self.summarize)
        return name
class LinSoftmax(Module):
    """
    A wrapper for linear projection + softmax dealing with dimensionality mangling.
    """

    def __init__(self, input_size: int, output_size: int, augmentation: bool = False) -> None:
        """
        Args:
            input_size: Number of inputs in the feature dimension
            output_size: Number of outputs in the feature dimension
            augmentation (bool): Enables 1-augmentation of input vectors

        Shape:
            - Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H`
              height, and `W` width.
            - Outputs output :math:`(N, output_size, H, S)`
        """
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.augmentation = augmentation
        if self.augmentation:
            # one extra (constant) input feature when 1-augmentation is enabled
            self.input_size += 1
        self.lin = torch.nn.Linear(self.input_size, output_size)

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None,
                output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Projects the feature dimension and applies a (log-)softmax.

        Returns:
            Tuple of the activation tensor (NCHW) and the unchanged `seq_len`.
        """
        # move features (C) to last dimension for linear activation
        # NCHW -> NWHC
        inputs = inputs.transpose(1, 3)
        # augment with ones along the input (C) axis
        if self.augmentation:
            # create the constant column on the input's device/dtype so the
            # concatenation also works for GPU and half-precision tensors
            inputs = torch.cat([torch.ones(inputs.shape[:3] + (1,),
                                           device=inputs.device,
                                           dtype=inputs.dtype), inputs], dim=3)
        o = self.lin(inputs)
        # switch between log softmax (needed by ctc) and regular (for inference).
        if not self.training:
            o = F.softmax(o, dim=3)
        else:
            o = F.log_softmax(o, dim=3)
        # and swap again
        return o.transpose(1, 3), seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        """
        Calculates the output shape from input 4D tuple NCHW.
        """
        self.output_shape = (input[0], self.output_size, input[2], input[3])
        return self.output_shape

    def deserialize(self, name: str, spec) -> None:
        """
        Sets the weights of an initialized module from a CoreML protobuf spec.
        """
        # extract conv parameters
        lin = [x for x in spec.neuralNetwork.layers if x.name == '{}_lin'.format(name)][0].innerProduct
        weights = torch.Tensor(lin.weights.floatValue).resize_as_(self.lin.weight.data)
        bias = torch.Tensor(lin.bias.floatValue)
        self.lin.weight = torch.nn.Parameter(weights)
        self.lin.bias = torch.nn.Parameter(bias)

    def serialize(self, name: str, input: str, builder):
        """
        Serializes the module using a NeuralNetworkBuilder.
        """
        lin_name = '{}_lin'.format(name)
        softmax_name = '{}_softmax'.format(name)
        builder.add_inner_product(lin_name, self.lin.weight.data.numpy(),
                                  self.lin.bias.data.numpy(),
                                  self.input_size, self.output_size,
                                  has_bias=True, input_name=input, output_name=lin_name)
        builder.add_softmax(softmax_name, lin_name, name)
        return name

    def resize(self, output_size: int, del_indices: Optional[Iterable[int]] = None) -> None:
        """
        Resizes the linear layer with minimal disturbance to the existing
        weights.

        First removes the weight and bias at the output positions in
        del_indices, then resizes both tensors to a new output size.

        Args:
            output_size (int): Desired output size after resizing
            del_indices (list): List of connection to outputs to remove.
        """
        if not del_indices:
            del_indices = []
        old_shape = self.lin.weight.size(0)
        self.output_size = output_size
        idx = torch.tensor([x for x in range(old_shape) if x not in del_indices])
        weight = self.lin.weight.index_select(0, idx)
        rweight = torch.zeros((output_size - weight.size(0), weight.size(1)))
        # guard against empty initialization targets when no rows are added
        # (xavier init raises on zero-sized tensors); mirrors ActConv2D.resize
        if rweight.shape[0] > 0:
            torch.nn.init.xavier_uniform_(rweight)
        weight = torch.cat([weight, rweight])
        bias = self.lin.bias.index_select(0, idx)
        bias = torch.cat([bias, torch.zeros(output_size - bias.size(0))])
        self.lin = torch.nn.Linear(self.input_size, output_size)
        self.lin.weight = torch.nn.Parameter(weight)
        self.lin.bias = torch.nn.Parameter(bias)
class ActConv2D(Module):
    """
    A wrapper for convolution + activation with automatic padding ensuring no
    dropped columns.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Tuple[int, int],
                 stride: Tuple[int, int], nl: str = 'l', dilation: Tuple[int, int] = (1, 1),
                 transposed: bool = False) -> None:
        """
        Args:
            in_channels: Number of input channels.
            out_channels: Number of output feature maps.
            kernel_size: Kernel size (height, width).
            stride: Stride (height, width).
            nl: Non-linearity selector: 's' sigmoid, 't' tanh, 'm' softmax,
                'r' relu, 'lr' leaky relu, anything else linear/identity.
            dilation: Dilation (height, width).
            transposed: Builds a transposed convolution instead of a regular
                one.
        """
        super().__init__()
        self.in_channels = in_channels
        self.kernel_size = kernel_size
        self.out_channels = out_channels
        self.stride = stride
        self.dilation = dilation
        # 'same'-style padding for odd kernels so no columns are dropped
        self.padding = tuple((dilation[i] * (kernel_size[i] - 1)) // 2 for i in range(2))
        self.transposed = transposed
        if nl == 's':
            self.nl = torch.sigmoid
            self.nl_name = 'SIGMOID'
        elif nl == 't':
            self.nl = torch.tanh
            self.nl_name = 'TANH'
        elif nl == 'm':
            self.nl = torch.nn.Softmax(dim=1)
            self.nl_name = 'SOFTMAX'
        elif nl == 'r':
            self.nl = torch.relu
            self.nl_name = 'RELU'
        elif nl == 'lr':
            self.nl = torch.nn.LeakyReLU()
            self.nl_name = 'LEAKYRELU'
        else:
            self.nl_name = 'LINEAR'
            self.nl = lambda x: x
        if self.transposed:
            self.co = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size,
                                               stride=stride, padding=self.padding, dilation=self.dilation)
        else:
            self.co = torch.nn.Conv2d(in_channels, out_channels, kernel_size,
                                      stride=stride, padding=self.padding, dilation=self.dilation)

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None,
                output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Runs the convolution and activation, tracking sequence lengths.

        Args:
            inputs: NCHW input tensor.
            seq_len: Optional per-sample width (sequence length) tensor.
            output_shape: Target spatial size for transposed convolutions.

        Returns:
            Tuple of the activation tensor and the adjusted sequence lengths.
        """
        if self.transposed:
            o = self.co(inputs, output_size=output_shape)
        else:
            o = self.co(inputs)
        # return logits for sigmoid activation during training
        if not (self.nl_name == 'SIGMOID' and self.training):
            o = self.nl(o)
        if seq_len is not None:
            if self.transposed:
                # NOTE(review): unlike the branch below this returns a float
                # tensor and applies no clamp — confirm downstream consumers
                # tolerate that before changing it.
                seq_len = torch.floor(
                    ((seq_len - 1) * self.stride[1]\
                     - 2 * self.padding[1]\
                     + self.dilation[1] * (self.kernel_size[1] - 1)
                     + 1))
            else:
                # standard conv output-length formula, clamped to >= 1
                seq_len = torch.clamp(torch.floor(
                    (seq_len+2*self.padding[1]-self.dilation[1]*(self.kernel_size[1]-1)-1).float()/self.stride[1]+1), min=1).int()
        return o, seq_len

    def get_shape(self, input: Tuple[int, int, int, int],
                  target_shape: Optional[Tuple[int, int, int, int]] = None) -> Tuple[int, int, int, int]:
        """
        Calculates the output shape from an input 4D NCHW tuple. A zero
        spatial dimension (variable size) is propagated as zero.
        """
        if self.transposed:
            """ For transposed convolution, there is some flexibilty. """
            min_y = int((input[2] - 1) * self.stride[0] - 2 * self.padding[0] + self.dilation[0] * (self.kernel_size[0] - 1) + 1 if input[2] != 0 else 0)
            target_y = min_y if not target_shape or target_shape[2] == 0 else target_shape[2]
            min_x = int((input[3] - 1) * self.stride[1] - 2 * self.padding[1] + self.dilation[1] * (self.kernel_size[1] - 1) + 1 if input[3] != 0 else 0)
            target_x = min_x if not target_shape or target_shape[3] == 0 else target_shape[3]
            # clamp the requested target into the range reachable with
            # output_padding < stride
            self.output_shape = (input[0],
                                 self.out_channels,
                                 min(min_y + self.stride[0] - 1, max(target_y, min_y)),
                                 min(min_x + self.stride[1] - 1, max(target_x, min_x)))
        else:
            self.output_shape = (input[0],
                                 self.out_channels,
                                 int(max(np.floor((input[2]+2*self.padding[0]-self.dilation[0]*(self.kernel_size[0]-1)-1) /
                                         self.stride[0]+1), 1) if input[2] != 0 else 0),
                                 int(max(np.floor((input[3]+2*self.padding[1]-self.dilation[1]*(self.kernel_size[1]-1)-1)/self.stride[1]+1), 1) if input[3] != 0 else 0))
        return self.output_shape

    def deserialize(self, name: str, spec) -> None:
        """
        Sets the weight of an initialized model from a CoreML protobuf spec.
        """
        conv = [x for x in spec.neuralNetwork.layers if x.name == '{}_conv'.format(name)][0].convolution
        # transposed convolutions store weights as (in, out, kH, kW)
        if self.transposed:
            self.co.weight = torch.nn.Parameter(torch.Tensor(conv.weights.floatValue).view(self.in_channels,
                                                                                           self.out_channels,
                                                                                           *self.kernel_size))
        else:
            self.co.weight = torch.nn.Parameter(torch.Tensor(conv.weights.floatValue).view(self.out_channels,
                                                                                           self.in_channels,
                                                                                           *self.kernel_size))
        self.co.bias = torch.nn.Parameter(torch.Tensor(conv.bias.floatValue))

    def serialize(self, name: str, input: str, builder) -> str:
        """
        Serializes the module using a NeuralNetworkBuilder.
        """
        conv_name = '{}_conv'.format(name)
        act_name = '{}_act'.format(name)
        # coreml expects (kH, kW, in, out) weight layout
        W = self.co.weight.permute(2, 3, 0, 1).data.numpy() if self.transposed else self.co.weight.permute(2, 3, 1, 0).data.numpy()
        builder.add_convolution(name=conv_name,
                                kernel_channels=self.in_channels,
                                output_channels=self.out_channels,
                                height=self.kernel_size[0],
                                width=self.kernel_size[1],
                                stride_height=self.stride[0],
                                stride_width=self.stride[1],
                                dilation_factors=self.dilation,
                                border_mode='same',
                                groups=1,
                                W=W,
                                b=self.co.bias.data.numpy(),
                                has_bias=True,
                                is_deconv=self.transposed,
                                input_name=input,
                                output_name=conv_name)
        if self.nl_name != 'SOFTMAX':
            builder.add_activation(act_name, self.nl_name, conv_name, name, params=None if self.nl_name != 'LEAKYRELU' else [self.nl.negative_slope])
        else:
            builder.add_softmax(act_name, conv_name, name)
        return name

    def resize(self, output_size: int, del_indices: Optional[Iterable[int]] = None) -> None:
        """
        Resizes the convolutional filters of the layer

        First removes the filters at output positions in del_indices, then
        resizes both tensors to a new output size.

        Args:
            output_size (int): Desired output dimensionality after resizing
            del_indices (list): List of connection to outputs to remove.
        """
        if not del_indices:
            del_indices = []
        old_shape = self.co.weight.size(0)
        self.out_channels = output_size
        idx = torch.tensor([x for x in range(old_shape) if x not in del_indices])
        weight = self.co.weight.index_select(0, idx)
        rweight = torch.zeros((output_size - weight.size(0), weight.size(1), weight.size(2), weight.size(3)))
        if rweight.shape[0] > 0:
            torch.nn.init.xavier_uniform_(rweight)
        weight = torch.cat([weight, rweight], dim=0)
        bias = self.co.bias.index_select(0, idx)
        bias = torch.cat([bias, torch.zeros(output_size - bias.size(0))])
        # recreate the convolution with *all* original hyperparameters;
        # previously `dilation` was dropped here, silently turning a dilated
        # convolution into an undilated one after resizing.
        if self.transposed:
            self.co = torch.nn.ConvTranspose2d(self.in_channels, self.out_channels, self.kernel_size,
                                               stride=self.stride, padding=self.padding,
                                               dilation=self.dilation)
        else:
            self.co = torch.nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size,
                                      stride=self.stride, padding=self.padding,
                                      dilation=self.dilation)
        self.co.weight = torch.nn.Parameter(weight)
        self.co.bias = torch.nn.Parameter(bias)
class GroupNorm(Module):
    """
    A group normalization layer wrapping :class:`torch.nn.GroupNorm`.
    """

    def __init__(self, in_channels: int, num_groups: int) -> None:
        """
        Args:
            in_channels: Number of channels of the expected NCHW input.
            num_groups: Number of groups the channels are split into.
        """
        super().__init__()
        self.in_channels = in_channels
        self.num_groups = num_groups
        self.layer = torch.nn.GroupNorm(num_groups, in_channels)

    def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor], output_shape: Optional[Tuple[int, int]] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Normalizes `inputs`; `seq_len` is passed through unchanged.
        """
        orig_dtype = inputs.dtype
        # XXX: verify that pytorch AMP casts the inputs to float32 correctly at
        # some point.
        normed = self.layer(inputs.type(torch.float32))
        return normed.type(orig_dtype), seq_len

    def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
        """Normalization leaves the input shape untouched."""
        self.output_shape = input
        return self.output_shape  # type: ignore

    def deserialize(self, name: str, spec) -> None:
        """
        Sets the weight of an initialized model from a CoreML protobuf spec.
        """
        gn_params = [layer for layer in spec.neuralNetwork.layers if layer.name == name][0].custom
        new_weight = torch.Tensor(gn_params.weights[0].floatValue).resize_as_(self.layer.weight)
        new_bias = torch.Tensor(gn_params.weights[1].floatValue).resize_as_(self.layer.bias)
        self.layer.weight = torch.nn.Parameter(new_weight)
        self.layer.bias = torch.nn.Parameter(new_bias)

    def serialize(self, name: str, input: str, builder) -> str:
        """
        Serializes the module using a NeuralNetworkBuilder.
        """
        params = NeuralNetwork_pb2.CustomLayerParams()
        params.className = 'groupnorm'
        params.description = 'A Group Normalization layer'
        params.parameters['in_channels'].intValue = self.in_channels
        params.parameters['num_groups'].intValue = self.num_groups
        # weight first, bias second — deserialize() relies on this ordering
        for tensor in (self.layer.weight, self.layer.bias):
            entry = params.weights.add()
            entry.floatValue.extend(tensor.data.numpy())
        builder.add_custom(name,
                           input_names=[input],
                           output_names=[name],
                           custom_proto_spec=params)
        return name
| 41,375 | 42.416579 | 200 | py |
kraken | kraken-main/kraken/lib/util.py | """
Ocropus's magic PIL-numpy array conversion routines. They express slightly
different behavior from PIL.Image.toarray().
"""
import torch
import unicodedata
import numpy as np
from PIL import Image
from typing import Union
__all__ = ['pil2array', 'array2pil', 'is_bitonal', 'make_printable', 'get_im_str']
def pil2array(im: Image.Image, alpha: int = 0) -> np.ndarray:
    """
    Converts a PIL image to a numpy array.

    Bilevel ('1') images are first widened to 8-bit grayscale so the result
    is a uint8 array rather than a boolean one.
    """
    if im.mode != '1':
        return np.array(im)
    return np.array(im.convert('L'))
def array2pil(a: np.ndarray) -> Image.Image:
    """
    Converts a numpy array to a PIL image.

    Supports uint8 arrays of rank 2 (grayscale 'L') or rank 3 (treated as
    'RGB' — NOTE(review): 4-channel arrays would be mis-sized; confirm callers
    never pass RGBA data) and float32 rank-2 arrays ('F' mode).

    Args:
        a: Array to convert.

    Returns:
        A PIL image built from the array's bytes.

    Raises:
        ValueError: if the array has an unsupported rank or dtype. ValueError
            subclasses Exception, so pre-existing broad handlers still work
            while targeted handling becomes possible.
    """
    if a.dtype == np.dtype("B"):
        if a.ndim == 2:
            return Image.frombytes("L", (a.shape[1], a.shape[0]),
                                   a.tobytes())
        elif a.ndim == 3:
            return Image.frombytes("RGB", (a.shape[1], a.shape[0]),
                                   a.tobytes())
        else:
            raise ValueError("bad image rank")
    elif a.dtype == np.dtype('float32'):
        return Image.frombytes("F", (a.shape[1], a.shape[0]), a.tobytes())
    else:
        raise ValueError("unknown image type")
def is_bitonal(im: Union[Image.Image, torch.Tensor]) -> bool:
"""
Tests a PIL image or torch tensor for bitonality.
Args:
im: Image to test
Returns:
True if the image contains only two different color values. False
otherwise.
"""
if isinstance(im, Image.Image):
return im.getcolors(2) is not None and len(im.getcolors(2)) == 2
elif isinstance(im, torch.Tensor):
return len(im.int().unique()) == 2
def get_im_str(im: Image.Image) -> str:
    """
    Returns an identifier for an image: its filename when available,
    otherwise its string representation.
    """
    try:
        return im.filename
    except AttributeError:
        return str(im)
def is_printable(char: str) -> bool:
    """
    Determines if a code point is printable/visible when printed.

    Args:
        char (str): Input code point.

    Returns:
        True if printable, False otherwise.
    """
    # letters, numbers, punctuation and symbols; notably excludes
    # separators (Z*), marks (M*) and control/format categories (C*)
    printable_categories = {
        'LC', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
        'Nd', 'Nl', 'No',
        'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
        'Sc', 'Sk', 'Sm', 'So',
    }
    return unicodedata.category(char) in printable_categories
def make_printable(char: str) -> str:
    """
    Takes a Unicode code point and returns a printable representation of it.

    Args:
        char (str): Input code point

    Returns:
        Either the original code point, the name of the code point if it is a
        combining mark, whitespace etc., or the hex code if it is a control
        symbol.
    """
    if not char or is_printable(char):
        return char
    if unicodedata.category(char) in ('Cc', 'Cs', 'Co'):
        # control/surrogate/private-use: no name, show the raw hex value
        return '0x{:x}'.format(ord(char))
    return unicodedata.name(char)
| 2,723 | 27.375 | 82 | py |
kraken | kraken-main/kraken/lib/models.py | """
kraken.lib.models
~~~~~~~~~~~~~~~~~
Wrapper around TorchVGSLModel including a variety of forward pass helpers for
sequence classification.
"""
from os import PathLike
from os.path import expandvars, expanduser, abspath
import torch
import numpy as np
import kraken.lib.lineest
import kraken.lib.ctc_decoder
from typing import List, Tuple, Optional, Union
from kraken.lib.vgsl import TorchVGSLModel
from kraken.lib.exceptions import KrakenInvalidModelException, KrakenInputException
__all__ = ['TorchSeqRecognizer', 'load_any']
import logging
logger = logging.getLogger(__name__)
class TorchSeqRecognizer(object):
    """
    A wrapper class around a TorchVGSLModel for text recognition.
    """
    def __init__(self,
                 nn: TorchVGSLModel,
                 decoder=kraken.lib.ctc_decoder.greedy_decoder,
                 train: bool = False,
                 device: str = 'cpu'):
        """
        Constructs a sequence recognizer from a VGSL model and a decoder.

        Args:
            nn: Neural network used for recognition.
            decoder: Decoder function used for mapping softmax activations to
                     labels and positions.
            train: Enables or disables gradient calculation and dropout.
            device: Device to run model on.

        Attributes:
            nn: Neural network used for recognition.
            codec: PytorchCodec extracted from the recognition model.
            decoder: Decoder function used for mapping softmax activations to
                     labels and positions.
            train: Enables or disables gradient calculation and dropout.
            device: Device to run model on.
            one_channel_mode: flag indicating if the model expects binary or
                              grayscale input images.
            seg_type: flag indicating if the model expects baseline- or bounding
                      box-derived text line images.

        Raises:
            ValueError: Is raised when the model type is not a sequence recognizer.
        """
        self.nn = nn
        # `kind` is filled in by the loader (e.g. 'vgsl' in load_any)
        self.kind = ''
        # NB: `train` is effectively tri-state — True switches the module to
        # training mode, False to eval mode, anything else (e.g. None) leaves
        # the module's current mode untouched.
        if train is True:
            self.nn.train()
        elif train is False:
            self.nn.eval()
        self.codec = self.nn.codec
        self.decoder = decoder
        self.train = train
        self.device = device
        if nn.model_type not in [None, 'recognition']:
            raise ValueError(f'Models of type {nn.model_type} are not supported by TorchSeqRecognizer')
        self.one_channel_mode = nn.one_channel_mode
        self.seg_type = nn.seg_type
        if self.device:
            self.nn.to(device)

    def to(self, device):
        """
        Moves model to device and automatically loads input tensors onto it.
        """
        self.device = device
        self.nn.to(device)

    def forward(self, line: torch.Tensor, lens: torch.Tensor = None) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """
        Performs a forward pass on a torch tensor of one or more lines with
        shape (N, C, H, W) and returns a numpy array (N, W, C).

        Args:
            line: NCHW line tensor
            lens: Optional tensor containing sequence lengths if N > 1

        Returns:
            Tuple with (N, W, C) shaped numpy array and final output sequence
            lengths.

        Raises:
            KrakenInputException: Is raised if the channel dimension isn't of
                                  size 1 in the network output.
        """
        if self.device:
            line = line.to(self.device)
        o, olens = self.nn.nn(line, lens)
        # a recognition network must have collapsed the height dimension to 1
        if o.size(2) != 1:
            raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(o.size()))
        # keep the raw activations around for introspection by callers
        self.outputs = o.detach().squeeze(2).cpu().numpy()
        if olens is not None:
            olens = olens.cpu().numpy()
        return self.outputs, olens

    def predict(self, line: torch.Tensor, lens: Optional[torch.Tensor] = None) -> List[List[Tuple[str, int, int, float]]]:
        """
        Performs a forward pass on a torch tensor of a line with shape (N, C, H, W)
        and returns the decoding as a list of tuples (string, start, end,
        confidence).

        Args:
            line: NCHW line tensor
            lens: Optional tensor containing sequence lengths if N > 1

        Returns:
            List of decoded sequences.
        """
        o, olens = self.forward(line, lens)
        dec_seqs = []
        if olens is not None:
            # decode each sample only up to its valid (unpadded) length
            for seq, seq_len in zip(o, olens):
                locs = self.decoder(seq[:, :seq_len])
                dec_seqs.append(self.codec.decode(locs))
        else:
            locs = self.decoder(o[0])
            dec_seqs.append(self.codec.decode(locs))
        return dec_seqs

    def predict_string(self, line: torch.Tensor, lens: Optional[torch.Tensor] = None) -> List[str]:
        """
        Performs a forward pass on a torch tensor of a line with shape (N, C, H, W)
        and returns a string of the results.

        Args:
            line: NCHW line tensor
            lens: Optional tensor containing the sequence lengths of the input batch.
        """
        o, olens = self.forward(line, lens)
        dec_strs = []
        if olens is not None:
            for seq, seq_len in zip(o, olens):
                locs = self.decoder(seq[:, :seq_len])
                # drop positional/confidence information, keep only the text
                dec_strs.append(''.join(x[0] for x in self.codec.decode(locs)))
        else:
            locs = self.decoder(o[0])
            dec_strs.append(''.join(x[0] for x in self.codec.decode(locs)))
        return dec_strs

    def predict_labels(self, line: torch.tensor, lens: torch.Tensor = None) -> List[List[Tuple[int, int, int, float]]]:
        """
        Performs a forward pass on a torch tensor of a line with shape (N, C, H, W)
        and returns a list of tuples (class, start, end, max). Max is the
        maximum value of the softmax layer in the region.
        """
        o, olens = self.forward(line, lens)
        oseqs = []
        if olens is not None:
            for seq, seq_len in zip(o, olens):
                oseqs.append(self.decoder(seq[:, :seq_len]))
        else:
            oseqs.append(self.decoder(o[0]))
        return oseqs
def load_any(fname: Union[PathLike, str],
             train: bool = False,
             device: str = 'cpu') -> TorchSeqRecognizer:
    """
    Loads anything that was, is, and will be a valid ocropus model and
    instantiates a shiny new kraken.lib.lstm.SeqRecognizer from the RNN
    configuration in the file.

    Currently it recognizes the following kinds of models:

        * protobuf models containing VGSL segmentation and recognition
          networks.

    Additionally an attribute 'kind' will be added to the SeqRecognizer
    containing a string representation of the source kind. Current known values
    are:

        * vgsl for VGSL models

    Args:
        fname: Path to the model
        train: Enables gradient calculation and dropout layers in model.
        device: Target device

    Returns:
        A kraken.lib.models.TorchSeqRecognizer object.

    Raises:
        KrakenInvalidModelException: if the model is not loadable by any parser.
    """
    # normalize the path (env vars, ~, relative components) before loading
    path = abspath(expandvars(expanduser(fname)))
    logger.info('Loading model from {}'.format(path))
    try:
        nn = TorchVGSLModel.load_model(str(path))
    except Exception as e:
        raise KrakenInvalidModelException('File {} not loadable by any parser.'.format(path)) from e
    seq = TorchSeqRecognizer(nn, train=train, device=device)
    seq.kind = 'vgsl'
    return seq
def validate_hyper_parameters(hyper_params):
    """
    Validate some model's hyper parameters and modify them in place if need be.
    """
    # with early-stopping disabled ('dumb' quit) a reloaded model that already
    # ran its full epoch budget would otherwise never train again
    epochs_exhausted = hyper_params['completed_epochs'] >= hyper_params['epochs']
    if hyper_params['quit'] == 'dumb' and epochs_exhausted:
        logger.warning('Maximum epochs reached (might be loaded from given model), starting again from 0.')
        hyper_params['completed_epochs'] = 0
| 8,040 | 35.058296 | 122 | py |
kraken | kraken-main/kraken/lib/functional_im_transforms.py | #
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Named functions for all the transforms that were lambdas in the past to
facilitate pickling.
"""
import torch
import regex
import unicodedata
import bidi.algorithm as bd
from os import PathLike
from pathlib import Path
from PIL import Image
from PIL.Image import Resampling
from typing import Tuple, Optional, Callable, Any, Union
from kraken.binarization import nlbin
from kraken.lib.lineest import dewarp, CenterNormalizer
def pil_to_mode(im: Image.Image, mode: str) -> Image.Image:
    """Converts a PIL image to the given mode (e.g. '1', 'L', 'RGB')."""
    return im.convert(mode)
def pil_to_bin(im: Image.Image) -> Image.Image:
    """Binarizes a PIL image using kraken's nlbin adaptive binarization."""
    return nlbin(im)
def dummy(x: Any) -> Any:
    """Identity function used as a picklable no-op transform."""
    return x
def pil_dewarp(im: Image.Image, lnorm: CenterNormalizer) -> Image.Image:
    """Dewarps a line image with the given CenterNormalizer."""
    return dewarp(lnorm, im)
def pil_fixed_resize(im: Image.Image, scale: Tuple[int, int]) -> Image.Image:
    """Resizes an image to `scale` (height, width) with Lanczos resampling; a
    zero dimension is inferred from the aspect ratio."""
    return _fixed_resize(im, scale, Resampling.LANCZOS)
def tensor_invert(im: torch.Tensor) -> torch.Tensor:
    """Inverts a tensor image relative to its maximum value."""
    peak = im.max()
    return peak - im
def tensor_permute(im: torch.Tensor, perm: Tuple[int, ...]) -> torch.Tensor:
    """Reorders the dimensions of a tensor according to `perm`."""
    return torch.permute(im, perm)
def _fixed_resize(img: Image.Image, size: Tuple[int, int], interpolation: int = Resampling.LANCZOS):
    """
    Doesn't do the annoying runtime scale dimension switching the default
    pytorch transform does.

    Args:
        img (PIL.Image.Image): image to resize
        size (tuple): Tuple (height, width); a zero entry is computed from
            the other one preserving the aspect ratio.
    """
    width, height = img.size
    new_height, new_width = size
    if new_height == 0:
        new_height = int(height * new_width / width)
    elif new_width == 0:
        new_width = int(width * new_height / height)
    return img.resize((new_width, new_height), interpolation)
def text_normalize(text: str, normalization: str) -> str:
    """Applies the given Unicode normalization form ('NFC', 'NFD', 'NFKC', 'NFKD') to text."""
    return unicodedata.normalize(normalization, text)
def text_whitespace_normalize(text: str) -> str:
    """Replaces each whitespace character with a plain space and strips
    leading/trailing whitespace (runs of whitespace are not collapsed)."""
    return regex.sub(r'\s', ' ', text).strip()
def text_reorder(text: str, base_dir: Optional[str] = None) -> str:
    """Reorders logical-order text into display order with the Unicode BiDi
    algorithm; `base_dir` ('L'/'R') overrides paragraph direction detection."""
    return bd.get_display(text, base_dir=base_dir)
def default_split(x: Union[PathLike, str]) -> str:
    """
    Strips *all* suffixes from a path ('foo.tar.gz' -> 'foo') and returns it
    as a string.
    """
    stem = Path(x)
    while stem.suffixes:
        stem = stem.with_suffix('')
    return str(stem)
def suffix_split(x: Union[PathLike, str], split: Callable[[Union[PathLike, str]], str], suffix: str) -> str:
    """Applies `split` to the path and appends `suffix` to the result."""
    base = split(x)
    return base + suffix
| 2,764 | 25.84466 | 108 | py |
kraken | kraken-main/kraken/lib/train.py | #
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Training loop interception helpers
"""
import re
import torch
import logging
import warnings
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
from os import PathLike
from functools import partial
from torch.multiprocessing import Pool
from torchmetrics import CharErrorRate, WordErrorRate
from torchmetrics.classification import MultilabelAccuracy, MultilabelJaccardIndex
from torch.optim import lr_scheduler
from typing import Callable, Dict, Optional, Sequence, Union, Any, Literal
from pytorch_lightning.callbacks import Callback, EarlyStopping, BaseFinetuning, LearningRateMonitor
from kraken.lib import models, vgsl, default_specs, progress
from kraken.lib.xml import preparse_xml_data
from kraken.lib.util import make_printable
from kraken.lib.codec import PytorchCodec
from kraken.lib.dataset import (ArrowIPCRecognitionDataset, BaselineSet,
GroundTruthDataset, PolygonGTDataset,
ImageInputTransforms, collate_sequences)
from kraken.lib.models import validate_hyper_parameters
from kraken.lib.exceptions import KrakenInputException, KrakenEncodeException
from torch.utils.data import DataLoader, random_split, Subset
logger = logging.getLogger(__name__)
def _star_fun(fun, kwargs):
    """
    Applies `fun` to a dict of keyword arguments, downgrading missing-file
    and kraken input errors to logged warnings.

    Returns:
        The result of ``fun(**kwargs)``, or None when one of the handled
        exceptions occurred (the item is skipped by callers).
    """
    try:
        return fun(**kwargs)
    except FileNotFoundError as e:
        logger.warning(f'{e.strerror}: {e.filename}. Skipping.')
    except KrakenInputException as e:
        logger.warning(str(e))
    return None
def _validation_worker_init_fn(worker_id):
    """ Fix random seeds so that augmentation always produces the same
        results when validating. Temporarily increase the logging level
        for lightning because otherwise it will display a message
        at info level about the seed being changed. """
    from pytorch_lightning import seed_everything
    # silence the "seed set to 42" info message, then restore the old level
    level = logging.getLogger("lightning_fabric.utilities.seed").level
    logging.getLogger("lightning_fabric.utilities.seed").setLevel(logging.WARN)
    seed_everything(42)
    logging.getLogger("lightning_fabric.utilities.seed").setLevel(level)
class KrakenTrainer(pl.Trainer):
    """
    A pytorch-lightning Trainer preconfigured for kraken: no lightning
    checkpointing (kraken serializes models itself), optional progress
    bar/model summary, optional experiment logger, and optional backbone
    freezing for fine-tuning.
    """
    def __init__(self,
                 enable_progress_bar: bool = True,
                 enable_summary: bool = True,
                 min_epochs: int = 5,
                 max_epochs: int = 100,
                 freeze_backbone=-1,
                 pl_logger: Union[pl.loggers.logger.Logger, str, None] = None,
                 log_dir: Optional[PathLike] = None,
                 *args,
                 **kwargs):
        # lightning checkpointing is disabled; KrakenSaveModel (below) writes
        # .mlmodel files after each validation run instead
        kwargs['enable_checkpointing'] = False
        kwargs['enable_progress_bar'] = enable_progress_bar
        kwargs['min_epochs'] = min_epochs
        kwargs['max_epochs'] = max_epochs
        kwargs['callbacks'] = ([] if 'callbacks' not in kwargs else kwargs['callbacks'])
        if not isinstance(kwargs['callbacks'], list):
            kwargs['callbacks'] = [kwargs['callbacks']]

        if pl_logger:
            # precedence: an explicit `logger` kwarg > a Logger instance in
            # `pl_logger` > the 'tensorboard' shorthand; anything else is an error
            if 'logger' in kwargs and isinstance(kwargs['logger'], pl.loggers.logger.Logger):
                logger.debug('Experiment logger has been provided outside KrakenTrainer as `logger`')
            elif isinstance(pl_logger, pl.loggers.logger.Logger):
                logger.debug('Experiment logger has been provided outside KrakenTrainer as `pl_logger`')
                kwargs['logger'] = pl_logger
            elif pl_logger == 'tensorboard':
                logger.debug('Creating default experiment logger')
                kwargs['logger'] = pl.loggers.TensorBoardLogger(log_dir)
            else:
                logger.error('`pl_logger` was set, but %s is not an accepted value', pl_logger)
                raise ValueError(f'{pl_logger} is not acceptable as logger')
            kwargs['callbacks'].append(LearningRateMonitor(logging_interval='step'))
        else:
            kwargs['logger'] = False

        if enable_progress_bar:
            progress_bar_cb = progress.KrakenTrainProgressBar(leave=True)
            kwargs['callbacks'].append(progress_bar_cb)

        if enable_summary:
            from pytorch_lightning import callbacks  # noqa: imported for side effects of module init
            from pytorch_lightning.callbacks import RichModelSummary
            summary_cb = RichModelSummary(max_depth=2)
            kwargs['callbacks'].append(summary_cb)
            kwargs['enable_model_summary'] = False

        if freeze_backbone > 0:
            kwargs['callbacks'].append(KrakenFreezeBackbone(freeze_backbone))

        kwargs['callbacks'].extend([KrakenSetOneChannelMode(), KrakenSaveModel()])
        super().__init__(*args, **kwargs)
        # the LightningModules drive optimization manually
        self.automatic_optimization = False

    def fit(self, *args, **kwargs):
        # suppress lightning's noisy dataloader warnings during fit
        with warnings.catch_warnings():
            warnings.filterwarnings(action='ignore', category=UserWarning,
                                    message='The dataloader,')
            super().fit(*args, **kwargs)
class KrakenFreezeBackbone(BaseFinetuning):
    """
    Callback freezing all but the last layer for fixed number of iterations.
    """
    def __init__(self, unfreeze_at_iterations=10):
        super().__init__()
        # global step at which the frozen layers are re-enabled
        self.unfreeze_at_iteration = unfreeze_at_iterations

    def freeze_before_training(self, pl_module):
        # freezing happens in on_train_start instead so it applies after
        # any module setup
        pass

    def finetune_function(self, pl_module, current_epoch, optimizer):
        # unfreezing is driven by global step, not epoch (see below)
        pass

    def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        # freeze everything except the final (output) layer
        self.freeze(pl_module.net[:-1])

    def on_train_batch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", batch, batch_idx) -> None:
        """
        Called for each training batch.
        """
        if trainer.global_step == self.unfreeze_at_iteration:
            for opt_idx, optimizer in enumerate(trainer.optimizers):
                num_param_groups = len(optimizer.param_groups)
                # thaw the backbone and register its parameters with the optimizer
                self.unfreeze_and_add_param_group(modules=pl_module.net[:-1],
                                                  optimizer=optimizer,
                                                  train_bn=True,)
                current_param_groups = optimizer.param_groups
                self._store(pl_module, opt_idx, num_param_groups, current_param_groups)

    def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        """Called when the epoch begins."""
        # overridden to disable BaseFinetuning's per-epoch finetune_function dispatch
        pass
class KrakenSetOneChannelMode(Callback):
    """
    Callback that sets the one_channel_mode of the model after the first epoch.
    """
    def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        # fill one_channel_mode after 1 iteration over training data set
        if not trainer.sanity_checking and trainer.current_epoch == 0 and trainer.model.nn.model_type == 'recognition':
            # the training set lives either on the module or on a datamodule
            ds = getattr(pl_module, 'train_set', None)
            if not ds and trainer.datamodule:
                ds = trainer.datamodule.train_set
            im_mode = ds.dataset.im_mode
            # only record unambiguous single-channel modes (bilevel/grayscale)
            if im_mode in ['1', 'L']:
                logger.info(f'Setting model one_channel_mode to {im_mode}.')
                trainer.model.nn.one_channel_mode = im_mode
class KrakenSaveModel(Callback):
    """
    Kraken's own serialization callback instead of pytorch's.
    """
    def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
        if not trainer.sanity_checking:
            trainer.model.nn.hyper_params['completed_epochs'] += 1
            # -1 marks "no validation metric logged yet"
            metric = float(trainer.logged_metrics['val_metric']) if 'val_metric' in trainer.logged_metrics else -1.0
            trainer.model.nn.user_metadata['accuracy'].append((trainer.global_step, metric))
            trainer.model.nn.user_metadata['metrics'].append((trainer.global_step, {k: float(v) for k, v in trainer.logged_metrics.items()}))
            logger.info('Saving to {}_{}.mlmodel'.format(trainer.model.output, trainer.current_epoch))
            # one model file per epoch; best_model tracks the epoch chosen elsewhere
            trainer.model.nn.save_model(f'{trainer.model.output}_{trainer.current_epoch}.mlmodel')
            trainer.model.best_model = f'{trainer.model.output}_{trainer.model.best_epoch}.mlmodel'
class RecognitionModel(pl.LightningModule):
    """LightningModule bundling dataset preparation, codec management, and
    the training/validation loops for a kraken text recognition model.
    """
    def __init__(self,
                 hyper_params: Dict[str, Any] = None,
                 output: str = 'model',
                 spec: str = default_specs.RECOGNITION_SPEC,
                 append: Optional[int] = None,
                 model: Optional[Union[PathLike, str]] = None,
                 reorder: Union[bool, str] = True,
                 training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None,
                 evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None,
                 partition: Optional[float] = 0.9,
                 binary_dataset_split: bool = False,
                 num_workers: int = 1,
                 load_hyper_parameters: bool = False,
                 repolygonize: bool = False,
                 force_binarization: bool = False,
                 format_type: Literal['path', 'alto', 'page', 'xml', 'binary'] = 'path',
                 codec: Optional[Dict] = None,
                 resize: Literal['fail', 'both', 'new', 'add', 'union'] = 'fail'):
        """
        A LightningModule encapsulating the training setup for a text
        recognition model.
        Setup parameters (load, training_data, evaluation_data, ....) are
        named, model hyperparameters (everything in
        `kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS`) are in the
        `hyper_params` argument.
        Args:
            hyper_params (dict): Hyperparameter dictionary containing all fields
                                 from
                                 kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS
            **kwargs: Setup parameters, i.e. CLI parameters of the train() command.
        """
        super().__init__()
        # User-supplied hyperparameters override the model's stored ones which
        # in turn override the package defaults.
        hyper_params_ = default_specs.RECOGNITION_HYPER_PARAMS.copy()
        if model:
            logger.info(f'Loading existing model from {model} ')
            self.nn = vgsl.TorchVGSLModel.load_model(model)
            if self.nn.model_type not in [None, 'recognition']:
                raise ValueError(f'Model {model} is of type {self.nn.model_type} while `recognition` is expected.')
            if load_hyper_parameters:
                hp = self.nn.hyper_params
            else:
                hp = {}
            hyper_params_.update(hp)
        else:
            self.nn = None
        if hyper_params:
            hyper_params_.update(hyper_params)
        self.save_hyperparameters(hyper_params_)
        self.reorder = reorder
        self.append = append
        self.model = model
        self.num_workers = num_workers
        # map deprecated resize modes onto their current equivalents
        if resize == "add":
            resize = "union"
            warnings.warn("'add' value for resize has been deprecated. Use 'union' instead.", DeprecationWarning)
        elif resize == "both":
            resize = "new"
            warnings.warn("'both' value for resize has been deprecated. Use 'new' instead.", DeprecationWarning)
        self.resize = resize
        self.format_type = format_type
        self.output = output
        self.best_epoch = -1
        self.best_metric = 0.0
        self.best_model = None
        # select the dataset implementation matching the input format
        DatasetClass = GroundTruthDataset
        valid_norm = True
        if format_type in ['xml', 'page', 'alto']:
            logger.info(f'Parsing {len(training_data)} XML files for training data')
            training_data = preparse_xml_data(training_data, format_type, repolygonize)
            if evaluation_data:
                logger.info(f'Parsing {len(evaluation_data)} XML files for validation data')
                evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize)
            if binary_dataset_split:
                logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                binary_dataset_split = False
            DatasetClass = PolygonGTDataset
            valid_norm = False
        elif format_type == 'binary':
            DatasetClass = ArrowIPCRecognitionDataset
            if repolygonize:
                logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.')
            valid_norm = False
            logger.info(f'Got {len(training_data)} binary dataset files for training data')
            training_data = [{'file': file} for file in training_data]
            if evaluation_data:
                logger.info(f'Got {len(evaluation_data)} binary dataset files for validation data')
                evaluation_data = [{'file': file} for file in evaluation_data]
        elif format_type == 'path':
            if force_binarization:
                logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')
                force_binarization = False
            if repolygonize:
                logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')
            if binary_dataset_split:
                logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                binary_dataset_split = False
            logger.info(f'Got {len(training_data)} line strip images for training data')
            training_data = [{'image': im} for im in training_data]
            if evaluation_data:
                logger.info(f'Got {len(evaluation_data)} line strip images for validation data')
                evaluation_data = [{'image': im} for im in evaluation_data]
            valid_norm = True
        # format_type is None. Determine training type from length of training data entry
        elif not format_type:
            if len(training_data[0]) >= 4:
                DatasetClass = PolygonGTDataset
                valid_norm = False
            else:
                if force_binarization:
                    logger.warning('Forced binarization enabled with box lines. Will be ignored.')
                    force_binarization = False
                if repolygonize:
                    logger.warning('Repolygonization enabled with box lines. Will be ignored.')
                if binary_dataset_split:
                    logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                    binary_dataset_split = False
        else:
            raise ValueError(f'format_type {format_type} not in [alto, page, xml, path, binary].')
        spec = spec.strip()
        if spec[0] != '[' or spec[-1] != ']':
            raise ValueError(f'VGSL spec {spec} not bracketed')
        self.spec = spec
        # preparse input sizes from vgsl string to seed ground truth data set
        # sizes and dimension ordering.
        if not self.nn:
            blocks = spec[1:-1].split(' ')
            m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
            if not m:
                raise ValueError(f'Invalid input spec {blocks[0]}')
            batch, height, width, channels = [int(x) for x in m.groups()]
        else:
            batch, channels, height, width = self.nn.input
        self.transforms = ImageInputTransforms(batch,
                                               height,
                                               width,
                                               channels,
                                               (self.hparams.pad, 0),
                                               valid_norm,
                                               force_binarization)
        # dummy input for lightning's model summary/tracing; fallback sizes
        # are used when the spec leaves height/width variable (0)
        self.example_input_array = torch.Tensor(batch,
                                                channels,
                                                height if height else 32,
                                                width if width else 400)
        if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
            logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
            torch.multiprocessing.set_sharing_strategy('file_system')
        # datasets are wrapped in Subset so downstream code can uniformly
        # access the underlying dataset via `.dataset`
        if evaluation_data:
            train_set = self._build_dataset(DatasetClass, training_data)
            self.train_set = Subset(train_set, range(len(train_set)))
            val_set = self._build_dataset(DatasetClass, evaluation_data)
            self.val_set = Subset(val_set, range(len(val_set)))
        elif binary_dataset_split:
            train_set = self._build_dataset(DatasetClass, training_data, split_filter='train')
            self.train_set = Subset(train_set, range(len(train_set)))
            val_set = self._build_dataset(DatasetClass, training_data, split_filter='validation')
            self.val_set = Subset(val_set, range(len(val_set)))
            logger.info(f'Found {len(self.train_set)} (train) / {len(self.val_set)} (val) samples in pre-encoded dataset')
        else:
            train_set = self._build_dataset(DatasetClass, training_data)
            train_len = int(len(train_set)*partition)
            val_len = len(train_set) - train_len
            logger.info(f'No explicit validation data provided. Splitting off '
                        f'{val_len} (of {len(train_set)}) samples to validation '
                        'set. (Will disable alphabet mismatch detection.)')
            self.train_set, self.val_set = random_split(train_set, (train_len, val_len))
        if len(self.train_set) == 0 or len(self.val_set) == 0:
            raise ValueError('No valid training data was provided to the train '
                             'command. Please add valid XML, line, or binary data.')
        logger.info(f'Training set {len(self.train_set)} lines, validation set '
                    f'{len(self.val_set)} lines, alphabet {len(train_set.alphabet)} '
                    'symbols')
        alpha_diff_only_train = set(self.train_set.dataset.alphabet).difference(set(self.val_set.dataset.alphabet))
        alpha_diff_only_val = set(self.val_set.dataset.alphabet).difference(set(self.train_set.dataset.alphabet))
        if alpha_diff_only_train:
            logger.warning(f'alphabet mismatch: chars in training set only: '
                           f'{alpha_diff_only_train} (not included in accuracy test '
                           'during training)')
        if alpha_diff_only_val:
            logger.warning(f'alphabet mismatch: chars in validation set only: {alpha_diff_only_val} (not trained)')
        logger.info('grapheme\tcount')
        for k, v in sorted(train_set.alphabet.items(), key=lambda x: x[1], reverse=True):
            char = make_printable(k)
            if char == k:
                char = '\t' + char
            logger.info(f'{char}\t{v}')
        if codec:
            logger.info('Instantiating codec')
            self.codec = PytorchCodec(codec)
            for k, v in self.codec.c2l.items():
                char = make_printable(k)
                if char == k:
                    char = '\t' + char
                logger.info(f'{char}\t{v}')
        else:
            self.codec = None
        logger.info('Encoding training set')
        self.val_cer = CharErrorRate()
        self.val_wer = WordErrorRate()
    def _build_dataset(self,
                       DatasetClass,
                       training_data,
                       **kwargs):
        """Instantiate `DatasetClass` and fill it with `training_data`.

        Parsing is distributed over a worker pool when more than one worker
        is configured (binary datasets are self-contained and skip parsing).
        """
        dataset = DatasetClass(normalization=self.hparams.normalization,
                               whitespace_normalization=self.hparams.normalize_whitespace,
                               reorder=self.reorder,
                               im_transforms=self.transforms,
                               augmentation=self.hparams.augment,
                               **kwargs)
        if (self.num_workers and self.num_workers > 1) and self.format_type != 'binary':
            with Pool(processes=self.num_workers) as pool:
                for im in pool.imap_unordered(partial(_star_fun, dataset.parse), training_data, 5):
                    logger.debug(f'Adding sample {im} to training set')
                    if im:
                        dataset.add(**im)
        else:
            for im in training_data:
                try:
                    dataset.add(**im)
                except KrakenInputException as e:
                    logger.warning(str(e))
        if self.format_type == 'binary' and self.hparams.normalization:
            logger.debug('Rebuilding dataset using unicode normalization')
            dataset.rebuild_alphabet()
        return dataset
    def forward(self, x, seq_lens=None):
        """Run the wrapped VGSL network on a (padded) input batch."""
        return self.net(x, seq_lens)
    def training_step(self, batch, batch_idx):
        """Compute the CTC loss for one training batch."""
        input, target = batch['image'], batch['target']
        # sequence batch
        if 'seq_lens' in batch:
            seq_lens, label_lens = batch['seq_lens'], batch['target_lens']
            target = (target, label_lens)
            o = self.net(input, seq_lens)
        else:
            o = self.net(input)
        seq_lens = o[1]
        output = o[0]
        target_lens = target[1]
        target = target[0]
        # height should be 1 by now
        if output.size(2) != 1:
            raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(output.size(2)))
        output = output.squeeze(2)
        # NCW -> WNC
        loss = self.nn.criterion(output.permute(2, 0, 1),  # type: ignore
                                 target,
                                 seq_lens,
                                 target_lens)
        self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=False, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        """Decode one validation batch and update CER/WER accumulators.

        Up to 16 sample images and predictions are logged to the experiment
        logger outside of sanity checking.
        """
        pred = self.rec_nn.predict_string(batch['image'], batch['seq_lens'])
        idx = 0
        decoded_targets = []
        # targets are flattened label sequences; slice them apart using the
        # per-sample lengths and decode with the validation codec
        for offset in batch['target_lens']:
            decoded_targets.append(''.join([x[0] for x in self.val_codec.decode([(x, 0, 0, 0) for x in batch['target'][idx:idx+offset]])]))
            idx += offset
        self.val_cer.update(pred, decoded_targets)
        self.val_wer.update(pred, decoded_targets)
        if self.logger and self.trainer.state.stage != 'sanity_check' and self.hparams.batch_size * batch_idx < 16:
            for i in range(self.hparams.batch_size):
                count = self.hparams.batch_size * batch_idx + i
                if count < 16:
                    self.logger.experiment.add_image(f'Validation #{count}, target: {decoded_targets[i]}', batch['image'][i], self.global_step, dataformats="CHW")
                    self.logger.experiment.add_text(f'Validation #{count}, target: {decoded_targets[i]}', pred[i], self.global_step)
    def on_validation_epoch_end(self):
        """Log character/word accuracy and track the best epoch by CER."""
        accuracy = 1.0 - self.val_cer.compute()
        word_accuracy = 1.0 - self.val_wer.compute()
        if accuracy > self.best_metric:
            logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {accuracy} ({self.current_epoch})')
            self.best_epoch = self.current_epoch
            self.best_metric = accuracy
        logger.info(f'validation run: total chars {self.val_cer.total} errors {self.val_cer.errors} accuracy {accuracy}')
        self.log('val_accuracy', accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_word_accuracy', word_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_metric', accuracy, on_step=False, on_epoch=True, prog_bar=False, logger=True)
        self.val_cer.reset()
        self.val_wer.reset()
    def setup(self, stage: Optional[str] = None):
        """Finalize the model before fitting.

        Encodes the datasets with the appropriate codec and, depending on
        configuration, appends to, resizes, or freshly creates the VGSL
        network. Must run after `__init__` as dataset alphabets are only
        known once the data has been added.
        """
        # finalize models in case of appending/loading
        if stage in [None, 'fit']:
            # Log a few sample images before the datasets are encoded.
            # This is only possible for Arrow datasets, because the
            # other dataset types can only be accessed after encoding
            if self.logger and isinstance(self.train_set.dataset, ArrowIPCRecognitionDataset) :
                for i in range(min(len(self.train_set), 16)):
                    idx = np.random.randint(len(self.train_set))
                    sample = self.train_set[idx]
                    self.logger.experiment.add_image(f'train_set sample #{i}: {sample["target"]}', sample['image'])
            if self.append:
                self.train_set.dataset.encode(self.codec)
                # now we can create a new model
                self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1)
                logger.info(f'Appending {self.spec} to existing model {self.nn.spec} after {self.append}')
                self.nn.append(self.append, self.spec)
                self.nn.add_codec(self.train_set.dataset.codec)
                logger.info(f'Assembled model spec: {self.nn.spec}')
            elif self.model:
                self.spec = self.nn.spec
                # prefer explicitly given codec over network codec if mode is 'new'
                codec = self.codec if (self.codec and self.resize == 'new') else self.nn.codec
                codec.strict = True
                try:
                    self.train_set.dataset.encode(codec)
                except KrakenEncodeException:
                    # training alphabet contains characters unknown to the
                    # codec; resolve according to the resize policy
                    alpha_diff = set(self.train_set.dataset.alphabet).difference(
                        set(codec.c2l.keys())
                    )
                    if self.resize == 'fail':
                        raise KrakenInputException(f'Training data and model codec alphabets mismatch: {alpha_diff}')
                    elif self.resize == 'union':
                        logger.info(f'Resizing codec to include '
                                    f'{len(alpha_diff)} new code points')
                        # Construct two codecs:
                        # 1. training codec containing only the vocabulary in the training dataset
                        # 2. validation codec = training codec + validation set vocabulary
                        # This keep the codec in the model from being 'polluted' by non-trained characters.
                        train_codec = codec.add_labels(alpha_diff)
                        self.nn.add_codec(train_codec)
                        logger.info(f'Resizing last layer in network to {train_codec.max_label+1} outputs')
                        self.nn.resize_output(train_codec.max_label + 1)
                        self.train_set.dataset.encode(train_codec)
                    elif self.resize == 'new':
                        logger.info(f'Resizing network or given codec to '
                                    f'{len(self.train_set.dataset.alphabet)} '
                                    f'code sequences')
                        # same codec procedure as above, just with merging.
                        self.train_set.dataset.encode(None)
                        train_codec, del_labels = codec.merge(self.train_set.dataset.codec)
                        # Switch codec.
                        self.nn.add_codec(train_codec)
                        logger.info(f'Deleting {len(del_labels)} output classes from network '
                                    f'({len(codec)-len(del_labels)} retained)')
                        self.nn.resize_output(train_codec.max_label + 1, del_labels)
                        self.train_set.dataset.encode(train_codec)
                    else:
                        raise ValueError(f'invalid resize parameter value {self.resize}')
                self.nn.codec.strict = False
                self.spec = self.nn.spec
            else:
                self.train_set.dataset.encode(self.codec)
                logger.info(f'Creating new model {self.spec} with {self.train_set.dataset.codec.max_label+1} outputs')
                self.spec = '[{} O1c{}]'.format(self.spec[1:-1], self.train_set.dataset.codec.max_label + 1)
                self.nn = vgsl.TorchVGSLModel(self.spec)
                # initialize weights
                self.nn.init_weights()
                self.nn.add_codec(self.train_set.dataset.codec)
            # validation codec extends the training codec with dummy labels
            # for characters only seen in the validation set
            val_diff = set(self.val_set.dataset.alphabet).difference(
                set(self.train_set.dataset.codec.c2l.keys())
            )
            logger.info(f'Adding {len(val_diff)} dummy labels to validation set codec.')
            val_codec = self.nn.codec.add_labels(val_diff)
            self.val_set.dataset.encode(val_codec)
            self.val_codec = val_codec
            if self.nn.one_channel_mode and self.train_set.dataset.im_mode != self.nn.one_channel_mode:
                logger.warning(f'Neural network has been trained on mode {self.nn.one_channel_mode} images, '
                               f'training set contains mode {self.train_set.dataset.im_mode} data. Consider setting `force_binarization`')
            if self.format_type != 'path' and self.nn.seg_type == 'bbox':
                logger.warning('Neural network has been trained on bounding box image information but training set is polygonal.')
            self.nn.hyper_params = self.hparams
            self.nn.model_type = 'recognition'
            if not self.nn.seg_type:
                logger.info(f'Setting seg_type to {self.train_set.dataset.seg_type}.')
                self.nn.seg_type = self.train_set.dataset.seg_type
            self.rec_nn = models.TorchSeqRecognizer(self.nn, train=None, device=None)
            self.net = self.nn.nn
            torch.set_num_threads(max(self.num_workers, 1))
    def train_dataloader(self):
        """Return a shuffled DataLoader over the training set."""
        return DataLoader(self.train_set,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.num_workers,
                          pin_memory=True,
                          shuffle=True,
                          collate_fn=collate_sequences)
    def val_dataloader(self):
        """Return a deterministic DataLoader over the validation set."""
        return DataLoader(self.val_set,
                          shuffle=False,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.num_workers,
                          pin_memory=True,
                          collate_fn=collate_sequences,
                          worker_init_fn=_validation_worker_init_fn)
    def configure_callbacks(self):
        """Attach early stopping on validation accuracy when requested."""
        callbacks = []
        if self.hparams.quit == 'early':
            callbacks.append(EarlyStopping(monitor='val_accuracy',
                                           mode='max',
                                           patience=self.hparams.lag,
                                           stopping_threshold=1.0))
        return callbacks
    # configuration of optimizers and learning rate schedulers
    # --------------------------------------------------------
    #
    # All schedulers are created internally with a frequency of step to enable
    # batch-wise learning rate warmup. In lr_scheduler_step() calls to the
    # scheduler are then only performed at the end of the epoch.
    def configure_optimizers(self):
        """Build optimizer and LR scheduler from the hyperparameters."""
        return _configure_optimizer_and_lr_scheduler(self.hparams,
                                                     self.nn.nn.parameters(),
                                                     len_train_set=len(self.train_set),
                                                     loss_tracking_mode='max')
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
        """Step the optimizer, applying linear LR warmup when configured."""
        # update params
        optimizer.step(closure=optimizer_closure)
        # linear warmup between 0 and the initial learning rate `lrate` in `warmup`
        # steps.
        if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup:
            lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup)
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * self.hparams.lrate
    def lr_scheduler_step(self, scheduler, metric):
        """Advance the LR scheduler outside of the warmup phase."""
        if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup:
            # step OneCycleLR each batch if not in warmup phase
            if isinstance(scheduler, lr_scheduler.OneCycleLR):
                scheduler.step()
            # step every other scheduler epoch-wise
            elif self.trainer.is_last_batch:
                if metric is None:
                    scheduler.step()
                else:
                    scheduler.step(metric)
class SegmentationModel(pl.LightningModule):
    """LightningModule bundling dataset preparation, class-mapping management,
    and the training/validation loops for a kraken page segmentation model.
    """
    def __init__(self,
                 hyper_params: Dict = None,
                 load_hyper_parameters: bool = False,
                 progress_callback: Callable[[str, int], Callable[[None], None]] = lambda string, length: lambda: None,
                 message: Callable[[str], None] = lambda *args, **kwargs: None,
                 output: str = 'model',
                 spec: str = default_specs.SEGMENTATION_SPEC,
                 model: Optional[Union[PathLike, str]] = None,
                 training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None,
                 evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None,
                 partition: Optional[float] = 0.9,
                 num_workers: int = 1,
                 force_binarization: bool = False,
                 format_type: Literal['path', 'alto', 'page', 'xml'] = 'path',
                 suppress_regions: bool = False,
                 suppress_baselines: bool = False,
                 valid_regions: Optional[Sequence[str]] = None,
                 valid_baselines: Optional[Sequence[str]] = None,
                 merge_regions: Optional[Dict[str, str]] = None,
                 merge_baselines: Optional[Dict[str, str]] = None,
                 bounding_regions: Optional[Sequence[str]] = None,
                 resize: Literal['fail', 'both', 'new', 'add', 'union'] = 'fail',
                 topline: Union[bool, None] = False):
        """
        A LightningModule encapsulating the training setup for a page
        segmentation model.
        Setup parameters (load, training_data, evaluation_data, ....) are
        named, model hyperparameters (everything in
        `kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS`) are in the
        `hyper_params` argument.
        Args:
            hyper_params (dict): Hyperparameter dictionary containing all fields
                                 from
                                 kraken.lib.default_specs.SEGMENTATION_HYPER_PARAMS
            **kwargs: Setup parameters, i.e. CLI parameters of the segtrain() command.
        """
        super().__init__()
        self.best_epoch = -1
        self.best_metric = 0.0
        self.best_model = None
        self.model = model
        self.num_workers = num_workers
        # map deprecated resize modes onto their current equivalents
        if resize == "add":
            resize = "union"
            warnings.warn("'add' value for resize has been deprecated. Use 'union' instead.", DeprecationWarning)
        elif resize == "both":
            resize = "new"
            warnings.warn("'both' value for resize has been deprecated. Use 'new' instead.", DeprecationWarning)
        self.resize = resize
        self.format_type = format_type
        self.output = output
        self.bounding_regions = bounding_regions
        self.topline = topline
        # User-supplied hyperparameters override the model's stored ones which
        # in turn override the package defaults.
        hyper_params_ = default_specs.SEGMENTATION_HYPER_PARAMS.copy()
        if model:
            logger.info(f'Loading existing model from {model}')
            self.nn = vgsl.TorchVGSLModel.load_model(model)
            if self.nn.model_type not in [None, 'segmentation']:
                raise ValueError(f'Model {model} is of type {self.nn.model_type} while `segmentation` is expected.')
            if load_hyper_parameters:
                hp = self.nn.hyper_params
            else:
                hp = {}
            hyper_params_.update(hp)
            batch, channels, height, width = self.nn.input
        else:
            self.nn = None
            spec = spec.strip()
            if spec[0] != '[' or spec[-1] != ']':
                raise ValueError(f'VGSL spec "{spec}" not bracketed')
            self.spec = spec
            blocks = spec[1:-1].split(' ')
            m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
            if not m:
                raise ValueError(f'Invalid input spec {blocks[0]}')
            batch, height, width, channels = [int(x) for x in m.groups()]
        if hyper_params:
            hyper_params_.update(hyper_params)
        validate_hyper_parameters(hyper_params_)
        self.save_hyperparameters(hyper_params_)
        if not training_data:
            raise ValueError('No training data provided. Please add some.')
        transforms = ImageInputTransforms(batch,
                                          height,
                                          width,
                                          channels,
                                          self.hparams.padding,
                                          valid_norm=False,
                                          force_binarization=force_binarization)
        # dummy input for lightning's model summary/tracing; fallback sizes
        # are used when the spec leaves height/width variable (0)
        self.example_input_array = torch.Tensor(batch,
                                                channels,
                                                height if height else 400,
                                                width if width else 300)
        # set multiprocessing tensor sharing strategy
        if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
            logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
            torch.multiprocessing.set_sharing_strategy('file_system')
        # normalize empty whitelists to None (no filtering) and apply the
        # suppress flags which take precedence over explicit whitelists
        if not valid_regions:
            valid_regions = None
        if not valid_baselines:
            valid_baselines = None
        if suppress_regions:
            valid_regions = []
            merge_regions = None
        if suppress_baselines:
            valid_baselines = []
            merge_baselines = None
        train_set = BaselineSet(training_data,
                                line_width=self.hparams.line_width,
                                im_transforms=transforms,
                                mode=format_type,
                                augmentation=self.hparams.augment,
                                valid_baselines=valid_baselines,
                                merge_baselines=merge_baselines,
                                valid_regions=valid_regions,
                                merge_regions=merge_regions)
        if format_type is None:
            for page in training_data:
                train_set.add(**page)
        if evaluation_data:
            val_set = BaselineSet(evaluation_data,
                                  line_width=self.hparams.line_width,
                                  im_transforms=transforms,
                                  mode=format_type,
                                  augmentation=False,
                                  valid_baselines=valid_baselines,
                                  merge_baselines=merge_baselines,
                                  valid_regions=valid_regions,
                                  merge_regions=merge_regions)
            if format_type is None:
                for page in evaluation_data:
                    val_set.add(**page)
            # wrap in Subset so downstream code can uniformly access the
            # underlying dataset via `.dataset`
            train_set = Subset(train_set, range(len(train_set)))
            val_set = Subset(val_set, range(len(val_set)))
        else:
            train_len = int(len(train_set)*partition)
            val_len = len(train_set) - train_len
            logger.info(f'No explicit validation data provided. Splitting off '
                        f'{val_len} (of {len(train_set)}) samples to validation '
                        'set.')
            train_set, val_set = random_split(train_set, (train_len, val_len))
        if len(train_set) == 0:
            raise ValueError('No valid training data provided. Please add some.')
        if len(val_set) == 0:
            raise ValueError('No valid validation data provided. Please add some.')
        # overwrite class mapping in validation set
        val_set.dataset.num_classes = train_set.dataset.num_classes
        val_set.dataset.class_mapping = train_set.dataset.class_mapping
        self.train_set = train_set
        self.val_set = val_set
    def forward(self, x):
        """Run the wrapped VGSL network on an input batch."""
        return self.nn.nn(x)
    def training_step(self, batch, batch_idx):
        """Compute the segmentation loss for one training batch."""
        input, target = batch['image'], batch['target']
        output, _ = self.nn.nn(input)
        # network output may be smaller than the target maps; upsample to match
        output = F.interpolate(output, size=(target.size(2), target.size(3)))
        loss = self.nn.criterion(output, target)
        self.log('train_loss', loss, on_step=True, on_epoch=False, prog_bar=False, logger=True)
        return loss
    def validation_step(self, batch, batch_idx):
        """Update pixel accuracy and IoU accumulators for one batch."""
        x, y = batch['image'], batch['target']
        pred, _ = self.nn.nn(x)
        # scale target to output size
        y = F.interpolate(y, size=(pred.size(2), pred.size(3))).int()
        self.val_px_accuracy.update(pred, y)
        self.val_mean_accuracy.update(pred, y)
        self.val_mean_iu.update(pred, y)
        self.val_freq_iu.update(pred, y)
    def on_validation_epoch_end(self):
        """Log validation metrics and track the best epoch by mean IoU."""
        pixel_accuracy = self.val_px_accuracy.compute()
        mean_accuracy = self.val_mean_accuracy.compute()
        mean_iu = self.val_mean_iu.compute()
        freq_iu = self.val_freq_iu.compute()
        if mean_iu > self.best_metric:
            logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {mean_iu} ({self.current_epoch})')
            self.best_epoch = self.current_epoch
            self.best_metric = mean_iu
        logger.info(f'validation run: accuracy {pixel_accuracy} mean_acc {mean_accuracy} mean_iu {mean_iu} freq_iu {freq_iu}')
        self.log('val_accuracy', pixel_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_mean_acc', mean_accuracy, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_mean_iu', mean_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_freq_iu', freq_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.log('val_metric', mean_iu, on_step=False, on_epoch=True, prog_bar=True, logger=True)
        self.val_px_accuracy.reset()
        self.val_mean_accuracy.reset()
        self.val_mean_iu.reset()
        self.val_freq_iu.reset()
    def setup(self, stage: Optional[str] = None):
        """Finalize the model before fitting.

        Creates a fresh network sized to the training class mapping, or
        reconciles an existing model's class mapping with the training data
        according to the resize policy, then instantiates the validation
        metrics for the final number of output classes.
        """
        # finalize models in case of appending/loading
        if stage in [None, 'fit']:
            if not self.model:
                self.spec = f'[{self.spec[1:-1]} O2l{self.train_set.dataset.num_classes}]'
                logger.info(f'Creating model {self.spec} with {self.train_set.dataset.num_classes} outputs')
                nn = vgsl.TorchVGSLModel(self.spec)
                if self.bounding_regions is not None:
                    nn.user_metadata['bounding_regions'] = self.bounding_regions
                nn.user_metadata['topline'] = self.topline
                self.nn = nn
            else:
                if self.train_set.dataset.class_mapping['baselines'].keys() != self.nn.user_metadata['class_mapping']['baselines'].keys() or \
                   self.train_set.dataset.class_mapping['regions'].keys() != self.nn.user_metadata['class_mapping']['regions'].keys():
                    bl_diff = set(self.train_set.dataset.class_mapping['baselines'].keys()).symmetric_difference(
                        set(self.nn.user_metadata['class_mapping']['baselines'].keys()))
                    regions_diff = set(self.train_set.dataset.class_mapping['regions'].keys()).symmetric_difference(
                        set(self.nn.user_metadata['class_mapping']['regions'].keys()))
                    if self.resize == 'fail':
                        # NOTE(review): message is missing its closing parenthesis
                        raise ValueError(f'Training data and model class mapping differ (bl: {bl_diff}, regions: {regions_diff}')
                    elif self.resize == 'union':
                        # add classes present in the training data but missing
                        # from the model; nothing is removed
                        new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys()
                        new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys()
                        cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1,
                                      max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1)
                        logger.info(f'Adding {len(new_bls) + len(new_regions)} missing types to network output layer.')
                        self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) + 1)
                        for c in new_bls:
                            cls_idx += 1
                            self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx
                        for c in new_regions:
                            cls_idx += 1
                            self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx
                    elif self.resize == 'new':
                        logger.info('Fitting network exactly to training set.')
                        new_bls = self.train_set.dataset.class_mapping['baselines'].keys() - self.nn.user_metadata['class_mapping']['baselines'].keys()
                        new_regions = self.train_set.dataset.class_mapping['regions'].keys() - self.nn.user_metadata['class_mapping']['regions'].keys()
                        del_bls = self.nn.user_metadata['class_mapping']['baselines'].keys() - self.train_set.dataset.class_mapping['baselines'].keys()
                        del_regions = self.nn.user_metadata['class_mapping']['regions'].keys() - self.train_set.dataset.class_mapping['regions'].keys()
                        logger.info(f'Adding {len(new_bls) + len(new_regions)} missing '
                                    f'types and removing {len(del_bls) + len(del_regions)} to network output layer ')
                        cls_idx = max(max(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else -1,
                                      max(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else -1)
                        del_indices = [self.nn.user_metadata['class_mapping']['baselines'][x] for x in del_bls]
                        del_indices.extend(self.nn.user_metadata['class_mapping']['regions'][x] for x in del_regions)
                        self.nn.resize_output(cls_idx + len(new_bls) + len(new_regions) -
                                              len(del_bls) - len(del_regions) + 1, del_indices)
                        # delete old baseline/region types
                        cls_idx = min(min(self.nn.user_metadata['class_mapping']['baselines'].values()) if self.nn.user_metadata['class_mapping']['baselines'] else np.inf,
                                      min(self.nn.user_metadata['class_mapping']['regions'].values()) if self.nn.user_metadata['class_mapping']['regions'] else np.inf)
                        bls = {}
                        for k, v in sorted(self.nn.user_metadata['class_mapping']['baselines'].items(), key=lambda item: item[1]):
                            if k not in del_bls:
                                bls[k] = cls_idx
                                cls_idx += 1
                        regions = {}
                        for k, v in sorted(self.nn.user_metadata['class_mapping']['regions'].items(), key=lambda item: item[1]):
                            if k not in del_regions:
                                regions[k] = cls_idx
                                cls_idx += 1
                        self.nn.user_metadata['class_mapping']['baselines'] = bls
                        self.nn.user_metadata['class_mapping']['regions'] = regions
                        # add new baseline/region types
                        cls_idx -= 1
                        for c in new_bls:
                            cls_idx += 1
                            self.nn.user_metadata['class_mapping']['baselines'][c] = cls_idx
                        for c in new_regions:
                            cls_idx += 1
                            self.nn.user_metadata['class_mapping']['regions'][c] = cls_idx
                    else:
                        raise ValueError(f'invalid resize parameter value {self.resize}')
                # backfill train_set/val_set mapping if key-equal as the actual
                # numbering in the train_set might be different
                self.train_set.dataset.class_mapping = self.nn.user_metadata['class_mapping']
                self.val_set.dataset.class_mapping = self.nn.user_metadata['class_mapping']
            # updates model's hyper params with user-defined ones
            self.nn.hyper_params = self.hparams
            # change topline/baseline switch
            loc = {None: 'centerline',
                   True: 'topline',
                   False: 'baseline'}
            if 'topline' not in self.nn.user_metadata:
                logger.warning(f'Setting baseline location to {loc[self.topline]} from unset model.')
            elif self.nn.user_metadata['topline'] != self.topline:
                from_loc = loc[self.nn.user_metadata['topline']]
                logger.warning(f'Changing baseline location from {from_loc} to {loc[self.topline]}.')
            self.nn.user_metadata['topline'] = self.topline
            logger.info('Training line types:')
            for k, v in self.train_set.dataset.class_mapping['baselines'].items():
                logger.info(f'  {k}\t{v}\t{self.train_set.dataset.class_stats["baselines"][k]}')
            logger.info('Training region types:')
            for k, v in self.train_set.dataset.class_mapping['regions'].items():
                logger.info(f'  {k}\t{v}\t{self.train_set.dataset.class_stats["regions"][k]}')
            if len(self.train_set) == 0:
                raise ValueError('No valid training data was provided to the train command. Please add valid XML data.')
            # set model type metadata field and dump class_mapping
            self.nn.model_type = 'segmentation'
            self.nn.user_metadata['class_mapping'] = self.val_set.dataset.class_mapping
            # for model size/trainable parameter output
            self.net = self.nn.nn
            torch.set_num_threads(max(self.num_workers, 1))
            # set up validation metrics after output classes have been determined
            self.val_px_accuracy = MultilabelAccuracy(average='micro', num_labels=self.train_set.dataset.num_classes)
            self.val_mean_accuracy = MultilabelAccuracy(average='macro', num_labels=self.train_set.dataset.num_classes)
            self.val_mean_iu = MultilabelJaccardIndex(average='macro', num_labels=self.train_set.dataset.num_classes)
            self.val_freq_iu = MultilabelJaccardIndex(average='weighted', num_labels=self.train_set.dataset.num_classes)
    def train_dataloader(self):
        """Return a shuffled single-sample DataLoader over the training set."""
        return DataLoader(self.train_set,
                          batch_size=1,
                          num_workers=self.num_workers,
                          shuffle=True,
                          pin_memory=True)
    def val_dataloader(self):
        """Return a deterministic single-sample DataLoader over the validation set."""
        return DataLoader(self.val_set,
                          shuffle=False,
                          batch_size=1,
                          num_workers=self.num_workers,
                          pin_memory=True)
    def configure_callbacks(self):
        """Attach early stopping on validation mean IoU when requested."""
        callbacks = []
        if self.hparams.quit == 'early':
            callbacks.append(EarlyStopping(monitor='val_mean_iu',
                                           mode='max',
                                           patience=self.hparams.lag,
                                           stopping_threshold=1.0))
        return callbacks
    # configuration of optimizers and learning rate schedulers
    # --------------------------------------------------------
    #
    # All schedulers are created internally with a frequency of step to enable
    # batch-wise learning rate warmup. In lr_scheduler_step() calls to the
    # scheduler are then only performed at the end of the epoch.
    def configure_optimizers(self):
        """Build optimizer and LR scheduler from the hyperparameters."""
        return _configure_optimizer_and_lr_scheduler(self.hparams,
                                                     self.nn.nn.parameters(),
                                                     len_train_set=len(self.train_set),
                                                     loss_tracking_mode='max')
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
        """Step the optimizer, applying linear LR warmup when configured."""
        # update params
        optimizer.step(closure=optimizer_closure)
        # linear warmup between 0 and the initial learning rate `lrate` in `warmup`
        # steps.
        if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup:
            lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup)
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * self.hparams.lrate
    def lr_scheduler_step(self, scheduler, metric):
        """Advance the LR scheduler outside of the warmup phase."""
        if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup:
            # step OneCycleLR each batch if not in warmup phase
            if isinstance(scheduler, lr_scheduler.OneCycleLR):
                scheduler.step()
            # step every other scheduler epoch-wise
            elif self.trainer.is_last_batch:
                scheduler.step()
def _configure_optimizer_and_lr_scheduler(hparams, params, len_train_set=None, loss_tracking_mode='max'):
    """
    Builds a pytorch-lightning style optimizer/LR scheduler configuration.

    Args:
        hparams: Hyper parameter namespace providing at least `optimizer`,
                 `lrate`, `momentum`, `weight_decay`, `schedule`, and
                 `completed_epochs`, plus whichever schedule-specific values
                 apply (`gamma`, `step_size`, `rop_factor`, `rop_patience`,
                 `epochs`).
        params: Iterable of parameters to optimize.
        len_train_set: Number of samples in the training set; required by the
                       `1cycle` schedule to compute steps per epoch.
        loss_tracking_mode: `min`/`max` mode for the metric monitored by the
                            `reduceonplateau` schedule.

    Returns:
        A dict with an `optimizer` key and, for any schedule other than
        `constant`, an `lr_scheduler` key.

    Raises:
        ValueError: if an unknown schedule is selected or `1cycle` is chosen
                    without a positive epoch count.
    """
    # XXX: Warmup is not configured here because it needs to be manually done in optimizer_step()
    logger.debug(f'Constructing {hparams.optimizer} optimizer (lr: {hparams.lrate}, momentum: {hparams.momentum})')
    if hparams.optimizer == 'Adam':
        optim = torch.optim.Adam(params, lr=hparams.lrate, weight_decay=hparams.weight_decay)
    else:
        # every other torch optimizer used here (SGD, RMSprop, ...) accepts a
        # momentum argument
        optim = getattr(torch.optim, hparams.optimizer)(params,
                                                        lr=hparams.lrate,
                                                        momentum=hparams.momentum,
                                                        weight_decay=hparams.weight_decay)
    lr_sched = {}
    if hparams.schedule == 'exponential':
        lr_sched = {'scheduler': lr_scheduler.ExponentialLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1),
                    'interval': 'step'}
    elif hparams.schedule == 'cosine':
        # NOTE(review): `gamma` is passed as CosineAnnealingLR's T_max
        # (half-period) argument here — confirm this reuse is intentional.
        lr_sched = {'scheduler': lr_scheduler.CosineAnnealingLR(optim, hparams.gamma, last_epoch=hparams.completed_epochs-1),
                    'interval': 'step'}
    elif hparams.schedule == 'step':
        lr_sched = {'scheduler': lr_scheduler.StepLR(optim, hparams.step_size, hparams.gamma, last_epoch=hparams.completed_epochs-1),
                    'interval': 'step'}
    elif hparams.schedule == 'reduceonplateau':
        lr_sched = {'scheduler': lr_scheduler.ReduceLROnPlateau(optim,
                                                                mode=loss_tracking_mode,
                                                                factor=hparams.rop_factor,
                                                                patience=hparams.rop_patience),
                    'interval': 'step'}
    elif hparams.schedule == '1cycle':
        if hparams.epochs <= 0:
            # the check also rejects exactly 0 epochs, so the message says
            # "not positive" rather than "less than 0"
            raise ValueError('1cycle learning rate scheduler selected but '
                             'number of epochs is not positive '
                             f'({hparams.epochs}).')
        last_epoch = hparams.completed_epochs*len_train_set if hparams.completed_epochs else -1
        lr_sched = {'scheduler': lr_scheduler.OneCycleLR(optim,
                                                         max_lr=hparams.lrate,
                                                         epochs=hparams.epochs,
                                                         steps_per_epoch=len_train_set,
                                                         last_epoch=last_epoch),
                    'interval': 'step'}
    elif hparams.schedule != 'constant':
        raise ValueError(f'Unsupported learning rate scheduler {hparams.schedule}.')
    ret = {'optimizer': optim}
    if lr_sched:
        ret['lr_scheduler'] = lr_sched
    if hparams.schedule == 'reduceonplateau':
        # ReduceLROnPlateau needs the monitored metric attached to the
        # scheduler config; strict=False avoids errors when the metric has
        # not been logged yet.
        lr_sched['monitor'] = 'val_metric'
        lr_sched['strict'] = False
        lr_sched['reduce_on_plateau'] = True
    return ret
| 57,490 | 49.742277 | 171 | py |
kraken | kraken-main/kraken/lib/dataset/utils.py | #
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import json
import torch
import numbers
import pkg_resources
import torch.nn.functional as F
from functools import partial
from torchvision import transforms
from collections import Counter
from typing import Dict, List, Tuple, Sequence, Any, Union
from kraken.lib.models import TorchSeqRecognizer
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.lineest import CenterNormalizer
from kraken.lib import functional_im_transforms as F_t
__all__ = ['ImageInputTransforms',
'collate_sequences']
import logging
logger = logging.getLogger(__name__)
class ImageInputTransforms(transforms.Compose):
    """
    Composed transform converting PIL input images into network-ready
    tensors.

    The concrete pipeline is derived from the (batch, height, width,
    channels) input spec and rebuilt by `_create_transforms` whenever one of
    the spec attributes is changed through its setter.
    """
    def __init__(self,
                 batch: int,
                 height: int,
                 width: int,
                 channels: int,
                 pad: Union[int, Tuple[int, int], Tuple[int, int, int, int]],
                 valid_norm: bool = True,
                 force_binarization: bool = False) -> None:
        """
        Container for image input transforms for recognition and segmentation
        networks.
        Args:
            batch: mini-batch size
            height: height of input image in pixels
            width: width of input image in pixels
            channels: color channels of input
            pad: Amount of padding on horizontal ends of image
            valid_norm: Enables/disables baseline normalization as a valid
                        preprocessing step. If disabled we will fall back to
                        standard scaling.
            force_binarization: Forces binarization of input images using the
                                nlbin algorithm.
        """
        super().__init__(None)
        self._scale = (height, width) # type: Tuple[int, int]
        self._valid_norm = valid_norm
        self._force_binarization = force_binarization
        self._batch = batch
        self._channels = channels
        # assigning through the property validates the value and triggers the
        # first _create_transforms() call
        self.pad = pad
        self._create_transforms()
    def _create_transforms(self) -> None:
        """
        (Re)builds the transform pipeline from the current spec attributes.

        Sets `_center_norm`, `_mode`, and the `transforms` list as side
        effects; raises KrakenInputException for unsupported spec
        combinations.
        """
        height = self._scale[0]
        width = self._scale[1]
        self._center_norm = False
        self._mode = 'RGB' if self._channels == 3 else 'L'
        # height 1 with >3 channels: the channels value encodes a fixed line
        # height; the image itself is handled as single-channel grayscale
        if height == 1 and width == 0 and self._channels > 3:
            perm = (1, 0, 2)
            self._scale = (self._channels, 0)
            self._channels = 1
            if self._valid_norm:
                self._center_norm = True
            self._mode = 'L'
        # fixed height, arbitrary width
        elif height > 1 and width == 0 and self._channels in (1, 3):
            perm = (0, 1, 2)
            if self._valid_norm and self._channels == 1:
                self._center_norm = True
        # fixed width, arbitrary height
        elif height == 0 and width > 1 and self._channels in (1, 3):
            perm = (0, 1, 2)
        # fixed height and width image => bicubic scaling of the input image, disable padding
        elif height > 0 and width > 0 and self._channels in (1, 3):
            perm = (0, 1, 2)
            self._pad = 0
        # no scaling at all; padding is disabled as well
        elif height == 0 and width == 0 and self._channels in (1, 3):
            perm = (0, 1, 2)
            self._pad = 0
        else:
            raise KrakenInputException(f'Invalid input spec {self._batch}, {height}, {width}, {self._channels}, {self._pad}.')
        if self._mode != 'L' and self._force_binarization:
            raise KrakenInputException(f'Invalid input spec {self._batch}, {height}, {width}, {self._channels}, {self._pad} in '
                                       'combination with forced binarization.')
        self.transforms = []
        # convert to the target color mode first
        self.transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=self._mode)))
        if self._force_binarization:
            self.transforms.append(transforms.Lambda(F_t.pil_to_bin))
        if self._scale != (0, 0):
            if self._center_norm:
                # dewarp to the baseline and renormalize the color mode
                lnorm = CenterNormalizer(self._scale[0])
                self.transforms.append(transforms.Lambda(partial(F_t.pil_dewarp, lnorm=lnorm)))
                self.transforms.append(transforms.Lambda(partial(F_t.pil_to_mode, mode=self._mode)))
            else:
                self.transforms.append(transforms.Lambda(partial(F_t.pil_fixed_resize, scale=self._scale)))
        if self._pad:
            # pad with white before tensor conversion/inversion
            self.transforms.append(transforms.Pad(self._pad, fill=255))
        self.transforms.append(transforms.ToTensor())
        # invert
        self.transforms.append(transforms.Lambda(F_t.tensor_invert))
        self.transforms.append(transforms.Lambda(partial(F_t.tensor_permute, perm=perm)))
    @property
    def batch(self) -> int:
        """
        Batch size attribute. Ignored.
        """
        return self._batch
    @batch.setter
    def batch(self, batch: int) -> None:
        self._batch = batch
    @property
    def channels(self) -> int:
        """
        Channels attribute. Can be either 1 (binary/grayscale), 3 (RGB).
        """
        # report 1 for the legacy fixed-line-height spec where the raw
        # channels value was folded into the scale
        if self._channels not in [1, 3] and self._scale[0] == self._channels:
            return 1
        else:
            return self._channels
    @channels.setter
    def channels(self, channels: int) -> None:
        self._channels = channels
        self._create_transforms()
    @property
    def height(self) -> int:
        """
        Desired output image height. If set to 0, image will be rescaled
        proportionally with width, if 1 and `channels` is larger than 3 output
        will be grayscale and of the height set with the channels attribute.
        """
        if self._scale == (1, 0) and self.channels > 3:
            return self._channels
        else:
            return self._scale[0]
    @height.setter
    def height(self, height: int) -> None:
        self._scale = (height, self.scale[1])
        self._create_transforms()
    @property
    def width(self) -> int:
        """
        Desired output image width. If set to 0, image will be rescaled
        proportionally with height.
        """
        return self._scale[1]
    @width.setter
    def width(self, width: int) -> None:
        self._scale = (self._scale[0], width)
        self._create_transforms()
    @property
    def mode(self) -> str:
        """
        Imaginary PIL.Image.Image mode of the output tensor. Possible values
        are RGB, L, and 1.
        """
        return self._mode if not self.force_binarization else '1'
    @property
    def scale(self) -> Tuple[int, int]:
        """
        Desired output shape (height, width) of the image. If any value is set
        to 0, image will be rescaled proportionally with height, width, if 1
        and `channels` is larger than 3 output will be grayscale and of the
        height set with the channels attribute.
        """
        if self._scale == (1, 0) and self.channels > 3:
            return (self._channels, self._scale[1])
        else:
            return self._scale
    @scale.setter
    def scale(self, scale: Tuple[int, int]) -> None:
        self._scale = scale
        self._create_transforms()
    @property
    def pad(self) -> int:
        """
        Amount of padding around left/right end of image.
        """
        return self._pad
    @pad.setter
    def pad(self, pad: Union[int, Tuple[int, int], Tuple[int, int, int, int]]) -> None:
        if not isinstance(pad, (numbers.Number, tuple, list)):
            raise TypeError('Got inappropriate padding arg')
        self._pad = pad
        self._create_transforms()
    @property
    def valid_norm(self) -> bool:
        """
        Switch allowing/disallowing centerline normalization. Even if enabled
        won't be applied to 3-channel images.
        """
        return self._valid_norm
    @valid_norm.setter
    def valid_norm(self, valid_norm: bool) -> None:
        self._valid_norm = valid_norm
        self._create_transforms()
    @property
    def centerline_norm(self) -> bool:
        """
        Attribute indicating if centerline normalization will be applied to
        input images.
        """
        return self._center_norm
    @property
    def force_binarization(self) -> bool:
        """
        Switch enabling/disabling forced binarization.
        """
        return self._force_binarization
    @force_binarization.setter
    def force_binarization(self, force_binarization: bool) -> None:
        self._force_binarization = force_binarization
        self._create_transforms()
def global_align(seq1: Sequence[Any], seq2: Sequence[Any]) -> Tuple[int, List[str], List[str]]:
    """
    Computes a global alignment of two strings.

    Args:
        seq1 (Sequence[Any]):
        seq2 (Sequence[Any]):

    Returns a tuple (distance, list(algn1), list(algn2))
    """
    rows = len(seq1) + 1
    cols = len(seq2) + 1
    # edit-distance cost matrix with unit insert/delete costs on the borders
    cost = [[0] * cols for _ in range(rows)]
    for r in range(1, rows):
        cost[r][0] = r
    for c in range(1, cols):
        cost[0][c] = c
    # predecessor matrix for the backtrace; the top-left corner gets the
    # sentinel (-1, 0) which terminates the walk
    direction = [[(0, 0)] * cols for _ in range(rows)]
    direction[0] = [(0, c) for c in range(-1, cols - 1)]
    for r in range(rows):
        direction[r][0] = (r - 1, 0)
    for r in range(1, rows):
        for c in range(1, cols):
            # candidate order (delete, insert, substitute) fixes tie-breaking
            candidates = (((r - 1, c), cost[r - 1][c] + 1),
                          ((r, c - 1), cost[r][c - 1] + 1),
                          ((r - 1, c - 1), cost[r - 1][c - 1] + (seq1[r - 1] != seq2[c - 1])))
            origin, best = min(candidates, key=lambda x: x[1])
            cost[r][c] = best
            direction[r][c] = origin
    d = cost[-1][-1]
    # walk the predecessor chain back to the origin, emitting aligned pairs
    algn1: List[Any] = []
    algn2: List[Any] = []
    r = rows - 1
    c = cols - 1
    while direction[r][c] != (-1, 0):
        pr, pc = direction[r][c]
        if pr == r - 1 and pc == c - 1:
            algn1.insert(0, seq1[r - 1])
            algn2.insert(0, seq2[c - 1])
        elif pr < r:
            algn1.insert(0, seq1[r - 1])
            algn2.insert(0, '')
        elif pc < c:
            algn1.insert(0, '')
            algn2.insert(0, seq2[c - 1])
        r, c = pr, pc
    return d, algn1, algn2
def compute_confusions(algn1: Sequence[str], algn2: Sequence[str]):
    """
    Compute confusion matrices from two globally aligned strings.

    Args:
        align1 (Sequence[str]): sequence 1
        align2 (Sequence[str]): sequence 2

    Returns:
        A tuple (counts, scripts, ins, dels, subs) with `counts` being per-character
        confusions, `scripts` per-script counts, `ins` a dict with per script
        insertions, `del` an integer of the number of deletions, `subs` per
        script substitutions.
    """
    # script ranges are shipped as package data
    with pkg_resources.resource_stream(__name__, 'scripts.json') as fp:
        script_map = json.load(fp)

    def _get_script(char):
        for start, end, name in script_map:
            if ord(char) == start or (end and start <= ord(char) <= end):
                return name
        return 'Unknown'

    # per-(reference, hypothesis) character pair frequencies
    counts: Dict[Tuple[str, str], int] = Counter(zip(algn1, algn2))
    scripts: Dict[Tuple[str, str], int] = Counter()
    ins: Dict[Tuple[str, str], int] = Counter()
    dels: int = 0
    subs: Dict[Tuple[str, str], int] = Counter()
    for (ref, hyp), freq in counts.items():
        if ref == '':
            dels += freq
            continue
        script = _get_script(ref)
        scripts[script] += freq
        if hyp == '':
            ins[script] += freq
        elif ref != hyp:
            subs[script] += freq
    return counts, scripts, ins, dels, subs
def collate_sequences(batch):
    """
    Sorts and pads sequences.
    """
    # widest sample first; everything is right-padded to its width
    by_width = sorted(batch, key=lambda sample: sample['image'].shape[2], reverse=True)
    widths = torch.LongTensor([sample['image'].shape[2] for sample in by_width])
    max_width = by_width[0]['image'].shape[2]
    images = torch.stack([F.pad(sample['image'], pad=(0, max_width - sample['image'].shape[2]))
                          for sample in by_width])
    if isinstance(by_width[0]['target'], str):
        # unencoded targets stay a list of strings
        targets = [sample['target'] for sample in by_width]
    else:
        targets = torch.cat([sample['target'] for sample in by_width]).long()
    target_lens = torch.LongTensor([len(sample['target']) for sample in by_width])
    return {'image': images, 'target': targets, 'seq_lens': widths, 'target_lens': target_lens}
| 12,846 | 34.00545 | 128 | py |
kraken | kraken-main/kraken/lib/dataset/segmentation.py | #
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import json
import torch
import traceback
import numpy as np
import torch.nn.functional as F
import shapely.geometry as geom
from os import path, PathLike
from PIL import Image
from shapely.ops import split
from itertools import groupby
from torchvision import transforms
from collections import defaultdict
from torch.utils.data import Dataset
from typing import Dict, List, Tuple, Sequence, Callable, Any, Union, Literal, Optional
from skimage.draw import polygon
from kraken.lib.xml import parse_alto, parse_page, parse_xml
from kraken.lib.exceptions import KrakenInputException
__all__ = ['BaselineSet']
import logging
logger = logging.getLogger(__name__)
class BaselineSet(Dataset):
    """
    Dataset for training a baseline/region segmentation model.
    """
    def __init__(self, imgs: Sequence[Union[PathLike, str]] = None,
                 suffix: str = '.path',
                 line_width: int = 4,
                 padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
                 im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
                 mode: Optional[Literal['path', 'alto', 'page', 'xml']] = 'path',
                 augmentation: bool = False,
                 valid_baselines: Sequence[str] = None,
                 merge_baselines: Dict[str, Sequence[str]] = None,
                 valid_regions: Sequence[str] = None,
                 merge_regions: Dict[str, Sequence[str]] = None):
        """
        Reads a list of image-json pairs and creates a data set.
        Args:
            imgs:
            suffix: Suffix to attach to image base name to load JSON files
                    from.
            line_width: Height of the baseline in the scaled input.
            padding: Tuple of ints containing the left/right, top/bottom
                     padding of the input images.
            target_size: Target size of the image as a (height, width) tuple.
            mode: Either path, alto, page, xml, or None. In alto, page, and xml
                  mode the baseline paths and image data is retrieved from an
                  ALTO/PageXML file. In `None` mode data is iteratively added
                  through the `add` method.
            augmentation: Enable/disable augmentation.
            valid_baselines: Sequence of valid baseline identifiers. If `None`
                             all are valid.
            merge_baselines: Sequence of baseline identifiers to merge. Note
                             that merging occurs after entities not in valid_*
                             have been discarded.
            valid_regions: Sequence of valid region identifiers. If `None` all
                           are valid.
            merge_regions: Sequence of region identifiers to merge. Note that
                           merging occurs after entities not in valid_* have
                           been discarded.
        """
        super().__init__()
        self.mode = mode
        self.im_mode = '1'
        self.pad = padding
        self.aug = None
        self.targets = []
        # n-th entry contains semantic of n-th class
        self.class_mapping = {'aux': {'_start_separator': 0, '_end_separator': 1}, 'baselines': {}, 'regions': {}}
        # keep track of samples that failed to load
        self.failed_samples = set()
        self.class_stats = {'baselines': defaultdict(int), 'regions': defaultdict(int)}
        self.num_classes = 2
        self.mbl_dict = merge_baselines if merge_baselines is not None else {}
        self.mreg_dict = merge_regions if merge_regions is not None else {}
        self.valid_baselines = valid_baselines
        self.valid_regions = valid_regions
        if mode in ['alto', 'page', 'xml']:
            if mode == 'alto':
                fn = parse_alto
            elif mode == 'page':
                fn = parse_page
            elif mode == 'xml':
                fn = parse_xml
            im_paths = []
            self.targets = []
            for img in imgs:
                try:
                    data = fn(img)
                    im_paths.append(data['image'])
                    lines = defaultdict(list)
                    for line in data['lines']:
                        if valid_baselines is None or set(line['tags'].values()).intersection(valid_baselines):
                            tags = set(line['tags'].values()).intersection(valid_baselines) if valid_baselines else line['tags'].values()
                            for tag in tags:
                                lines[self.mbl_dict.get(tag, tag)].append(line['baseline'])
                                self.class_stats['baselines'][self.mbl_dict.get(tag, tag)] += 1
                    regions = defaultdict(list)
                    for k, v in data['regions'].items():
                        if valid_regions is None or k in valid_regions:
                            regions[self.mreg_dict.get(k, k)].extend(v)
                            self.class_stats['regions'][self.mreg_dict.get(k, k)] += len(v)
                    data['regions'] = regions
                    self.targets.append({'baselines': lines, 'regions': data['regions']})
                except KrakenInputException as e:
                    logger.warning(e)
                    continue
            # get line types
            imgs = im_paths
            # calculate class mapping
            line_types = set()
            region_types = set()
            for page in self.targets:
                for line_type in page['baselines'].keys():
                    line_types.add(line_type)
                for reg_type in page['regions'].keys():
                    region_types.add(reg_type)
            idx = -1
            for idx, line_type in enumerate(line_types):
                self.class_mapping['baselines'][line_type] = idx + self.num_classes
            self.num_classes += idx + 1
            idx = -1
            for idx, reg_type in enumerate(region_types):
                self.class_mapping['regions'][reg_type] = idx + self.num_classes
            self.num_classes += idx + 1
        elif mode == 'path':
            pass
        elif mode is None:
            imgs = []
        else:
            raise Exception('invalid dataset mode')
        if augmentation:
            import cv2
            cv2.setNumThreads(0)
            from albumentations import (
                Compose, ToFloat, OneOf, MotionBlur, MedianBlur, Blur,
                ShiftScaleRotate, OpticalDistortion, ElasticTransform,
                HueSaturationValue,
            )
            self.aug = Compose([
                ToFloat(),
                OneOf([
                    MotionBlur(p=0.2),
                    MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                ], p=0.2),
                ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
                OneOf([
                    OpticalDistortion(p=0.3),
                    ElasticTransform(p=0.1),
                ], p=0.2),
                HueSaturationValue(hue_shift_limit=20, sat_shift_limit=0.1, val_shift_limit=0.1, p=0.3),
            ], p=0.5)
        self.imgs = imgs
        self.line_width = line_width
        self.transforms = im_transforms
        self.seg_type = None
    def add(self,
            image: Union[PathLike, str, Image.Image],
            baselines: List[List[List[Tuple[int, int]]]] = None,
            regions: Dict[str, List[List[Tuple[int, int]]]] = None,
            *args,
            **kwargs):
        """
        Adds a page to the dataset.
        Args:
            im: Path to the whole page image
            baseline: A list containing dicts with a list of coordinates
                      and tags [{'baseline': [[x0, y0], ...,
                      [xn, yn]], 'tags': ('script_type',)}, ...]
            regions: A dict containing list of lists of coordinates
                     {'region_type_0': [[x0, y0], ..., [xn, yn]]],
                     'region_type_1': ...}.
        """
        if self.mode:
            raise Exception(f'The `add` method is incompatible with dataset mode {self.mode}')
        # NOTE(review): unlike the XML parsing path in __init__, baseline tags
        # are not remapped through self.mbl_dict here — confirm whether merge
        # dicts are supposed to apply to manually added pages as well.
        baselines_ = defaultdict(list)
        for line in baselines:
            if self.valid_baselines is None or set(line['tags'].values()).intersection(self.valid_baselines):
                tags = set(line['tags'].values()).intersection(self.valid_baselines) if self.valid_baselines else line['tags'].values()
                for tag in tags:
                    baselines_[tag].append(line['baseline'])
                    self.class_stats['baselines'][tag] += 1
                    if tag not in self.class_mapping['baselines']:
                        self.num_classes += 1
                        self.class_mapping['baselines'][tag] = self.num_classes - 1
        regions_ = defaultdict(list)
        for k, v in regions.items():
            reg_type = self.mreg_dict.get(k, k)
            if self.valid_regions is None or reg_type in self.valid_regions:
                regions_[reg_type].extend(v)
                # fix: region counts were previously accumulated into the
                # 'baselines' statistics bucket, leaving class_stats['regions']
                # empty for manually built datasets (cf. the XML path above).
                self.class_stats['regions'][reg_type] += len(v)
                if reg_type not in self.class_mapping['regions']:
                    self.num_classes += 1
                    self.class_mapping['regions'][reg_type] = self.num_classes - 1
        self.targets.append({'baselines': baselines_, 'regions': regions_})
        self.imgs.append(image)
    def __getitem__(self, idx):
        """
        Returns a {'image': tensor, 'target': tensor} sample; on a loading
        failure the sample is recorded in `failed_samples` and a random
        replacement is returned instead.
        """
        im = self.imgs[idx]
        if self.mode != 'path':
            target = self.targets[idx]
        else:
            with open('{}.path'.format(path.splitext(im)[0]), 'r') as fp:
                target = json.load(fp)
        if not isinstance(im, Image.Image):
            try:
                logger.debug(f'Attempting to load {im}')
                im = Image.open(im)
                im, target = self.transform(im, target)
                return {'image': im, 'target': target}
            except Exception:
                self.failed_samples.add(idx)
                idx = np.random.randint(0, len(self.imgs))
                logger.debug(traceback.format_exc())
                logger.info(f'Failed. Replacing with sample {idx}')
                return self[idx]
        im, target = self.transform(im, target)
        return {'image': im, 'target': target}
    @staticmethod
    def _get_ortho_line(lineseg, point, line_width, offset):
        """
        Returns a short line segment orthogonal to `lineseg`, shifted by
        `line_width` along the segment direction ('l' moves backwards).
        """
        lineseg = np.array(lineseg)
        norm_vec = lineseg[1, ...] - lineseg[0, ...]
        norm_vec_len = np.sqrt(np.sum(norm_vec**2))
        unit_vec = norm_vec / norm_vec_len
        ortho_vec = unit_vec[::-1] * ((1, -1), (-1, 1))
        if offset == 'l':
            point -= unit_vec * line_width
        else:
            point += unit_vec * line_width
        return (ortho_vec * 10 + point).astype('int').tolist()
    def transform(self, image, target):
        """
        Applies the image transforms and rasterizes the vector targets into a
        (num_classes, H, W) pixel label tensor.
        """
        orig_size = image.size
        image = self.transforms(image)
        # NOTE(review): the scale factor subtracts pad[1] from the width while
        # the target tensor below subtracts pad[0] from it — confirm the pad
        # component ordering (irrelevant for the default (0, 0, 0, 0)).
        scale = (image.shape[2] - 2*self.pad[1])/orig_size[0]
        t = torch.zeros((self.num_classes,) + tuple(np.subtract(image.shape[1:], (2*self.pad[1], 2*self.pad[0]))))
        start_sep_cls = self.class_mapping['aux']['_start_separator']
        end_sep_cls = self.class_mapping['aux']['_end_separator']
        for key, lines in target['baselines'].items():
            try:
                cls_idx = self.class_mapping['baselines'][key]
            except KeyError:
                # skip lines of classes not present in the training set
                continue
            for line in lines:
                # buffer out line to desired width
                line = [k for k, g in groupby(line)]
                line = np.array(line)*scale
                shp_line = geom.LineString(line)
                split_offset = min(5, shp_line.length/2)
                line_pol = np.array(shp_line.buffer(self.line_width/2, cap_style=2).boundary.coords, dtype=int)
                rr, cc = polygon(line_pol[:, 1], line_pol[:, 0], shape=image.shape[1:])
                t[cls_idx, rr, cc] = 1
                split_pt = shp_line.interpolate(split_offset).buffer(0.001)
                # mark the start separator region and clear it from the
                # baseline class
                start_sep = np.array((split(shp_line, split_pt).geoms[0].buffer(self.line_width,
                                                                               cap_style=3).boundary.coords), dtype=int)
                rr_s, cc_s = polygon(start_sep[:, 1], start_sep[:, 0], shape=image.shape[1:])
                t[start_sep_cls, rr_s, cc_s] = 1
                t[start_sep_cls, rr, cc] = 0
                split_pt = shp_line.interpolate(-split_offset).buffer(0.001)
                # same for the end separator
                end_sep = np.array((split(shp_line, split_pt).geoms[-1].buffer(self.line_width,
                                                                              cap_style=3).boundary.coords), dtype=int)
                rr_s, cc_s = polygon(end_sep[:, 1], end_sep[:, 0], shape=image.shape[1:])
                t[end_sep_cls, rr_s, cc_s] = 1
                t[end_sep_cls, rr, cc] = 0
        for key, regions in target['regions'].items():
            try:
                cls_idx = self.class_mapping['regions'][key]
            except KeyError:
                # skip regions of classes not present in the training set
                continue
            for region in regions:
                region = np.array(region)*scale
                rr, cc = polygon(region[:, 1], region[:, 0], shape=image.shape[1:])
                t[cls_idx, rr, cc] = 1
        target = F.pad(t, self.pad)
        if self.aug:
            image = image.permute(1, 2, 0).numpy()
            target = target.permute(1, 2, 0).numpy()
            o = self.aug(image=image, mask=target)
            image = torch.tensor(o['image']).permute(2, 0, 1)
            target = torch.tensor(o['mask']).permute(2, 0, 1)
        return image, target
    def __len__(self):
        """Number of pages in the dataset."""
        return len(self.imgs)
| 14,843 | 44.673846 | 137 | py |
kraken | kraken-main/kraken/lib/dataset/recognition.py | #
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Utility functions for data loading and training of VGSL networks.
"""
import io
import json
import torch
import traceback
import numpy as np
import pyarrow as pa
from PIL import Image
from os import PathLike
from functools import partial
from torchvision import transforms
from collections import Counter
from torch.utils.data import Dataset
from typing import Dict, List, Tuple, Callable, Optional, Any, Union, Literal
from kraken.lib.util import is_bitonal
from kraken.lib.codec import PytorchCodec
from kraken.lib.segmentation import extract_polygons
from kraken.lib.exceptions import KrakenInputException, KrakenEncodeException
from kraken.lib import functional_im_transforms as F_t
__all__ = ['DefaultAugmenter',
'ArrowIPCRecognitionDataset',
'PolygonGTDataset',
'GroundTruthDataset']
import logging
logger = logging.getLogger(__name__)
class DefaultAugmenter():
    """
    Default augmentation pipeline for recognition training: pixel dropout,
    blur variants, a slight shift/scale/rotate, and optical/elastic
    distortions, applied with an overall probability of 0.5.
    """
    def __init__(self):
        # imported lazily so cv2/albumentations are only required when
        # augmentation is actually enabled
        import cv2
        cv2.setNumThreads(0)
        from albumentations import (
            Compose, ToFloat, OneOf, MotionBlur, MedianBlur, Blur,
            ShiftScaleRotate, OpticalDistortion, ElasticTransform,
            PixelDropout
        )
        self._transforms = Compose([
            ToFloat(),
            PixelDropout(p=0.2),
            OneOf([
                MotionBlur(p=0.2),
                MedianBlur(blur_limit=3, p=0.1),
                Blur(blur_limit=3, p=0.1),
            ], p=0.2),
            ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=1, p=0.2),
            OneOf([
                OpticalDistortion(p=0.3),
                ElasticTransform(alpha=64, sigma=25, alpha_affine=0.25, p=0.1),
            ], p=0.2),
        ], p=0.5)
    def __call__(self, image):
        """
        Augments a single image array; returns the albumentations result dict
        with the augmented image under the 'image' key.
        """
        return self._transforms(image=image)
class ArrowIPCRecognitionDataset(Dataset):
"""
Dataset for training a recognition model from a precompiled dataset in
Arrow IPC format.
"""
def __init__(self,
normalization: Optional[str] = None,
whitespace_normalization: bool = True,
skip_empty_lines: bool = True,
reorder: Union[bool, Literal['L', 'R']] = True,
im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
augmentation: bool = False,
split_filter: Optional[str] = None) -> None:
"""
Creates a dataset for a polygonal (baseline) transcription model.
Args:
normalization: Unicode normalization for gt
whitespace_normalization: Normalizes unicode whitespace and strips
whitespace.
skip_empty_lines: Whether to return samples without text.
reorder: Whether to rearrange code points in "display"/LTR order.
Set to L|R to change the default text direction.
im_transforms: Function taking an PIL.Image and returning a tensor
suitable for forward passes.
augmentation: Enables augmentation.
split_filter: Enables filtering of the dataset according to mask
values in the set split. If set to `None` all rows
are sampled, if set to `train`, `validation`, or
`test` only rows with the appropriate flag set in the
file will be considered.
"""
self.alphabet = Counter() # type: Counter
self.text_transforms = [] # type: List[Callable[[str], str]]
self.failed_samples = set()
self.transforms = im_transforms
self.aug = None
self._split_filter = split_filter
self._num_lines = 0
self.arrow_table = None
self.codec = None
self.skip_empty_lines = skip_empty_lines
self.seg_type = None
# built text transformations
if normalization:
self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
if whitespace_normalization:
self.text_transforms.append(F_t.text_whitespace_normalize)
if reorder:
if reorder in ('L', 'R'):
self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder))
else:
self.text_transforms.append(F_t.text_reorder)
if augmentation:
self.aug = DefaultAugmenter()
self.im_mode = self.transforms.mode
def add(self, file: Union[str, PathLike]) -> None:
"""
Adds an Arrow IPC file to the dataset.
Args:
file: Location of the precompiled dataset file.
"""
# extract metadata and update alphabet
with pa.memory_map(file, 'rb') as source:
ds_table = pa.ipc.open_file(source).read_all()
raw_metadata = ds_table.schema.metadata
if not raw_metadata or b'lines' not in raw_metadata:
raise ValueError(f'{file} does not contain a valid metadata record.')
metadata = json.loads(raw_metadata[b'lines'])
if metadata['type'] == 'kraken_recognition_baseline':
if not self.seg_type:
self.seg_type = 'baselines'
if self.seg_type != 'baselines':
raise ValueError(f'File {file} has incompatible type {metadata["type"]} for dataset with type {self.seg_type}.')
elif metadata['type'] == 'kraken_recognition_bbox':
if not self.seg_type:
self.seg_type = 'bbox'
if self.seg_type != 'bbox':
raise ValueError(f'File {file} has incompatible type {metadata["type"]} for dataset with type {self.seg_type}.')
else:
raise ValueError(f'Unknown type {metadata["type"]} of dataset.')
if self._split_filter and metadata['counts'][self._split_filter] == 0:
logger.warning(f'No explicit split for "{self._split_filter}" in dataset {file} (with splits {metadata["counts"].items()}).')
return
if metadata['im_mode'] > self.im_mode and self.transforms.mode >= metadata['im_mode']:
logger.info(f'Upgrading "im_mode" from {self.im_mode} to {metadata["im_mode"]}.')
self.im_mode = metadata['im_mode']
# centerline normalize raw bbox dataset
if self.seg_type == 'bbox' and metadata['image_type'] == 'raw':
self.transforms.valid_norm = True
self.alphabet.update(metadata['alphabet'])
num_lines = metadata['counts'][self._split_filter] if self._split_filter else metadata['counts']['all']
if self._split_filter:
ds_table = ds_table.filter(ds_table.column(self._split_filter))
if self.skip_empty_lines:
logger.debug('Getting indices of empty lines after text transformation.')
self.skip_empty_lines = False
mask = np.ones(len(ds_table), dtype=bool)
for index in range(len(ds_table)):
try:
text = self._apply_text_transform(ds_table.column('lines')[index].as_py(),)
except KrakenInputException:
mask[index] = False
continue
num_lines = np.count_nonzero(mask)
logger.debug(f'Filtering out {np.count_nonzero(~mask)} empty lines')
if np.any(~mask):
ds_table = ds_table.filter(pa.array(mask))
self.skip_empty_lines = True
if not self.arrow_table:
self.arrow_table = ds_table
else:
self.arrow_table = pa.concat_tables([self.arrow_table, ds_table])
self._num_lines += num_lines
def rebuild_alphabet(self):
"""
Recomputes the alphabet depending on the given text transformation.
"""
self.alphabet = Counter()
for index in range(len(self)):
try:
text = self._apply_text_transform(self.arrow_table.column('lines')[index].as_py(),)
self.alphabet.update(text)
except KrakenInputException:
continue
def _apply_text_transform(self, sample) -> str:
"""
Applies text transform to a sample.
"""
text = sample['text']
for func in self.text_transforms:
text = func(text)
if not text:
logger.debug(f'Text line "{sample["text"]}" is empty after transformations')
if not self.skip_empty_lines:
raise KrakenInputException('empty text line')
return text
def encode(self, codec: Optional[PytorchCodec] = None) -> None:
"""
Adds a codec to the dataset.
"""
if codec:
self.codec = codec
logger.info(f'Trying to encode dataset with codec {codec}')
for index in range(self._num_lines):
try:
text = self._apply_text_transform(
self.arrow_table.column('lines')[index].as_py(),
)
self.codec.encode(text)
except KrakenEncodeException as e:
raise e
except KrakenInputException:
pass
else:
self.codec = PytorchCodec(''.join(self.alphabet.keys()))
    def no_encode(self) -> None:
        """
        Creates an unencoded dataset.

        Arrow-backed datasets produce targets lazily in ``__getitem__``: if no
        codec has been set, ``__getitem__`` returns the raw transformed text,
        so nothing has to be done here.
        """
        pass
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
try:
sample = self.arrow_table.column('lines')[index].as_py()
logger.debug(f'Loading sample {index}')
im = Image.open(io.BytesIO(sample['im']))
im = self.transforms(im)
if self.aug:
im = im.permute((1, 2, 0)).numpy()
o = self.aug(image=im)
im = torch.tensor(o['image'].transpose(2, 0, 1))
text = self._apply_text_transform(sample)
except Exception:
self.failed_samples.add(index)
idx = np.random.randint(0, len(self))
logger.debug(traceback.format_exc())
logger.info(f'Failed. Replacing with sample {idx}')
return self[idx]
return {'image': im, 'target': self.codec.encode(text) if self.codec is not None else text}
    def __len__(self) -> int:
        # Number of lines surviving split filtering and empty-line masking,
        # as accumulated by `add()`.
        return self._num_lines
class PolygonGTDataset(Dataset):
    """
    Dataset for training a line recognition model from polygonal/baseline data.

    Lines are stored as (page image, baseline, boundary) triples; the actual
    line image is only extracted and dewarped lazily in ``__getitem__``.
    """
    def __init__(self,
                 normalization: Optional[str] = None,
                 whitespace_normalization: bool = True,
                 skip_empty_lines: bool = True,
                 reorder: Union[bool, Literal['L', 'R']] = True,
                 im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
                 augmentation: bool = False) -> None:
        """
        Creates a dataset for a polygonal (baseline) transcription model.

        Args:
            normalization: Unicode normalization for gt
            whitespace_normalization: Normalizes unicode whitespace and strips
                                      whitespace.
            skip_empty_lines: Whether to return samples without text.
            reorder: Whether to rearrange code points in "display"/LTR order.
                     Set to L|R to change the default text direction.
            im_transforms: Function taking an PIL.Image and returning a tensor
                           suitable for forward passes.
            augmentation: Enables augmentation.
        """
        self._images = []  # type: Union[List[Image], List[torch.Tensor]]
        self._gt = []  # type: List[str]
        self.alphabet = Counter()  # type: Counter
        self.text_transforms = []  # type: List[Callable[[str], str]]
        self.transforms = im_transforms
        self.aug = None
        self.skip_empty_lines = skip_empty_lines
        self.failed_samples = set()
        self.seg_type = 'baselines'
        # build the text transformation pipeline; transforms are applied in
        # insertion order in parse()
        if normalization:
            self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
        if whitespace_normalization:
            self.text_transforms.append(F_t.text_whitespace_normalize)
        if reorder:
            if reorder in ('L', 'R'):
                self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder))
            else:
                self.text_transforms.append(F_t.text_reorder)
        if augmentation:
            self.aug = DefaultAugmenter()
        # lowest image mode seen so far; upgraded in __getitem__. The string
        # comparison works because '1' < 'L' < 'RGB' lexicographically.
        self.im_mode = '1'
    def add(self, *args, **kwargs):
        """
        Adds a line to the dataset.

        Args:
            im (path): Path to the whole page image
            text (str): Transcription of the line.
            baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
            boundary (list): A polygon mask for the line.
        """
        # samples coming from parallelized parse() calls are marked preparsed
        # and skip re-validation
        if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
        self._images.append((kwargs['image'], kwargs['baseline'], kwargs['boundary']))
        self._gt.append(kwargs['text'])
        self.alphabet.update(kwargs['text'])
    def parse(self,
              image: Union[PathLike, str, Image.Image],
              text: str,
              baseline: List[Tuple[int, int]],
              boundary: List[Tuple[int, int]],
              *args,
              **kwargs):
        """
        Parses a sample for the dataset and returns it.

        This function is mainly used for parallelized loading of training data.

        Args:
            im (path): Path to the whole page image
            text (str): Transcription of the line.
            baseline (list): A list of coordinates [[x0, y0], ..., [xn, yn]].
            boundary (list): A polygon mask for the line.

        Raises:
            KrakenInputException: if the transformed text is empty (and empty
                                  lines are skipped) or geometry is missing.
        """
        orig_text = text
        for func in self.text_transforms:
            text = func(text)
        if not text and self.skip_empty_lines:
            raise KrakenInputException(f'Text line "{orig_text}" is empty after transformations')
        if not baseline:
            raise KrakenInputException('No baseline given for line')
        if not boundary:
            raise KrakenInputException('No boundary given for line')
        return {'text': text,
                'image': image,
                'baseline': baseline,
                'boundary': boundary,
                'preparse': True}
    def encode(self, codec: Optional[PytorchCodec] = None) -> None:
        """
        Adds a codec to the dataset and encodes all text lines.

        Has to be run before sampling from the dataset.
        """
        if codec:
            self.codec = codec
        else:
            self.codec = PytorchCodec(''.join(self.alphabet.keys()))
        self.training_set = []  # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
        for im, gt in zip(self._images, self._gt):
            self.training_set.append((im, self.codec.encode(gt)))
    def no_encode(self) -> None:
        """
        Creates an unencoded dataset.
        """
        self.training_set = []  # type: List[Tuple[Union[Image, torch.Tensor], str]]
        for im, gt in zip(self._images, self._gt):
            self.training_set.append((im, gt))
    def __getitem__(self, index: int) -> 'Dict[str, torch.Tensor]':
        # item is ((image, baseline, boundary), target)
        item = self.training_set[index]
        try:
            logger.debug(f'Attempting to load {item[0]}')
            im = item[0][0]
            if not isinstance(im, Image.Image):
                im = Image.open(im)
            # extract and dewarp the line strip from the page image
            im, _ = next(extract_polygons(im, {'type': 'baselines',
                                               'lines': [{'baseline': item[0][1], 'boundary': item[0][2]}]}))
            im = self.transforms(im)
            if im.shape[0] == 3:
                im_mode = 'RGB'
            elif im.shape[0] == 1:
                im_mode = 'L'
                if is_bitonal(im):
                    im_mode = '1'
            # NOTE(review): with a channel count other than 1 or 3, im_mode
            # is unbound here; the resulting NameError is caught below and
            # the sample replaced.
            if im_mode > self.im_mode:
                logger.info(f'Upgrading "im_mode" from {self.im_mode} to {im_mode}')
                self.im_mode = im_mode
            if self.aug:
                # augmenter expects HWC numpy arrays
                im = im.permute((1, 2, 0)).numpy()
                o = self.aug(image=im)
                im = torch.tensor(o['image'].transpose(2, 0, 1))
            return {'image': im, 'target': item[1]}
        except Exception:
            # record failure and substitute a random sample
            self.failed_samples.add(index)
            idx = np.random.randint(0, len(self.training_set))
            logger.debug(traceback.format_exc())
            logger.info(f'Failed. Replacing with sample {idx}')
            return self[idx]
    def __len__(self) -> int:
        return len(self._images)
class GroundTruthDataset(Dataset):
    """
    Dataset for training a line recognition model.

    All data is cached in memory. Samples are pairs of a pre-cut line strip
    image and a transcription read from a sidecar text file.
    """
    def __init__(self, split: Callable[[Union[PathLike, str]], str] = F_t.default_split,
                 suffix: str = '.gt.txt',
                 normalization: Optional[str] = None,
                 whitespace_normalization: bool = True,
                 skip_empty_lines: bool = True,
                 reorder: Union[bool, str] = True,
                 im_transforms: Callable[[Any], torch.Tensor] = transforms.Compose([]),
                 augmentation: bool = False) -> None:
        """
        Reads a list of image-text pairs and creates a ground truth set.

        Args:
            split: Function for generating the base name without
                   extensions from paths
            suffix: Suffix to attach to image base name for text
                    retrieval
            normalization: Unicode normalization for gt
            whitespace_normalization: Normalizes unicode whitespace and
                                      strips whitespace.
            skip_empty_lines: Whether to return samples without text.
            reorder: Whether to rearrange code points in "display"/LTR
                     order. Set to L|R to change the default text
                     direction.
            im_transforms: Function taking an PIL.Image and returning a
                           tensor suitable for forward passes.
            augmentation: Enables augmentation.
        """
        self.suffix = suffix
        # maps an image path to its transcription file path
        self.split = partial(F_t.suffix_split, split=split, suffix=suffix)
        self._images = []  # type: Union[List[Image], List[torch.Tensor]]
        self._gt = []  # type: List[str]
        self.alphabet = Counter()  # type: Counter
        self.text_transforms = []  # type: List[Callable[[str], str]]
        self.transforms = im_transforms
        self.skip_empty_lines = skip_empty_lines
        self.aug = None
        self.failed_samples = set()
        self.seg_type = 'bbox'
        # build the text transformation pipeline; transforms are applied in
        # insertion order in parse()
        if normalization:
            self.text_transforms.append(partial(F_t.text_normalize, normalization=normalization))
        if whitespace_normalization:
            self.text_transforms.append(F_t.text_whitespace_normalize)
        if reorder:
            if reorder in ('L', 'R'):
                self.text_transforms.append(partial(F_t.text_reorder, base_dir=reorder))
            else:
                self.text_transforms.append(F_t.text_reorder)
        if augmentation:
            self.aug = DefaultAugmenter()
        # lowest image mode seen so far; upgraded in __getitem__. The string
        # comparison works because '1' < 'L' < 'RGB' lexicographically.
        self.im_mode = '1'
    def add(self, *args, **kwargs) -> None:
        """
        Adds a line-image-text pair to the dataset.

        Args:
            image (str): Input image path
        """
        # samples coming from parallelized parse() calls are marked preparsed
        # and skip re-validation
        if 'preparse' not in kwargs or not kwargs['preparse']:
            kwargs = self.parse(*args, **kwargs)
        self._images.append(kwargs['image'])
        self._gt.append(kwargs['text'])
        self.alphabet.update(kwargs['text'])
    def parse(self, image: Union[PathLike, str, Image.Image], *args, **kwargs) -> Dict:
        """
        Parses a sample for this dataset.

        This is mostly used to parallelize populating the dataset.

        Args:
            image (str): Input image path

        Raises:
            KrakenInputException: if the transformed text is empty and empty
                                  lines are skipped.
        """
        with open(self.split(image), 'r', encoding='utf-8') as fp:
            text = fp.read().strip('\n\r')
        for func in self.text_transforms:
            text = func(text)
        if not text and self.skip_empty_lines:
            raise KrakenInputException(f'Text line is empty ({fp.name})')
        return {'image': image, 'text': text, 'preparse': True}
    def encode(self, codec: Optional[PytorchCodec] = None) -> None:
        """
        Adds a codec to the dataset and encodes all text lines.

        Has to be run before sampling from the dataset.
        """
        if codec:
            self.codec = codec
        else:
            self.codec = PytorchCodec(''.join(self.alphabet.keys()))
        self.training_set = []  # type: List[Tuple[Union[Image, torch.Tensor], torch.Tensor]]
        for im, gt in zip(self._images, self._gt):
            self.training_set.append((im, self.codec.encode(gt)))
    def no_encode(self) -> None:
        """
        Creates an unencoded dataset.
        """
        self.training_set = []  # type: List[Tuple[Union[Image, torch.Tensor], str]]
        for im, gt in zip(self._images, self._gt):
            self.training_set.append((im, gt))
    def __getitem__(self, index: int) -> 'Dict[str, torch.Tensor]':
        # item is (image-or-path, target)
        item = self.training_set[index]
        try:
            logger.debug(f'Attempting to load {item[0]}')
            im = item[0]
            if not isinstance(im, Image.Image):
                im = Image.open(im)
            im = self.transforms(im)
            if im.shape[0] == 3:
                im_mode = 'RGB'
            elif im.shape[0] == 1:
                im_mode = 'L'
                if is_bitonal(im):
                    im_mode = '1'
            # NOTE(review): with a channel count other than 1 or 3, im_mode
            # is unbound here; the resulting NameError is caught below and
            # the sample replaced.
            if im_mode > self.im_mode:
                logger.info(f'Upgrading "im_mode" from {self.im_mode} to {im_mode}')
                self.im_mode = im_mode
            if self.aug:
                # augmenter expects HWC numpy arrays
                im = im.permute((1, 2, 0)).numpy()
                o = self.aug(image=im)
                im = torch.tensor(o['image'].transpose(2, 0, 1))
            return {'image': im, 'target': item[1]}
        except Exception:
            # record failure and substitute a random sample
            self.failed_samples.add(index)
            idx = np.random.randint(0, len(self.training_set))
            logger.debug(traceback.format_exc())
            logger.info(f'Failed. Replacing with sample {idx}')
            return self[idx]
    def __len__(self) -> int:
        return len(self._images)
| 23,940 | 40.277586 | 137 | py |
kraken | kraken-main/kraken/lib/pretrain/model.py | #
# Copyright 2022 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Pytorch-lightning modules for recognition model pretraining.
Pretraining is based on an image inpainting surrogate task that aims to
reconstruct randomly sampled masked patches from the initial convolutional
feature maps that have been replaced with a learnable embedding. The model is
trained with a contrastive loss where negative samples are randomly generated
from the unmasked parts of the sequence.
Apart from an improved sampling method the implementation is mostly a faithful
adaptation of:
Vogler, Nikolai, et al. "Lacuna Reconstruction: Self-supervised Pre-training
for Low-Resource Historical Document Transcription." arXiv preprint
arXiv:2112.08692 (2021).
"""
import re
import math
import torch
import logging
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
from os import PathLike
from itertools import chain
from functools import partial
from torch.optim import lr_scheduler
from torch.multiprocessing import Pool
from typing import Dict, Optional, Sequence, Union, Any
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda
from kraken.lib import vgsl, default_specs, layers
from kraken.lib.xml import preparse_xml_data
from kraken.lib.codec import PytorchCodec
from kraken.lib.dataset import (ArrowIPCRecognitionDataset,
GroundTruthDataset, PolygonGTDataset,
ImageInputTransforms, collate_sequences)
from kraken.lib.exceptions import KrakenInputException
from kraken.lib.train import _configure_optimizer_and_lr_scheduler
from kraken.lib.pretrain.layers import Wav2Vec2Mask
from torch.utils.data import DataLoader, random_split, Subset
logger = logging.getLogger(__name__)
def _star_fun(fun, kwargs):
try:
return fun(**kwargs)
except FileNotFoundError as e:
logger.warning(f'{e.strerror}: {e.filename}. Skipping.')
except KrakenInputException as e:
logger.warning(str(e))
return None
class PretrainDataModule(pl.LightningDataModule):
    def __init__(self,
                 training_data: Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]] = None,
                 evaluation_data: Optional[Union[Sequence[Union[PathLike, str]], Sequence[Dict[str, Any]]]] = None,
                 partition: Optional[float] = 0.9,
                 binary_dataset_split: bool = False,
                 batch_size: int = 4,
                 height: int = 48,
                 width: int = 0,
                 channels: int = 1,
                 num_workers: int = 1,
                 repolygonize: bool = False,
                 force_binarization: bool = False,
                 format_type: str = 'path',
                 pad: int = 16,
                 augment: bool = default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS['augment']):
        """
        A LightningDataModule encapsulating text-less training data for
        unsupervised recognition model pretraining.

        Args:
            training_data: Input data, interpreted according to `format_type`
                           (file paths for xml/page/alto/binary/path modes,
                           pre-parsed sample dicts when `format_type` is None).
            evaluation_data: Optional validation data in the same format.
            partition: Train fraction when splitting off a validation set
                       internally (only used without explicit evaluation data).
            binary_dataset_split: Use the train/validation split stored inside
                                  binary (arrow) dataset files.
            batch_size: Samples per batch.
            height/width/channels: Input geometry fed to the image transforms.
            num_workers: Worker processes for parsing and data loading.
            repolygonize: Recompute line polygons (XML formats only).
            force_binarization: Binarize line images (ignored in `path` mode).
            format_type: One of 'xml', 'page', 'alto', 'binary', 'path', or
                         None for pre-parsed input.
            pad: Horizontal padding around line images.
            augment: Enables augmentation.
        """
        super().__init__()
        self.save_hyperparameters()
        DatasetClass = GroundTruthDataset
        valid_norm = True
        if format_type in ['xml', 'page', 'alto']:
            logger.info(f'Parsing {len(training_data)} XML files for training data')
            training_data = preparse_xml_data(training_data, format_type, repolygonize)
            if evaluation_data:
                logger.info(f'Parsing {len(evaluation_data)} XML files for validation data')
                evaluation_data = preparse_xml_data(evaluation_data, format_type, repolygonize)
            if binary_dataset_split:
                logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                binary_dataset_split = False
            DatasetClass = PolygonGTDataset
            valid_norm = False
        elif format_type == 'binary':
            DatasetClass = ArrowIPCRecognitionDataset
            if repolygonize:
                logger.warning('Repolygonization enabled in `binary` mode. Will be ignored.')
            valid_norm = False
            logger.info(f'Got {len(training_data)} binary dataset files for training data')
            training_data = [{'file': file} for file in training_data]
            if evaluation_data:
                logger.info(f'Got {len(evaluation_data)} binary dataset files for validation data')
                evaluation_data = [{'file': file} for file in evaluation_data]
        elif format_type == 'path':
            if force_binarization:
                logger.warning('Forced binarization enabled in `path` mode. Will be ignored.')
                force_binarization = False
            if repolygonize:
                logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')
            if binary_dataset_split:
                logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                binary_dataset_split = False
            logger.info(f'Got {len(training_data)} line strip images for training data')
            training_data = [{'image': im} for im in training_data]
            if evaluation_data:
                logger.info(f'Got {len(evaluation_data)} line strip images for validation data')
                evaluation_data = [{'image': im} for im in evaluation_data]
            valid_norm = True
        # format_type is None. Determine training type from length of training data entry
        elif not format_type:
            if len(training_data[0]) >= 4:
                DatasetClass = PolygonGTDataset
                valid_norm = False
            else:
                if force_binarization:
                    logger.warning('Forced binarization enabled with box lines. Will be ignored.')
                    force_binarization = False
                if repolygonize:
                    logger.warning('Repolygonization enabled with box lines. Will be ignored.')
                if binary_dataset_split:
                    logger.warning('Internal binary dataset splits are enabled but using non-binary dataset files. Will be ignored.')
                    binary_dataset_split = False
        else:
            raise ValueError(f'format_type {format_type} not in [alto, page, xml, path, binary].')
        self.transforms = ImageInputTransforms(batch_size,
                                               height,
                                               width,
                                               channels,
                                               (pad, 0),
                                               valid_norm,
                                               force_binarization)
        # Datasets are wrapped in Subsets even when unsplit so that setup()
        # can reach the underlying dataset uniformly via `.dataset`.
        if evaluation_data:
            train_set = self._build_dataset(DatasetClass, training_data)
            self.train_set = Subset(train_set, range(len(train_set)))
            val_set = self._build_dataset(DatasetClass, evaluation_data)
            self.val_set = Subset(val_set, range(len(val_set)))
        elif binary_dataset_split:
            train_set = self._build_dataset(DatasetClass, training_data, split_filter='train')
            self.train_set = Subset(train_set, range(len(train_set)))
            val_set = self._build_dataset(DatasetClass, training_data, split_filter='validation')
            self.val_set = Subset(val_set, range(len(val_set)))
            logger.info(f'Found {len(self.train_set)} (train) / {len(self.val_set)} (val) samples in pre-encoded dataset')
        else:
            train_set = self._build_dataset(DatasetClass, training_data)
            train_len = int(len(train_set)*partition)
            val_len = len(train_set) - train_len
            logger.info(f'No explicit validation data provided. Splitting off '
                        f'{val_len} (of {len(train_set)}) samples to validation '
                        'set. (Will disable alphabet mismatch detection.)')
            self.train_set, self.val_set = random_split(train_set, (train_len, val_len))
        if len(self.train_set) == 0 or len(self.val_set) == 0:
            raise ValueError('No valid training data was provided to the train '
                             'command. Please add valid XML, line, or binary data.')
        logger.info(f'Training set {len(self.train_set)} lines, validation set '
                    f'{len(self.val_set)} lines')
    def _build_dataset(self,
                       DatasetClass,
                       training_data,
                       **kwargs):
        # Instantiates a dataset and populates it, parallelizing sample
        # parsing for non-binary formats when multiple workers are available.
        dataset = DatasetClass(im_transforms=self.transforms,
                               augmentation=self.hparams.augment,
                               skip_empty_lines=False,
                               **kwargs)
        if (self.hparams.num_workers and self.hparams.num_workers > 1) and self.hparams.format_type != 'binary':
            with Pool(processes=self.hparams.num_workers) as pool:
                for im in pool.imap_unordered(partial(_star_fun, dataset.parse), training_data, 5):
                    logger.debug(f'Adding sample {im} to training set')
                    if im:
                        dataset.add(**im)
        else:
            for im in training_data:
                try:
                    dataset.add(**im)
                except KrakenInputException as e:
                    logger.warning(str(e))
        return dataset
    def train_dataloader(self):
        return DataLoader(self.train_set,
                          collate_fn=collate_sequences,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.hparams.num_workers,
                          pin_memory=True)
    def val_dataloader(self):
        return DataLoader(self.val_set,
                          collate_fn=collate_sequences,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.hparams.num_workers,
                          pin_memory=True)
    def setup(self, stage: Optional[str] = None):
        # Pretraining is text-less: leave targets unencoded in both splits.
        self.train_set.dataset.no_encode()
        self.val_set.dataset.no_encode()
class RecognitionPretrainModel(pl.LightningModule):
    def __init__(self,
                 hyper_params: Dict[str, Any] = None,
                 output: str = 'model',
                 spec: str = default_specs.RECOGNITION_SPEC,
                 model: Optional[Union[PathLike, str]] = None,
                 load_hyper_parameters: bool = False,
                 len_train_set: int = -1):
        """
        A LightningModule encapsulating the unsupervised pretraining setup for
        a text recognition model.

        Setup parameters (load, training_data, evaluation_data, ....) are
        named, model hyperparameters (everything in
        `kraken.lib.default_specs.RECOGNITION_HYPER_PARAMS`) are in in the
        `hyper_params` argument.

        Args:
            hyper_params (dict): Hyperparameter dictionary containing all fields
                                 from
                                 kraken.lib.default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS
            output: Output model file basename.
            spec: VGSL specification for a newly created network.
            model: Optional path to an existing model to continue from.
            load_hyper_parameters: Take hyperparameters from the loaded model.
            len_train_set: Training set size, forwarded to the LR scheduler.
        """
        super().__init__()
        hyper_params_ = default_specs.RECOGNITION_PRETRAIN_HYPER_PARAMS
        if model:
            logger.info(f'Loading existing model from {model} ')
            self.nn = vgsl.TorchVGSLModel.load_model(model)
            if self.nn.model_type not in [None, 'recognition']:
                raise ValueError(f'Model {model} is of type {self.nn.model_type} while `recognition` is expected.')
            if load_hyper_parameters:
                hp = self.nn.hyper_params
            else:
                hp = {}
            hyper_params_.update(hp)
        else:
            self.nn = None
        if hyper_params:
            hyper_params_.update(hyper_params)
        self.save_hyperparameters(hyper_params_)
        self.model = model
        self.output = output
        self.len_train_set = len_train_set
        self.best_epoch = 0
        self.best_metric = math.inf
        self.best_model = None
        # cross-entropy values accumulated over one validation epoch
        self.val_ce = []
        spec = spec.strip()
        if spec[0] != '[' or spec[-1] != ']':
            raise ValueError(f'VGSL spec {spec} not bracketed')
        self.spec = spec
        # preparse input sizes from vgsl string to seed ground truth data set
        # sizes and dimension ordering.
        if not self.nn:
            blocks = spec[1:-1].split(' ')
            m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
            if not m:
                raise ValueError(f'Invalid input spec {blocks[0]}')
            self.batch, self.height, self.width, self.channels = [int(x) for x in m.groups()]
        else:
            self.batch, self.channels, self.height, self.width = self.nn.input
        if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
            logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
            torch.multiprocessing.set_sharing_strategy('file_system')
        logger.info('Encoding training set')
    def forward(self, x, seq_lens):
        return self.net(x, seq_lens)
    def _step(self, batch, batch_idx):
        """
        Shared train/validation step: masked contrastive prediction.

        Returns (logits, targets, loss), or None when an OOM error was
        swallowed (callers must handle the None).
        """
        try:
            # sequence batch
            if 'seq_lens' in batch:
                output = self.features(batch['image'], batch['seq_lens'])
            else:
                output = self.features(batch['image'])
            # height should be 1 by now
            if output[0].size(2) != 1:
                raise KrakenInputException('Expected dimension 3 to be 1, actual {}'.format(output[0].size(2)))
            mask_output = self.wav2vec2mask(*output)
            # run contextual encoder, i.e. recurrent layers
            output, seq_lens = self.encoder(mask_output['output'], mask_output['seq_len'])
            # unmasked features in encoder output domain
            y = mask_output['unmasked_samples']
            # negative samples
            negatives = mask_output['negative_samples']
            N, C, H, W = output.shape
            output = output.transpose(1, 3).reshape(-1, W, C)
            # masked features after encoder
            x = output[mask_output['mask']].reshape_as(y)
            # positive sample at index 0, negatives after; the target class
            # is therefore always 0.
            mask_n_neg = torch.cat([y.unsqueeze(0), negatives], dim=0)
            logits = torch.cosine_similarity(x.float(), mask_n_neg.float(), dim=-1).type_as(x)
            targets = logits.new_zeros(logits.size(1) * logits.size(2), dtype=torch.long)
            logits = logits.transpose(0, 2)
            logits = logits.reshape(-1, logits.size(-1))
            logits /= self.hparams.logit_temp
            loss = F.cross_entropy(logits, targets)
            return logits, targets, loss
        except RuntimeError as e:
            if is_oom_error(e):
                logger.warning('Out of memory error in trainer. Skipping batch and freeing caches.')
                garbage_collection_cuda()
            else:
                raise
    def validation_step(self, batch, batch_idx):
        o = self._step(batch, batch_idx)
        if o is not None:
            logits, targets, loss = o
            with torch.no_grad():
                # NOTE(review): corr/count are computed but never used or
                # logged; dead code kept byte-identical here.
                if logits.numel() == 0:
                    corr = 0
                    count = 0
                else:
                    _max = logits.argmax(-1) == 0
                    _min = logits.argmin(-1) == 0
                    both = _max & _min
                    corr = _max.long().sum().item() - both.long().sum().item()
            self.val_ce.append(loss.cpu())
            self.log('CE', loss, on_step=True, on_epoch=True)
    def on_validation_epoch_end(self):
        ce = np.mean(self.val_ce)
        self.val_ce.clear()
        if ce < self.best_metric:
            logger.debug(f'Updating best metric from {self.best_metric} ({self.best_epoch}) to {ce} ({self.current_epoch})')
            self.best_epoch = self.current_epoch
            self.best_metric = ce
        # NOTE(review): 'cross_enctropy' is a typo in the runtime log message;
        # left untouched as it is program output.
        logger.info(f'validation run: cross_enctropy: {ce}')
        self.log('val_ce', ce, on_step=False, on_epoch=True, prog_bar=True, logger=True)
    def training_step(self, batch, batch_idx):
        o = self._step(batch, batch_idx)
        if o is not None:
            _, _, loss = o
            self.log('CE', loss)
            return loss
    def configure_optimizers(self):
        # Only the convolutional feature extractor, the masking layer, and
        # the contextual encoder are optimized.
        return _configure_optimizer_and_lr_scheduler(self.hparams,
                                                     chain(self.features.parameters(),
                                                           self.wav2vec2mask.parameters(),
                                                           self.encoder.parameters()),
                                                     len_train_set=self.len_train_set,
                                                     loss_tracking_mode='min')
    def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_closure):
        # update params
        optimizer.step(closure=optimizer_closure)
        # linear warmup between 0 and the initial learning rate `lrate` in `warmup`
        # steps.
        if self.hparams.warmup and self.trainer.global_step < self.hparams.warmup:
            lr_scale = min(1.0, float(self.trainer.global_step + 1) / self.hparams.warmup)
            for pg in optimizer.param_groups:
                pg["lr"] = lr_scale * self.hparams.lrate
    def lr_scheduler_step(self, scheduler, metric):
        if not self.hparams.warmup or self.trainer.global_step >= self.hparams.warmup:
            # step OneCycleLR each batch if not in warmup phase
            if isinstance(scheduler, lr_scheduler.OneCycleLR):
                scheduler.step()
            # step every other scheduler epoch-wise
            elif self.trainer.is_last_batch:
                scheduler.step()
    def setup(self, stage: Optional[str] = None):
        # finalize models in case of appending/loading
        if stage in [None, 'fit']:
            if self.model:
                self.spec = self.nn.spec
            else:
                logger.info(f'Creating new model {self.spec}')
                self.nn = vgsl.TorchVGSLModel(self.spec)
                # initialize weights
                self.nn.init_weights()
            self.net = self.nn.nn
            # Split the network at the first recurrent layer: everything
            # before is the feature extractor, everything after the
            # contextual encoder. NOTE(review): if no recurrent layer exists
            # the split falls on the last layer — presumably never the case
            # for recognition specs; confirm against spec defaults.
            for idx, layer in enumerate(self.net.children()):
                if isinstance(layer, layers.TransposedSummarizingRNN):
                    break
            self.features = self.net[:idx]
            if self.model and 'wav2vec2mask' in self.nn.aux_layers:
                logger.info('Extracting wav2vec2mask layer from model: mask width '
                            f'{self.nn.aux_layers["wav2vec2mask"].mask_width}, prob '
                            f'{self.nn.aux_layers["wav2vec2mask"].mask_prob}, negative samples '
                            f'{self.nn.aux_layers["wav2vec2mask"].num_negatives}')
                self.wav2vec2mask = self.nn.aux_layers['wav2vec2mask']
                logger.info("Overriding masking hyperparameters with model one's: ")
                self.hparams.mask_width = self.wav2vec2mask.mask_width
                self.hparams.mask_mask_prob = self.wav2vec2mask.mask_prob
                self.hparams.num_negatives = self.wav2vec2mask.num_negatives
            else:
                logger.info(f'Instantiating new wav2vec2mask layer: mask width '
                            f'{self.hparams.mask_width}, prob '
                            f'{self.hparams.mask_prob}, negative samples '
                            f'{self.hparams.num_negatives}')
                self.wav2vec2mask = Wav2Vec2Mask(self.net[idx-1].output_shape[1],
                                                 self.net[-1].output_shape[1],
                                                 self.hparams.mask_width,
                                                 self.hparams.mask_prob,
                                                 self.hparams.num_negatives)
                self.nn.aux_layers = {'wav2vec2mask': self.wav2vec2mask}
            # add dummy codec and output layer
            if not self.nn.codec and not isinstance(self.net[-1], layers.LinSoftmax):
                logger.info('Adding dummy codec and output layer to model')
                self.nn.add_codec(PytorchCodec(' '))
                self.nn.append(len(self.net), "[O1c2]")
            self.encoder = self.net[idx:]
            self.nn.hyper_params = self.hparams
            self.nn.model_type = 'recognition'
    def configure_callbacks(self):
        callbacks = []
        if self.hparams.quit == 'early':
            callbacks.append(EarlyStopping(monitor='CE',
                                           mode='min',
                                           patience=self.hparams.lag,
                                           stopping_threshold=0.0))
        return callbacks
| 21,793 | 44.404167 | 133 | py |
kraken | kraken-main/kraken/lib/pretrain/layers.py | """
Layers for VGSL models
"""
import torch
from typing import Tuple, Optional
from torch.nn import Module, Embedding, Linear
from kraken.lib.vgsl import VGSLBlock
from kraken.lib.pretrain.util import compute_mask_indices, sample_negatives
# all tensors are ordered NCHW, the "feature" dimension is C, so the output of
# an LSTM will be put into C same as the filters of a CNN.
__all__ = ['Wav2Vec2Mask']
class Wav2Vec2Mask(Module):
"""
A layer for Wav2Vec2-style masking. Needs to be placed just before
recurrent/contextual layers.
"""
def __init__(self,
context_encoder_input_dim: int,
final_dim: int,
mask_width: int,
mask_prob: float,
num_negatives: int) -> None:
"""
Args:
context_encoder_input_dim: size of the `C` input dimension
final_dim: size of the decoder `C` output dimension just before the
final linear projection.
mask_width: width of the non-overlapping masked areas.
mask_prob: probability of masking at each time step
num_negatives: number of negative samples with width mask_width *
num_masks
Shape:
- Inputs: :math:`(N, C, H, W)` where `N` batches, `C` channels, `H`
height, and `W` width.
- Outputs output :math:`(N, C, H, W)`
"""
super().__init__()
self.context_encoder_input_dim = context_encoder_input_dim
self.final_dim = final_dim
self.mask_width = mask_width
self.mask_prob = mask_prob
self.num_negatives = num_negatives
# mask embedding replacing the masked out areas
self.mask_emb = Embedding(1, context_encoder_input_dim)
self.project_q = Linear(context_encoder_input_dim, final_dim)
def forward(self, inputs: torch.Tensor, seq_len: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
N, C, H, W = inputs.shape
if H != 1:
raise Exception(f'Height has to be 1, not {H} for Wav2Vec2 masking layer.')
# NCHW -> NWC
inputs = inputs.transpose(1, 3).reshape(-1, W, C)
mask_indices = compute_mask_indices((N, W), self.mask_prob, self.mask_width)
mask_indices = torch.from_numpy(mask_indices).to(inputs.device)
unmasked_features = inputs.clone()
# mask out
inputs[mask_indices] = self.mask_emb.weight
# project into same dimensionality as final recurrent layer
unmasked_features = self.project_q(unmasked_features)
unmasked_samples = unmasked_features[mask_indices].view(unmasked_features.size(0), -1, unmasked_features.size(-1))
# negative samples
negative_samples = sample_negatives(unmasked_samples, unmasked_samples.size(1), self.num_negatives)
# NWC -> NCHW
inputs = inputs.permute(0, 2, 1).unsqueeze(2)
return {'output': inputs,
'unmasked_samples': unmasked_samples,
'negative_samples': negative_samples,
'seq_len': seq_len,
'mask': mask_indices}
def get_shape(self, input: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
"""
Calculates the output shape from input 4D tuple NCHW.
"""
return input
def get_spec(self, name) -> "VGSLBlock":
"""
Generates a VGSL spec block from the layer instance.
"""
return f'[1,{self.final_dim},0,{self.context_encoder_input_dim} W{{{name}}}{self.final_dim},{self.mask_width},{self.mask_prob},{self.num_negatives}]'
def deserialize(self, name: str, spec) -> None:
"""
Sets the weights of an initialized module from a CoreML protobuf spec.
"""
# extract embedding parameters
emb = [x for x in spec.neuralNetwork.layers if x.name == '{}_wave2vec2_emb'.format(name)][0].embedding
weights = torch.Tensor(emb.weights.floatValue).resize_as_(self.mask_emb.weight.data)
self.mask_emb.weight = torch.nn.Parameter(weights)
# extract linear projection parameters
lin = [x for x in spec.neuralNetwork.layers if x.name == '{}_wave2vec2_lin'.format(name)][0].innerProduct
weights = torch.Tensor(lin.weights.floatValue).resize_as_(self.project_q.weight.data)
bias = torch.Tensor(lin.bias.floatValue)
self.project_q.weight = torch.nn.Parameter(weights)
self.project_q.bias = torch.nn.Parameter(bias)
    def serialize(self, name: str, input: str, builder):
        """
        Serializes the module using a NeuralNetworkBuilder.

        Writes `mask_emb` as an embedding layer and `project_q` as an inner
        product layer so `deserialize` can recover them by name.
        """
        wave2vec2_emb_name = f'{name}_wave2vec2_emb'
        builder.add_embedding(wave2vec2_emb_name, self.mask_emb.weight.data.numpy(),
                              None,
                              self.context_encoder_input_dim, self.mask_width,
                              has_bias=False, input_name=input, output_name=wave2vec2_emb_name)
        wave2vec2_lin_name = f'{name}_wave2vec2_lin'
        # NOTE(review): both sub-layers consume `input` directly (they are not
        # chained), and the method returns `name` rather than the last output
        # name — presumably intentional since the layer only stores parameters
        # for training; confirm against the other layer serializers in kraken.
        builder.add_inner_product(wave2vec2_lin_name, self.project_q.weight.data.numpy(),
                                  self.project_q.bias.data.numpy(),
                                  self.context_encoder_input_dim, self.final_dim,
                                  has_bias=True, input_name=input, output_name=wave2vec2_lin_name)
        return name
| 5,457 | 41.640625 | 157 | py |
kraken | kraken-main/kraken/lib/pretrain/util.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence, Union, Tuple
import torch
import random
import numpy as np
def positive_integers_with_sum(n, total):
    """
    Draws n non-negative integers summing to `total`.

    Cuts the interval [0, total] at n-1 uniformly sampled points and
    returns the lengths of the resulting sub-intervals (zeros are
    possible when cut points coincide).
    """
    cuts = sorted(random.randint(0, total) for _ in range(n - 1))
    bounds = [0] + cuts + [total]
    return [upper - lower for lower, upper in zip(bounds, bounds[1:])]
def compute_masks(mask_prob: int,
                  mask_width: int,
                  num_neg_samples: int,
                  seq_lens: Union[torch.Tensor, Sequence[int]]):
    """
    Samples num_mask non-overlapping random masks of length mask_width in
    sequence of length seq_len.
    Args:
        mask_prob: Probability of each individual token being chosen as start
                   of a masked sequence. Overall number of masks num_masks is
                   mask_prob * sum(seq_lens) / mask_width.
        mask_width: width of each mask
        num_neg_samples: Number of samples from unmasked sequence parts (gets
                         multiplied by num_mask)
        seq_lens: sequence lengths

    NOTE(review): despite the Union annotation, `seq_lens.numpy()` below
    requires a torch.Tensor — confirm callers never pass a plain sequence.

    Returns:
        An index array containing 1 for masked bits, 2 for negative samples,
        the number of masks, and the actual number of negative samples.
    """
    mask_samples = np.zeros(sum(seq_lens))
    num_masks = int(mask_prob * sum(seq_lens.numpy()) // mask_width)
    num_neg_samples = num_masks * num_neg_samples
    # temporarily count negative-sample spans as masks so all spans are
    # placed with the same non-overlapping layout below
    num_masks += num_neg_samples
    # random gap sizes between consecutive spans (each at least mask_width apart)
    indices = [x+mask_width for x in positive_integers_with_sum(num_masks, sum(seq_lens)-num_masks*mask_width)]
    start = 0
    mask_slices = []
    for i in indices:
        # place one mask_width span uniformly inside its allotted gap
        i_start = random.randint(start, i+start-mask_width)
        mask_slices.append(slice(i_start, i_start+mask_width))
        start += i
    # randomly reassign some spans as negative samples
    neg_idx = random.sample(range(len(mask_slices)), num_neg_samples)
    neg_slices = [mask_slices.pop(idx) for idx in sorted(neg_idx, reverse=True)]
    mask_samples[np.r_[tuple(mask_slices)]] = 1
    mask_samples[np.r_[tuple(neg_slices)]] = 2
    return mask_samples, num_masks - num_neg_samples, num_neg_samples
def buffered_arange(max):
    """
    Returns torch.arange(max) from a lazily grown buffer cached on the
    function object, avoiding reallocation across calls.
    """
    buf = getattr(buffered_arange, "buf", None)
    if buf is None:
        buf = torch.LongTensor()
        buffered_arange.buf = buf
    if max > buf.numel():
        buf.resize_(max)
        torch.arange(max, out=buf)
    return buf[:max]
def sample_negatives(y, num_samples, num_neg_samples: int):
    """
    Draws num_neg_samples negative examples per time step from the other
    time steps of the same batch element of `y` (shape (B, W, C)).

    Returns a tensor of shape (num_neg_samples, B, num_samples, C).
    """
    B, W, C = y.shape
    y = y.view(-1, C)  # BTC => (BxT)C
    with torch.no_grad():
        # index of the "own" time step, repeated for each negative draw
        tszs = (buffered_arange(num_samples).unsqueeze(-1).expand(-1, num_neg_samples).flatten())
        # draw from [0, W-1) and shift indices >= own position by one so a
        # time step is never sampled as its own negative
        neg_idxs = torch.randint(low=0, high=W - 1, size=(B, num_neg_samples * num_samples))
        neg_idxs[neg_idxs >= tszs] += 1
    # offset each batch row into the flattened (B*W, C) view
    for i in range(1, B):
        neg_idxs[i] += i * W
    negs = y[neg_idxs.view(-1)]
    negs = negs.view(B, num_samples, num_neg_samples, C).permute(2, 0, 1, 3)  # to NxBxTxC
    return negs
def compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int = 4, mask_min_space: int = 2) -> np.ndarray:
    """
    Computes random mask spans for a given shape.

    Args:
        shape: the shape for which to compute masks. Should be of size 2
            where the first element is batch size and the 2nd is timesteps.
        mask_prob: probability for each token to be chosen as start of the
            span to be masked. This will be multiplied by number of timesteps
            divided by length of mask span to mask approximately this
            percentage of all elements. However due to overlaps, the actual
            number will be smaller.
        mask_length: length of each mask span.
        mask_min_space: minimum number of unmasked steps between two spans.

    Returns:
        A boolean (batch, timesteps) array with True at masked positions.
        Every batch row contains the same number of masked positions.
    """
    bsz, all_sz = shape
    mask = np.full((bsz, all_sz), False)

    # expected number of spans; the random addend stochastically rounds up
    all_num_mask = int(mask_prob * all_sz / float(mask_length) + np.random.rand())

    mask_idcs = []
    for i in range(bsz):
        sz = all_sz
        num_mask = all_num_mask

        lengths = np.full(num_mask, mask_length)

        if sum(lengths) == 0:
            lengths[0] = min(mask_length, sz - 1)

        mask_idc = []

        def arrange(s, e, length, keep_length):
            # place one span of `length` uniformly inside [s, e) and return
            # the remaining sub-intervals that can still hold another span
            span_start = np.random.randint(s, e - length)
            mask_idc.extend(span_start + i for i in range(length))

            new_parts = []
            if span_start - s - mask_min_space >= keep_length:
                new_parts.append((s, span_start - mask_min_space + 1))
            if e - span_start - keep_length - mask_min_space > keep_length:
                new_parts.append((span_start + length + mask_min_space, e))
            return new_parts

        parts = [(0, sz)]
        min_length = min(lengths)
        for length in sorted(lengths, reverse=True):
            # weight candidate intervals by remaining capacity.
            # NB: the original used `np.int`, which was removed from NumPy's
            # namespace (NumPy >= 1.24) — the builtin `int` is the documented
            # replacement.
            lens = np.fromiter(
                (e - s if e - s >= length + mask_min_space else 0 for s, e in parts),
                int,
            )
            l_sum = np.sum(lens)
            if l_sum == 0:
                break
            probs = lens / np.sum(lens)
            c = np.random.choice(len(parts), p=probs)
            s, e = parts.pop(c)
            parts.extend(arrange(s, e, length, min_length))
        mask_idc = np.asarray(mask_idc)
        mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))

    # make sure all masks are the same length in the batch by removing masks
    # if they are greater than the min length mask
    min_len = min([len(m) for m in mask_idcs])
    for i, mask_idc in enumerate(mask_idcs):
        if len(mask_idc) > min_len:
            mask_idc = np.random.choice(mask_idc, min_len, replace=False)
            assert len(mask_idc) == min_len
        mask[i, mask_idc] = True

    return mask
| 5,805 | 33.975904 | 128 | py |
kraken | kraken-main/tests/test_layers.py | # -*- coding: utf-8 -*-
import unittest
import torch
from kraken.lib import layers
class TestLayers(unittest.TestCase):
    """
    Testing custom layer implementations.

    All tensors below follow the NCHW convention (batch, channels, height,
    width); assertions check the output shape or value range of each layer.
    """
    def setUp(self):
        # inference-only tests: no gradients needed
        torch.set_grad_enabled(False)

    def test_maxpool(self):
        """
        Test maximum pooling layer.
        """
        mp = layers.MaxPool((3, 3), (2, 2))
        o = mp(torch.randn(1, 2, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 15, 31))

    def test_1d_dropout(self):
        """
        Test 1d dropout layer.
        """
        do = layers.Dropout(0.2, 1)
        o = do(torch.randn(1, 2, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 32, 64))

    def test_2d_dropout(self):
        """
        Test 2d dropout layer.
        """
        do = layers.Dropout(0.2, 2)
        o = do(torch.randn(1, 2, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 32, 64))

    def test_forward_rnn_layer_x(self):
        """
        Test unidirectional RNN layer in x-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, False)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 32, 64))

    def test_forward_rnn_layer_y(self):
        """
        Test unidirectional RNN layer in y-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, False)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 32, 64))

    def test_forward_rnn_layer_x_summarize(self):
        """
        Test unidirectional summarizing RNN layer in x-dimension.
        """
        # summarizing collapses the scanned dimension to size 1
        rnn = layers.TransposedSummarizingRNN(10, 2, 'f', False, True)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 32, 1))

    def test_forward_rnn_layer_y_summarize(self):
        """
        Test unidirectional summarizing RNN layer in y-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'f', True, True)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 2, 1, 64))

    def test_bidi_rnn_layer_x(self):
        """
        Test bidirectional RNN layer in x-dimension.
        """
        # bidirectional doubles the output channels (2 -> 4)
        rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, False)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 4, 32, 64))

    def test_bidi_rnn_layer_y(self):
        """
        Test bidirectional RNN layer in y-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, False)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 4, 32, 64))

    def test_bidi_rnn_layer_x_summarize(self):
        """
        Test bidirectional summarizing RNN layer in x-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'b', False, True)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 4, 32, 1))

    def test_bidi_rnn_layer_y_summarize(self):
        """
        Test bidirectional summarizing RNN layer in y-dimension.
        """
        rnn = layers.TransposedSummarizingRNN(10, 2, 'b', True, True)
        o = rnn(torch.randn(1, 10, 32, 64))
        self.assertEqual(o[0].shape, (1, 4, 1, 64))

    def test_linsoftmax(self):
        """
        Test basic function of linear layer.
        """
        lin = layers.LinSoftmax(20, 10)
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertEqual(o[0].shape, (1, 10, 12, 24))

    def test_linsoftmax_train(self):
        """
        Test function of linear layer in training mode (log_softmax)
        """
        # log_softmax outputs are strictly negative
        lin = layers.LinSoftmax(20, 10).train()
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertLess(o[0].max(), 0)

    def test_linsoftmax_test(self):
        """
        Test function of linear layer in eval mode (softmax)
        """
        # softmax outputs are non-negative
        lin = layers.LinSoftmax(20, 10).eval()
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertGreaterEqual(o[0].min(), 0)

    def test_linsoftmax_aug(self):
        """
        Test basic function of linear layer with 1-augmentation.
        """
        lin = layers.LinSoftmax(20, 10, True)
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertEqual(o[0].shape, (1, 10, 12, 24))

    def test_linsoftmax_aug_train(self):
        """
        Test function of linear layer in training mode (log_softmax) with 1-augmentation
        """
        lin = layers.LinSoftmax(20, 10, True).train()
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertLess(o[0].max(), 0)

    def test_linsoftmax_aug_test(self):
        """
        Test function of linear layer in eval mode (softmax) with 1-augmentation
        """
        lin = layers.LinSoftmax(20, 10, True).eval()
        o = lin(torch.randn(1, 20, 12, 24))
        self.assertGreaterEqual(o[0].min(), 0)

    def test_actconv2d_lin(self):
        """
        Test convolutional layer without activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'l')
        o = conv(torch.randn(1, 5, 24, 12))
        self.assertEqual(o[0].shape, (1, 12, 24, 12))

    def test_actconv2d_train_sigmoid(self):
        """
        Test convolutional layer with sigmoid activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 's')
        o = conv(torch.randn(1, 5, 24, 12))
        # NOTE(review): conv.train() is called after the forward pass; the
        # module defaults to training mode so the assertions still hold, but
        # the call looks misplaced — presumably meant to precede the forward.
        conv.train()
        self.assertFalse(0 <= o[0].min() <= 1)
        self.assertFalse(0 <= o[0].max() <= 1)

    def test_actconv2d_eval_sigmoid(self):
        """
        Test convolutional layer with sigmoid activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 's')
        conv.eval()
        o = conv(torch.randn(1, 5, 24, 12))
        self.assertTrue(0 <= o[0].min() <= 1)
        self.assertTrue(0 <= o[0].max() <= 1)

    def test_actconv2d_tanh(self):
        """
        Test convolutional layer with tanh activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 't')
        o = conv(torch.randn(1, 5, 24, 12))
        self.assertTrue(-1 <= o[0].min() <= 1)
        self.assertTrue(-1 <= o[0].max() <= 1)

    def test_actconv2d_softmax(self):
        """
        Test convolutional layer with softmax activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'm')
        o = conv(torch.randn(1, 5, 24, 12))
        self.assertTrue(0 <= o[0].min() <= 1)
        self.assertTrue(0 <= o[0].max() <= 1)

    def test_actconv2d_relu(self):
        """
        Test convolutional layer with relu activation.
        """
        conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'r')
        o = conv(torch.randn(1, 5, 24, 12))
        self.assertLessEqual(0, o[0].min())
        self.assertLessEqual(0, o[0].max())

    def test_linsoftmax_resize_add(self):
        """
        Tests resizing of a fully connected layer.
        """
        # growing the output keeps the original weights as a prefix
        lin = layers.LinSoftmax(20, 10)
        w_cp = lin.lin.weight.clone()
        b_cp = lin.lin.bias.clone()
        lin.resize(25)
        self.assertTrue(w_cp.eq(lin.lin.weight[:10, :]).all())
        self.assertTrue(b_cp.eq(lin.lin.bias[:10]).all())
        self.assertTrue(lin.lin.weight.shape[0] == 25)
        self.assertTrue(lin.lin.bias.shape[0] == 25)

    def test_linsoftmax_resize_remove(self):
        """
        Tests resizing of a fully connected layer.
        """
        # shrinking deletes the listed output indices, keeping the rest
        lin = layers.LinSoftmax(20, 10)
        w_cp = lin.lin.weight.clone()
        b_cp = lin.lin.bias.clone()
        lin.resize(5, (1, 5, 6, 7, 9))
        self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(lin.lin.weight).all())
        self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(lin.lin.bias).all())

    def test_linsoftmax_resize_both(self):
        """
        Tests resizing of a fully connected layer.
        """
        lin = layers.LinSoftmax(20, 10)
        w_cp = lin.lin.weight.clone()
        b_cp = lin.lin.bias.clone()
        lin.resize(25, (1, 5, 6, 7, 9))
        self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(lin.lin.weight[:5, :]).all())
        self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(lin.lin.bias[:5]).all())
        self.assertTrue(lin.lin.weight.shape[0] == 25)
        self.assertTrue(lin.lin.bias.shape[0] == 25)

    def test_conv_resize_add(self):
        """
        Tests resizing of a convolutional output layer.
        """
        conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
        w_cp = conv.co.weight.clone()
        b_cp = conv.co.bias.clone()
        conv.resize(25)
        self.assertTrue(w_cp.eq(conv.co.weight[:10, :]).all())
        self.assertTrue(b_cp.eq(conv.co.bias[:10]).all())
        self.assertTrue(conv.co.weight.shape[0] == 25)
        self.assertTrue(conv.co.bias.shape[0] == 25)

    def test_conv_resize_remove(self):
        """
        Tests resizing of a convolutional output layer.
        """
        conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
        w_cp = conv.co.weight.clone()
        b_cp = conv.co.bias.clone()
        conv.resize(5, (1, 5, 6, 7, 9))
        self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight).all())
        self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(conv.co.bias).all())

    def test_conv_resize_both(self):
        """
        Tests resizing of a convolutional output layer.
        """
        conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
        w_cp = conv.co.weight.clone()
        b_cp = conv.co.bias.clone()
        conv.resize(25, (1, 5, 6, 7, 9))
        self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight[:5, :]).all())
        self.assertTrue(b_cp[(0, 2, 3, 4, 8),].eq(conv.co.bias[:5]).all())
        self.assertTrue(conv.co.weight.shape[0] == 25)
        self.assertTrue(conv.co.bias.shape[0] == 25)
| 9,708 | 33.675 | 88 | py |
kraken | kraken-main/tests/test_vgsl.py | # -*- coding: utf-8 -*-
import unittest
from pytest import raises
import os
import torch
import tempfile
from kraken.lib import vgsl, layers
class TestVGSL(unittest.TestCase):
    """
    Testing VGSL module

    Models are built from VGSL spec strings (input shape followed by layer
    blocks, e.g. '[1,1,0,48 Lbx10 Do O1c57]').
    """
    def test_helper_train(self):
        """
        Tests train/eval mode helper methods
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        rnn.train()
        self.assertTrue(torch.is_grad_enabled())
        self.assertTrue(rnn.nn.training)
        rnn.eval()
        self.assertFalse(torch.is_grad_enabled())
        self.assertFalse(rnn.nn.training)

    @unittest.skip('works randomly on ci')
    def test_helper_threads(self):
        """
        Test openmp threads helper method.
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        rnn.set_num_threads(4)
        self.assertEqual(torch.get_num_threads(), 4)

    def test_save_model(self):
        """
        Test model serialization.
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        with tempfile.TemporaryDirectory() as dir:
            rnn.save_model(dir + '/foo.mlmodel')
            self.assertTrue(os.path.exists(dir + '/foo.mlmodel'))

    def test_append(self):
        """
        Test appending one VGSL spec to another.
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        rnn.append(1, '[Cr1,1,2 Gn2 Cr3,3,4]')
        # appended layers get named blocks ({L_0}, {C_1}, ...) in the spec
        self.assertEqual(rnn.spec, '[1,1,0,48 Lbx{L_0}10 Cr{C_1}1,1,2 Gn{Gn_2}2 Cr{C_3}3,3,4]')

    def test_resize(self):
        """
        Tests resizing of output layers.
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        rnn.resize_output(80)
        self.assertEqual(rnn.nn[-1].lin.out_features, 80)

    def test_del_resize(self):
        """
        Tests resizing of output layers with entry deletion.
        """
        rnn = vgsl.TorchVGSLModel('[1,1,0,48 Lbx10 Do O1c57]')
        rnn.resize_output(80, [2, 4, 5, 6, 7, 12, 25])
        self.assertEqual(rnn.nn[-1].lin.out_features, 80)

    def test_nested_serial_model(self):
        """
        Test the creation of a nested serial model.
        """
        net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 ([Cr4,2,1,1,1 Do Cr3,3,2,1,1] [Cr4,2,1,1,1 Cr3,3,2,1,1 Do]) S1(1x0)1,3 Lbx2 Do0.5 Lbx2]')
        self.assertIsInstance(net.nn[1], layers.MultiParamParallel)
        for x in net.nn[1].children():
            self.assertIsInstance(x, layers.MultiParamSequential)
            self.assertEqual(len(x), 3)

    def test_parallel_model_inequal(self):
        """
        Test proper raising of ValueError when parallel layers do not have the same output shape.
        """
        with raises(ValueError):
            net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 [Cr4,2,1,1,1 (Cr4,2,1,4,2 Cr3,3,2,1,1) S1(1x0)1,3 Lbx2 Do0.5] Lbx2]')

    def test_complex_serialization(self):
        """
        Test proper serialization and deserialization of a complex model.
        """
        # NOTE(review): only constructs the model — no serialization round
        # trip is asserted here; presumably construction itself is the check.
        net = vgsl.TorchVGSLModel('[1,48,0,1 Cr4,2,1,4,2 ([Cr4,2,1,1,1 Do Cr3,3,2,1,1] [Cr4,2,1,1,1 Cr3,3,2,1,1 Do]) S1(1x0)1,3 Lbx2 Do0.5 Lbx2]')
| 3,156 | 33.692308 | 146 | py |
kraken | kraken-main/tests/test_codec.py | # -*- coding: utf-8 -*-
import unittest
from pytest import raises
from torch import IntTensor
from kraken.lib import codec
from kraken.lib.exceptions import KrakenEncodeException, KrakenCodecException
class TestCodec(unittest.TestCase):
    """
    Testing codec mapping routines

    Label sequences passed to decode() are tuples of
    (label, start, end, confidence).
    """
    def setUp(self):
        # codec mapping one code point to one label
        self.o2o_codec = codec.PytorchCodec('ab')
        self.o2o_codec_strict = codec.PytorchCodec('ab', strict=True)
        # codec mapping many code points to one label
        self.m2o_codec = codec.PytorchCodec(['aaa' , 'aa', 'a', 'b'])
        self.m2o_codec_strict = codec.PytorchCodec(['aaa' , 'aa', 'a', 'b'], strict=True)
        # codec mapping one code point to many labels
        self.o2m_codec = codec.PytorchCodec({'a': [10, 11, 12], 'b': [12, 45, 80]})
        self.o2m_codec_strict = codec.PytorchCodec({'a': [10, 11, 12], 'b': [12, 45, 80]}, strict=True)
        # codec mapping many code points to many labels
        self.m2m_codec = codec.PytorchCodec({'aaa': [10, 11, 12], 'aa': [9, 9], 'a': [11], 'bb': [15], 'b': [12]})
        self.m2m_codec_strict = codec.PytorchCodec({'aaa': [10, 11, 12], 'aa': [9, 9], 'a': [11], 'bb': [15], 'b': [12]}, strict=True)
        # 'c' is not encodable by any of the codecs above
        self.invalid_c_sequence = 'aaababbcaaa'
        self.valid_c_sequence = 'aaababbaaabbbb'
        self.ada_sequence = 'cdaabae'
        # label 45/900 etc. are not decodable by all codecs
        self.invalid_l_sequence = [(45, 78, 778, 0.3793492615638364),
                                   (10, 203, 859, 0.9485075253700872),
                                   (11, 70, 601, 0.7885297329523855),
                                   (12, 251, 831, 0.7216817042926938),
                                   (900, 72, 950, 0.27609823017048707)]

    def test_o2o_encode(self):
        """
        Test correct encoding of one-to-one code point sequence
        """
        self.assertTrue(self.o2o_codec.encode(self.valid_c_sequence).eq(
            IntTensor([1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 2, 2, 2, 2])).all())

    def test_m2o_encode(self):
        """
        Test correct encoding of many-to-one code point sequence
        """
        self.assertTrue(self.m2o_codec.encode(self.valid_c_sequence).eq(
            IntTensor([3, 4, 1, 4, 4, 3, 4, 4, 4, 4])).all())

    def test_o2m_encode(self):
        """
        Test correct encoding of one-to-many code point sequence
        """
        self.assertTrue(self.o2m_codec.encode(self.valid_c_sequence).eq(
            IntTensor([10, 11, 12, 10, 11, 12, 10, 11, 12,
                       12, 45, 80, 10, 11, 12, 12, 45, 80, 12, 45,
                       80, 10, 11, 12, 10, 11, 12, 10, 11, 12, 12,
                       45, 80, 12, 45, 80, 12, 45, 80, 12, 45,
                       80])).all())

    def test_m2m_encode(self):
        """
        Test correct encoding of many-to-many code point sequence
        """
        self.assertTrue(self.m2m_codec.encode(self.valid_c_sequence).eq(
            IntTensor([10, 11, 12, 12, 11, 15, 10, 11, 12, 15, 15])).all())

    def test_o2o_decode(self):
        """
        Test correct decoding of one-to-one label sequence
        """
        self.assertEqual(''.join(x[0] for x in self.o2o_codec.decode([(1, 288, 652, 0.8537325587315542),
                                                                      (1, 120, 861, 0.4968470297302481),
                                                                      (1, 372, 629, 0.008650773294205938),
                                                                      (2, 406, 831, 0.15637985875540783),
                                                                      (1, 3, 824, 0.26475146828232776),
                                                                      (2, 228, 959, 0.3062689368044844),
                                                                      (2, 472, 679, 0.8677848554329698),
                                                                      (1, 482, 771, 0.6055591197109657),
                                                                      (1, 452, 606, 0.40744265053745055),
                                                                      (1, 166, 879, 0.7509269177978337),
                                                                      (2, 92, 729, 0.34554103785480306),
                                                                      (2, 227, 959, 0.3006394689033981),
                                                                      (2, 341, 699, 0.07798704843315862),
                                                                      (2, 142, 513, 0.9933850573241767)])),
                         'aaababbaaabbbb')

    def test_m2o_decode(self):
        """
        Test correct decoding of many-to-one label sequence
        """
        self.assertEqual(''.join(x[0] for x in self.m2o_codec.decode([(3, 28, 967, 0.07761440833942468),
                                                                      (4, 282, 565, 0.4946281412618093),
                                                                      (1, 411, 853, 0.7767301050586806),
                                                                      (4, 409, 501, 0.47915609540996495),
                                                                      (4, 299, 637, 0.7755889399450564),
                                                                      (3, 340, 834, 0.726656062406549),
                                                                      (4, 296, 846, 0.2274859668684881),
                                                                      (4, 238, 695, 0.32982930128257815),
                                                                      (4, 187, 970, 0.43354272748701805),
                                                                      (4, 376, 863, 0.24483897879550764)])),
                         'aaababbaaabbbb')

    def test_o2m_decode(self):
        """
        Test correct decoding of one-to-many label sequence
        """
        self.assertEqual(''.join(x[0] for x in self.o2m_codec.decode([(10, 35, 959, 0.43819571289990644),
                                                                      (11, 361, 904, 0.1801115018592916),
                                                                      (12, 15, 616, 0.5987506334315549),
                                                                      (10, 226, 577, 0.6178248939780698),
                                                                      (11, 227, 814, 0.31531097360327787),
                                                                      (12, 390, 826, 0.7706594984014595),
                                                                      (10, 251, 579, 0.9442530315305507),
                                                                      (11, 269, 870, 0.4475979925584944),
                                                                      (12, 456, 609, 0.9396137478409995),
                                                                      (12, 60, 757, 0.06416607235266458),
                                                                      (45, 318, 918, 0.8129458423341515),
                                                                      (80, 15, 914, 0.49773432435726517),
                                                                      (10, 211, 648, 0.7919220961861382),
                                                                      (11, 326, 804, 0.7852387442556333),
                                                                      (12, 93, 978, 0.9376801123379804),
                                                                      (12, 23, 698, 0.915543635886972),
                                                                      (45, 71, 599, 0.8137750423628737),
                                                                      (80, 167, 980, 0.6501035181890226),
                                                                      (12, 259, 823, 0.3122860659712233),
                                                                      (45, 312, 948, 0.20582589628806058),
                                                                      (80, 430, 694, 0.3528792552966924),
                                                                      (10, 470, 866, 0.0685524032330419),
                                                                      (11, 459, 826, 0.39354887700146846),
                                                                      (12, 392, 926, 0.4102018609185847),
                                                                      (10, 271, 592, 0.1877915301623876),
                                                                      (11, 206, 995, 0.21614062190981576),
                                                                      (12, 466, 648, 0.3106914763314057),
                                                                      (10, 368, 848, 0.28715379701274113),
                                                                      (11, 252, 962, 0.5535299604896257),
                                                                      (12, 387, 709, 0.844810014550603),
                                                                      (12, 156, 916, 0.9803695305965802),
                                                                      (45, 150, 555, 0.5969071330809561),
                                                                      (80, 381, 922, 0.5608300913697513),
                                                                      (12, 35, 762, 0.5227506455088722),
                                                                      (45, 364, 931, 0.7205481732247938),
                                                                      (80, 341, 580, 0.536934566913969),
                                                                      (12, 79, 919, 0.5136066153481802),
                                                                      (45, 377, 773, 0.6507467790760987),
                                                                      (80, 497, 931, 0.7635100185309783),
                                                                      (12, 76, 580, 0.9542477438586341),
                                                                      (45, 37, 904, 0.4299813924853797),
                                                                      (80, 425, 638, 0.6825047210425983)])),
                         'aaababbaaabbbb')

    def test_m2m_decode(self):
        """
        Test correct decoding of many-to-many label sequence
        """
        self.assertEqual(''.join(x[0] for x in self.m2m_codec.decode([(10, 313, 788, 0.9379917930525369),
                                                                      (11, 117, 793, 0.9974374577004185),
                                                                      (12, 50, 707, 0.020074164253385374),
                                                                      (12, 382, 669, 0.525910770170754),
                                                                      (10, 458, 833, 0.4292373233167248),
                                                                      (15, 45, 831, 0.5759709886686226),
                                                                      (10, 465, 729, 0.8492104897235935),
                                                                      (11, 78, 800, 0.24733538459309445),
                                                                      (12, 375, 872, 0.26908722769105353),
                                                                      (15, 296, 889, 0.44251812620463726),
                                                                      (15, 237, 930, 0.5456105208117391)])),
                         'aaabbbaaabbbb')

    def test_o2o_decode_invalid_nonstrict(self):
        """
        Test correct handling of undecodable sequences (one-to-one decoder)
        """
        self.assertEqual(self.o2o_codec.decode(self.invalid_l_sequence), [])

    def test_m2o_decode_invalid_nonstrict(self):
        """
        Test correct handling of undecodable sequences (many-to-one decoder)
        """
        self.assertEqual(self.m2o_codec.decode(self.invalid_l_sequence), [])

    def test_o2m_decode_invalid_nonstrict(self):
        """
        Test correct handling of undecodable sequences (one-to-many decoder)
        """
        self.assertEqual(self.o2m_codec.decode(self.invalid_l_sequence),
                         [('a', 203, 831, 0.8195729875383888)])

    def test_m2m_decode_invalid_nonstrict(self):
        """
        Test correct handling of undecodable sequences (many-to-many decoder)
        """
        self.assertEqual(self.m2m_codec.decode(self.invalid_l_sequence),
                         [('a', 203, 831, 0.8195729875383888),
                          ('a', 203, 831, 0.8195729875383888),
                          ('a', 203, 831, 0.8195729875383888)])

    def test_o2o_encode_invalid_nonstrict(self):
        """
        Test correct handling of noisy character sequences (one-to-one encoder)
        """
        self.assertTrue(self.o2o_codec.encode(self.invalid_c_sequence).eq(
            IntTensor([1, 1, 1, 2, 1, 2, 2, 1, 1, 1])).all())

    def test_m2o_encode_invalid_nonstrict(self):
        """
        Test correct handling of noisy character sequences (many-to-one encoder)
        """
        self.assertTrue(self.m2o_codec.encode(self.invalid_c_sequence).eq(
            IntTensor([3, 4, 1, 4, 4, 3])).all())

    def test_o2m_encode_invalid_nonstrict(self):
        """
        Test correct handling of noisy character sequences (one-to-many encoder)
        """
        self.assertTrue(self.o2m_codec.encode(self.invalid_c_sequence).eq(
            IntTensor([10, 11, 12, 10, 11, 12, 10, 11, 12, 12, 45,
                       80, 10, 11, 12, 12, 45, 80, 12, 45, 80, 10,
                       11, 12, 10, 11, 12, 10, 11, 12])).all())

    def test_m2m_encode_invalid_nonstrict(self):
        """
        Test correct handling of noisy character sequences (many-to-many encoder)
        """
        self.assertTrue(self.m2m_codec.encode(self.invalid_c_sequence).eq(
            IntTensor([10, 11, 12, 12, 11, 15, 10, 11, 12])).all())

    def test_o2o_decode_invalid(self):
        """
        Test correct handling of undecodable sequences (one-to-one decoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.o2o_codec_strict.decode(self.invalid_l_sequence)

    def test_m2o_decode_invalid(self):
        """
        Test correct handling of undecodable sequences (many-to-one decoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.m2o_codec_strict.decode(self.invalid_l_sequence)

    def test_o2m_decode_invalid(self):
        """
        Test correct handling of undecodable sequences (one-to-many decoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.o2m_codec_strict.decode(self.invalid_l_sequence)

    def test_m2m_decode_invalid(self):
        """
        Test correct handling of undecodable sequences (many-to-many decoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.m2m_codec_strict.decode(self.invalid_l_sequence)

    def test_o2o_encode_invalid(self):
        """
        Test correct handling of unencodable sequences (one-to-one encoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.o2o_codec_strict.encode(self.invalid_c_sequence)

    def test_m2o_encode_invalid(self):
        """
        Test correct handling of unencodable sequences (many-to-one encoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.m2o_codec_strict.encode(self.invalid_c_sequence)

    def test_o2m_encode_invalid(self):
        """
        Test correct handling of unencodable sequences (one-to-many encoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.o2m_codec_strict.encode(self.invalid_c_sequence)

    def test_m2m_encode_invalid(self):
        """
        Test correct handling of unencodable sequences (many-to-many encoder) in strict mode
        """
        with raises(KrakenEncodeException):
            self.m2m_codec_strict.encode(self.invalid_c_sequence)

    def test_codec_add_simple(self):
        """
        Test adding of new code points to codec.
        """
        prev_len = len(self.o2o_codec)
        codec = self.o2o_codec.add_labels('cde')
        self.assertEqual(len(codec), prev_len + 3)
        self.assertTrue(codec.encode(self.ada_sequence).eq(
            IntTensor([3, 4, 1, 1, 2, 1, 5])).all())

    def test_codec_add_list(self):
        """
        Test adding of new code points to codec.
        """
        prev_len = len(self.o2o_codec)
        codec = self.o2o_codec.add_labels(['cd', 'e'])
        self.assertEqual(len(codec), prev_len + 2)
        self.assertTrue(codec.encode(self.ada_sequence).eq(
            IntTensor([3, 1, 1, 2, 1, 4])).all())

    def test_codec_add_dict(self):
        """
        Test adding of new code points to codec.
        """
        prev_len = len(self.o2o_codec)
        codec = self.o2o_codec.add_labels({'cd': [3], 'e': [4]})
        self.assertEqual(len(codec), prev_len + 2)
        self.assertTrue(codec.encode(self.ada_sequence).eq(
            IntTensor([3, 1, 1, 2, 1, 4])).all())

    def test_codec_merge_both(self):
        """
        Test merging of a codec adding and removing code points
        """
        merge_codec = codec.PytorchCodec('acde')
        new_codec, del_labels = self.o2o_codec.merge(merge_codec)
        self.assertEqual(del_labels, {2})
        self.assertEqual(new_codec.c2l, {'a': [1], 'c': [2], 'd': [3], 'e': [4]})

    def test_codec_merge_add(self):
        """
        Test merging of a codec adding and removing code points
        """
        merge_codec = codec.PytorchCodec('abcde')
        new_codec, del_labels = self.o2o_codec.merge(merge_codec)
        self.assertEqual(del_labels, set())
        self.assertEqual(new_codec.c2l, {'a': [1], 'b': [2], 'c': [3], 'd': [4], 'e': [5]})

    def test_codec_merge_remove(self):
        """
        Test merging of a codec removing code points
        """
        merge_codec = codec.PytorchCodec('a')
        new_codec, del_labels = self.o2o_codec.merge(merge_codec)
        self.assertEqual(del_labels, {2})
        self.assertEqual(new_codec.c2l, {'a': [1]})
| 18,763 | 53.231214 | 134 | py |
SSL4MIS | SSL4MIS-master/code/test_urpc_util.py | import math
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from skimage.measure import label
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    """
    Runs sliding-window inference of a URPC-style network over one 3D volume.

    The volume is zero-padded up to patch_size if needed, patches are
    predicted on overlapping windows (stride_xy in-plane, stride_z in depth),
    softmax scores of the main decoder output are accumulated and normalized
    by per-voxel window counts, and the argmax label map cropped back to the
    original shape is returned.
    """
    w, h, d = image.shape

    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # split padding evenly between both sides of each axis
    wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
                               (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww, hh, dd = image.shape

    # number of windows per axis
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    # print("{}, {}, {}".format(sx, sy, sz))
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)

    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])  # clamp last window to the border
        for y in range(0, sy):
            ys = min(stride_xy * y, hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0],
                                   ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # add batch and channel dimensions: (1, 1, W, H, D)
                test_patch = np.expand_dims(np.expand_dims(
                    test_patch, axis=0), axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()

                with torch.no_grad():
                    y_main, y_aux1, y_aux2, y_aux3 = net(test_patch)
                    # ensemble
                    y_main = torch.softmax(y_main, dim=1)
                    y_aux1 = torch.softmax(y_aux1, dim=1)
                    y_aux2 = torch.softmax(y_aux2, dim=1)
                    y_aux3 = torch.softmax(y_aux3, dim=1)
                # only the main decoder output is used; the averaged
                # ensemble variant is kept commented out below
                y = y_main
                # y = (y_main+y_aux1+y_aux2+y_aux3)
                y = y.cpu().data.numpy()
                y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    # average overlapping predictions
    score_map = score_map/np.expand_dims(cnt, axis=0)
    label_map = np.argmax(score_map, axis=0)

    if add_pad:
        # crop back to the original (unpadded) volume
        label_map = label_map[wl_pad:wl_pad+w,
                              hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        score_map = score_map[:, wl_pad:wl_pad +
                              w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
    return label_map
def cal_metric(gt, pred):
    """
    Computes Dice coefficient and 95th-percentile Hausdorff distance for a
    binary prediction/ground-truth pair.

    Returns a length-2 array [dice, hd95]; all zeros when either volume is
    empty (both metrics are undefined in that case).
    """
    if pred.sum() <= 0 or gt.sum() <= 0:
        return np.zeros(2)
    dice_score = metric.binary.dc(pred, gt)
    hausdorff95 = metric.binary.hd95(pred, gt)
    return np.array([dice_score, hausdorff95])
def test_all_case(net, base_dir, method="unet_3D", test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24, test_save_path=None):
    """
    Evaluates `net` on every volume listed in `test_list`.

    For each case it runs sliding-window inference, computes
    dice/RAVD/HD95/ASD for class 1, appends the per-case metrics to a text
    report, and writes prediction/image/label NIfTI files to
    `test_save_path`. Returns the metric array averaged over all cases.
    """
    with open(base_dir + '/{}'.format(test_list), 'r') as f:
        image_list = f.readlines()
    image_list = [base_dir + "/data/{}.h5".format(
        item.replace('\n', '').split(",")[0]) for item in image_list]
    total_metric = np.zeros((num_classes - 1, 4))
    print("Testing begin")
    with open(test_save_path + "/{}.txt".format(method), "a") as f:
        for image_path in tqdm(image_list):
            ids = image_path.split("/")[-1].replace(".h5", "")
            h5f = h5py.File(image_path, 'r')
            image = h5f['image'][:]
            label = h5f['label'][:]
            prediction = test_single_case(
                net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
            # NOTE(review): this local `metric` shadows the medpy `metric`
            # import inside this function; only class 1 is evaluated even
            # though total_metric is sized for num_classes-1 classes.
            metric = calculate_metric_percase(prediction == 1, label == 1)
            total_metric[0, :] += metric
            f.writelines("{},{},{},{},{}\n".format(
                ids, metric[0], metric[1], metric[2], metric[3]))

            pred_itk = sitk.GetImageFromArray(prediction.astype(np.uint8))
            pred_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(pred_itk, test_save_path +
                            "/{}_pred.nii.gz".format(ids))

            img_itk = sitk.GetImageFromArray(image)
            img_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(img_itk, test_save_path +
                            "/{}_img.nii.gz".format(ids))

            lab_itk = sitk.GetImageFromArray(label.astype(np.uint8))
            lab_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(lab_itk, test_save_path +
                            "/{}_lab.nii.gz".format(ids))
        f.writelines("Mean metrics,{},{},{},{}".format(total_metric[0, 0] / len(image_list), total_metric[0, 1] / len(
            image_list), total_metric[0, 2] / len(image_list), total_metric[0, 3] / len(image_list)))
    f.close()
    print("Testing end")
    return total_metric / len(image_list)
def cal_dice(prediction, label, num=2):
    """Compute the Dice coefficient for each class 1..num-1.

    Args:
        prediction: integer label array of predicted classes.
        label: integer label array of ground-truth classes.
        num: total number of classes (class 0 is background, skipped).

    Returns:
        np.ndarray of shape (num - 1,) with one Dice score per
        foreground class. A class absent from both arrays yields nan
        (0/0), matching the original behavior.
    """
    total_dice = np.zeros(num - 1)
    for i in range(1, num):
        # Binary masks for class i, cast to float for the arithmetic.
        # Fix: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24 -- use the builtin float instead.
        prediction_tmp = (prediction == i).astype(float)
        label_tmp = (label == i).astype(float)
        dice = 2 * np.sum(prediction_tmp * label_tmp) / \
            (np.sum(prediction_tmp) + np.sum(label_tmp))
        total_dice[i - 1] += dice
    return total_dice
def calculate_metric_percase(pred, gt):
    """Return [Dice, |RAVD|, HD95, ASD] for one binary mask pair.

    Surface-based metrics are undefined when either mask is empty, so
    an all-zero vector is returned in that case.
    """
    # Guard clause: bail out early if either mask has no foreground.
    if pred.sum() <= 0 or gt.sum() <= 0:
        return np.zeros(4)
    scores = (
        metric.binary.dc(pred, gt),
        abs(metric.binary.ravd(pred, gt)),
        metric.binary.hd95(pred, gt),
        metric.binary.asd(pred, gt),
    )
    return np.array(scores)
| 6,418 | 38.623457 | 169 | py |
SSL4MIS | SSL4MIS-master/code/train_interpolation_consistency_training_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for 2D Interpolation Consistency Training.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Interpolation_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list on a default list means a CLI-supplied value
# would be split into characters; the default is what is actually used.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=300,
                    help='labeled data')
parser.add_argument('--ict_alpha', type=int, default=0.2,
                    help='ict_alpha')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Translate a labeled-patient count into a labeled-slice count.

    Args:
        dataset: dataset path/name; matched against "ACDC" / "Prostate".
        patiens_num: number of labeled patients (int or str key).

    Returns:
        Number of annotated 2D slices for that split size.

    Raises:
        KeyError: if ``patiens_num`` is not a tabulated split size.
        TypeError: if the dataset matches neither known dataset
            (``ref_dict`` stays None).
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Bug fix: original `elif "Prostate":` tested a truthy literal and
    # therefore matched every non-ACDC dataset name.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher weights.

    Performs, in place for each parameter pair:
    ``ema = alpha * ema + (1 - alpha) * student``.

    Args:
        model: student network being optimized.
        ema_model: teacher network updated as the EMA of the student.
        alpha: target EMA decay rate.
        global_step: 0-based update counter used for the warm-up ramp.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Fix: the positional Tensor.add_(scalar, tensor) overload was
        # deprecated and removed in modern PyTorch; pass alpha by keyword.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Run Interpolation Consistency Training (ICT) on 2D slices.

    A student/teacher (EMA) pair is trained: labeled slices receive
    CE + Dice supervision, while mixed pairs of unlabeled slices are
    pushed toward the teacher's correspondingly mixed predictions via
    an MSE consistency term. Checkpoints and TensorBoard logs go to
    ``snapshot_path``.

    Args:
        args: parsed command-line namespace (see parser above).
        snapshot_path: output directory for logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            # Teacher parameters are updated via EMA, never by backprop.
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Slices [0, labeled_slice) are labeled; the sampler mixes both
    # streams so each batch holds labeled_bs labeled + the rest unlabeled.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            labeled_volume_batch = volume_batch[:args.labeled_bs]
            # ICT mix factors
            # NOTE(review): the halves are sized labeled_bs//2, which
            # assumes labeled_bs == batch_size - labeled_bs (true for
            # the defaults 12/24) -- confirm for other batch configs.
            ict_mix_factors = np.random.beta(
                args.ict_alpha, args.ict_alpha, size=(args.labeled_bs//2, 1, 1, 1))
            ict_mix_factors = torch.tensor(
                ict_mix_factors, dtype=torch.float).cuda()
            unlabeled_volume_batch_0 = unlabeled_volume_batch[0:args.labeled_bs//2, ...]
            unlabeled_volume_batch_1 = unlabeled_volume_batch[args.labeled_bs//2:, ...]
            # Mix images
            batch_ux_mixed = unlabeled_volume_batch_0 * \
                (1.0 - ict_mix_factors) + \
                unlabeled_volume_batch_1 * ict_mix_factors
            input_volume_batch = torch.cat(
                [labeled_volume_batch, batch_ux_mixed], dim=0)
            outputs = model(input_volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            # Teacher predicts each unlabeled half separately; the
            # consistency target is the matching mix of its predictions.
            with torch.no_grad():
                ema_output_ux0 = torch.softmax(
                    ema_model(unlabeled_volume_batch_0), dim=1)
                ema_output_ux1 = torch.softmax(
                    ema_model(unlabeled_volume_batch_1), dim=1)
                batch_pred_mixed = ema_output_ux0 * \
                    (1.0 - ict_mix_factors) + ema_output_ux1 * ict_mix_factors
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            # Weight ramps up over iterations (one "epoch" per 150 iters).
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_loss = torch.mean(
                (outputs_soft[args.labeled_bs:] - batch_pred_mixed) ** 2)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            # Periodic qualitative logging of one sample.
            if iter_num % 20 == 0:
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                image = batch_ux_mixed[1, 0:1, :, :]
                writer.add_image('train/Mixed_Unlabeled',
                                 image, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            # Periodic validation; keep the best-Dice checkpoint.
            if iter_num > 0 and iter_num % 200 == 0:
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            # Unconditional periodic checkpoint.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Reproducibility: cudnn.benchmark trades determinism for speed.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: ignore_patterns takes patterns as *args (not a list), and
    # the callable was previously passed positionally into copytree's
    # `symlinks` slot, so nothing was ever ignored and copytree would
    # raise once fnmatch saw a list pattern.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 12,984 | 41.996689 | 108 | py |
SSL4MIS | SSL4MIS-master/code/test_3D.py | import argparse
import os
import shutil
from glob import glob
import torch
from networks.unet_3D import unet_3D
from test_3D_util import test_all_case
# Command-line configuration for 3D test-time inference.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTS2019/Interpolation_Consistency_Training_25', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
def Inference(FLAGS):
    """Evaluate the best checkpoint of an experiment on the test split.

    Loads ``../model/<exp>/<model>/<model>_best_model.pth``, runs
    sliding-window inference via ``test_all_case`` over ``test.txt``
    and writes predictions to ``../model/<exp>/Prediction``.

    Args:
        FLAGS: parsed argparse namespace (root_path, exp, model).

    Returns:
        Mean per-case metric array from ``test_all_case``.
    """
    snapshot_path = "../model/{}/{}".format(FLAGS.exp, FLAGS.model)
    # Binary segmentation: background vs. whole tumor.
    num_classes = 2
    test_save_path = "../model/{}/Prediction".format(FLAGS.exp)
    # Start from an empty prediction directory on every run.
    if os.path.exists(test_save_path):
        shutil.rmtree(test_save_path)
    os.makedirs(test_save_path)
    net = unet_3D(n_classes=num_classes, in_channels=1).cuda()
    save_mode_path = os.path.join(
        snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
    net.load_state_dict(torch.load(save_mode_path))
    print("init weight from {}".format(save_mode_path))
    net.eval()
    avg_metric = test_all_case(net, base_dir=FLAGS.root_path, method=FLAGS.model, test_list="test.txt", num_classes=num_classes,
                               patch_size=(96, 96, 96), stride_xy=64, stride_z=64, test_save_path=test_save_path)
    return avg_metric
if __name__ == '__main__':
    # Parse CLI flags, run inference, and print the mean metrics.
    FLAGS = parser.parse_args()
    metric = Inference(FLAGS)
    print(metric)
| 1,495 | 34.619048 | 128 | py |
SSL4MIS | SSL4MIS-master/code/train_adversarial_network_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for 2D adversarial (DAN) semi-supervised training.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Adversarial_Network', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--DAN_lr', type=float, default=0.0001,
                    help='DAN learning rate')
# NOTE(review): type=list on a default list means a CLI-supplied value
# would be split into characters; the default is what is actually used.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Translate a labeled-patient count into a labeled-slice count.

    Args:
        dataset: dataset path/name; matched against "ACDC" / "Prostate".
        patiens_num: number of labeled patients (int or str key).

    Returns:
        Number of annotated 2D slices for that split size.

    Raises:
        KeyError: if ``patiens_num`` is not a tabulated split size.
        TypeError: if the dataset matches neither known dataset
            (``ref_dict`` stays None).
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Bug fix: original `elif "Prostate":` tested a truthy literal and
    # therefore matched every non-ACDC dataset name.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def train(args, snapshot_path):
    """Train a 2D segmenter with an adversarial discriminator (DAN).

    Alternates two phases per batch: (1) the segmenter minimizes
    CE + Dice on labeled slices plus an adversarial term that tries to
    make the discriminator classify unlabeled predictions as labeled;
    (2) the discriminator (DAN) is trained to tell labeled from
    unlabeled predictions. Checkpoints/logs go to ``snapshot_path``.

    Args:
        args: parsed command-line namespace (see parser above).
        snapshot_path: output directory for logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)
    model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
    DAN = FCDiscriminator(num_classes=num_classes)
    DAN = DAN.cuda()
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Slices [0, labeled_slice) are labeled; the sampler mixes both
    # streams so each batch holds labeled_bs labeled + the rest unlabeled.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    DAN_optimizer = optim.Adam(
        DAN.parameters(), lr=args.DAN_lr, betas=(0.9, 0.99))
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # Discriminator targets: 1 = labeled sample, 0 = unlabeled.
            DAN_target = torch.tensor([0] * args.batch_size).cuda()
            DAN_target[:args.labeled_bs] = 1
            # Phase 1: update the segmenter, discriminator frozen.
            model.train()
            DAN.eval()
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:][:args.labeled_bs].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            DAN_outputs = DAN(
                outputs_soft[args.labeled_bs:], volume_batch[args.labeled_bs:])
            # Adversarial term: push DAN to label unlabeled outputs as
            # "labeled" (all-ones targets).
            # NOTE(review): the [:args.labeled_bs] slice of the target
            # only matches the unlabeled count when labeled_bs ==
            # batch_size - labeled_bs (true for the defaults 12/24).
            consistency_loss = F.cross_entropy(
                DAN_outputs, (DAN_target[:args.labeled_bs]).long())
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Phase 2: update the discriminator on frozen segmenter outputs.
            model.eval()
            DAN.train()
            with torch.no_grad():
                outputs = model(volume_batch)
                outputs_soft = torch.softmax(outputs, dim=1)
            DAN_outputs = DAN(outputs_soft, volume_batch)
            DAN_loss = F.cross_entropy(DAN_outputs, DAN_target.long())
            DAN_optimizer.zero_grad()
            DAN_loss.backward()
            DAN_optimizer.step()
            # Polynomial learning-rate decay for the segmenter.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            # Periodic qualitative logging of one sample.
            if iter_num % 20 == 0:
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            # Periodic validation; keep the best-Dice checkpoint.
            if iter_num > 0 and iter_num % 200 == 0:
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            # Unconditional periodic checkpoint.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Reproducibility: cudnn.benchmark trades determinism for speed.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: ignore_patterns takes patterns as *args (not a list), and
    # the callable was previously passed positionally into copytree's
    # `symlinks` slot, so nothing was ever ignored and copytree would
    # raise once fnmatch saw a list pattern.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 11,787 | 40.507042 | 108 | py |
SSL4MIS | SSL4MIS-master/code/train_mean_teacher_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for 3D Mean Teacher training on BraTS2019.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list on a default list means a CLI-supplied value
# would be split into characters; the default is what is actually used.
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher weights.

    Performs, in place for each parameter pair:
    ``ema = alpha * ema + (1 - alpha) * student``.

    Args:
        model: student network being optimized.
        ema_model: teacher network updated as the EMA of the student.
        alpha: target EMA decay rate.
        global_step: 0-based update counter used for the warm-up ramp.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Fix: the positional Tensor.add_(scalar, tensor) overload was
        # deprecated and removed in modern PyTorch; pass alpha by keyword.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Train a 3D Mean Teacher model on BraTS2019 volumes.

    A student/teacher (EMA) pair is trained: labeled patches receive
    CE + Dice supervision, while all unlabeled patches are pushed
    toward the teacher's predictions on noise-perturbed inputs via an
    MSE consistency term. Checkpoints and TensorBoard logs go to
    ``snapshot_path``.

    Args:
        args: parsed command-line namespace (see parser above).
        snapshot_path: output directory for logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    # Binary task: background vs. whole tumor.
    num_classes = 2
    def create_model(ema=False):
        # Network definition
        net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
        model = net.cuda()
        if ema:
            # Teacher parameters are updated via EMA, never by backprop.
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        random.seed(args.seed + worker_id)
    # First labeled_num cases are labeled; 250 is the train-set size.
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, 250))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(2)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # Teacher sees the unlabeled inputs with clipped Gaussian noise.
            noise = torch.clamp(torch.randn_like(
                unlabeled_volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = unlabeled_volume_batch + noise
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
                ema_output_soft = torch.softmax(ema_output, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:])
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            # Weight ramps up over iterations (one "epoch" per 150 iters).
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_loss = torch.mean(
                (outputs_soft[args.labeled_bs:] - ema_output_soft)**2)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            writer.add_scalar('loss/loss', loss, iter_num)
            # Periodic qualitative logging: a strip of axial slices.
            if iter_num % 20 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            # Periodic validation; keep the best-Dice checkpoint.
            if iter_num > 0 and iter_num % 200 == 0:
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            # Unconditional periodic checkpoint.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Reproducibility: cudnn.benchmark trades determinism for speed.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: ignore_patterns takes patterns as *args (not a list), and
    # the callable was previously passed positionally into copytree's
    # `symlinks` slot, so nothing was ever ignored and copytree would
    # raise once fnmatch saw a list pattern.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 11,142 | 40.890977 | 126 | py |
SSL4MIS | SSL4MIS-master/code/train_interpolation_consistency_training_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='BraTS2019_Interpolation_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=14,
help='labeled data')
parser.add_argument('--total_labeled_num', type=int, default=140,
help='total labeled data')
parser.add_argument('--ict_alpha', type=int, default=0.2,
help='ict_alpha')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Return the ramped-up consistency-loss weight for the given epoch.

    Consistency ramp-up from https://arxiv.org/abs/1610.02242: the weight
    grows along a sigmoid from 0 to ``args.consistency`` over
    ``args.consistency_rampup`` epochs.
    """
    return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) weights in place from the student model.

    Use the true average until the exponential average is more correct:
    the effective decay is ``min(1 - 1/(global_step + 1), alpha)``.

    Args:
        model: student network whose parameters are the EMA source.
        ema_model: teacher network updated in place.
        alpha: target EMA decay rate.
        global_step: current training iteration (0-based).
    """
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # `Tensor.add_(Number, Tensor)` is deprecated; use the keyword form.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Train a 3D segmentation network on BraTS2019 with Interpolation
    Consistency Training (ICT).

    A student model is supervised (CE + Dice) on labeled patches and
    regularized so its prediction on a mixed unlabeled patch matches the
    convex combination of a mean-teacher (EMA) model's predictions on the
    two unmixed patches.

    Args:
        args: parsed command-line namespace (paths, batch sizes, seeds, ...).
        snapshot_path: directory receiving TensorBoard logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2
    def create_model(ema=False):
        # Network definition; the EMA (teacher) copy is detached so it is
        # only updated via update_ema_variables, never by backprop.
        net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
        model = net.cuda()
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, args.total_labeled_num))
    # Every batch contains labeled_bs labeled and (batch_size - labeled_bs)
    # unlabeled samples.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(2)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            labeled_volume_batch = volume_batch[:args.labeled_bs]
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # ICT mix factors drawn from Beta(ict_alpha, ict_alpha).
            ict_mix_factors = np.random.beta(
                args.ict_alpha, args.ict_alpha, size=(args.labeled_bs//2, 1, 1, 1, 1))
            ict_mix_factors = torch.tensor(
                ict_mix_factors, dtype=torch.float).cuda()
            # NOTE(review): only the first two unlabeled volumes are mixed;
            # assumes batch_size - labeled_bs >= 2 — confirm against args.
            unlabeled_volume_batch_0 = unlabeled_volume_batch[0:1, ...]
            unlabeled_volume_batch_1 = unlabeled_volume_batch[1:2, ...]
            # Mix images: convex combination of the two unlabeled volumes.
            batch_ux_mixed = unlabeled_volume_batch_0 * \
                (1.0 - ict_mix_factors) + \
                unlabeled_volume_batch_1 * ict_mix_factors
            input_volume_batch = torch.cat(
                [labeled_volume_batch, batch_ux_mixed], dim=0)
            outputs = model(input_volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            with torch.no_grad():
                # Teacher predictions on the *unmixed* volumes; their convex
                # combination is the consistency target for the mixed input.
                ema_output_ux0 = torch.softmax(
                    ema_model(unlabeled_volume_batch_0), dim=1)
                ema_output_ux1 = torch.softmax(
                    ema_model(unlabeled_volume_batch_1), dim=1)
                batch_pred_mixed = ema_output_ux0 * \
                    (1.0 - ict_mix_factors) + ema_output_ux1 * ict_mix_factors
            # Supervised loss on the labeled part of the batch only.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:])
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_loss = torch.mean(
                (outputs_soft[args.labeled_bs:] - batch_pred_mixed)**2)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Poly learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            writer.add_scalar('loss/loss', loss, iter_num)
            if iter_num % 20 == 0:
                # Log a few axial slices (z = 20..60, step 10) as image grids.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep the checkpoint with the best Dice.
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=32, stride_z=32)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Reproducibility: deterministic cuDNN trades speed for repeatable runs.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: `shutil.ignore_patterns(...)` was previously passed positionally
    # into copytree's `symlinks` parameter, so the ignore callable was unused.
    # Pass it as `ignore=` and give the patterns as separate strings.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    # Mirror log output to a file in the snapshot dir and to stdout.
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 12,251 | 41.989474 | 126 | py |
SSL4MIS | SSL4MIS-master/code/val_3D.py | import math
from glob import glob
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    """Sliding-window inference over one 3D volume.

    Pads the volume up to ``patch_size`` if needed, runs ``net`` on
    overlapping patches, averages the softmax scores where patches overlap,
    and returns the argmax label map cropped back to the input shape.

    Args:
        net: model mapping a (1, 1, W, H, D) float tensor to class logits.
        image: 3D numpy array of shape (W, H, D).
        stride_xy: patch stride along the first two axes.
        stride_z: patch stride along the third axis.
        patch_size: (pw, ph, pd) patch dimensions.
        num_classes: number of output channels of ``net``.

    Returns:
        Integer label map of the same shape as ``image``.
    """
    w, h, d = image.shape
    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # Split each padding amount symmetrically (left/right, etc.).
    wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
                               (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww, hh, dd = image.shape
    # Number of window positions per axis (last window is clamped to the edge).
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    # print("{}, {}, {}".format(sx, sy, sz))
    # Accumulators: summed class scores and per-voxel patch counts.
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)
    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y, hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0],
                                   ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # Add batch and channel dimensions: (1, 1, pw, ph, pd).
                test_patch = np.expand_dims(np.expand_dims(
                    test_patch, axis=0), axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()
                with torch.no_grad():
                    y1 = net(test_patch)
                    # ensemble
                    y = torch.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    # Average overlapping predictions, then pick the most likely class.
    score_map = score_map/np.expand_dims(cnt, axis=0)
    label_map = np.argmax(score_map, axis=0)
    if add_pad:
        # Crop results back to the original (unpadded) extent.
        label_map = label_map[wl_pad:wl_pad+w,
                              hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        score_map = score_map[:, wl_pad:wl_pad +
                              w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
    return label_map
def cal_metric(gt, pred):
    """Return [Dice, HD95] between two binary masks.

    Falls back to zeros when either mask is empty, since HD95 is
    undefined for empty segmentations.
    """
    if gt.sum() == 0 or pred.sum() == 0:
        return np.zeros(2)
    dice_score = metric.binary.dc(pred, gt)
    hd95_score = metric.binary.hd95(pred, gt)
    return np.array([dice_score, hd95_score])
def test_all_case(net, base_dir, test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24):
    """Evaluate ``net`` on every case listed in ``base_dir/test_list``.

    Each list line names an HDF5 file (first comma-separated field) under
    ``base_dir/data/`` containing 'image' and 'label' arrays. Per-class
    Dice/HD95 metrics are accumulated over all cases.

    Returns:
        Array of shape (num_classes-1, 2): mean [Dice, HD95] per
        foreground class, averaged over the case list.
    """
    with open(base_dir + '/{}'.format(test_list), 'r') as f:
        image_list = f.readlines()
    image_list = [base_dir + "/data/{}.h5".format(
        item.replace('\n', '').split(",")[0]) for item in image_list]
    total_metric = np.zeros((num_classes-1, 2))
    print("Validation begin")
    for image_path in tqdm(image_list):
        h5f = h5py.File(image_path, 'r')
        image = h5f['image'][:]
        label = h5f['label'][:]
        prediction = test_single_case(
            net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
        # Accumulate one-vs-rest metrics for each foreground class i.
        for i in range(1, num_classes):
            total_metric[i-1, :] += cal_metric(label == i, prediction == i)
    print("Validation end")
    return total_metric / len(image_list)
| 4,073 | 36.722222 | 130 | py |
SSL4MIS | SSL4MIS-master/code/train_fully_supervised_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume, test_single_volume_ds
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--labeled_num', type=int, default=50,
help='labeled data')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to its precomputed number of 2D slices.

    Args:
        dataset: dataset root path; the dataset is identified by the
            substring "ACDC" or "Prostate".
        patiens_num: number of labeled patients (name kept for callers).

    Returns:
        Number of annotated slices corresponding to ``patiens_num``.

    Raises:
        ValueError: if the dataset path matches no known dataset.
        KeyError: if ``patiens_num`` has no precomputed slice count.
    """
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Bug fix: the original `elif "Prostate":` tested a non-empty string
    # literal, which is always true, so unknown datasets silently fell into
    # the Prostate table and the else branch was unreachable.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def train(args, snapshot_path):
    """Fully-supervised 2D segmentation training (CE + Dice) on labeled slices.

    Trains on the first ``labeled_slice`` slices only, validates every 200
    iterations with per-volume Dice/HD95, and checkpoints the best model.

    Args:
        args: parsed command-line namespace (paths, batch size, lr, ...).
        snapshot_path: directory receiving TensorBoard logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    # Convert the patient count into the corresponding slice count.
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=labeled_slice, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
                             num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            # Equal-weight combination of cross-entropy and Dice losses.
            loss_ce = ce_loss(outputs, label_batch[:].long())
            loss_dice = dice_loss(outputs_soft, label_batch.unsqueeze(1))
            loss = 0.5 * (loss_dice + loss_ce)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Poly learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log sample input / prediction / ground-truth images
                # (labels scaled by 50 for visibility).
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep the checkpoint with the best
                # mean foreground Dice.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Reproducibility: deterministic cuDNN trades speed for repeatable runs.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: `shutil.ignore_patterns(...)` was previously passed positionally
    # into copytree's `symlinks` parameter, so the ignore callable was unused.
    # Pass it as `ignore=` and give the patterns as separate strings.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    # Mirror log output to a file in the snapshot dir and to stdout.
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 8,999 | 40.09589 | 117 | py |
SSL4MIS | SSL4MIS-master/code/train_regularized_dropout_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
default='ACDC/Regularized_Dropout', help='experiment_name')
parser.add_argument('--model', type=str,
default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=136,
help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
default=4.0, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
default=200.0, help='consistency_rampup')
args = parser.parse_args()
def kaiming_normal_init_weight(model):
    """Re-initialize *model* in place and return it.

    Conv2d weights get Kaiming-normal init; BatchNorm2d layers get unit
    scale and zero bias. Other module types are left untouched.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            torch.nn.init.kaiming_normal_(module.weight)
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
    return model
def xavier_normal_init_weight(model):
    """Re-initialize *model* in place and return it.

    Conv2d weights get Xavier-normal init; BatchNorm2d layers get unit
    scale and zero bias. Other module types are left untouched.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            torch.nn.init.xavier_normal_(module.weight)
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
    return model
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to its precomputed number of 2D slices.

    Args:
        dataset: dataset root path; the dataset is identified by the
            substring "ACDC" or "Prostate".
        patiens_num: number of labeled patients (name kept for callers).

    Returns:
        Number of annotated slices corresponding to ``patiens_num``.

    Raises:
        ValueError: if the dataset path matches no known dataset.
        KeyError: if ``patiens_num`` has no precomputed slice count.
    """
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Bug fix: the original `elif "Prostate":` tested a non-empty string
    # literal, which is always true, so unknown datasets silently fell into
    # the Prostate table and the else branch was unreachable.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Return the ramped-up consistency-loss weight for the given epoch.

    Consistency ramp-up from https://arxiv.org/abs/1610.02242: the weight
    grows along a sigmoid from 0 to ``args.consistency`` over
    ``args.consistency_rampup`` epochs.
    """
    return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) weights in place from the student model.

    Use the true average until the exponential average is more correct:
    the effective decay is ``min(1 - 1/(global_step + 1), alpha)``.

    Args:
        model: student network whose parameters are the EMA source.
        ema_model: teacher network updated in place.
        alpha: target EMA decay rate.
        global_step: current training iteration (0-based).
    """
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # `Tensor.add_(Number, Tensor)` is deprecated; use the keyword form.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Semi-supervised 2D segmentation training with R-Drop-style regularization.

    Two networks of the same architecture are trained jointly: each is
    supervised (CE + Dice) on labeled slices, and a symmetric KL term between
    their outputs on unlabeled slices enforces consistency. Both models are
    validated every 200 iterations and checkpointed independently.

    Args:
        args: parsed command-line namespace (paths, batch sizes, seeds, ...).
        snapshot_path: directory receiving TensorBoard logs and checkpoints.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition; ema=True would detach parameters from
        # autograd (unused here: both models are trained by backprop).
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model1 = create_model()
    model2 = create_model()
    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Every batch contains labeled_bs labeled and (batch_size - labeled_bs)
    # unlabeled samples.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model1.train()
    model2.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance1 = 0.0
    best_performance2 = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs1 = model1(volume_batch)
            outputs_soft1 = torch.softmax(outputs1, dim=1)
            outputs2 = model2(volume_batch)
            outputs_soft2 = torch.softmax(outputs2, dim=1)
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # Supervised loss for each model on the labeled part of the batch.
            model1_loss = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            model2_loss = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            # Symmetric KL between the two models on the unlabeled part.
            r_drop_loss = losses.compute_kl_loss(outputs1[args.labeled_bs:], outputs2[args.labeled_bs:])
            loss = model1_loss + model2_loss + consistency_weight * r_drop_loss
            optimizer1.zero_grad()
            optimizer2.zero_grad()
            loss.backward()
            optimizer1.step()
            optimizer2.step()
            iter_num = iter_num + 1
            # Poly learning-rate decay, applied to both optimizers.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer1.param_groups:
                param_group['lr'] = lr_
            for param_group in optimizer2.param_groups:
                param_group['lr'] = lr_
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar(
                'consistency_weight/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('loss/model1_loss',
                              model1_loss, iter_num)
            writer.add_scalar('loss/model2_loss',
                              model2_loss, iter_num)
            writer.add_scalar('loss/r_drop_loss',
                              r_drop_loss, iter_num)
            logging.info('iteration %d : model1 loss : %f model2 loss : %f r_drop_loss: %f' % (iter_num, model1_loss.item(), model2_loss.item(), r_drop_loss.item()))
            if iter_num % 50 == 0:
                # Log sample input / predictions / ground truth
                # (labels scaled by 50 for visibility).
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs1, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model1_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs2, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model2_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Validate model1; keep its best checkpoint by mean Dice.
                model1.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance1 = np.mean(metric_list, axis=0)[0]
                mean_hd951 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model1_val_mean_dice', performance1, iter_num)
                writer.add_scalar('info/model1_val_mean_hd95', mean_hd951, iter_num)
                if performance1 > best_performance1:
                    best_performance1 = performance1
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model1_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance1, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model1.pth'.format(args.model))
                    torch.save(model1.state_dict(), save_mode_path)
                    torch.save(model1.state_dict(), save_best)
                logging.info(
                    'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
                model1.train()
                # Validate model2 the same way, independently of model1.
                model2.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance2 = np.mean(metric_list, axis=0)[0]
                mean_hd952 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model2_val_mean_dice', performance2, iter_num)
                writer.add_scalar('info/model2_val_mean_hd95', mean_hd952, iter_num)
                if performance2 > best_performance2:
                    best_performance2 = performance2
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model2.pth'.format(args.model))
                    torch.save(model2.state_dict(), save_mode_path)
                    torch.save(model2.state_dict(), save_best)
                logging.info(
                    'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
                model2.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshots of both models.
                save_mode_path = os.path.join(
                    snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
                torch.save(model1.state_dict(), save_mode_path)
                logging.info("save model1 to {}".format(save_mode_path))
                save_mode_path = os.path.join(
                    snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
                torch.save(model2.state_dict(), save_mode_path)
                logging.info("save model2 to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            # NOTE(review): time1 is never used afterwards — dead assignment.
            time1 = time.time()
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # Reproducibility: deterministic cuDNN trades speed for repeatable runs.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: `shutil.ignore_patterns(...)` was previously passed positionally
    # into copytree's `symlinks` parameter, so the ignore callable was unused.
    # Pass it as `ignore=` and give the patterns as separate strings.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    # Mirror log output to a file in the snapshot dir and to stdout.
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 14,926 | 42.141618 | 165 | py |
SSL4MIS | SSL4MIS-master/code/train_cross_pseudo_supervision_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for cross-pseudo-supervision training on BraTS2019.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Cross_Pseudo_Supervision', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
# Parsed once at import time; the whole module reads this global.
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Consistency ramp-up from https://arxiv.org/abs/1610.02242."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update ``ema_model`` parameters as an EMA of ``model`` parameters.

    Args:
        model: source network whose current parameters are averaged in.
        ema_model: target network holding the exponential moving average.
        alpha: asymptotic EMA decay rate (e.g. 0.99).
        global_step: number of updates so far; warms up the decay so the
            average tracks the model closely early in training.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # FIX: Tensor.add_(scalar, tensor) is the long-deprecated overload;
        # use the explicit `alpha=` keyword form supported by modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def kaiming_normal_init_weight(model):
    """Re-initialise every Conv3d with Kaiming-normal weights and reset
    every BatchNorm3d to weight=1, bias=0. Returns the same model object."""
    for layer in model.modules():
        if isinstance(layer, nn.Conv3d):
            torch.nn.init.kaiming_normal_(layer.weight)
        elif isinstance(layer, nn.BatchNorm3d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
    return model
def xavier_normal_init_weight(model):
    """Re-initialise every Conv3d with Xavier-normal weights and reset
    every BatchNorm3d to weight=1, bias=0. Returns the same model object."""
    for layer in model.modules():
        if isinstance(layer, nn.Conv3d):
            torch.nn.init.xavier_normal_(layer.weight)
        elif isinstance(layer, nn.BatchNorm3d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
    return model
def train(args, snapshot_path):
    """Cross-pseudo-supervision (CPS) training loop for two 3D networks.

    Two identically-shaped networks with different weight initialisations
    supervise each other on the unlabeled part of every batch via each
    other's hard pseudo-labels, while both receive CE+Dice supervision on
    the labeled part. Checkpoints and TensorBoard logs go to snapshot_path.
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2  # background vs. whole tumour
    # Two cross-supervising networks, deliberately initialised differently.
    net1 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
    net2 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
    model1 = kaiming_normal_init_weight(net1)
    model2 = xavier_normal_init_weight(net2)
    model1.train()
    model2.train()

    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct but reproducible seed.
        random.seed(args.seed + worker_id)

    # Every batch mixes labeled and unlabeled samples: the first
    # `labeled_bs` items are labeled, the rest unlabeled.
    # NOTE(review): 250 looks like the total number of training cases —
    # confirm against the dataset split.
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, 250))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)

    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)

    optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    best_performance1 = 0.0
    best_performance2 = 0.0
    iter_num = 0
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)

    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    max_epoch = max_iterations // len(trainloader) + 1
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()

            outputs1 = model1(volume_batch)
            outputs_soft1 = torch.softmax(outputs1, dim=1)

            outputs2 = model2(volume_batch)
            outputs_soft2 = torch.softmax(outputs2, dim=1)
            # Unsupervised weight ramps up; epoch counter ticks every 150 iters.
            consistency_weight = get_current_consistency_weight(iter_num // 150)

            # Supervised CE+Dice on the labeled slice of the batch, per model.
            # NOTE: label_batch[:] is a no-op slice before the labeled_bs cut.
            loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs],
                                   label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs],
                                   label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))

            # Hard pseudo-labels on the unlabeled part; detached so no
            # gradient flows through the "teacher" side.
            pseudo_outputs1 = torch.argmax(outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
            pseudo_outputs2 = torch.argmax(outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)

            # Cross supervision: each model is trained on the other's labels.
            pseudo_supervision1 = ce_loss(outputs1[args.labeled_bs:], pseudo_outputs2)
            pseudo_supervision2 = ce_loss(outputs2[args.labeled_bs:], pseudo_outputs1)

            model1_loss = loss1 + consistency_weight * pseudo_supervision1
            model2_loss = loss2 + consistency_weight * pseudo_supervision2

            loss = model1_loss + model2_loss

            optimizer1.zero_grad()
            optimizer2.zero_grad()

            loss.backward()

            optimizer1.step()
            optimizer2.step()

            iter_num = iter_num + 1

            # Polynomial learning-rate decay, shared by both optimizers.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group1 in optimizer1.param_groups:
                param_group1['lr'] = lr_
            for param_group2 in optimizer2.param_groups:
                param_group2['lr'] = lr_

            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar(
                'consistency_weight/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('loss/model1_loss',
                              model1_loss, iter_num)
            writer.add_scalar('loss/model2_loss',
                              model2_loss, iter_num)
            logging.info(
                'iteration %d : model1 loss : %f model2 loss : %f' % (iter_num, model1_loss.item(), model2_loss.item()))

            if iter_num % 50 == 0:
                # Log a few axial slices (depth 20..60, step 10) as grids.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                image = outputs_soft1[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Model1_Predicted_label',
                                 grid_image, iter_num)

                image = outputs_soft2[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Model2_Predicted_label',
                                 grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)

            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation of model1; keep its best checkpoint.
                model1.eval()
                avg_metric1 = test_all_case(
                    model1, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric1[:, 0].mean() > best_performance1:
                    best_performance1 = avg_metric1[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model1_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance1, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model1.pth'.format(args.model))
                    torch.save(model1.state_dict(), save_mode_path)
                    torch.save(model1.state_dict(), save_best)

                writer.add_scalar('info/model1_val_dice_score',
                                  avg_metric1[0, 0], iter_num)
                writer.add_scalar('info/model1_val_hd95',
                                  avg_metric1[0, 1], iter_num)
                logging.info(
                    'iteration %d : model1_dice_score : %f model1_hd95 : %f' % (
                        iter_num, avg_metric1[0, 0].mean(), avg_metric1[0, 1].mean()))
                model1.train()

                # Same validation/checkpoint procedure for model2.
                model2.eval()
                avg_metric2 = test_all_case(
                    model2, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric2[:, 0].mean() > best_performance2:
                    best_performance2 = avg_metric2[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model2.pth'.format(args.model))
                    torch.save(model2.state_dict(), save_mode_path)
                    torch.save(model2.state_dict(), save_best)

                writer.add_scalar('info/model2_val_dice_score',
                                  avg_metric2[0, 0], iter_num)
                writer.add_scalar('info/model2_val_hd95',
                                  avg_metric2[0, 1], iter_num)
                logging.info(
                    'iteration %d : model2_dice_score : %f model2_hd95 : %f' % (
                        iter_num, avg_metric2[0, 0].mean(), avg_metric2[0, 1].mean()))
                model2.train()

            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot of both models.
                save_mode_path = os.path.join(
                    snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
                torch.save(model1.state_dict(), save_mode_path)
                logging.info("save model1 to {}".format(save_mode_path))

                save_mode_path = os.path.join(
                    snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
                torch.save(model2.state_dict(), save_mode_path)
                logging.info("save model2 to {}".format(save_mode_path))

            if iter_num >= max_iterations:
                break
            time1 = time.time()  # NOTE(review): assigned but never read
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # cuDNN benchmark mode is faster but non-deterministic; trade it off
    # against reproducibility via --deterministic.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True

    # Seed every RNG source (current CUDA device only).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # FIX: the original passed the ignore callable as the third positional
    # argument of copytree (which is `symlinks`), so nothing was ignored;
    # it also wrapped the patterns in a list, but ignore_patterns expects
    # separate string arguments.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 14,030 | 42.574534 | 120 | py |
SSL4MIS | SSL4MIS-master/code/val_urpc_util.py | import math
from glob import glob
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    """Sliding-window inference over one 3D volume.

    Pads the volume up to `patch_size` if needed, runs `net` on overlapping
    patches, averages the softmax scores where patches overlap, and returns
    the argmax label map cropped back to the original (w, h, d) shape.
    `net` is expected to return a 4-tuple whose first element is the logits
    (URPC-style multi-scale head); only the first output is used.
    """
    w, h, d = image.shape

    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # Split each axis' padding between the two sides (extra voxel on the right).
    wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
                               (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww, hh, dd = image.shape

    # Number of patch positions per axis; the last patch is clamped to the
    # volume border below, so every voxel is covered at least once.
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    # print("{}, {}, {}".format(sx, sy, sz))
    # Accumulated class scores and per-voxel patch counts for averaging.
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)

    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y, hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0],
                                   ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # Add batch and channel dims: (1, 1, pw, ph, pd).
                test_patch = np.expand_dims(np.expand_dims(
                    test_patch, axis=0), axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()

                with torch.no_grad():
                    y1, _, _, _ = net(test_patch)
                    # ensemble
                    y = torch.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    # Average overlapping predictions, then take per-voxel argmax.
    score_map = score_map/np.expand_dims(cnt, axis=0)
    label_map = np.argmax(score_map, axis=0)
    if add_pad:
        # Crop back to the original unpadded extents.
        label_map = label_map[wl_pad:wl_pad+w,
                              hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        score_map = score_map[:, wl_pad:wl_pad +
                              w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
    return label_map
def cal_metric(gt, pred):
    """Return [dice, hd95] for a binary prediction, or zeros when either
    the prediction or the ground truth is empty (hd95 is undefined then)."""
    if pred.sum() == 0 or gt.sum() == 0:
        return np.zeros(2)
    dice = metric.binary.dc(pred, gt)
    hd95 = metric.binary.hd95(pred, gt)
    return np.array([dice, hd95])
def test_all_case(net, base_dir, test_list="val.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24):
    """Run sliding-window inference over every case named in `test_list`.

    Each listing line's first comma-separated field is the case id, resolved
    to ``{base_dir}/data/{id}.h5``. Returns the per-foreground-class
    [dice, hd95] metrics averaged over all cases, shape (num_classes-1, 2).
    """
    with open(base_dir + '/{}'.format(test_list), 'r') as f:
        listing = f.readlines()
    image_list = [base_dir + "/data/{}.h5".format(
        entry.replace('\n', '').split(",")[0]) for entry in listing]
    total_metric = np.zeros((num_classes-1, 2))
    print("Validation begin")
    for image_path in tqdm(image_list):
        h5f = h5py.File(image_path, 'r')
        image = h5f['image'][:]
        label = h5f['label'][:]
        prediction = test_single_case(
            net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
        for cls in range(1, num_classes):
            total_metric[cls-1, :] += cal_metric(label == cls, prediction == cls)
    print("Validation end")
    return total_metric / len(image_list)
| 4,076 | 36.75 | 124 | py |
SSL4MIS | SSL4MIS-master/code/test_2D_fully.py | import argparse
import os
import shutil
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
from medpy import metric
from scipy.ndimage import zoom
from scipy.ndimage.interpolation import zoom
from tqdm import tqdm
# from networks.efficientunet import UNet
from networks.net_factory import net_factory
# Command-line configuration for fully-supervised ACDC evaluation.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
parser.add_argument('--labeled_num', type=int, default=3,
                    help='labeled data')
def calculate_metric_percase(pred, gt):
    """Binarise both masks and return (dice, hd95, asd) via medpy.

    Args:
        pred: predicted mask (any numeric/boolean array; >0 counts as foreground).
        gt: ground-truth mask, same shape and convention.

    Returns:
        Tuple of (Dice, 95% Hausdorff distance, average surface distance).
    """
    # FIX: the original binarised `pred`/`gt` in place (pred[pred > 0] = 1),
    # silently mutating the caller's arrays; work on fresh copies instead.
    pred = (pred > 0).astype(np.uint8)
    gt = (gt > 0).astype(np.uint8)
    dice = metric.binary.dc(pred, gt)
    asd = metric.binary.asd(pred, gt)
    hd95 = metric.binary.hd95(pred, gt)
    return dice, hd95, asd
def test_single_volume(case, net, test_save_path, FLAGS):
    """Predict one ACDC volume slice-by-slice and write NIfTI outputs.

    Loads ``{root_path}/data/{case}.h5``, resizes each 2D slice to 256x256
    for the network, resizes the prediction back, and saves image,
    prediction and ground truth as .nii.gz. Returns the per-class metric
    tuples for foreground classes 1, 2 and 3.
    """
    h5f = h5py.File(FLAGS.root_path + "/data/{}.h5".format(case), 'r')
    image = h5f['image'][:]
    label = h5f['label'][:]
    prediction = np.zeros_like(label)
    for ind in range(image.shape[0]):
        slice = image[ind, :, :]
        x, y = slice.shape[0], slice.shape[1]
        # order=0: nearest-neighbour resize to the network's 256x256 input.
        slice = zoom(slice, (256 / x, 256 / y), order=0)
        input = torch.from_numpy(slice).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            # unet_urds returns a multi-head tuple; only the main logits
            # are used.
            if FLAGS.model == "unet_urds":
                out_main, _, _, _ = net(input)
            else:
                out_main = net(input)
            out = torch.argmax(torch.softmax(
                out_main, dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            # Resize the label map back to the slice's native resolution.
            pred = zoom(out, (x / 256, y / 256), order=0)
            prediction[ind] = pred
    first_metric = calculate_metric_percase(prediction == 1, label == 1)
    second_metric = calculate_metric_percase(prediction == 2, label == 2)
    third_metric = calculate_metric_percase(prediction == 3, label == 3)

    # NOTE(review): spacing (1, 1, 10) is hard-coded — presumably the ACDC
    # preprocessed voxel spacing; confirm against the data pipeline.
    img_itk = sitk.GetImageFromArray(image.astype(np.float32))
    img_itk.SetSpacing((1, 1, 10))
    prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
    prd_itk.SetSpacing((1, 1, 10))
    lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
    lab_itk.SetSpacing((1, 1, 10))
    sitk.WriteImage(prd_itk, test_save_path + case + "_pred.nii.gz")
    sitk.WriteImage(img_itk, test_save_path + case + "_img.nii.gz")
    sitk.WriteImage(lab_itk, test_save_path + case + "_gt.nii.gz")
    return first_metric, second_metric, third_metric
def Inference(FLAGS):
    """Evaluate the best checkpoint on the test split.

    Reads the case list from ``{root_path}/test.list``, recreates the
    prediction output directory, loads ``{model}_best_model.pth`` and runs
    test_single_volume on every case. Returns a list of three per-class
    metric arrays (classes 1-3), each averaged over all cases.
    """
    with open(FLAGS.root_path + '/test.list', 'r') as f:
        image_list = f.readlines()
    image_list = sorted([item.replace('\n', '').split(".")[0]
                         for item in image_list])
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        FLAGS.exp, FLAGS.labeled_num, FLAGS.model)
    test_save_path = "../model/{}_{}_labeled/{}_predictions/".format(
        FLAGS.exp, FLAGS.labeled_num, FLAGS.model)
    # Start from a clean prediction directory.
    if os.path.exists(test_save_path):
        shutil.rmtree(test_save_path)
    os.makedirs(test_save_path)
    net = net_factory(net_type=FLAGS.model, in_chns=1,
                      class_num=FLAGS.num_classes)
    save_mode_path = os.path.join(
        snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
    net.load_state_dict(torch.load(save_mode_path))
    print("init weight from {}".format(save_mode_path))
    net.eval()

    # Running sums of (dice, hd95, asd) per foreground class.
    first_total = 0.0
    second_total = 0.0
    third_total = 0.0
    for case in tqdm(image_list):
        first_metric, second_metric, third_metric = test_single_volume(
            case, net, test_save_path, FLAGS)
        first_total += np.asarray(first_metric)
        second_total += np.asarray(second_metric)
        third_total += np.asarray(third_metric)
    avg_metric = [first_total / len(image_list), second_total /
                  len(image_list), third_total / len(image_list)]
    return avg_metric
if __name__ == '__main__':
    FLAGS = parser.parse_args()
    # FIX: store the result under a new name instead of shadowing the
    # module-level `metric` import from medpy.
    avg_metric = Inference(FLAGS)
    print(avg_metric)
    # Mean over the three foreground classes.
    print((avg_metric[0] + avg_metric[1] + avg_metric[2]) / 3)
| 4,413 | 36.40678 | 76 | py |
SSL4MIS | SSL4MIS-master/code/train_deep_co_training_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for semi-supervised co-training on ACDC.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Deep_Co_Training', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
# Parsed once at import time; the whole module reads this global.
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to the number of labeled 2D slices.

    Args:
        dataset: dataset path or name; must contain "ACDC" or "Prostate".
        patiens_num: number of labeled patients (a key of the lookup table).

    Returns:
        The number of labeled slices corresponding to that many patients.

    Raises:
        ValueError: if the dataset is not recognised.
        KeyError: if `patiens_num` has no entry in the table.
    """
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # FIX: the original tested `elif "Prostate":`, which is always true, so
    # any unknown dataset silently fell through to the Prostate table and
    # the error branch was unreachable.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Consistency ramp-up from https://arxiv.org/abs/1610.02242."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def train(args, snapshot_path):
    """Semi-supervised 2D training loop with rotation-consistency regularisation.

    Labeled samples get CE+Dice supervision; unlabeled samples are encouraged
    to produce rotation-equivariant predictions (MSE between the output of a
    rotated input and the rotated output, symmetrised with detach on either
    side). Validates every 200 iterations and checkpoints the best model.
    Returns a completion message string.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct but reproducible seed.
        random.seed(args.seed + worker_id)

    model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Every batch mixes labeled and unlabeled slices: the first `labeled_bs`
    # items are labeled, the rest unlabeled.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)

    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)

    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)

    model.train()

    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)

    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))

    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]

            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)

            # Random 90-degree rotation (0-3 quarter turns) of the unlabeled
            # inputs in the spatial (H, W) plane.
            rot_times = random.randrange(0,4)
            rotated_unlabeled_volume_batch = torch.rot90(unlabeled_volume_batch, rot_times, [2,3])
            unlabeled_rot_outputs = model(rotated_unlabeled_volume_batch)
            unlabeled_rot_outputs_soft = torch.softmax(unlabeled_rot_outputs, dim=1)

            # Supervised CE+Dice on the labeled slice of the batch.
            # NOTE: label_batch[:] is a no-op slice before the labeled_bs cut.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:][:args.labeled_bs].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)

            # Ramp the unsupervised weight up; epoch counter ticks per 150 iters.
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Symmetrised rotation-equivariance MSE: each term detaches one
            # side so gradients flow through the other.
            consistency_loss = 0.5 * (torch.mean((unlabeled_rot_outputs_soft.detach() - torch.rot90(outputs_soft[args.labeled_bs:], rot_times, [2,3]))**2) + torch.mean((unlabeled_rot_outputs_soft - torch.rot90(outputs_soft[args.labeled_bs:].detach(), rot_times, [2,3]))**2))
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_

            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))

            if iter_num % 20 == 0:
                # Log sample image / prediction / ground-truth (scaled x50
                # so small class indices are visible as greyscale).
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)

            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep the checkpoint with best mean Dice.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)

                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)

                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()

            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN benchmark mode is faster but non-deterministic; trade it off
    # against reproducibility via --deterministic.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True

    # Seed every RNG source (current CUDA device only).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # FIX: the original passed the ignore callable as the third positional
    # argument of copytree (which is `symlinks`), so nothing was ignored;
    # it also wrapped the patterns in a list, but ignore_patterns expects
    # separate string arguments.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 11,350 | 41.354478 | 274 | py |
SSL4MIS | SSL4MIS-master/code/train_entropy_minimization_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for entropy-minimization training on BraTS2019.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Entropy_Minimization', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
# Parsed once at import time; the whole module reads this global.
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Consistency ramp-up from https://arxiv.org/abs/1610.02242."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update ``ema_model`` parameters as an EMA of ``model`` parameters.

    Args:
        model: source network whose current parameters are averaged in.
        ema_model: target network holding the exponential moving average.
        alpha: asymptotic EMA decay rate (e.g. 0.99).
        global_step: number of updates so far; warms up the decay so the
            average tracks the model closely early in training.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # FIX: Tensor.add_(scalar, tensor) is the long-deprecated overload;
        # use the explicit `alpha=` keyword form supported by modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Semi-supervised 3D training loop (BraTS2019).

    Supervised CE + Dice loss on the first `labeled_bs` samples of each batch,
    plus a ramped-up entropy term over the whole batch's softmax output.
    Validates and checkpoints every 200 iterations; stops at max_iterations.
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2  # binary segmentation (background / foreground)
    net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
    model = net.cuda()
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)
    labeled_idxs = list(range(0, args.labeled_num))
    # NOTE(review): dataset size 250 is hard-coded here — confirm it matches
    # the actual number of training cases.
    unlabeled_idxs = list(range(args.labeled_num, 250))
    # Each batch mixes `labeled_bs` labeled with (batch_size - labeled_bs)
    # unlabeled samples.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # NOTE(review): unlabeled_volume_batch is never used below.
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            # Supervised losses only on the labeled slice of the batch.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs])
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            # Ramp-up advances one step every 150 iterations.
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Entropy minimization over all softmax predictions in the batch.
            consistency_loss = losses.entropy_loss(outputs_soft, C=2)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay with power 0.9.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log a strip of axial slices (z = 20..60, step 10) to TensorBoard.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep only checkpoints that improve dice.
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation score.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN determinism trade-off: benchmark mode is faster but non-reproducible.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG used in training for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    # Archive the current source tree next to the experiment outputs.
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: the ignore callable must be passed via the `ignore` keyword
    # (the third positional parameter of copytree is `symlinks`), and
    # ignore_patterns() takes patterns as separate strings, not one list.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 10,388 | 41.060729 | 126 | py |
SSL4MIS | SSL4MIS-master/code/train_cross_consistency_training_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator, TwoStreamBatchSampler
from utils import losses, metrics, ramps
from val_2D import test_single_volume_ds
from networks.net_factory import net_factory
parser = argparse.ArgumentParser()
# ---- data / experiment configuration ----
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Cross_Consistency_Training', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_cct', help='model_name')
# ---- optimization ----
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list splits a command-line string into characters; only
# the default value works as intended — consider nargs=2 with type=int.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
                    help='labeled data')
# costs
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a labeled-patient count to the corresponding labeled-slice count.

    Args:
        dataset: dataset root path/name; matched against "ACDC" or "Prostate".
        patiens_num: number of labeled patients (must be a key of the table).

    Returns:
        Number of labeled 2D slices for the requested patient count.
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Bug fix: was `elif "Prostate":`, which is always truthy, so any
    # non-ACDC dataset silently used the Prostate table.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Return the sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return ramp * args.consistency
def train(args, snapshot_path):
    """Cross-consistency training (CCT) loop on 2D slices.

    The network returns one main prediction plus three auxiliary decoder
    outputs. Labeled samples are supervised on all four heads (CE + Dice);
    unlabeled samples contribute an MSE consistency term between the main and
    auxiliary softmax maps, weighted by a sigmoid ramp-up.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    model = net_factory(net_type=args.model, in_chns=1,
                        class_num=num_classes)
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Each batch mixes `labeled_bs` labeled with (batch_size - labeled_bs)
    # unlabeled slices.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    # NOTE(review): kl_distance is defined but never used below.
    kl_distance = nn.KLDivLoss(reduction='none')
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs, outputs_aux1, outputs_aux2, outputs_aux3 = model(
                volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
            outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
            outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
            # Supervised CE on the labeled slice of the batch, for every head.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:].long())
            loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux1 = dice_loss(
                outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux2 = dice_loss(
                outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux3 = dice_loss(
                outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = (loss_ce + loss_ce_aux1 + loss_ce_aux2 + loss_ce_aux3 +
                               loss_dice + loss_dice_aux1 + loss_dice_aux2 + loss_dice_aux3) / 8
            # Ramp-up advances one step every 150 iterations.
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # MSE between main and auxiliary softmax maps on unlabeled samples.
            consistency_loss_aux1 = torch.mean(
                (outputs_soft[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2)
            consistency_loss_aux2 = torch.mean(
                (outputs_soft[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2)
            consistency_loss_aux3 = torch.mean(
                (outputs_soft[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2)
            consistency_loss = (consistency_loss_aux1 + consistency_loss_aux2 + consistency_loss_aux3) / 3
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay with power 0.9.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log sample image, argmax prediction and ground truth (x50 for
                # visibility of the small label values).
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep only checkpoints that improve dice.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume_ds(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes - 1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i + 1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i + 1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation score.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN determinism trade-off: benchmark mode is faster but non-reproducible.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG used in training for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    # Archive the current source tree next to the experiment outputs.
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: the ignore callable must be passed via the `ignore` keyword
    # (the third positional parameter of copytree is `symlinks`), and
    # ignore_patterns() takes patterns as separate strings, not one list.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 12,274 | 43.154676 | 108 | py |
SSL4MIS | SSL4MIS-master/code/train_fixmatch_cta.py | import argparse
import logging
import os
import re
import random
import shutil
import sys
import time
from xml.etree.ElementInclude import default_loader
from more_itertools import sample
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.distributions import Categorical
from torchvision import transforms
from torchvision.utils import make_grid, save_image
from tqdm import tqdm
import augmentations
from PIL import Image
from dataloaders import utils
from dataloaders.dataset import (
BaseDataSets,
CTATransform,
RandomGenerator,
TwoStreamBatchSampler,
WeakStrongAugment,
)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps, util
from val_2D import test_single_volume
parser = argparse.ArgumentParser()
# ---- data / experiment configuration ----
parser.add_argument("--root_path", type=str, default="../data/ACDC", help="Name of Experiment")
parser.add_argument("--exp", type=str, default="ACDC/FixMatch", help="experiment_name")
parser.add_argument("--model", type=str, default="unet", help="model_name")
# ---- optimization ----
parser.add_argument("--max_iterations", type=int, default=30000, help="maximum epoch number to train")
parser.add_argument("--batch_size", type=int, default=24, help="batch_size per gpu")
parser.add_argument("--deterministic", type=int, default=1, help="whether use deterministic training")
parser.add_argument("--base_lr", type=float, default=0.01, help="segmentation network learning rate")
# NOTE(review): type=list splits a command-line string into characters; only
# the default value works as intended — consider nargs=2 with type=int.
parser.add_argument("--patch_size", type=list, default=[256, 256], help="patch size of network input")
parser.add_argument("--seed", type=int, default=1337, help="random seed")
parser.add_argument("--num_classes", type=int, default=4, help="output channel of network")
parser.add_argument("--load", default=False, action="store_true", help="restore previous checkpoint")
parser.add_argument(
    "--conf_thresh",
    type=float,
    default=0.8,
    help="confidence threshold for using pseudo-labels",
)
parser.add_argument("--labeled_bs", type=int, default=12, help="labeled_batch_size per gpu")
# parser.add_argument('--labeled_num', type=int, default=136,
parser.add_argument("--labeled_num", type=int, default=7, help="labeled data")
# costs
parser.add_argument("--ema_decay", type=float, default=0.99, help="ema_decay")
parser.add_argument("--consistency_type", type=str, default="mse", help="consistency_type")
parser.add_argument("--consistency", type=float, default=0.1, help="consistency")
parser.add_argument("--consistency_rampup", type=float, default=200.0, help="consistency_rampup")
args = parser.parse_args()
def kaiming_normal_init_weight(model):
    """Re-initialize *model* in place: Kaiming-normal conv weights, unit BatchNorm.

    Returns the same model object for chaining.
    """
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.kaiming_normal_(layer.weight)
    return model
def xavier_normal_init_weight(model):
    """Re-initialize *model* in place: Xavier-normal conv weights, unit BatchNorm.

    Returns the same model object for chaining.
    """
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.xavier_normal_(layer.weight)
    return model
def patients_to_slices(dataset, patiens_num):
    """Map a labeled-patient count to the corresponding labeled-slice count.

    Args:
        dataset: dataset root path/name; matched against "ACDC" or "Prostate".
        patiens_num: number of labeled patients (must be a key of the table).

    Returns:
        Number of labeled 2D slices for the requested patient count.
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {
            "3": 68,
            "7": 136,
            "14": 256,
            "21": 396,
            "28": 512,
            "35": 664,
            "140": 1312,
        }
    # Bug fix: was `elif "Prostate":`, which is always truthy, so any
    # non-ACDC dataset silently used the Prostate table.
    elif "Prostate" in dataset:
        ref_dict = {
            "2": 27,
            "4": 53,
            "8": 120,
            "12": 179,
            "16": 256,
            "21": 312,
            "42": 623,
        }
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid ramp-up of the consistency weight (https://arxiv.org/abs/1610.02242)."""
    scale = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * scale
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponentially average student weights into the teacher, in place.

    teacher network: ema_model
    student network: model
    Uses the true average until the exponential average is more correct:
    the effective alpha ramps up from 0 during the first steps.
    """
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Fix: the deprecated add_(Number, Tensor) overload was removed from
        # modern PyTorch; pass the scale via the `alpha` keyword instead.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """FixMatch training loop with CTAugment policies.

    A student model is trained on weakly augmented labeled slices (CE + Dice)
    plus a consistency term that matches its predictions on strongly augmented
    slices against pseudo-labels produced by an EMA teacher on the weak view.
    Augmentation policies are refreshed per epoch and their rates updated from
    the mean epoch loss.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition; an EMA (teacher) model has detached parameters
        # so it is updated only by update_ema_variables, never by backprop.
        model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)
    def refresh_policies(db_train, cta):
        # Resample the dataset's weak/strong CTAugment policies.
        db_train.ops_weak = cta.policy(probe=False, weak=True)
        db_train.ops_strong = cta.policy(probe=False, weak=False)
        logging.info(f"\nWeak Policy: {db_train.ops_weak}")
        logging.info(f"Strong Policy: {db_train.ops_strong}")
    cta = augmentations.ctaugment.CTAugment()
    transform = CTATransform(args.patch_size, cta)
    # sample initial weak and strong augmentation policies (CTAugment)
    ops_weak = cta.policy(probe=False, weak=True)
    ops_strong = cta.policy(probe=False, weak=False)
    db_train = BaseDataSets(
        base_dir=args.root_path,
        split="train",
        num=None,
        transform=transform,
        ops_weak=ops_weak,
        ops_strong=ops_strong,
    )
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Each batch mixes `labeled_bs` labeled with (batch_size - labeled_bs)
    # unlabeled slices.
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
    model = create_model()
    ema_model = create_model(ema=True)
    iter_num = 0
    start_epoch = 0
    # instantiate optimizers
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    # if restoring previous models:
    if args.load:
        try:
            # check if there is previous progress to be restored:
            logging.info(f"Snapshot path: {snapshot_path}")
            iter_num = []
            for filename in os.listdir(snapshot_path):
                if "model_iter" in filename:
                    basename, extension = os.path.splitext(filename)
                    iter_num.append(int(basename.split("_")[2]))
            iter_num = max(iter_num)
            for filename in os.listdir(snapshot_path):
                if "model_iter" in filename and str(iter_num) in filename:
                    model_checkpoint = filename
        except Exception as e:
            logging.warning(f"Error finding previous checkpoints: {e}")
        try:
            logging.info(f"Restoring model checkpoint: {model_checkpoint}")
            model, optimizer, start_epoch, performance = util.load_checkpoint(
                snapshot_path + "/" + model_checkpoint, model, optimizer
            )
            logging.info(f"Models restored from iteration {iter_num}")
        except Exception as e:
            logging.warning(f"Unable to restore model checkpoint: {e}, using new model")
    trainloader = DataLoader(
        db_train,
        batch_sampler=batch_sampler,
        num_workers=4,
        pin_memory=True,
        worker_init_fn=worker_init_fn,
    )
    valloader = DataLoader(db_val, batch_size=1, shuffle=False, num_workers=1)
    model.train()
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + "/log")
    logging.info("{} iterations per epoch".format(len(trainloader)))
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iter_num = int(iter_num)
    iterator = tqdm(range(start_epoch, max_epoch), ncols=70)
    for epoch_num in iterator:
        # track mean error for entire epoch
        epoch_errors = []
        # refresh augmentation policies with each new epoch
        refresh_policies(db_train, cta)
        for i_batch, sampled_batch in enumerate(trainloader):
            weak_batch, strong_batch, label_batch = (
                sampled_batch["image_weak"],
                sampled_batch["image_strong"],
                sampled_batch["label_aug"],
            )
            weak_batch, strong_batch, label_batch = (
                weak_batch.cuda(),
                strong_batch.cuda(),
                label_batch.cuda(),
            )
            # handle unfavorable cropping
            # NOTE(review): batch/patch size 24*256*256 is hard-coded here —
            # confirm it matches args.batch_size and args.patch_size.
            non_zero_ratio = torch.count_nonzero(label_batch) / (24 * 256 * 256)
            if non_zero_ratio <= 0.02:
                logging.info("Refreshing policy...")
                refresh_policies(db_train, cta)
                continue
            # model preds
            outputs_weak = model(weak_batch)
            outputs_weak_soft = torch.softmax(outputs_weak, dim=1)
            outputs_strong = model(strong_batch)
            outputs_strong_soft = torch.softmax(outputs_strong, dim=1)
            # getting pseudo labels
            with torch.no_grad():
                ema_outputs_soft = torch.softmax(ema_model(weak_batch), dim=1)
                pseudo_outputs = torch.argmax(
                    ema_outputs_soft.detach(),
                    dim=1,
                    keepdim=False,
                )
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # supervised loss (weak preds against ground truth)
            sup_loss = ce_loss(outputs_weak[: args.labeled_bs], label_batch[:][: args.labeled_bs].long(),) + dice_loss(
                outputs_weak_soft[: args.labeled_bs],
                label_batch[: args.labeled_bs].unsqueeze(1),
            )
            # unsupervised loss (strong preds against pseudo label)
            unsup_loss = ce_loss(outputs_strong[args.labeled_bs :], pseudo_outputs[args.labeled_bs :]) + dice_loss(
                outputs_strong_soft[args.labeled_bs :],
                pseudo_outputs[args.labeled_bs :].unsqueeze(1),
            )
            loss = sup_loss + consistency_weight * unsup_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            iter_num = iter_num + 1
            # track batch-level error, used to update augmentation policy
            epoch_errors.append(0.5 * loss.item())
            # Polynomial learning-rate decay with power 0.9.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr_
            writer.add_scalar("lr", lr_, iter_num)
            writer.add_scalar("consistency_weight/consistency_weight", consistency_weight, iter_num)
            writer.add_scalar("loss/model_loss", loss, iter_num)
            logging.info("iteration %d : model loss : %f" % (iter_num, loss.item()))
            if iter_num % 50 == 0:
                # show weakly augmented image
                image = weak_batch[1, 0:1, :, :]
                writer.add_image("train/Image", image, iter_num)
                # show strongly augmented image
                image_strong = strong_batch[1, 0:1, :, :]
                writer.add_image("train/StrongImage", image_strong, iter_num)
                # show model prediction (strong augment)
                outputs_strong = torch.argmax(outputs_strong_soft, dim=1, keepdim=True)
                writer.add_image("train/model_Prediction", outputs_strong[1, ...] * 50, iter_num)
                # show ground truth label
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image("train/GroundTruth", labs, iter_num)
                # show generated pseudo label
                pseudo_labs = pseudo_outputs[1, ...].unsqueeze(0) * 50
                writer.add_image("train/PseudoLabel", pseudo_labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation of the EMA teacher; keep best-dice checkpoints.
                model.eval()
                ema_model.eval()
                metric_list = 0.0
                with torch.no_grad():
                    for i_batch, sampled_batch in enumerate(valloader):
                        metric_i = test_single_volume(
                            sampled_batch["image"],
                            sampled_batch["label"],
                            ema_model,
                            classes=num_classes,
                        )
                        metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes - 1):
                    writer.add_scalar(
                        "info/model_val_{}_dice".format(class_i + 1),
                        metric_list[class_i, 0],
                        iter_num,
                    )
                    writer.add_scalar(
                        "info/model_val_{}_hd95".format(class_i + 1),
                        metric_list[class_i, 1],
                        iter_num,
                    )
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar("info/model_val_mean_dice", performance, iter_num)
                writer.add_scalar("info/model_val_mean_hd95", mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(
                        snapshot_path,
                        "model_iter_{}_dice_{}.pth".format(iter_num, round(best_performance, 4)),
                    )
                    save_best = os.path.join(snapshot_path, "{}_best_model.pth".format(args.model))
                    util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
                    util.save_checkpoint(epoch_num, model, optimizer, loss, save_best)
                logging.info(
                    "iteration %d : model_mean_dice : %f model_mean_hd95 : %f" % (iter_num, performance, mean_hd95)
                )
                model.train()
                ema_model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation score.
                save_mode_path = os.path.join(snapshot_path, "model_iter_" + str(iter_num) + ".pth")
                util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
        # update policy parameter bins for sampling
        mean_epoch_error = np.mean(epoch_errors)
        cta.update_rates(db_train.ops_weak, 1.0 - 0.5 * mean_epoch_error)
        cta.update_rates(db_train.ops_strong, 1.0 - 0.5 * mean_epoch_error)
    writer.close()
if __name__ == "__main__":
    # cuDNN determinism trade-off: benchmark mode is faster but non-reproducible.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG used in training for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    # Archive the current source tree next to the experiment outputs.
    if os.path.exists(snapshot_path + "/code"):
        shutil.rmtree(snapshot_path + "/code")
    # Bug fix: the ignore callable must be passed via the `ignore` keyword
    # (the third positional parameter of copytree is `symlinks`), and
    # ignore_patterns() takes patterns as separate strings, not one list.
    shutil.copytree(".", snapshot_path + "/code", ignore=shutil.ignore_patterns(".git", "__pycache__"))
    logging.basicConfig(
        filename=snapshot_path + "/log.txt",
        level=logging.INFO,
        format="[%(asctime)s.%(msecs)03d] %(message)s",
        datefmt="%H:%M:%S",
    )
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 16,647 | 38.079812 | 119 | py |
SSL4MIS | SSL4MIS-master/code/train_fully_supervised_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
parser = argparse.ArgumentParser()
# ---- data / experiment configuration ----
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Fully_Supervised', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
# ---- optimization ----
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=2,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list splits a command-line string into characters; only
# the default value works as intended — consider nargs=3 with type=int.
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
args = parser.parse_args()
def train(args, snapshot_path):
    """Fully supervised 3D training loop on BraTS2019.

    Trains a 3D segmentation network with a 0.5 * (CE + Dice) loss,
    polynomial learning-rate decay, TensorBoard logging, and periodic
    validation with best-checkpoint saving.

    Args:
        args: parsed command-line namespace (paths, lr, batch size, ...).
        snapshot_path: directory that receives logs and checkpoints.

    Returns:
        The string "Training Finished!" once max_iterations is reached.
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2  # binary segmentation: background vs. foreground
    model = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=args.labeled_num,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))

    def worker_init_fn(worker_id):
        # Give every DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
                             num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(2)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            # Equal-weighted sum of cross-entropy (on logits) and Dice (on
            # softmax probabilities).
            loss_ce = ce_loss(outputs, label_batch)
            loss_dice = dice_loss(outputs_soft, label_batch.unsqueeze(1))
            loss = 0.5 * (loss_dice + loss_ce)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay (power 0.9).
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            writer.add_scalar('loss/loss', loss, iter_num)
            if iter_num % 20 == 0:
                # Log a few axial slices (z = 20, 30, 40, 50, 60) of the
                # input, prediction and ground truth as image grids.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic sliding-window validation; keep the checkpoint with
                # the best mean Dice seen so far.
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Deterministic mode trades cuDNN autotuning speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the training pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}/{}".format(args.exp, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUG FIX: the ignore function was previously passed positionally, which
    # bound it to copytree's `symlinks` parameter, and ignore_patterns was
    # given a list instead of *patterns strings — so .git/__pycache__ were
    # copied anyway. Use the `ignore=` keyword with plain pattern strings.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 8,444 | 40.397059 | 126 | py |
SSL4MIS | SSL4MIS-master/code/train_entropy_minimization_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.discriminator import FCDiscriminator
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for 2D entropy-minimization training on ACDC.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Entropy_Minimization', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list splits a CLI string into characters; only the
# default value behaves as intended — confirm before overriding --patch_size.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=3,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Translate a labelled-patient count into the labelled-slice count.

    Args:
        dataset: dataset root path, matched against known dataset names.
        patiens_num: number of labelled patients (int or str).

    Returns:
        The number of labelled 2D slices corresponding to that many patients.

    Raises:
        ValueError: if the dataset name is not recognised.
        KeyError: if no slice count is tabulated for patiens_num.
    """
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        # BUG FIX: the original wrote `elif "Prostate":`, a constant truthy
        # string, so every non-ACDC dataset silently used the Prostate table.
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (Laine & Aila, arXiv:1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def train(args, snapshot_path):
    """Semi-supervised 2D training via entropy minimization.

    Labelled slices contribute a 0.5 * (CE + Dice) supervised loss; all
    slices additionally contribute an entropy-minimization consistency loss,
    weighted by a sigmoid ramp-up. Validates every 200 iterations and keeps
    the checkpoint with the best mean Dice.

    Args:
        args: parsed command-line namespace.
        snapshot_path: directory for logs and checkpoints.

    Returns:
        The string "Training Finished!".
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations

    def worker_init_fn(worker_id):
        # Give every DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)

    model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Each batch mixes labeled_bs labelled and (batch_size - labeled_bs)
    # unlabelled slices.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # NOTE(review): unlabeled_volume_batch is assigned but never used
            # in this variant.
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            # Supervised losses use the labelled part of the batch only.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:][:args.labeled_bs].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Entropy minimization over all (labelled + unlabelled) predictions.
            consistency_loss = losses.entropy_loss(outputs_soft, C=4)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay (power 0.9).
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log a sample image / prediction / label triplet.
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation over the whole val split.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    # New best mean Dice: save an iteration-tagged snapshot
                    # and overwrite the "best model" checkpoint.
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # Deterministic mode trades cuDNN autotuning speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the training pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUG FIX: the ignore function was previously passed positionally, which
    # bound it to copytree's `symlinks` parameter, and ignore_patterns was
    # given a list instead of *patterns strings — so .git/__pycache__ were
    # copied anyway. Use the `ignore=` keyword with plain pattern strings.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 10,840 | 40.857143 | 108 | py |
SSL4MIS | SSL4MIS-master/code/train_fixmatch_standard_augs.py | import argparse
import logging
import os
import re
import random
import shutil
import sys
import time
from xml.etree.ElementInclude import default_loader
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torch.distributions import Categorical
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (
BaseDataSets,
RandomGenerator,
TwoStreamBatchSampler,
WeakStrongAugment,
)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps, util
from val_2D import test_single_volume
# Command-line configuration for FixMatch-style semi-supervised training.
parser = argparse.ArgumentParser()
parser.add_argument("--root_path", type=str, default="../data/ACDC", help="Name of Experiment")
parser.add_argument("--exp", type=str, default="ACDC/FixMatch_standard_augs", help="experiment_name")
parser.add_argument("--model", type=str, default="unet", help="model_name")
parser.add_argument("--max_iterations", type=int, default=30000, help="maximum epoch number to train")
parser.add_argument("--batch_size", type=int, default=24, help="batch_size per gpu")
parser.add_argument("--deterministic", type=int, default=1, help="whether use deterministic training")
parser.add_argument("--base_lr", type=float, default=0.01, help="segmentation network learning rate")
# NOTE(review): type=list splits a CLI string into characters; only the
# default value behaves as intended — confirm before overriding --patch_size.
parser.add_argument("--patch_size", type=list, default=[256, 256], help="patch size of network input")
parser.add_argument("--seed", type=int, default=1337, help="random seed")
parser.add_argument("--num_classes", type=int, default=4, help="output channel of network")
parser.add_argument("--load", default=False, action="store_true", help="restore previous checkpoint")
parser.add_argument(
    "--conf_thresh",
    type=float,
    default=0.8,
    help="confidence threshold for using pseudo-labels",
)
parser.add_argument("--labeled_bs", type=int, default=12, help="labeled_batch_size per gpu")
parser.add_argument("--labeled_num", type=int, default=7, help="labeled data")
# costs
parser.add_argument("--ema_decay", type=float, default=0.99, help="ema_decay")
parser.add_argument("--consistency_type", type=str, default="mse", help="consistency_type")
parser.add_argument("--consistency", type=float, default=0.1, help="consistency")
parser.add_argument("--consistency_rampup", type=float, default=200.0, help="consistency_rampup")
args = parser.parse_args()
def kaiming_normal_init_weight(model):
    """Re-initialise a network in place: Kaiming-normal Conv2d weights,
    BatchNorm2d reset to scale 1 / shift 0. Returns the (mutated) model."""
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.kaiming_normal_(layer.weight)
    return model
def xavier_normal_init_weight(model):
    """Re-initialise a network in place: Xavier-normal Conv2d weights,
    BatchNorm2d reset to scale 1 / shift 0. Returns the (mutated) model."""
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.xavier_normal_(layer.weight)
    return model
def patients_to_slices(dataset, patiens_num):
    """Translate a labelled-patient count into the labelled-slice count.

    Args:
        dataset: dataset root path, matched against known dataset names.
        patiens_num: number of labelled patients (int or str).

    Returns:
        The number of labelled 2D slices corresponding to that many patients.

    Raises:
        ValueError: if the dataset name is not recognised.
        KeyError: if no slice count is tabulated for patiens_num.
    """
    if "ACDC" in dataset:
        ref_dict = {
            "3": 68,
            "7": 136,
            "14": 256,
            "21": 396,
            "28": 512,
            "35": 664,
            "140": 1312,
        }
    elif "Prostate" in dataset:
        # BUG FIX: the original wrote `elif "Prostate":`, a constant truthy
        # string, so every non-ACDC dataset silently used the Prostate table.
        ref_dict = {
            "2": 27,
            "4": 53,
            "8": 120,
            "12": 179,
            "16": 256,
            "21": 312,
            "42": 623,
        }
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (Laine & Aila, arXiv:1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher from the student.

    teacher network: ema_model (updated in place)
    student network: model (provides the fresh parameters)

    Args:
        model: student network.
        ema_model: teacher network, parameters updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current iteration, used to ramp alpha up from 0.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # In place: ema = alpha * ema + (1 - alpha) * param.
        # BUG FIX: keyword `alpha=` replaces the deprecated (and later
        # removed) Tensor.add_(scalar, tensor) overload.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """FixMatch-style semi-supervised training with standard augmentations.

    A student network is trained on weakly augmented labelled slices
    (CE + Dice), while pseudo-labels derived from confident weak-view
    predictions supervise the strongly augmented view (CE + Dice plus an
    entropy-weighted complementary loss). An EMA teacher is maintained.
    Validates every 200 iterations and keeps the best checkpoint.

    Args:
        args: parsed command-line namespace.
        snapshot_path: directory for logs and checkpoints.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations

    def create_model(ema=False):
        # Build the segmentation network; EMA copies are detached so the
        # optimizer never touches them.
        model = net_factory(net_type=args.model, in_chns=1, class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model

    def worker_init_fn(worker_id):
        # Give every DataLoader worker a distinct, reproducible seed.
        random.seed(args.seed + worker_id)

    def get_comp_loss(weak, strong):
        """get complementary loss and adaptive sample weight.
        Compares least likely prediction (from strong augment) with argmin of weak augment.
        Args:
            weak (batch): weakly augmented batch
            strong (batch): strongly augmented batch
        Returns:
            comp_loss, as_weight
        """
        il_output = torch.reshape(
            strong,
            (
                args.batch_size,
                args.num_classes,
                args.patch_size[0] * args.patch_size[1],
            ),
        )
        # calculate entropy for image-level preds (tensor of length labeled_bs)
        as_weight = 1 - (Categorical(probs=il_output).entropy() / np.log(args.patch_size[0] * args.patch_size[1]))
        # batch level average of entropy
        as_weight = torch.mean(as_weight)
        # complementary loss: push the strong view away from the weak view's
        # least-likely class.
        comp_labels = torch.argmin(weak.detach(), dim=1, keepdim=False)
        comp_loss = as_weight * ce_loss(
            torch.add(torch.negative(strong), 1),
            comp_labels,
        )
        return comp_loss, as_weight

    def normalize(tensor):
        # Min-max normalise along dim 1 (channel) so the confidence threshold
        # applies on a comparable scale per pixel.
        min_val = tensor.min(1, keepdim=True)[0]
        max_val = tensor.max(1, keepdim=True)[0]
        result = tensor - min_val
        result = result / max_val
        return result

    db_train = BaseDataSets(
        base_dir=args.root_path,
        split="train",
        num=None,
        transform=transforms.Compose([WeakStrongAugment(args.patch_size)]),
    )
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Each batch mixes labeled_bs labelled and (batch_size - labeled_bs)
    # unlabelled slices.
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
    model = create_model()
    # create model for ema (this model produces pseudo-labels)
    ema_model = create_model(ema=True)
    iter_num = 0
    start_epoch = 0
    # instantiate optimizers
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)
    # if restoring previous models:
    if args.load:
        try:
            # Find the checkpoint with the highest iteration number among
            # files named "model_iter_<N>_...".
            logging.info(f"Snapshot path: {snapshot_path}")
            iter_num = []
            for filename in os.listdir(snapshot_path):
                if "model_iter" in filename:
                    basename, extension = os.path.splitext(filename)
                    iter_num.append(int(basename.split("_")[2]))
            iter_num = max(iter_num)
            for filename in os.listdir(snapshot_path):
                if "model_iter" in filename and str(iter_num) in filename:
                    model_checkpoint = filename
        except Exception as e:
            logging.warning(f"Error finding previous checkpoints: {e}")
        try:
            logging.info(f"Restoring model checkpoint: {model_checkpoint}")
            model, optimizer, start_epoch, performance = util.load_checkpoint(
                snapshot_path + "/" + model_checkpoint, model, optimizer
            )
            logging.info(f"Models restored from iteration {iter_num}")
        except Exception as e:
            logging.warning(f"Unable to restore model checkpoint: {e}, using new model")
    trainloader = DataLoader(
        db_train,
        batch_sampler=batch_sampler,
        num_workers=4,
        pin_memory=True,
        worker_init_fn=worker_init_fn,
    )
    valloader = DataLoader(db_val, batch_size=1, shuffle=False, num_workers=1)
    # set to train
    model.train()
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + "/log")
    logging.info("{} iterations per epoch".format(len(trainloader)))
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iter_num = int(iter_num)
    iterator = tqdm(range(start_epoch, max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            weak_batch, strong_batch, label_batch = (
                sampled_batch["image_weak"],
                sampled_batch["image_strong"],
                sampled_batch["label_aug"],
            )
            weak_batch, strong_batch, label_batch = (
                weak_batch.cuda(),
                strong_batch.cuda(),
                label_batch.cuda(),
            )
            # outputs for model
            outputs_weak = model(weak_batch)
            outputs_weak_soft = torch.softmax(outputs_weak, dim=1)
            outputs_strong = model(strong_batch)
            outputs_strong_soft = torch.softmax(outputs_strong, dim=1)
            # minmax normalization for softmax outputs before applying mask
            pseudo_mask = (normalize(outputs_weak_soft) > args.conf_thresh).float()
            outputs_weak_masked = outputs_weak_soft * pseudo_mask
            # Pseudo-labels for the unlabelled part of the batch only.
            pseudo_outputs = torch.argmax(outputs_weak_masked[args.labeled_bs:].detach(), dim=1, keepdim=False)
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # supervised loss (labelled slices, weak view)
            sup_loss = ce_loss(outputs_weak[: args.labeled_bs], label_batch[:][: args.labeled_bs].long(),) + dice_loss(
                outputs_weak_soft[: args.labeled_bs],
                label_batch[: args.labeled_bs].unsqueeze(1),
            )
            # complementary loss and adaptive sample weight for negative learning
            comp_loss, as_weight = get_comp_loss(weak=outputs_weak_soft, strong=outputs_strong_soft)
            # unsupervised loss (unlabelled slices, strong view vs. pseudo-labels)
            unsup_loss = (
                ce_loss(outputs_strong[args.labeled_bs:], pseudo_outputs)
                + dice_loss(outputs_strong_soft[args.labeled_bs:], pseudo_outputs.unsqueeze(1))
                + as_weight * comp_loss
            )
            loss = sup_loss + consistency_weight * unsup_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update ema model
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # update learning rate (polynomial decay, power 0.9)
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr_
            iter_num = iter_num + 1
            writer.add_scalar("lr", lr_, iter_num)
            writer.add_scalar("consistency_weight/consistency_weight", consistency_weight, iter_num)
            writer.add_scalar("loss/model_loss", loss, iter_num)
            logging.info("iteration %d : model loss : %f" % (iter_num, loss.item()))
            if iter_num % 50 == 0:
                # Log a sample image / prediction / label triplet.
                image = weak_batch[1, 0:1, :, :]
                writer.add_image("train/Image", image, iter_num)
                outputs_weak = torch.argmax(torch.softmax(outputs_weak, dim=1), dim=1, keepdim=True)
                writer.add_image("train/model_Prediction", outputs_weak[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image("train/GroundTruth", labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation over the whole val split.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"],
                        sampled_batch["label"],
                        model,
                        classes=num_classes,
                    )
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes - 1):
                    writer.add_scalar(
                        "info/model_val_{}_dice".format(class_i + 1),
                        metric_list[class_i, 0],
                        iter_num,
                    )
                    writer.add_scalar(
                        "info/model_val_{}_hd95".format(class_i + 1),
                        metric_list[class_i, 1],
                        iter_num,
                    )
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar("info/model_val_mean_dice", performance, iter_num)
                writer.add_scalar("info/model_val_mean_hd95", mean_hd95, iter_num)
                if performance > best_performance:
                    # New best mean Dice: save an iteration-tagged snapshot
                    # and overwrite the "best model" checkpoint.
                    best_performance = performance
                    save_mode_path = os.path.join(
                        snapshot_path,
                        "model_iter_{}_dice_{}.pth".format(iter_num, round(best_performance, 4)),
                    )
                    save_best = os.path.join(snapshot_path, "{}_best_model.pth".format(args.model))
                    util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
                    util.save_checkpoint(epoch_num, model, optimizer, loss, save_best)
                logging.info(
                    "iteration %d : model_mean_dice : %f model_mean_hd95 : %f" % (iter_num, performance, mean_hd95)
                )
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot, independent of validation.
                save_mode_path = os.path.join(snapshot_path, "model_iter_" + str(iter_num) + ".pth")
                util.save_checkpoint(epoch_num, model, optimizer, loss, save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            # NOTE(review): time1 is assigned but never used.
            time1 = time.time()
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # Deterministic mode trades cuDNN autotuning speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG the training pipeline touches.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + "/code"):
        shutil.rmtree(snapshot_path + "/code")
    # BUG FIX: the ignore function was previously passed positionally, which
    # bound it to copytree's `symlinks` parameter, and ignore_patterns was
    # given a list instead of *patterns strings — so .git/__pycache__ were
    # copied anyway. Use the `ignore=` keyword with plain pattern strings.
    shutil.copytree(".", snapshot_path + "/code",
                    ignore=shutil.ignore_patterns(".git", "__pycache__"))
    logging.basicConfig(
        filename=snapshot_path + "/log.txt",
        level=logging.INFO,
        format="[%(asctime)s.%(msecs)03d] %(message)s",
        datefmt="%H:%M:%S",
    )
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 16,133 | 37.141844 | 119 | py |
SSL4MIS | SSL4MIS-master/code/train_cross_teaching_between_cnn_transformer_2D.py | # -*- coding: utf-8 -*-
# Author: Xiangde Luo
# Date: 16 Dec. 2021
# Implementation for Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer.
# # Reference:
# @article{luo2021ctbct,
# title={Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer},
# author={Luo, Xiangde and Hu, Minhao and Song, Tao and Wang, Guotai and Zhang, Shaoting},
# journal={arXiv preprint arXiv:2112.04894},
# year={2021}}
# In the original paper, we don't use the validation set to select checkpoints; instead we use the last iteration for inference for all methods.
# In addition, we combine the validation set and test set to report the results.
# We found that the random data split has some bias (the validation set is very tough and the test set is very easy).
# Actually, this setting is also a fair comparison.
# download pre-trained model to "code/pretrained_ckpt" folder, link:https://drive.google.com/drive/folders/1UC3XOoezeum0uck4KBVGa8osahs6rKUY
import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
from config import get_config
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from networks.vision_transformer import SwinUnet as ViT_seg
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for cross-teaching between a CNN and a
# Swin-Unet transformer.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Cross_Teaching_Between_CNN_Transformer', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=16,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): type=list splits a CLI string into characters; only the
# default value behaves as intended — confirm before overriding --patch_size.
parser.add_argument('--patch_size', type=list, default=[224, 224],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# Swin-Unet / config-system options (consumed by get_config below).
parser.add_argument(
    '--cfg', type=str, default="../code/configs/swin_tiny_patch4_window7_224_lite.yaml", help='path to config file', )
parser.add_argument(
    "--opts",
    help="Modify config options by adding 'KEY VALUE' pairs. ",
    default=None,
    nargs='+',
)
parser.add_argument('--zip', action='store_true',
                    help='use zipped dataset instead of folder dataset')
parser.add_argument('--cache-mode', type=str, default='part', choices=['no', 'full', 'part'],
                    help='no: no cache, '
                    'full: cache all data, '
                    'part: sharding the dataset into nonoverlapping pieces and only cache one piece')
parser.add_argument('--resume', help='resume from checkpoint')
parser.add_argument('--accumulation-steps', type=int,
                    help="gradient accumulation steps")
parser.add_argument('--use-checkpoint', action='store_true',
                    help="whether to use gradient checkpointing to save memory")
parser.add_argument('--amp-opt-level', type=str, default='O1', choices=['O0', 'O1', 'O2'],
                    help='mixed precision opt level, if O0, no amp is used')
parser.add_argument('--tag', help='tag of experiment')
parser.add_argument('--eval', action='store_true',
                    help='Perform evaluation only')
parser.add_argument('--throughput', action='store_true',
                    help='Test throughput only')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=8,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
# Build the Swin-Unet configuration object from --cfg / --opts.
config = get_config(args)
def kaiming_normal_init_weight(model):
    """Re-initialise a network in place: Kaiming-normal Conv2d weights,
    BatchNorm2d reset to scale 1 / shift 0. Returns the (mutated) model."""
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.kaiming_normal_(layer.weight)
    return model
def xavier_normal_init_weight(model):
    """Re-initialise a network in place: Xavier-normal Conv2d weights,
    BatchNorm2d reset to scale 1 / shift 0. Returns the (mutated) model."""
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv2d):
            torch.nn.init.xavier_normal_(layer.weight)
    return model
def patients_to_slices(dataset, patiens_num):
    """Translate a labelled-patient count into the labelled-slice count.

    Args:
        dataset: dataset root path, matched against known dataset names.
        patiens_num: number of labelled patients (int or str).

    Returns:
        The number of labelled 2D slices corresponding to that many patients.

    Raises:
        ValueError: if the dataset name is not recognised.
        KeyError: if no slice count is tabulated for patiens_num.
    """
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    elif "Prostate" in dataset:
        # BUG FIX: the original wrote `elif "Prostate":`, a constant truthy
        # string, so every non-ACDC dataset silently used the Prostate table.
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (Laine & Aila, arXiv:1610.02242)."""
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of ema_model from model, in place.

    Args:
        model: network providing the fresh parameters.
        ema_model: EMA network, parameters updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current iteration, used to ramp alpha up from 0.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # In place: ema = alpha * ema + (1 - alpha) * param.
        # BUG FIX: keyword `alpha=` replaces the deprecated (and later
        # removed) Tensor.add_(scalar, tensor) overload.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Cross-teaching between a CNN and a transformer (semi-supervised).

    model1 (CNN from net_factory) and model2 (ViT_seg) are each trained
    with CE + Dice on the labeled front of every batch; on the unlabeled
    tail, each network is supervised by the other's detached hard
    pseudo-labels, weighted by a sigmoid-ramped consistency coefficient.
    Checkpoints and TensorBoard summaries go under *snapshot_path*.
    Relies on the module-level `config` (transformer config) and CUDA.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            # EMA copies must not receive gradients directly.
            for param in model.parameters():
                param.detach_()
        return model
    model1 = create_model()
    model2 = ViT_seg(config, img_size=args.patch_size,
                     num_classes=args.num_classes).cuda()
    model2.load_from(config)  # load pretrained transformer weights
    def worker_init_fn(worker_id):
        # Distinct, reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    # NOTE(review): "silices" is a typo for "slices" in this log message.
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Each batch mixes labeled_bs labeled slices with
    # (batch_size - labeled_bs) unlabeled slices.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model1.train()
    model2.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance1 = 0.0
    best_performance2 = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs1 = model1(volume_batch)
            outputs_soft1 = torch.softmax(outputs1, dim=1)
            outputs2 = model2(volume_batch)
            outputs_soft2 = torch.softmax(outputs2, dim=1)
            # Unsupervised weight ramps up in steps of 150 iterations.
            consistency_weight = get_current_consistency_weight(
                iter_num // 150)
            # Supervised CE + Dice on the labeled front of the batch.
            loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            # Cross pseudo supervision: each model learns from the other's
            # detached hard predictions on the unlabeled tail.
            pseudo_outputs1 = torch.argmax(
                outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
            pseudo_outputs2 = torch.argmax(
                outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)
            pseudo_supervision1 = dice_loss(
                outputs_soft1[args.labeled_bs:], pseudo_outputs2.unsqueeze(1))
            pseudo_supervision2 = dice_loss(
                outputs_soft2[args.labeled_bs:], pseudo_outputs1.unsqueeze(1))
            model1_loss = loss1 + consistency_weight * pseudo_supervision1
            model2_loss = loss2 + consistency_weight * pseudo_supervision2
            loss = model1_loss + model2_loss
            optimizer1.zero_grad()
            optimizer2.zero_grad()
            loss.backward()
            optimizer1.step()
            optimizer2.step()
            iter_num = iter_num + 1
            # Polynomial learning-rate decay, applied to both optimizers.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer1.param_groups:
                param_group['lr'] = lr_
            for param_group in optimizer2.param_groups:
                param_group['lr'] = lr_
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar(
                'consistency_weight/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('loss/model1_loss',
                              model1_loss, iter_num)
            writer.add_scalar('loss/model2_loss',
                              model2_loss, iter_num)
            logging.info('iteration %d : model1 loss : %f model2 loss : %f' % (
                iter_num, model1_loss.item(), model2_loss.item()))
            if iter_num % 50 == 0:
                # Qualitative sample: input, both predictions, ground truth.
                # (*50 just rescales the small label ids for visibility.)
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs1, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model1_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs2, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model2_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Validate model1 and keep the best checkpoint by mean dice.
                # NOTE(review): the loop below shadows the outer
                # i_batch/sampled_batch loop variables.
                model1.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes, patch_size=args.patch_size)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance1 = np.mean(metric_list, axis=0)[0]
                mean_hd951 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model1_val_mean_dice',
                                  performance1, iter_num)
                writer.add_scalar('info/model1_val_mean_hd95',
                                  mean_hd951, iter_num)
                if performance1 > best_performance1:
                    best_performance1 = performance1
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model1_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance1, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model1.pth'.format(args.model))
                    torch.save(model1.state_dict(), save_mode_path)
                    torch.save(model1.state_dict(), save_best)
                logging.info(
                    'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
                model1.train()
                # Same validation protocol for model2.
                model2.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes, patch_size=args.patch_size)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance2 = np.mean(metric_list, axis=0)[0]
                mean_hd952 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model2_val_mean_dice',
                                  performance2, iter_num)
                writer.add_scalar('info/model2_val_mean_hd95',
                                  mean_hd952, iter_num)
                if performance2 > best_performance2:
                    best_performance2 = performance2
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model2.pth'.format(args.model))
                    torch.save(model2.state_dict(), save_mode_path)
                    torch.save(model2.state_dict(), save_best)
                logging.info(
                    'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
                model2.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshots of both models.
                save_mode_path = os.path.join(
                    snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
                torch.save(model1.state_dict(), save_mode_path)
                logging.info("save model1 to {}".format(save_mode_path))
                save_mode_path = os.path.join(
                    snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
                torch.save(model2.state_dict(), save_mode_path)
                logging.info("save model2 to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()  # NOTE(review): assigned but never used
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # cudnn settings trade speed (benchmark) for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG involved (python / numpy / torch CPU and GPU).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: the ignore callable was previously passed positionally,
    # landing in copytree's `symlinks` parameter, and ignore_patterns was
    # given a single list instead of separate pattern strings — so .git and
    # __pycache__ were being copied into the snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 18,123 | 43.530713 | 142 | py |
SSL4MIS | SSL4MIS-master/code/train_adversarial_network_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.discriminator import FC3DDiscriminator
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for 3D adversarial-network training.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Adversarial_Network', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
parser.add_argument('--DAN_lr', type=float, default=0.0001,
                    help='DAN learning rate')
# NOTE(review): `type=list` only works for the default; a value passed on
# the command line would be split into characters — confirm before exposing.
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (Laine & Aila, arXiv:1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update *ema_model* parameters in place as an EMA of *model*.

    *alpha* is the decay rate; early in training it is clamped to
    1 - 1/(step+1) so the EMA equals the true running average until
    enough steps have accumulated (mean-teacher trick).
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Keyword form; the positional `add_(scalar, tensor)` overload used
        # previously is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Adversarial semi-supervised 3D training (segmenter vs. DAN).

    The segmentation net is trained with CE + Dice on the labeled half of
    each batch, plus an adversarial consistency term: a 3D discriminator
    (DAN) is trained to tell labeled from unlabeled predictions, and the
    segmenter is pushed to make its unlabeled predictions look "labeled".
    Checkpoints and TensorBoard logs are written under *snapshot_path*.
    Returns the string "Training Finished!".
    """
    num_classes = 2
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    net = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes)
    model = net.cuda()
    DAN = FC3DDiscriminator(num_classes=num_classes)
    DAN = DAN.cuda()
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        # Distinct, reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    # NOTE(review): 250 hard-codes the BraTS2019 training-set size.
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, 250))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    DAN_optimizer = optim.Adam(
        DAN.parameters(), lr=args.DAN_lr, betas=(0.9, 0.99))
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(2)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # Discriminator targets: 1 = labeled sample, 0 = unlabeled.
            # NOTE(review): hard-wired to batch_size=4 / labeled_bs=2 —
            # confirm before changing those defaults.
            DAN_target = torch.tensor([1, 1, 0, 0]).cuda()
            # Phase 1: update the segmenter while the DAN is frozen.
            model.train()
            DAN.eval()
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs])
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            DAN_outputs = DAN(
                outputs_soft[args.labeled_bs:], volume_batch[args.labeled_bs:])
            # Fool the DAN: unlabeled predictions should be classified as
            # "labeled" (target 1s from the labeled half of DAN_target).
            consistency_loss = F.cross_entropy(
                DAN_outputs, (DAN_target[:args.labeled_bs]).long())
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Phase 2: update the DAN on frozen segmenter outputs.
            model.eval()
            DAN.train()
            with torch.no_grad():
                outputs = model(volume_batch)
                outputs_soft = torch.softmax(outputs, dim=1)
            DAN_outputs = DAN(outputs_soft, volume_batch)
            DAN_loss = F.cross_entropy(DAN_outputs, DAN_target.long())
            DAN_optimizer.zero_grad()
            DAN_loss.backward()
            DAN_optimizer.step()
            # Polynomial learning-rate decay for the segmenter only.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Qualitative grids: a few axial slices of image / prediction / GT.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; keep the best checkpoint by mean dice.
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshot.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cudnn settings trade speed (benchmark) for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG involved (python / numpy / torch CPU and GPU).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: the ignore callable was previously passed positionally,
    # landing in copytree's `symlinks` parameter, and ignore_patterns was
    # given a single list instead of separate pattern strings — so .git and
    # __pycache__ were being copied into the snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
SSL4MIS | SSL4MIS-master/code/val_2D.py | import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
def calculate_metric_percase(pred, gt):
    """Dice and 95% Hausdorff distance for one binary class.

    Both input arrays are binarised in place (any positive value -> 1).
    Returns (0, 0) when the prediction contains no foreground, since
    hd95 is undefined in that case.
    """
    pred[pred > 0] = 1
    gt[gt > 0] = 1
    if pred.sum() == 0:
        return 0, 0
    dice = metric.binary.dc(pred, gt)
    hd95 = metric.binary.hd95(pred, gt)
    return dice, hd95
def test_single_volume(image, label, net, classes, patch_size=[256, 256]):
    """Slice-wise 2D inference on one volume, then per-class metrics.

    Each slice along axis 0 is resized (nearest-neighbour) to *patch_size*,
    pushed through *net* on the GPU, and the argmax prediction is resized
    back to the native resolution.  Returns a list of (dice, hd95) tuples
    for classes 1..classes-1.

    NOTE(review): assumes image/label are (1, D, H, W) tensors — squeeze(0)
    then iteration over axis 0; confirm against the callers.  The mutable
    default `patch_size=[256, 256]` is safe only because it is never mutated.
    """
    image, label = image.squeeze(0).cpu().detach(
    ).numpy(), label.squeeze(0).cpu().detach().numpy()
    prediction = np.zeros_like(label)
    for ind in range(image.shape[0]):
        # NOTE(review): `slice` and `input` shadow Python builtins.
        slice = image[ind, :, :]
        x, y = slice.shape[0], slice.shape[1]
        # order=0 -> nearest-neighbour resize, both directions.
        slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
        input = torch.from_numpy(slice).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(
                net(input), dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(
            prediction == i, label == i))
    return metric_list
def test_single_volume_ds(image, label, net, classes, patch_size=[256, 256]):
    """Deep-supervision variant of ``test_single_volume``.

    Same slice-wise protocol, but *net* returns four heads and only the
    main (first) output is used for the prediction.  Returns a list of
    (dice, hd95) tuples for classes 1..classes-1.

    NOTE(review): assumes image/label are (1, D, H, W) tensors — squeeze(0)
    then iteration over axis 0; confirm against the callers.
    """
    image, label = image.squeeze(0).cpu().detach(
    ).numpy(), label.squeeze(0).cpu().detach().numpy()
    prediction = np.zeros_like(label)
    for ind in range(image.shape[0]):
        # NOTE(review): `slice` and `input` shadow Python builtins.
        slice = image[ind, :, :]
        x, y = slice.shape[0], slice.shape[1]
        # order=0 -> nearest-neighbour resize, both directions.
        slice = zoom(slice, (patch_size[0] / x, patch_size[1] / y), order=0)
        input = torch.from_numpy(slice).unsqueeze(
            0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            output_main, _, _, _ = net(input)
            out = torch.argmax(torch.softmax(
                output_main, dim=1), dim=1).squeeze(0)
            out = out.cpu().detach().numpy()
            pred = zoom(out, (x / patch_size[0], y / patch_size[1]), order=0)
            prediction[ind] = pred
    metric_list = []
    for i in range(1, classes):
        metric_list.append(calculate_metric_percase(
            prediction == i, label == i))
    return metric_list
| 2,359 | 35.307692 | 77 | py |
SSL4MIS | SSL4MIS-master/code/test_urpc.py | import argparse
import os
import shutil
from glob import glob
import numpy
import torch
from networks.unet_3D_dv_semi import unet_3D_dv_semi
from networks.unet_3D import unet_3D
from test_urpc_util import test_all_case
def net_factory(net_type="unet_3D", num_classes=3, in_channels=1):
    """Build the requested 3D segmentation network on the GPU.

    Supported *net_type* values are "unet_3D" and "unet_3D_dv_semi";
    any other value yields None, so callers fail on first use.
    """
    if net_type == "unet_3D":
        return unet_3D(n_classes=num_classes, in_channels=in_channels).cuda()
    if net_type == "unet_3D_dv_semi":
        return unet_3D_dv_semi(n_classes=num_classes,
                               in_channels=in_channels).cuda()
    return None
def Inference(FLAGS):
    """Restore the best checkpoint for FLAGS.model and run test-set inference.

    Side effects: the prediction directory ../model/<exp>/Prediction is
    deleted (if present) and recreated, and <model>_best_model.pth is
    loaded from ../model/<exp>/<model>.  Returns the averaged metrics
    produced by ``test_all_case``.
    """
    snapshot_path = "../model/{}/{}".format(FLAGS.exp, FLAGS.model)
    # NOTE(review): num_classes is fixed at 2 (binary BraTS task) here,
    # independent of the model's construction-time default of 3 classes.
    num_classes = 2
    test_save_path = "../model/{}/Prediction".format(FLAGS.exp)
    if os.path.exists(test_save_path):
        shutil.rmtree(test_save_path)
    os.makedirs(test_save_path)
    net = net_factory(FLAGS.model, num_classes, in_channels=1)
    save_mode_path = os.path.join(
        snapshot_path, '{}_best_model.pth'.format(FLAGS.model))
    net.load_state_dict(torch.load(save_mode_path))
    print("init weight from {}".format(save_mode_path))
    net.eval()
    avg_metric = test_all_case(net, base_dir=FLAGS.root_path, method=FLAGS.model, test_list="test.txt", num_classes=num_classes,
                               patch_size=(96, 96, 96), stride_xy=64, stride_z=64, test_save_path=test_save_path)
    return avg_metric
if __name__ == '__main__':
    # CLI entry point: parse flags, run inference, print averaged metrics.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_path', type=str,
                        default='../data/BraTS2019', help='Name of Experiment')
    parser.add_argument('--exp', type=str,
                        default="BraTS2019/Uncertainty_Rectified_Pyramid_Consistency_25_labeled", help='experiment_name')
    parser.add_argument('--model', type=str,
                        default="unet_3D_dv_semi", help='model_name')
    FLAGS = parser.parse_args()
    metric = Inference(FLAGS)
    print(metric)
| 1,990 | 34.553571 | 128 | py |
SSL4MIS | SSL4MIS-master/code/train_regularized_dropout_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for 3D R-Drop (regularized dropout) training.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='Regularized_Dropout', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): `type=list` only works for the default; a value passed on
# the command line would be split into characters — confirm before exposing.
parser.add_argument('--patch_size', type=list, default=[64, 128, 128],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
parser.add_argument('--total_num', type=int, default=50,
                    help='total data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=4.0, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (Laine & Aila, arXiv:1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update *ema_model* parameters in place as an EMA of *model*.

    *alpha* is the decay rate; early in training it is clamped to
    1 - 1/(step+1) so the EMA equals the true running average until
    enough steps have accumulated (mean-teacher trick).
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Keyword form; the positional `add_(scalar, tensor)` overload used
        # previously is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def kaiming_normal_init_weight(model):
    """Re-initialise *model* in place and return it.

    Every Conv3d weight is redrawn from the Kaiming-normal distribution;
    every BatchNorm3d is reset to the identity affine transform
    (weight 1, bias 0).  Other module types are left untouched.
    """
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm3d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv3d):
            torch.nn.init.kaiming_normal_(layer.weight)
    return model
def xavier_normal_init_weight(model):
    """Re-initialise *model* in place and return it.

    Every Conv3d weight is redrawn from the Xavier-normal distribution;
    every BatchNorm3d is reset to the identity affine transform
    (weight 1, bias 0).  Other module types are left untouched.
    """
    for layer in model.modules():
        if isinstance(layer, nn.BatchNorm3d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
        elif isinstance(layer, nn.Conv3d):
            torch.nn.init.xavier_normal_(layer.weight)
    return model
def train(args, snapshot_path):
    """R-Drop style co-training of two identically-shaped 3D U-Nets.

    model1 (Kaiming init) and model2 (Xavier init) are trained with
    CE + Dice on the labeled front of each batch; on the unlabeled tail,
    a symmetric KL consistency term (compute_kl_loss) pulls their logits
    together, weighted by a sigmoid-ramped coefficient.  Checkpoints and
    TensorBoard summaries go under *snapshot_path*.  Requires CUDA.
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2
    net1 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
    net2 = net_factory_3d(net_type=args.model, in_chns=1, class_num=num_classes).cuda()
    # Different initialisations break the symmetry between the two nets.
    model1 = kaiming_normal_init_weight(net1)
    model2 = xavier_normal_init_weight(net2)
    model1.train()
    model2.train()
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        # Distinct, reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, args.total_num))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size - args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    best_performance1 = 0.0
    best_performance2 = 0.0
    iter_num = 0
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    max_epoch = max_iterations // len(trainloader) + 1
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            outputs1 = model1(volume_batch)
            outputs_soft1 = torch.softmax(outputs1, dim=1)
            outputs2 = model2(volume_batch)
            outputs_soft2 = torch.softmax(outputs2, dim=1)
            # Unsupervised weight ramps up in steps of 150 iterations.
            consistency_weight = get_current_consistency_weight(iter_num // 150)
            # Supervised CE + Dice on the labeled front of the batch.
            # NOTE(review): `label_batch[:]` is a no-op copy-slice.
            model1_loss = 0.5 * (ce_loss(outputs1[:args.labeled_bs],
                                         label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            model2_loss = 0.5 * (ce_loss(outputs2[:args.labeled_bs],
                                         label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            # Symmetric KL between the two nets' unlabeled logits (R-Drop).
            r_drop_loss = losses.compute_kl_loss(outputs1[args.labeled_bs:], outputs2[args.labeled_bs:])
            loss = model1_loss + model2_loss + consistency_weight * r_drop_loss
            optimizer1.zero_grad()
            optimizer2.zero_grad()
            loss.backward()
            optimizer1.step()
            optimizer2.step()
            iter_num = iter_num + 1
            # Polynomial learning-rate decay, applied to both optimizers.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group1 in optimizer1.param_groups:
                param_group1['lr'] = lr_
            for param_group2 in optimizer2.param_groups:
                param_group2['lr'] = lr_
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar(
                'consistency_weight/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('loss/model1_loss',
                              model1_loss, iter_num)
            writer.add_scalar('loss/model2_loss',
                              model2_loss, iter_num)
            writer.add_scalar('loss/r_drop_loss',
                              r_drop_loss, iter_num)
            logging.info('iteration %d : model1 loss : %f model2 loss : %f r_drop_loss: %f' % (
                iter_num, model1_loss.item(), model2_loss.item(), r_drop_loss.item()))
            if iter_num % 50 == 0:
                # Qualitative grids: a few axial slices of image / predictions / GT.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft1[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Model1_Predicted_label',
                                 grid_image, iter_num)
                image = outputs_soft2[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Model2_Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Validate model1 and keep the best checkpoint by mean dice.
                model1.eval()
                avg_metric1 = test_all_case(
                    model1, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric1[:, 0].mean() > best_performance1:
                    best_performance1 = avg_metric1[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model1_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance1, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model1.pth'.format(args.model))
                    torch.save(model1.state_dict(), save_mode_path)
                    torch.save(model1.state_dict(), save_best)
                writer.add_scalar('info/model1_val_dice_score',
                                  avg_metric1[0, 0], iter_num)
                writer.add_scalar('info/model1_val_hd95',
                                  avg_metric1[0, 1], iter_num)
                logging.info(
                    'iteration %d : model1_dice_score : %f model1_hd95 : %f' % (
                        iter_num, avg_metric1[0, 0].mean(), avg_metric1[0, 1].mean()))
                model1.train()
                # Same validation protocol for model2.
                model2.eval()
                avg_metric2 = test_all_case(
                    model2, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric2[:, 0].mean() > best_performance2:
                    best_performance2 = avg_metric2[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model2.pth'.format(args.model))
                    torch.save(model2.state_dict(), save_mode_path)
                    torch.save(model2.state_dict(), save_best)
                writer.add_scalar('info/model2_val_dice_score',
                                  avg_metric2[0, 0], iter_num)
                writer.add_scalar('info/model2_val_hd95',
                                  avg_metric2[0, 1], iter_num)
                logging.info(
                    'iteration %d : model2_dice_score : %f model2_hd95 : %f' % (
                        iter_num, avg_metric2[0, 0].mean(), avg_metric2[0, 1].mean()))
                model2.train()
            if iter_num % 3000 == 0:
                # Unconditional periodic snapshots of both models.
                save_mode_path = os.path.join(
                    snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
                torch.save(model1.state_dict(), save_mode_path)
                logging.info("save model1 to {}".format(save_mode_path))
                save_mode_path = os.path.join(
                    snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
                torch.save(model2.state_dict(), save_mode_path)
                logging.info("save model2 to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()  # NOTE(review): assigned but never used
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # cudnn settings trade speed (benchmark) for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG involved (python / numpy / torch CPU and GPU).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Bug fix: the ignore callable was previously passed positionally,
    # landing in copytree's `symlinks` parameter, and ignore_patterns was
    # given a single list instead of separate pattern strings — so .git and
    # __pycache__ were being copied into the snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 13,874 | 42.359375 | 107 | py |
SSL4MIS | SSL4MIS-master/code/train_uncertainty_rectified_pyramid_consistency_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import BaseDataSets, RandomGenerator, TwoStreamBatchSampler
from utils import losses, metrics, ramps
from val_2D import test_single_volume_ds
from networks.net_factory import net_factory
# Command-line configuration for 2D URPC (Uncertainty Rectified Pyramid
# Consistency) training on ACDC.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Uncertainty_Rectified_Pyramid_Consistency', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_urpc', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): argparse with type=list splits a command-line string into
# characters; only the default [256, 256] works as intended — confirm before
# overriding this flag on the command line.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=7,
                    help='labeled data')
# costs
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to the number of labeled 2D slices.

    Args:
        dataset: dataset root path; matched against known dataset names.
        patiens_num: number of labeled patients (key into the lookup table).

    Returns:
        Number of labeled slices corresponding to ``patiens_num``.

    Raises:
        KeyError: if ``patiens_num`` is not in the table.
        TypeError: if ``dataset`` matches no known dataset (ref_dict stays None).
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Fix: original `elif "Prostate":` tested a non-empty literal (always
    # truthy) instead of membership in `dataset`.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def train(args, snapshot_path):
    """Train a 2D URPC model: supervised loss on labeled slices plus an
    uncertainty-rectified consistency loss between the network's multi-scale
    (pyramid) outputs on unlabeled slices. Checkpoints and TensorBoard logs
    are written under ``snapshot_path``."""
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    model = net_factory(net_type=args.model, in_chns=1,
                        class_num=num_classes)

    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)

    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # The first `labeled_slice` indices are labeled; the rest are unlabeled.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Every batch mixes labeled_bs labeled slices with (batch_size - labeled_bs)
    # unlabeled slices.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    # Element-wise KL divergence used as the per-pixel uncertainty estimate.
    kl_distance = nn.KLDivLoss(reduction='none')
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # Main output plus three auxiliary (pyramid-scale) outputs.
            outputs, outputs_aux1, outputs_aux2, outputs_aux3 = model(
                volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
            outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
            outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
            # Supervised CE + Dice on the labeled portion of the batch, for
            # the main head and each auxiliary head.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:].long())
            loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
                                   label_batch[:args.labeled_bs][:].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux1 = dice_loss(
                outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux2 = dice_loss(
                outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux3 = dice_loss(
                outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = (loss_ce+loss_ce_aux1+loss_ce_aux2+loss_ce_aux3 +
                               loss_dice+loss_dice_aux1+loss_dice_aux2+loss_dice_aux3)/8
            # Mean prediction over all four heads acts as the consistency target.
            preds = (outputs_soft+outputs_aux1_soft +
                     outputs_aux2_soft+outputs_aux3_soft)/4
            # Per-head uncertainty on unlabeled samples: KL(head || mean);
            # exp(-variance) down-weights uncertain pixels in the MSE below.
            variance_main = torch.sum(kl_distance(
                torch.log(outputs_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_main = torch.exp(-variance_main)
            variance_aux1 = torch.sum(kl_distance(
                torch.log(outputs_aux1_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux1 = torch.exp(-variance_aux1)
            variance_aux2 = torch.sum(kl_distance(
                torch.log(outputs_aux2_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux2 = torch.exp(-variance_aux2)
            variance_aux3 = torch.sum(kl_distance(
                torch.log(outputs_aux3_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux3 = torch.exp(-variance_aux3)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Uncertainty-rectified MSE per head + variance regularizer.
            consistency_dist_main = (
                preds[args.labeled_bs:] - outputs_soft[args.labeled_bs:]) ** 2
            consistency_loss_main = torch.mean(
                consistency_dist_main * exp_variance_main) / (torch.mean(exp_variance_main) + 1e-8) + torch.mean(variance_main)
            consistency_dist_aux1 = (
                preds[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux1 = torch.mean(
                consistency_dist_aux1 * exp_variance_aux1) / (torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(variance_aux1)
            consistency_dist_aux2 = (
                preds[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux2 = torch.mean(
                consistency_dist_aux2 * exp_variance_aux2) / (torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(variance_aux2)
            consistency_dist_aux3 = (
                preds[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux3 = torch.mean(
                consistency_dist_aux3 * exp_variance_aux3) / (torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(variance_aux3)
            consistency_loss = (consistency_loss_main + consistency_loss_aux1 +
                                consistency_loss_aux2 + consistency_loss_aux3) / 4
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            # Log an example image / prediction / ground-truth every 20 iters.
            if iter_num % 20 == 0:
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            # Validate every 200 iterations and keep the best checkpoint.
            if iter_num > 0 and iter_num % 200 == 0:
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume_ds(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            # Unconditional periodic checkpoint.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cudnn determinism trades speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True

    # Seed every RNG source used during training.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Fix: ignore_patterns must be passed as the `ignore=` keyword (the third
    # positional parameter of copytree is `symlinks`) and takes individual
    # glob patterns, not a single list.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 14,087 | 44.153846 | 127 | py |
SSL4MIS | SSL4MIS-master/code/train_uncertainty_rectified_pyramid_consistency_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.unet_3D_dv_semi import unet_3D_dv_semi
from utils import losses, metrics, ramps
from val_urpc_util import test_all_case
# Command-line configuration for 3D URPC (Uncertainty Rectified Pyramid
# Consistency) training on the GTV dataset.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/GTV', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='GTV/uncertainty_rectified_pyramid_consistency', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D_dv_semi', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=60000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.1,
                    help='segmentation network learning rate')
# NOTE(review): argparse with type=list splits a command-line string into
# characters; only the default [96, 96, 96] works as intended — confirm
# before overriding this flag on the command line.
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=18,
                    help='labeled data')
parser.add_argument('--total_labeled_num', type=int, default=180,
                    help='total labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=400.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """EMA update of the teacher: ema = alpha * ema + (1 - alpha) * student.

    Args:
        model: student network (source of the parameters).
        ema_model: teacher network updated in place.
        alpha: target EMA decay rate.
        global_step: current iteration, used to warm up alpha.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Keyword form of add_: the positional (scalar, tensor) overload
        # used originally is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Train a 3D URPC model (unet_3D_dv_semi, 3 classes): supervised loss on
    labeled volumes plus an uncertainty-rectified consistency loss between the
    network's four auxiliary (pyramid) outputs on unlabeled volumes.
    Checkpoints and TensorBoard logs go under ``snapshot_path``."""
    num_classes = 3
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    net = unet_3D_dv_semi(n_classes=num_classes, in_channels=1)
    model = net.cuda()
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))

    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)

    # The first labeled_num indices are labeled; the rest are unlabeled.
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, args.total_labeled_num))
    # Every batch mixes labeled_bs labeled volumes with the remainder unlabeled.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    # Element-wise KL divergence used as the per-voxel uncertainty estimate.
    kl_distance = nn.KLDivLoss(reduction='none')
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            # NOTE(review): unlabeled_volume_batch is assigned but unused below.
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # Four auxiliary (pyramid-scale) outputs; no separate main head here.
            outputs_aux1, outputs_aux2, outputs_aux3, outputs_aux4, = model(
                volume_batch)
            outputs_aux1_soft = torch.softmax(outputs_aux1, dim=1)
            outputs_aux2_soft = torch.softmax(outputs_aux2, dim=1)
            outputs_aux3_soft = torch.softmax(outputs_aux3, dim=1)
            outputs_aux4_soft = torch.softmax(outputs_aux4, dim=1)
            # Supervised CE + Dice on the labeled portion for each head.
            loss_ce_aux1 = ce_loss(outputs_aux1[:args.labeled_bs],
                                   label_batch[:args.labeled_bs])
            loss_ce_aux2 = ce_loss(outputs_aux2[:args.labeled_bs],
                                   label_batch[:args.labeled_bs])
            loss_ce_aux3 = ce_loss(outputs_aux3[:args.labeled_bs],
                                   label_batch[:args.labeled_bs])
            loss_ce_aux4 = ce_loss(outputs_aux4[:args.labeled_bs],
                                   label_batch[:args.labeled_bs])
            loss_dice_aux1 = dice_loss(
                outputs_aux1_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux2 = dice_loss(
                outputs_aux2_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux3 = dice_loss(
                outputs_aux3_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            loss_dice_aux4 = dice_loss(
                outputs_aux4_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = (loss_ce_aux1+loss_ce_aux2+loss_ce_aux3+loss_ce_aux4 +
                               loss_dice_aux1+loss_dice_aux2+loss_dice_aux3+loss_dice_aux4)/8
            # Mean prediction over the four heads acts as the consistency target.
            preds = (outputs_aux1_soft +
                     outputs_aux2_soft+outputs_aux3_soft+outputs_aux4_soft)/4
            # Per-head uncertainty on unlabeled samples: KL(head || mean);
            # exp(-variance) down-weights uncertain voxels in the MSE below.
            variance_aux1 = torch.sum(kl_distance(
                torch.log(outputs_aux1_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux1 = torch.exp(-variance_aux1)
            variance_aux2 = torch.sum(kl_distance(
                torch.log(outputs_aux2_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux2 = torch.exp(-variance_aux2)
            variance_aux3 = torch.sum(kl_distance(
                torch.log(outputs_aux3_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux3 = torch.exp(-variance_aux3)
            variance_aux4 = torch.sum(kl_distance(
                torch.log(outputs_aux4_soft[args.labeled_bs:]), preds[args.labeled_bs:]), dim=1, keepdim=True)
            exp_variance_aux4 = torch.exp(-variance_aux4)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Uncertainty-rectified MSE per head + variance regularizer.
            consistency_dist_aux1 = (
                preds[args.labeled_bs:] - outputs_aux1_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux1 = torch.mean(
                consistency_dist_aux1 * exp_variance_aux1) / (torch.mean(exp_variance_aux1) + 1e-8) + torch.mean(variance_aux1)
            consistency_dist_aux2 = (
                preds[args.labeled_bs:] - outputs_aux2_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux2 = torch.mean(
                consistency_dist_aux2 * exp_variance_aux2) / (torch.mean(exp_variance_aux2) + 1e-8) + torch.mean(variance_aux2)
            consistency_dist_aux3 = (
                preds[args.labeled_bs:] - outputs_aux3_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux3 = torch.mean(
                consistency_dist_aux3 * exp_variance_aux3) / (torch.mean(exp_variance_aux3) + 1e-8) + torch.mean(variance_aux3)
            consistency_dist_aux4 = (
                preds[args.labeled_bs:] - outputs_aux4_soft[args.labeled_bs:]) ** 2
            consistency_loss_aux4 = torch.mean(
                consistency_dist_aux4 * exp_variance_aux4) / (torch.mean(exp_variance_aux4) + 1e-8) + torch.mean(variance_aux4)
            consistency_loss = (consistency_loss_aux1 +
                                consistency_loss_aux2 + consistency_loss_aux3 + consistency_loss_aux4) / 4
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/supervised_loss',
                              supervised_loss, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, supervised_loss: %f' %
                (iter_num, loss.item(), supervised_loss.item()))
            # Log a few axial slices of image / prediction / label every 20 iters.
            if iter_num % 20 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = torch.argmax(outputs_aux1_soft, dim=1, keepdim=True)[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1) * 100
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            # Validate every 200 iterations and keep the best checkpoint.
            if iter_num > 0 and iter_num % 200 == 0:
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=num_classes, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                for cls in range(1, num_classes):
                    writer.add_scalar('info/val_cls_{}_dice_score'.format(cls),
                                      avg_metric[cls - 1, 0], iter_num)
                    writer.add_scalar('info/val_cls_{}_hd95'.format(cls),
                                      avg_metric[cls - 1, 1], iter_num)
                writer.add_scalar('info/val_mean_dice_score',
                                  avg_metric[:, 0].mean(), iter_num)
                writer.add_scalar('info/val_mean_hd95',
                                  avg_metric[:, 1].mean(), iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (
                        iter_num, avg_metric[:, 0].mean(), avg_metric[:, 1].mean()))
                model.train()
            # Unconditional periodic checkpoint.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cudnn determinism trades speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True

    # Seed every RNG source used during training.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Fix: ignore_patterns must be passed as the `ignore=` keyword (the third
    # positional parameter of copytree is `symlinks`) and takes individual
    # glob patterns, not a single list.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 14,470 | 45.085987 | 127 | py |
SSL4MIS | SSL4MIS-master/code/train_cross_pseudo_supervision_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for Cross Pseudo Supervision (CPS) training on ACDC.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Cross_Pseudo_Supervision', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): argparse with type=list splits a command-line string into
# characters; only the default [256, 256] works as intended — confirm before
# overriding this flag on the command line.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=136,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def kaiming_normal_init_weight(model):
    """Re-initialize conv weights with Kaiming normal and reset batch-norm
    layers (weight=1, bias=0). Returns the same model for chaining."""
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_normal_(layer.weight)
        elif isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
    return model
def xavier_normal_init_weight(model):
    """Re-initialize conv weights with Xavier normal and reset batch-norm
    layers (weight=1, bias=0). Returns the same model for chaining."""
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.xavier_normal_(layer.weight)
        elif isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.fill_(1)
            layer.bias.data.zero_()
    return model
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to the number of labeled 2D slices.

    Args:
        dataset: dataset root path; matched against known dataset names.
        patiens_num: number of labeled patients (key into the lookup table).

    Returns:
        Number of labeled slices corresponding to ``patiens_num``.

    Raises:
        KeyError: if ``patiens_num`` is not in the table.
        TypeError: if ``dataset`` matches no known dataset (ref_dict stays None).
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # Fix: original `elif "Prostate":` tested a non-empty literal (always
    # truthy) instead of membership in `dataset`.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid-ramped consistency weight (https://arxiv.org/abs/1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp
def update_ema_variables(model, ema_model, alpha, global_step):
    """EMA update of the teacher: ema = alpha * ema + (1 - alpha) * student.

    Args:
        model: student network (source of the parameters).
        ema_model: teacher network updated in place.
        alpha: target EMA decay rate.
        global_step: current iteration, used to warm up alpha.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # Keyword form of add_: the positional (scalar, tensor) overload
        # used originally is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Cross Pseudo Supervision training on 2D slices: two networks are
    trained jointly, each supervised on labeled slices and additionally
    supervised by the other network's hard pseudo-labels on unlabeled slices.
    Checkpoints and TensorBoard logs are written under ``snapshot_path``."""
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations

    def create_model(ema=False):
        # Network definition; an EMA model has its parameters detached so it
        # never receives gradients.
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model

    model1 = create_model()
    model2 = create_model()

    def worker_init_fn(worker_id):
        # Distinct but reproducible seed per DataLoader worker.
        random.seed(args.seed + worker_id)

    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    # Every batch mixes labeled_bs labeled slices with the remainder unlabeled.
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model1.train()
    model2.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer1 = optim.SGD(model1.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    optimizer2 = optim.SGD(model2.parameters(), lr=base_lr,
                           momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance1 = 0.0
    best_performance2 = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()

            outputs1 = model1(volume_batch)
            outputs_soft1 = torch.softmax(outputs1, dim=1)
            outputs2 = model2(volume_batch)
            outputs_soft2 = torch.softmax(outputs2, dim=1)
            consistency_weight = get_current_consistency_weight(iter_num // 150)

            # Supervised CE + Dice on the labeled slices for each network.
            loss1 = 0.5 * (ce_loss(outputs1[:args.labeled_bs], label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft1[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))
            loss2 = 0.5 * (ce_loss(outputs2[:args.labeled_bs], label_batch[:][:args.labeled_bs].long()) + dice_loss(
                outputs_soft2[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1)))

            # Hard pseudo-labels (detached so no gradient flows to the
            # pseudo-label producer) cross-supervise the other network.
            pseudo_outputs1 = torch.argmax(outputs_soft1[args.labeled_bs:].detach(), dim=1, keepdim=False)
            pseudo_outputs2 = torch.argmax(outputs_soft2[args.labeled_bs:].detach(), dim=1, keepdim=False)

            pseudo_supervision1 = ce_loss(outputs1[args.labeled_bs:], pseudo_outputs2)
            pseudo_supervision2 = ce_loss(outputs2[args.labeled_bs:], pseudo_outputs1)

            model1_loss = loss1 + consistency_weight * pseudo_supervision1
            model2_loss = loss2 + consistency_weight * pseudo_supervision2

            loss = model1_loss + model2_loss

            optimizer1.zero_grad()
            optimizer2.zero_grad()

            loss.backward()

            optimizer1.step()
            optimizer2.step()

            iter_num = iter_num + 1

            # Polynomial learning-rate decay applied to both optimizers.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer1.param_groups:
                param_group['lr'] = lr_
            for param_group in optimizer2.param_groups:
                param_group['lr'] = lr_

            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar(
                'consistency_weight/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('loss/model1_loss',
                              model1_loss, iter_num)
            writer.add_scalar('loss/model2_loss',
                              model2_loss, iter_num)
            logging.info('iteration %d : model1 loss : %f model2 loss : %f' % (iter_num, model1_loss.item(), model2_loss.item()))
            # Log example image / both predictions / ground truth every 50 iters.
            if iter_num % 50 == 0:
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs1, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model1_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs2, dim=1), dim=1, keepdim=True)
                writer.add_image('train/model2_Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)

            # Validate both networks every 200 iterations; keep each one's best.
            if iter_num > 0 and iter_num % 200 == 0:
                model1.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model1, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model1_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model1_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance1 = np.mean(metric_list, axis=0)[0]
                mean_hd951 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model1_val_mean_dice', performance1, iter_num)
                writer.add_scalar('info/model1_val_mean_hd95', mean_hd951, iter_num)
                if performance1 > best_performance1:
                    best_performance1 = performance1
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model1_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance1, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model1.pth'.format(args.model))
                    torch.save(model1.state_dict(), save_mode_path)
                    torch.save(model1.state_dict(), save_best)
                logging.info(
                    'iteration %d : model1_mean_dice : %f model1_mean_hd95 : %f' % (iter_num, performance1, mean_hd951))
                model1.train()

                model2.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model2, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/model2_val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/model2_val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance2 = np.mean(metric_list, axis=0)[0]
                mean_hd952 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/model2_val_mean_dice', performance2, iter_num)
                writer.add_scalar('info/model2_val_mean_hd95', mean_hd952, iter_num)
                if performance2 > best_performance2:
                    best_performance2 = performance2
                    # Fix: round to 4 digits like the model1 checkpoint name;
                    # the original dropped the precision argument here.
                    save_mode_path = os.path.join(snapshot_path,
                                                  'model2_iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance2, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model2.pth'.format(args.model))
                    torch.save(model2.state_dict(), save_mode_path)
                    torch.save(model2.state_dict(), save_best)
                logging.info(
                    'iteration %d : model2_mean_dice : %f model2_mean_hd95 : %f' % (iter_num, performance2, mean_hd952))
                model2.train()

            # Unconditional periodic checkpoints for both networks.
            if iter_num % 3000 == 0:
                save_mode_path = os.path.join(
                    snapshot_path, 'model1_iter_' + str(iter_num) + '.pth')
                torch.save(model1.state_dict(), save_mode_path)
                logging.info("save model1 to {}".format(save_mode_path))
                save_mode_path = os.path.join(
                    snapshot_path, 'model2_iter_' + str(iter_num) + '.pth')
                torch.save(model2.state_dict(), save_mode_path)
                logging.info("save model2 to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
            time1 = time.time()
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
if __name__ == "__main__":
    # cudnn determinism trades speed for reproducibility.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True

    # Seed every RNG source used during training.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # Fix: ignore_patterns must be passed as the `ignore=` keyword (the third
    # positional parameter of copytree is `symlinks`) and takes individual
    # glob patterns, not a single list.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))

    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 15,182 | 42.38 | 129 | py |
SSL4MIS | SSL4MIS-master/code/train_uncertainty_aware_mean_teacher_3D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.brats2019 import (BraTS2019, CenterCrop, RandomCrop,
RandomRotFlip, ToTensor,
TwoStreamBatchSampler)
from networks.net_factory_3d import net_factory_3d
from utils import losses, metrics, ramps
from val_3D import test_all_case
# Command-line configuration for 3D uncertainty-aware mean-teacher training
# on BraTS2019. Parsed once at import time into the module-global `args`.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/BraTS2019', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='BraTs2019_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet_3D', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=4,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): argparse `type=list` applies list() to the raw string, which
# splits it into characters; only the default [96, 96, 96] behaves as
# intended — confirm this flag is never passed on the CLI.
parser.add_argument('--patch_size', type=list, default=[96, 96, 96],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=2,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=25,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def get_current_consistency_weight(epoch):
    """Sigmoid ramp-up schedule for the consistency weight (Laine & Aila, arXiv:1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return ramp * args.consistency
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher from the student.

    Args:
        model: student network (source of the parameters).
        ema_model: teacher network, updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current iteration; used to warm up the decay.
    """
    # Use the true average until the exponential average is more correct:
    # alpha ramps from 0 up to its configured value as global_step grows.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        # Fix: use the keyword `alpha=` overload; the positional
        # add_(Number, Tensor) form is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Run uncertainty-aware mean-teacher (UA-MT) training on BraTS2019.

    Args:
        args: parsed command-line namespace (see the argparse block above).
        snapshot_path: directory receiving tensorboard logs and checkpoints.

    Returns:
        The string "Training Finished!" once max_iterations is reached.
    """
    base_lr = args.base_lr
    train_data_path = args.root_path
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    num_classes = 2
    def create_model(ema=False):
        # Network definition. The EMA (teacher) copy is detached so the
        # optimizer never touches it; it is only updated through
        # update_ema_variables().
        net = net_factory_3d(net_type=args.model,
                             in_chns=1, class_num=num_classes)
        model = net.cuda()
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    db_train = BraTS2019(base_dir=train_data_path,
                         split='train',
                         num=None,
                         transform=transforms.Compose([
                             RandomRotFlip(),
                             RandomCrop(args.patch_size),
                             ToTensor(),
                         ]))
    def worker_init_fn(worker_id):
        # Re-seed each DataLoader worker so augmentation is reproducible.
        random.seed(args.seed + worker_id)
    # Each batch mixes labeled_bs labeled samples with
    # (batch_size - labeled_bs) unlabeled ones.
    labeled_idxs = list(range(0, args.labeled_num))
    unlabeled_idxs = list(range(args.labeled_num, 250))  # NOTE(review): assumes 250 training cases total — confirm against the split
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(2)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # Teacher input: unlabeled batch plus small clipped Gaussian noise.
            noise = torch.clamp(torch.randn_like(
                unlabeled_volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = unlabeled_volume_batch + noise
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
            # Monte-Carlo uncertainty estimate: T noisy teacher forward passes
            # (2 per loop iteration via repeat), averaged softmax, then
            # predictive entropy per voxel.
            T = 8
            _, _, d, w, h = unlabeled_volume_batch.shape
            volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1, 1)
            stride = volume_batch_r.shape[0] // 2
            preds = torch.zeros([stride * T, 2, d, w, h]).cuda()
            for i in range(T//2):
                ema_inputs = volume_batch_r + \
                    torch.clamp(torch.randn_like(
                        volume_batch_r) * 0.1, -0.2, 0.2)
                with torch.no_grad():
                    preds[2 * stride * i:2 * stride *
                          (i + 1)] = ema_model(ema_inputs)
            preds = torch.softmax(preds, dim=1)
            preds = preds.reshape(T, stride, 2, d, w, h)
            preds = torch.mean(preds, dim=0)
            uncertainty = -1.0 * \
                torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)
            # Supervised loss on the labeled slice of the batch only.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs])
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_dist = losses.softmax_mse_loss(
                outputs[args.labeled_bs:], ema_output)  # (batch, 2, 112,112,80)
            # Consistency loss is masked to voxels whose uncertainty is below
            # a ramped threshold (0.75*ln2 -> ln2 over training).
            threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num,
                                                        max_iterations))*np.log(2)
            mask = (uncertainty < threshold).float()
            consistency_loss = torch.sum(
                mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            writer.add_scalar('loss/loss', loss, iter_num)
            if iter_num % 20 == 0:
                # Log a few axial slices of image / prediction / label.
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)
                image = outputs_soft[0, 1:2, :, :, 20:61:10].permute(
                    3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label',
                                 grid_image, iter_num)
                image = label_batch[0, :, :, 20:61:10].unsqueeze(
                    0).permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Groundtruth_label',
                                 grid_image, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; checkpoint whenever mean dice improves.
                model.eval()
                avg_metric = test_all_case(
                    model, args.root_path, test_list="val.txt", num_classes=2, patch_size=args.patch_size,
                    stride_xy=64, stride_z=64)
                if avg_metric[:, 0].mean() > best_performance:
                    best_performance = avg_metric[:, 0].mean()
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                writer.add_scalar('info/val_dice_score',
                                  avg_metric[0, 0], iter_num)
                writer.add_scalar('info/val_hd95',
                                  avg_metric[0, 1], iter_num)
                logging.info(
                    'iteration %d : dice_score : %f hd95 : %f' % (iter_num, avg_metric[0, 0].mean(), avg_metric[0, 1].mean()))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional snapshot every 3000 iterations.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN benchmark mode is faster but non-deterministic; the two flags
    # are always flipped together.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG source (python / numpy / torch CPU / torch CUDA).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUG fix: the ignore filter was previously passed positionally into
    # copytree's third parameter (`symlinks`), and ignore_patterns() takes
    # *patterns (strings), not a list — so '.git' and '__pycache__' were
    # never actually excluded from the code snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 12,302 | 41.570934 | 126 | py |
SSL4MIS | SSL4MIS-master/code/train_uncertainty_aware_mean_teacher_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for 2D uncertainty-aware mean-teacher training
# on ACDC. Parsed once at import time into the module-global `args`.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Uncertainty_Aware_Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): argparse `type=list` applies list() to the raw string, which
# splits it into characters; only the default [256, 256] behaves as
# intended — confirm this flag is never passed on the CLI.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=136,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to the corresponding number of 2D slices.

    Args:
        dataset: dataset root path; matched against known dataset names.
        patiens_num: number of labeled patients (key into the lookup table).

    Returns:
        Number of labeled slices for ``patiens_num`` patients.
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # BUG fix: the original `elif "Prostate":` tested a non-empty string
    # literal (always true), so any non-ACDC path silently used the
    # Prostate table instead of reporting an error.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid ramp-up schedule for the consistency weight (Laine & Aila, arXiv:1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return ramp * args.consistency
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher from the student.

    Args:
        model: student network (source of the parameters).
        ema_model: teacher network, updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current iteration; used to warm up the decay.
    """
    # Use the true average until the exponential average is more correct:
    # alpha ramps from 0 up to its configured value as global_step grows.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        # Fix: use the keyword `alpha=` overload; the positional
        # add_(Number, Tensor) form is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Run 2D uncertainty-aware mean-teacher (UA-MT) training on ACDC slices.

    Args:
        args: parsed command-line namespace (see the argparse block above).
        snapshot_path: directory receiving tensorboard logs and checkpoints.

    Returns:
        The string "Training Finished!" once max_iterations is reached.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition. The EMA (teacher) copy is detached so the
        # optimizer never touches it; it is only updated through
        # update_ema_variables().
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    def worker_init_fn(worker_id):
        # Re-seed each DataLoader worker so augmentation is reproducible.
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Each batch mixes labeled_bs labeled slices with
    # (batch_size - labeled_bs) unlabeled ones.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # Teacher input: unlabeled batch plus small clipped Gaussian noise.
            noise = torch.clamp(torch.randn_like(
                unlabeled_volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = unlabeled_volume_batch + noise
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
            # Monte-Carlo uncertainty estimate: T noisy teacher forward passes
            # (2 per loop iteration via repeat), averaged softmax, then
            # predictive entropy per pixel.
            T = 8
            _, _, w, h = unlabeled_volume_batch.shape
            volume_batch_r = unlabeled_volume_batch.repeat(2, 1, 1, 1)
            stride = volume_batch_r.shape[0] // 2
            preds = torch.zeros([stride * T, num_classes, w, h]).cuda()
            for i in range(T//2):
                ema_inputs = volume_batch_r + \
                    torch.clamp(torch.randn_like(
                        volume_batch_r) * 0.1, -0.2, 0.2)
                with torch.no_grad():
                    preds[2 * stride * i:2 * stride *
                          (i + 1)] = ema_model(ema_inputs)
            preds = F.softmax(preds, dim=1)
            preds = preds.reshape(T, stride, num_classes, w, h)
            preds = torch.mean(preds, dim=0)
            uncertainty = -1.0 * \
                torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)
            # Supervised loss on the labeled slice of the batch only.
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:args.labeled_bs][:].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            consistency_dist = losses.softmax_mse_loss(
                outputs[args.labeled_bs:], ema_output)  # (batch, 2, 112,112,80)
            # Consistency loss is masked to pixels whose uncertainty is below
            # a ramped threshold (0.75*ln2 -> ln2 over training).
            threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num,
                                                        max_iterations))*np.log(2)
            mask = (uncertainty < threshold).float()
            consistency_loss = torch.sum(
                mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log one sample image / prediction / label to tensorboard.
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; checkpoint whenever mean dice improves.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional snapshot every 3000 iterations.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN benchmark mode is faster but non-deterministic; the two flags
    # are always flipped together.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG source (python / numpy / torch CPU / torch CUDA).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUG fix: the ignore filter was previously passed positionally into
    # copytree's third parameter (`symlinks`), and ignore_patterns() takes
    # *patterns (strings), not a list — so '.git' and '__pycache__' were
    # never actually excluded from the code snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
| 12,976 | 41.970199 | 108 | py |
SSL4MIS | SSL4MIS-master/code/train_mean_teacher_2D.py | import argparse
import logging
import os
import random
import shutil
import sys
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn import BCEWithLogitsLoss
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import make_grid
from tqdm import tqdm
from dataloaders import utils
from dataloaders.dataset import (BaseDataSets, RandomGenerator,
TwoStreamBatchSampler)
from networks.net_factory import net_factory
from utils import losses, metrics, ramps
from val_2D import test_single_volume
# Command-line configuration for 2D mean-teacher training on ACDC.
# Parsed once at import time into the module-global `args`.
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
                    default='../data/ACDC', help='Name of Experiment')
parser.add_argument('--exp', type=str,
                    default='ACDC/Mean_Teacher', help='experiment_name')
parser.add_argument('--model', type=str,
                    default='unet', help='model_name')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int, default=24,
                    help='batch_size per gpu')
parser.add_argument('--deterministic', type=int, default=1,
                    help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
                    help='segmentation network learning rate')
# NOTE(review): argparse `type=list` applies list() to the raw string, which
# splits it into characters; only the default [256, 256] behaves as
# intended — confirm this flag is never passed on the CLI.
parser.add_argument('--patch_size', type=list, default=[256, 256],
                    help='patch size of network input')
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--num_classes', type=int, default=4,
                    help='output channel of network')
# label and unlabel
parser.add_argument('--labeled_bs', type=int, default=12,
                    help='labeled_batch_size per gpu')
parser.add_argument('--labeled_num', type=int, default=136,
                    help='labeled data')
# costs
parser.add_argument('--ema_decay', type=float, default=0.99, help='ema_decay')
parser.add_argument('--consistency_type', type=str,
                    default="mse", help='consistency_type')
parser.add_argument('--consistency', type=float,
                    default=0.1, help='consistency')
parser.add_argument('--consistency_rampup', type=float,
                    default=200.0, help='consistency_rampup')
args = parser.parse_args()
def patients_to_slices(dataset, patiens_num):
    """Map a number of labeled patients to the corresponding number of 2D slices.

    Args:
        dataset: dataset root path; matched against known dataset names.
        patiens_num: number of labeled patients (key into the lookup table).

    Returns:
        Number of labeled slices for ``patiens_num`` patients.
    """
    ref_dict = None
    if "ACDC" in dataset:
        ref_dict = {"3": 68, "7": 136,
                    "14": 256, "21": 396, "28": 512, "35": 664, "140": 1312}
    # BUG fix: the original `elif "Prostate":` tested a non-empty string
    # literal (always true), so any non-ACDC path silently used the
    # Prostate table instead of reporting an error.
    elif "Prostate" in dataset:
        ref_dict = {"2": 27, "4": 53, "8": 120,
                    "12": 179, "16": 256, "21": 312, "42": 623}
    else:
        print("Error")
    return ref_dict[str(patiens_num)]
def get_current_consistency_weight(epoch):
    """Sigmoid ramp-up schedule for the consistency weight (Laine & Aila, arXiv:1610.02242)."""
    ramp = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return ramp * args.consistency
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher from the student.

    Args:
        model: student network (source of the parameters).
        ema_model: teacher network, updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current iteration; used to warm up the decay.
    """
    # Use the true average until the exponential average is more correct:
    # alpha ramps from 0 up to its configured value as global_step grows.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        # Fix: use the keyword `alpha=` overload; the positional
        # add_(Number, Tensor) form is deprecated in modern PyTorch.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
def train(args, snapshot_path):
    """Run 2D mean-teacher training on ACDC slices.

    Args:
        args: parsed command-line namespace (see the argparse block above).
        snapshot_path: directory receiving tensorboard logs and checkpoints.

    Returns:
        The string "Training Finished!" once max_iterations is reached.
    """
    base_lr = args.base_lr
    num_classes = args.num_classes
    batch_size = args.batch_size
    max_iterations = args.max_iterations
    def create_model(ema=False):
        # Network definition. The EMA (teacher) copy is detached so the
        # optimizer never touches it; it is only updated through
        # update_ema_variables().
        model = net_factory(net_type=args.model, in_chns=1,
                            class_num=num_classes)
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    def worker_init_fn(worker_id):
        # Re-seed each DataLoader worker so augmentation is reproducible.
        random.seed(args.seed + worker_id)
    db_train = BaseDataSets(base_dir=args.root_path, split="train", num=None, transform=transforms.Compose([
        RandomGenerator(args.patch_size)
    ]))
    db_val = BaseDataSets(base_dir=args.root_path, split="val")
    total_slices = len(db_train)
    labeled_slice = patients_to_slices(args.root_path, args.labeled_num)
    print("Total silices is: {}, labeled slices is: {}".format(
        total_slices, labeled_slice))
    # Each batch mixes labeled_bs labeled slices with
    # (batch_size - labeled_bs) unlabeled ones.
    labeled_idxs = list(range(0, labeled_slice))
    unlabeled_idxs = list(range(labeled_slice, total_slices))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size-args.labeled_bs)
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler,
                             num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)
    model.train()
    valloader = DataLoader(db_val, batch_size=1, shuffle=False,
                           num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=base_lr,
                          momentum=0.9, weight_decay=0.0001)
    ce_loss = CrossEntropyLoss()
    dice_loss = losses.DiceLoss(num_classes)
    writer = SummaryWriter(snapshot_path + '/log')
    logging.info("{} iterations per epoch".format(len(trainloader)))
    iter_num = 0
    max_epoch = max_iterations // len(trainloader) + 1
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            volume_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            unlabeled_volume_batch = volume_batch[args.labeled_bs:]
            # Teacher input: unlabeled batch plus small clipped Gaussian noise.
            noise = torch.clamp(torch.randn_like(
                unlabeled_volume_batch) * 0.1, -0.2, 0.2)
            ema_inputs = unlabeled_volume_batch + noise
            outputs = model(volume_batch)
            outputs_soft = torch.softmax(outputs, dim=1)
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)
                ema_output_soft = torch.softmax(ema_output, dim=1)
            # Supervised loss on the labeled slice of the batch only.
            # (label_batch[:] is a no-op full slice kept from the original.)
            loss_ce = ce_loss(outputs[:args.labeled_bs],
                              label_batch[:][:args.labeled_bs].long())
            loss_dice = dice_loss(
                outputs_soft[:args.labeled_bs], label_batch[:args.labeled_bs].unsqueeze(1))
            supervised_loss = 0.5 * (loss_dice + loss_ce)
            consistency_weight = get_current_consistency_weight(iter_num//150)
            # Consistency term is disabled for the first 1000 iterations.
            if iter_num < 1000:
                consistency_loss = 0.0
            else:
                consistency_loss = torch.mean(
                    (outputs_soft[args.labeled_bs:]-ema_output_soft)**2)
            loss = supervised_loss + consistency_weight * consistency_loss
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)
            # Polynomial learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_
            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            writer.add_scalar('info/loss_dice', loss_dice, iter_num)
            writer.add_scalar('info/consistency_loss',
                              consistency_loss, iter_num)
            writer.add_scalar('info/consistency_weight',
                              consistency_weight, iter_num)
            logging.info(
                'iteration %d : loss : %f, loss_ce: %f, loss_dice: %f' %
                (iter_num, loss.item(), loss_ce.item(), loss_dice.item()))
            if iter_num % 20 == 0:
                # Log one sample image / prediction / label to tensorboard.
                image = volume_batch[1, 0:1, :, :]
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(
                    outputs, dim=1), dim=1, keepdim=True)
                writer.add_image('train/Prediction',
                                 outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)
            if iter_num > 0 and iter_num % 200 == 0:
                # Periodic validation; checkpoint whenever mean dice improves.
                model.eval()
                metric_list = 0.0
                for i_batch, sampled_batch in enumerate(valloader):
                    metric_i = test_single_volume(
                        sampled_batch["image"], sampled_batch["label"], model, classes=num_classes)
                    metric_list += np.array(metric_i)
                metric_list = metric_list / len(db_val)
                for class_i in range(num_classes-1):
                    writer.add_scalar('info/val_{}_dice'.format(class_i+1),
                                      metric_list[class_i, 0], iter_num)
                    writer.add_scalar('info/val_{}_hd95'.format(class_i+1),
                                      metric_list[class_i, 1], iter_num)
                performance = np.mean(metric_list, axis=0)[0]
                mean_hd95 = np.mean(metric_list, axis=0)[1]
                writer.add_scalar('info/val_mean_dice', performance, iter_num)
                writer.add_scalar('info/val_mean_hd95', mean_hd95, iter_num)
                if performance > best_performance:
                    best_performance = performance
                    save_mode_path = os.path.join(snapshot_path,
                                                  'iter_{}_dice_{}.pth'.format(
                                                      iter_num, round(best_performance, 4)))
                    save_best = os.path.join(snapshot_path,
                                             '{}_best_model.pth'.format(args.model))
                    torch.save(model.state_dict(), save_mode_path)
                    torch.save(model.state_dict(), save_best)
                logging.info(
                    'iteration %d : mean_dice : %f mean_hd95 : %f' % (iter_num, performance, mean_hd95))
                model.train()
            if iter_num % 3000 == 0:
                # Unconditional snapshot every 3000 iterations.
                save_mode_path = os.path.join(
                    snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))
            if iter_num >= max_iterations:
                break
        if iter_num >= max_iterations:
            iterator.close()
            break
    writer.close()
    return "Training Finished!"
if __name__ == "__main__":
    # cuDNN benchmark mode is faster but non-deterministic; the two flags
    # are always flipped together.
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    # Seed every RNG source (python / numpy / torch CPU / torch CUDA).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    snapshot_path = "../model/{}_{}_labeled/{}".format(
        args.exp, args.labeled_num, args.model)
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')
    # BUG fix: the ignore filter was previously passed positionally into
    # copytree's third parameter (`symlinks`), and ignore_patterns() takes
    # *patterns (strings), not a list — so '.git' and '__pycache__' were
    # never actually excluded from the code snapshot.
    shutil.copytree('.', snapshot_path + '/code',
                    ignore=shutil.ignore_patterns('.git', '__pycache__'))
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
    train(args, snapshot_path)
train(args, snapshot_path)
| 11,916 | 40.961268 | 108 | py |
SSL4MIS | SSL4MIS-master/code/test_3D_util.py | import math
import h5py
import nibabel as nib
import numpy as np
import SimpleITK as sitk
import torch
import torch.nn.functional as F
from medpy import metric
from skimage.measure import label
from tqdm import tqdm
def test_single_case(net, image, stride_xy, stride_z, patch_size, num_classes=1):
    """Sliding-window inference over a single 3D volume.

    Pads ``image`` (a (w, h, d) ndarray) up to ``patch_size`` if needed, runs
    ``net`` on overlapping patches with strides (stride_xy, stride_xy,
    stride_z), accumulates softmax scores over overlapping regions, and
    returns the per-voxel argmax label map cropped back to the input shape.
    """
    w, h, d = image.shape
    # if the size of image is less than patch_size, then padding it
    add_pad = False
    if w < patch_size[0]:
        w_pad = patch_size[0]-w
        add_pad = True
    else:
        w_pad = 0
    if h < patch_size[1]:
        h_pad = patch_size[1]-h
        add_pad = True
    else:
        h_pad = 0
    if d < patch_size[2]:
        d_pad = patch_size[2]-d
        add_pad = True
    else:
        d_pad = 0
    # Split each axis' padding between the two sides (extra voxel on the right).
    wl_pad, wr_pad = w_pad//2, w_pad-w_pad//2
    hl_pad, hr_pad = h_pad//2, h_pad-h_pad//2
    dl_pad, dr_pad = d_pad//2, d_pad-d_pad//2
    if add_pad:
        image = np.pad(image, [(wl_pad, wr_pad), (hl_pad, hr_pad),
                               (dl_pad, dr_pad)], mode='constant', constant_values=0)
    ww, hh, dd = image.shape
    # Number of window positions per axis (last window is clamped to the edge).
    sx = math.ceil((ww - patch_size[0]) / stride_xy) + 1
    sy = math.ceil((hh - patch_size[1]) / stride_xy) + 1
    sz = math.ceil((dd - patch_size[2]) / stride_z) + 1
    # print("{}, {}, {}".format(sx, sy, sz))
    # score_map accumulates softmax scores; cnt counts how many windows
    # covered each voxel so overlaps can be averaged.
    score_map = np.zeros((num_classes, ) + image.shape).astype(np.float32)
    cnt = np.zeros(image.shape).astype(np.float32)
    for x in range(0, sx):
        xs = min(stride_xy*x, ww-patch_size[0])
        for y in range(0, sy):
            ys = min(stride_xy * y, hh-patch_size[1])
            for z in range(0, sz):
                zs = min(stride_z * z, dd-patch_size[2])
                test_patch = image[xs:xs+patch_size[0],
                                   ys:ys+patch_size[1], zs:zs+patch_size[2]]
                # Add batch and channel dims: (1, 1, pw, ph, pd).
                test_patch = np.expand_dims(np.expand_dims(
                    test_patch, axis=0), axis=0).astype(np.float32)
                test_patch = torch.from_numpy(test_patch).cuda()
                with torch.no_grad():
                    y1 = net(test_patch)
                    # ensemble
                    y = torch.softmax(y1, dim=1)
                y = y.cpu().data.numpy()
                y = y[0, :, :, :, :]
                score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = score_map[:, xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + y
                cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] \
                    = cnt[xs:xs+patch_size[0], ys:ys+patch_size[1], zs:zs+patch_size[2]] + 1
    score_map = score_map/np.expand_dims(cnt, axis=0)
    label_map = np.argmax(score_map, axis=0)
    if add_pad:
        # Crop back to the original (unpadded) volume extent.
        label_map = label_map[wl_pad:wl_pad+w,
                              hl_pad:hl_pad+h, dl_pad:dl_pad+d]
        score_map = score_map[:, wl_pad:wl_pad +
                              w, hl_pad:hl_pad+h, dl_pad:dl_pad+d]
    # NOTE(review): score_map is cropped as well but only the label map is
    # returned — confirm no caller expects the scores.
    return label_map
def cal_metric(gt, pred):
    """Return np.array([Dice, HD95]) for binary masks; zeros when either mask is empty."""
    if pred.sum() <= 0 or gt.sum() <= 0:
        # Distance metrics are undefined on empty masks, so report zeros.
        return np.zeros(2)
    dice_score = metric.binary.dc(pred, gt)
    hausdorff95 = metric.binary.hd95(pred, gt)
    return np.array([dice_score, hausdorff95])
def test_all_case(net, base_dir, method="unet_3D", test_list="full_test.list", num_classes=4, patch_size=(48, 160, 160), stride_xy=32, stride_z=24, test_save_path=None):
    """Run sliding-window inference over every volume named in *test_list*.

    For each case: predicts with ``test_single_case``, computes per-case
    metrics (dice, |RAVD|, HD95, ASD), appends them to a text report and
    writes prediction/image/label volumes as NIfTI files.

    Returns the metric array averaged over all cases (shape
    ``(num_classes-1, 4)``; only row 0 — foreground class 1 — is
    accumulated by this implementation).
    """
    with open(base_dir + '/{}'.format(test_list), 'r') as f:
        image_list = f.readlines()
    image_list = [base_dir + "/data/{}.h5".format(
        item.replace('\n', '').split(",")[0]) for item in image_list]
    total_metric = np.zeros((num_classes-1, 4))
    print("Testing begin")
    with open(test_save_path + "/{}.txt".format(method), "a") as f:
        for image_path in tqdm(image_list):
            ids = image_path.split("/")[-1].replace(".h5", "")
            h5f = h5py.File(image_path, 'r')
            image = h5f['image'][:]
            label = h5f['label'][:]
            # close the HDF5 handle as soon as the arrays are in memory
            # (the original left one file open per case)
            h5f.close()
            prediction = test_single_case(
                net, image, stride_xy, stride_z, patch_size, num_classes=num_classes)
            # renamed from 'metric' to avoid shadowing the medpy 'metric'
            # module used by the metric helpers in this file
            case_metric = calculate_metric_percase(prediction == 1, label == 1)
            total_metric[0, :] += case_metric
            f.writelines("{},{},{},{},{}\n".format(
                ids, case_metric[0], case_metric[1], case_metric[2], case_metric[3]))
            pred_itk = sitk.GetImageFromArray(prediction.astype(np.uint8))
            pred_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(pred_itk, test_save_path +
                            "/{}_pred.nii.gz".format(ids))
            img_itk = sitk.GetImageFromArray(image)
            img_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(img_itk, test_save_path +
                            "/{}_img.nii.gz".format(ids))
            lab_itk = sitk.GetImageFromArray(label.astype(np.uint8))
            lab_itk.SetSpacing((1.0, 1.0, 1.0))
            sitk.WriteImage(lab_itk, test_save_path +
                            "/{}_lab.nii.gz".format(ids))
        f.writelines("Mean metrics,{},{},{},{}".format(total_metric[0, 0] / len(image_list), total_metric[0, 1] / len(
            image_list), total_metric[0, 2] / len(image_list), total_metric[0, 3] / len(image_list)))
        # no explicit f.close(): the 'with' block already closes the report
    print("Testing end")
    return total_metric / len(image_list)
def cal_dice(prediction, label, num=2):
    """Per-class Dice coefficient for classes ``1 .. num-1``.

    Parameters
    ----------
    prediction, label : array-like of integer class ids (same shape).
    num : total number of classes including background (class 0 skipped).

    Returns
    -------
    np.ndarray of shape ``(num-1,)`` with one Dice score per foreground
    class. NaN when a class is absent from both inputs (0/0), matching
    the original behaviour.
    """
    total_dice = np.zeros(num-1)
    for i in range(1, num):
        # builtin float instead of np.float: the alias was deprecated in
        # NumPy 1.20 and removed in 1.24, so the original crashes there
        prediction_tmp = (prediction == i).astype(float)
        label_tmp = (label == i).astype(float)
        dice = 2 * np.sum(prediction_tmp * label_tmp) / \
            (np.sum(prediction_tmp) + np.sum(label_tmp))
        total_dice[i - 1] += dice
    return total_dice
def calculate_metric_percase(pred, gt):
    """Return [dice, |RAVD|, HD95, ASD] for one binary prediction/label pair.

    Relies on medpy's binary metric implementations; both masks are
    expected to be non-empty.
    """
    scores = (
        metric.binary.dc(pred, gt),
        abs(metric.binary.ravd(pred, gt)),
        metric.binary.hd95(pred, gt),
        metric.binary.asd(pred, gt),
    )
    return np.array(scores)
| 6,008 | 38.27451 | 169 | py |
SSL4MIS | SSL4MIS-master/code/networks/efficient_encoder.py | import re
from typing import List
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import get_model_params, url_map
from torchvision.models.densenet import DenseNet
from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet
class EncoderMixin:
    """Mixin that turns a classification backbone into a segmentation encoder.

    Adds:
    - ``out_channels``: channel sizes of the feature tensors the encoder
      emits (one entry per pyramid level, up to ``self._depth``);
    - ``set_in_channels``: re-wires the first convolution for inputs with
      a channel count other than 3.
    """

    @property
    def out_channels(self) -> List:
        """Channel dimensions for each tensor produced by ``forward``."""
        depth = self._depth
        return self._out_channels[:depth + 1]

    def set_in_channels(self, in_channels):
        """Adapt the first convolution to *in_channels* input planes."""
        if in_channels == 3:
            # backbone is already RGB; nothing to patch
            return

        self._in_channels = in_channels
        if self._out_channels[0] == 3:
            remaining = list(self._out_channels)[1:]
            self._out_channels = tuple([in_channels] + remaining)

        patch_first_conv(model=self, in_channels=in_channels)
def patch_first_conv(model, in_channels):
    """Re-wire the first Conv2d of *model* for *in_channels* input planes.

    Weight handling:
    - ``in_channels == 1``: sum the RGB filters into one channel;
    - ``in_channels == 2``: keep two channels, rescaled by 3/2 to preserve
      the activation magnitude;
    - otherwise: allocate a fresh weight tensor and re-initialise it.
    """
    # locate the first Conv2d (falls through to the last visited module
    # when none exists, matching the original behaviour)
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            break

    module.in_channels = in_channels
    weight = module.weight.detach()
    needs_reset = False

    if in_channels == 1:
        weight = weight.sum(1, keepdim=True)
    elif in_channels == 2:
        weight = weight[:, :2] * (3.0 / 2.0)
    else:
        needs_reset = True
        # uninitialised buffer; reset_parameters() below fills it
        weight = torch.Tensor(
            module.out_channels,
            module.in_channels // module.groups,
            *module.kernel_size
        )

    module.weight = nn.parameter.Parameter(weight)
    if needs_reset:
        module.reset_parameters()
class EfficientNetEncoder(EfficientNet, EncoderMixin):
    """EfficientNet backbone exposed as a multi-scale feature encoder.

    ``forward`` returns a list of feature tensors (input, stem output, and
    the outputs at the blocks listed in ``stage_idxs``), truncated to
    ``depth + 1`` entries. The classification head (``_fc``) is removed.
    """

    def __init__(self, stage_idxs, out_channels, model_name, depth=5):
        # build the stock EfficientNet, then strip the classifier
        blocks_args, global_params = get_model_params(model_name, override_params=None)
        super().__init__(blocks_args, global_params)

        # block indices after which a skip feature is captured; the final
        # block index is appended so the last stage is always included
        self._stage_idxs = list(stage_idxs) + [len(self._blocks)]
        self._out_channels = out_channels
        self._depth = depth
        self._in_channels = 3

        del self._fc

    def forward(self, x):
        """Return ``depth + 1`` feature maps, coarsest last."""
        features = [x]

        if self._depth > 0:
            # stem: conv -> batch-norm -> swish
            x = self._swish(self._bn0(self._conv_stem(x)))
            features.append(x)

        if self._depth > 1:
            skip_connection_idx = 0
            for idx, block in enumerate(self._blocks):
                # linearly scale stochastic-depth rate with block index,
                # mirroring the reference EfficientNet implementation
                drop_connect_rate = self._global_params.drop_connect_rate
                if drop_connect_rate:
                    drop_connect_rate *= float(idx) / len(self._blocks)
                x = block(x, drop_connect_rate=drop_connect_rate)

                if idx == self._stage_idxs[skip_connection_idx] - 1:
                    skip_connection_idx += 1
                    features.append(x)

                    # stop once enough pyramid levels were collected
                    if skip_connection_idx + 1 == self._depth:
                        break

        return features

    def load_state_dict(self, state_dict, **kwargs):
        # drop classifier weights: _fc was deleted in __init__
        state_dict.pop("_fc.bias")
        state_dict.pop("_fc.weight")
        super().load_state_dict(state_dict, **kwargs)
def _get_pretrained_settings(encoder):
    """Build the ImageNet pretraining metadata dict for *encoder*.

    The URL comes from efficientnet_pytorch's ``url_map``; mean/std are
    the standard ImageNet normalisation constants.
    """
    imagenet = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225],
        "url": url_map[encoder],
        "input_space": "RGB",
        "input_range": [0, 1],
    }
    return {"imagenet": imagenet}
# Registry of EfficientNet encoder configurations (b0-b7).
# For each variant:
#   - "out_channels": channel count of each feature tensor returned by
#     EfficientNetEncoder.forward (input, stem, then skip stages);
#   - "stage_idxs": block indices at which skip features are captured
#     (the last stage is appended automatically by the encoder).
efficient_net_encoders = {
    "efficientnet-b0": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b0"),
        "params": {
            "out_channels": (3, 32, 24, 40, 112, 320),
            "stage_idxs": (3, 5, 9),
            "model_name": "efficientnet-b0",
        },
    },
    "efficientnet-b1": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b1"),
        "params": {
            "out_channels": (3, 32, 24, 40, 112, 320),
            "stage_idxs": (5, 8, 16),
            "model_name": "efficientnet-b1",
        },
    },
    "efficientnet-b2": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b2"),
        "params": {
            "out_channels": (3, 32, 24, 48, 120, 352),
            "stage_idxs": (5, 8, 16),
            "model_name": "efficientnet-b2",
        },
    },
    "efficientnet-b3": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b3"),
        "params": {
            "out_channels": (3, 40, 32, 48, 136, 384),
            "stage_idxs": (5, 8, 18),
            "model_name": "efficientnet-b3",
        },
    },
    "efficientnet-b4": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b4"),
        "params": {
            "out_channels": (3, 48, 32, 56, 160, 448),
            "stage_idxs": (6, 10, 22),
            "model_name": "efficientnet-b4",
        },
    },
    "efficientnet-b5": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b5"),
        "params": {
            "out_channels": (3, 48, 40, 64, 176, 512),
            "stage_idxs": (8, 13, 27),
            "model_name": "efficientnet-b5",
        },
    },
    "efficientnet-b6": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b6"),
        "params": {
            "out_channels": (3, 56, 40, 72, 200, 576),
            "stage_idxs": (9, 15, 31),
        "model_name": "efficientnet-b6",
        },
    },
    "efficientnet-b7": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b7"),
        "params": {
            "out_channels": (3, 64, 48, 80, 224, 640),
            "stage_idxs": (11, 18, 38),
            "model_name": "efficientnet-b7",
        },
    },
}

# Global name -> config registry consumed by get_encoder(); currently only
# the EfficientNet family is enabled (ResNet/DenseNet variants below are
# commented out).
encoders = {}
encoders.update(efficient_net_encoders)
def get_encoder(name, in_channels=3, depth=5, weights=None):
    """Instantiate a registered encoder by name.

    Parameters
    ----------
    name : key into the ``encoders`` registry (e.g. "efficientnet-b0").
    in_channels : number of input planes; the first conv is patched when
        this differs from 3.
    depth : number of feature-pyramid levels the encoder should produce.
    weights : pretrained-settings key (e.g. "imagenet") or None.

    Raises
    ------
    KeyError if *name* or *weights* is not registered.
    """
    entry = encoders[name]
    Encoder = entry["encoder"]

    # copy the registry's params dict: the original mutated the shared
    # dict in place, so a custom `depth` leaked into every later call
    params = dict(entry["params"])
    params.update(depth=depth)
    encoder = Encoder(**params)

    if weights is not None:
        settings = entry["pretrained_settings"][weights]
        encoder.load_state_dict(model_zoo.load_url(settings["url"]))

    encoder.set_in_channels(in_channels)

    return encoder
# class ResNetEncoder(ResNet, EncoderMixin):
# def __init__(self, out_channels, depth=5, **kwargs):
# super().__init__(**kwargs)
# self._depth = depth
# self._out_channels = out_channels
# self._in_channels = 3
# del self.fc
# del self.avgpool
# def get_stages(self):
# return [
# nn.Identity(),
# nn.Sequential(self.conv1, self.bn1, self.relu),
# nn.Sequential(self.maxpool, self.layer1),
# self.layer2,
# self.layer3,
# self.layer4,
# ]
# def forward(self, x):
# stages = self.get_stages()
# features = []
# for i in range(self._depth + 1):
# x = stages[i](x)
# features.append(x)
# return features
# def load_state_dict(self, state_dict, **kwargs):
# state_dict.pop("fc.bias")
# state_dict.pop("fc.weight")
# super().load_state_dict(state_dict, **kwargs)
# resnet_encoders = {
# "resnet18": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet18"],
# "params": {
# "out_channels": (3, 64, 64, 128, 256, 512),
# "block": BasicBlock,
# "layers": [2, 2, 2, 2],
# },
# },
# "resnet34": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet34"],
# "params": {
# "out_channels": (3, 64, 64, 128, 256, 512),
# "block": BasicBlock,
# "layers": [3, 4, 6, 3],
# },
# },
# "resnet50": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet50"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 6, 3],
# },
# },
# "resnet101": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet101"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# },
# },
# "resnet152": {
# "encoder": ResNetEncoder,
# "pretrained_settings": pretrained_settings["resnet152"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 8, 36, 3],
# },
# },
# "resnext50_32x4d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "imagenet": {
# "url": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 6, 3],
# "groups": 32,
# "width_per_group": 4,
# },
# },
# "resnext101_32x8d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "imagenet": {
# "url": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# },
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# },
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 8,
# },
# },
# "resnext101_32x16d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 16,
# },
# },
# "resnext101_32x32d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 32,
# },
# },
# "resnext101_32x48d": {
# "encoder": ResNetEncoder,
# "pretrained_settings": {
# "instagram": {
# "url": "https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth",
# "input_space": "RGB",
# "input_size": [3, 224, 224],
# "input_range": [0, 1],
# "mean": [0.485, 0.456, 0.406],
# "std": [0.229, 0.224, 0.225],
# "num_classes": 1000,
# }
# },
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 2048),
# "block": Bottleneck,
# "layers": [3, 4, 23, 3],
# "groups": 32,
# "width_per_group": 48,
# },
# },
# }
# class TransitionWithSkip(nn.Module):
# def __init__(self, module):
# super().__init__()
# self.module = module
# def forward(self, x):
# for module in self.module:
# x = module(x)
# if isinstance(module, nn.ReLU):
# skip = x
# return x, skip
# class DenseNetEncoder(DenseNet, EncoderMixin):
# def __init__(self, out_channels, depth=5, **kwargs):
# super().__init__(**kwargs)
# self._out_channels = out_channels
# self._depth = depth
# self._in_channels = 3
# del self.classifier
# def make_dilated(self, stage_list, dilation_list):
# raise ValueError("DenseNet encoders do not support dilated mode "
# "due to pooling operation for downsampling!")
# def get_stages(self):
# return [
# nn.Identity(),
# nn.Sequential(self.features.conv0, self.features.norm0, self.features.relu0),
# nn.Sequential(self.features.pool0, self.features.denseblock1,
# TransitionWithSkip(self.features.transition1)),
# nn.Sequential(self.features.denseblock2, TransitionWithSkip(self.features.transition2)),
# nn.Sequential(self.features.denseblock3, TransitionWithSkip(self.features.transition3)),
# nn.Sequential(self.features.denseblock4, self.features.norm5)
# ]
# def forward(self, x):
# stages = self.get_stages()
# features = []
# for i in range(self._depth + 1):
# x = stages[i](x)
# if isinstance(x, (list, tuple)):
# x, skip = x
# features.append(skip)
# else:
# features.append(x)
# return features
# def load_state_dict(self, state_dict):
# pattern = re.compile(
# r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
# )
# for key in list(state_dict.keys()):
# res = pattern.match(key)
# if res:
# new_key = res.group(1) + res.group(2)
# state_dict[new_key] = state_dict[key]
# del state_dict[key]
# # remove linear
# state_dict.pop("classifier.bias")
# state_dict.pop("classifier.weight")
# super().load_state_dict(state_dict)
# densenet_encoders = {
# "densenet121": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet121"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1024, 1024),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 24, 16),
# },
# },
# "densenet169": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet169"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1280, 1664),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 32, 32),
# },
# },
# "densenet201": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet201"],
# "params": {
# "out_channels": (3, 64, 256, 512, 1792, 1920),
# "num_init_features": 64,
# "growth_rate": 32,
# "block_config": (6, 12, 48, 32),
# },
# },
# "densenet161": {
# "encoder": DenseNetEncoder,
# "pretrained_settings": pretrained_settings["densenet161"],
# "params": {
# "out_channels": (3, 96, 384, 768, 2112, 2208),
# "num_init_features": 96,
# "growth_rate": 48,
# "block_config": (6, 12, 36, 24),
# },
# },
# }
# net = get_encoder(name="efficientnet-b4", in_channels=1, depth=5, weights="imagenet")
#
# t = torch.rand(2, 1, 480, 480)
#
# print(len(net(t)))
| 17,641 | 31.975701 | 110 | py |
SSL4MIS | SSL4MIS-master/code/networks/pnet.py |
# -*- coding: utf-8 -*-
"""
A PyTorch implementation of the DeepIGeoS paper:
    Wang, Guotai and Zuluaga, Maria A and Li, Wenqi and Pratt, Rosalind and Patel, Premal A and Aertsen, Michael and Doel, Tom and David, Anna L and Deprest, Jan and Ourselin, Sébastien and others:
DeepIGeoS: a deep interactive geodesic framework for medical image segmentation.
TPAMI (7) 2018: 1559--1572
Note that there are some modifications from the original paper, such as
the use of leaky relu here.
"""
from __future__ import division, print_function
import torch
import torch.nn as nn
class PNetBlock(nn.Module):
    """Two stacked dilated 3x3 convolutions, each followed by batch
    normalisation and LeakyReLU.

    Attribute names (conv1/conv2/in1/in2/ac1/ac2) are kept for checkpoint
    compatibility; note that ``in1``/``in2`` actually hold BatchNorm2d
    layers despite the "in" (instance-norm) naming.
    """

    def __init__(self, in_channels, out_channels, dilation, padding):
        super(PNetBlock, self).__init__()

        self.in_chns = in_channels
        self.out_chns = out_channels
        self.dilation = dilation
        self.padding = padding

        self.conv1 = nn.Conv2d(self.in_chns, self.out_chns, kernel_size=3,
                               padding=self.padding, dilation=self.dilation,
                               groups=1, bias=True)
        self.conv2 = nn.Conv2d(self.out_chns, self.out_chns, kernel_size=3,
                               padding=self.padding, dilation=self.dilation,
                               groups=1, bias=True)
        self.in1 = nn.BatchNorm2d(self.out_chns)
        self.in2 = nn.BatchNorm2d(self.out_chns)
        self.ac1 = nn.LeakyReLU()
        self.ac2 = nn.LeakyReLU()

    def forward(self, x):
        hidden = self.ac1(self.in1(self.conv1(x)))
        return self.ac2(self.in2(self.conv2(hidden)))
class ConcatBlock(nn.Module):
    """Fuse concatenated multi-scale features with two 1x1 convolutions,
    each followed by LeakyReLU. Attribute names are preserved for
    checkpoint compatibility.
    """

    def __init__(self, in_channels, out_channels):
        super(ConcatBlock, self).__init__()

        self.in_chns = in_channels
        self.out_chns = out_channels

        # channel-mixing 1x1 convs: keep width, then project to out_chns
        self.conv1 = nn.Conv2d(
            self.in_chns, self.in_chns, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(
            self.in_chns, self.out_chns, kernel_size=1, padding=0)
        self.ac1 = nn.LeakyReLU()
        self.ac2 = nn.LeakyReLU()

    def forward(self, x):
        return self.ac2(self.conv2(self.ac1(self.conv1(x))))
class OutPutBlock(nn.Module):
    """Classification head: dropout + 1x1 conv halving the channels, then
    dropout + 1x1 conv producing the class logits (no final activation).
    Attribute names are preserved for checkpoint compatibility.
    """

    def __init__(self, in_channels, out_channels):
        super(OutPutBlock, self).__init__()

        self.in_chns = in_channels
        self.out_chns = out_channels

        self.conv1 = nn.Conv2d(
            self.in_chns, self.in_chns // 2, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(
            self.in_chns // 2, self.out_chns, kernel_size=1, padding=0)
        self.drop1 = nn.Dropout2d(0.3)
        self.drop2 = nn.Dropout2d(0.3)
        self.ac1 = nn.LeakyReLU()

    def forward(self, x):
        hidden = self.ac1(self.conv1(self.drop1(x)))
        return self.conv2(self.drop2(hidden))
class PNet2D(nn.Module):
    """2D P-Net: five dilated PNetBlocks whose outputs are concatenated,
    fused by a ConcatBlock and classified by an OutPutBlock.

    ``ratios`` supplies one dilation (used as padding too, so spatial size
    is preserved) per block. Attribute names are kept for checkpoint
    compatibility.
    """

    def __init__(self, in_chns, out_chns, num_filters, ratios):
        super(PNet2D, self).__init__()

        self.in_chns = in_chns
        self.out_chns = out_chns
        self.ratios = ratios
        self.num_filters = num_filters

        self.block1 = PNetBlock(self.in_chns, self.num_filters,
                                self.ratios[0], padding=self.ratios[0])
        self.block2 = PNetBlock(self.num_filters, self.num_filters,
                                self.ratios[1], padding=self.ratios[1])
        self.block3 = PNetBlock(self.num_filters, self.num_filters,
                                self.ratios[2], padding=self.ratios[2])
        self.block4 = PNetBlock(self.num_filters, self.num_filters,
                                self.ratios[3], padding=self.ratios[3])
        self.block5 = PNetBlock(self.num_filters, self.num_filters,
                                self.ratios[4], padding=self.ratios[4])
        self.catblock = ConcatBlock(self.num_filters * 5, self.num_filters * 2)
        self.out = OutPutBlock(self.num_filters * 2, self.out_chns)

    def forward(self, x):
        # run the blocks sequentially, keeping every intermediate output
        features = []
        current = x
        for block in (self.block1, self.block2, self.block3,
                      self.block4, self.block5):
            current = block(current)
            features.append(current)

        fused = self.catblock(torch.cat(features, dim=1))
        return self.out(fused)
| 4,200 | 33.154472 | 202 | py |
SSL4MIS | SSL4MIS-master/code/networks/grid_attention_layer.py | import torch
from torch import nn
from torch.nn import functional as F
from networks.networks_other import init_weights
class _GridAttentionBlockND(nn.Module):
    """Grid attention gate (Attention U-Net style) for 2D/3D feature maps.

    Computes an attention map from the input feature map ``x`` and a
    coarser gating signal ``g``, and returns the attention-gated features
    together with the attention map itself.

    Fixes vs. the original: ``raise NotImplemented`` (which raises a
    TypeError, not the intended exception) is now ``NotImplementedError``;
    deprecated ``F.upsample``/``F.sigmoid`` are replaced by their exact
    equivalents ``F.interpolate``/``torch.sigmoid``.
    """

    def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',
                 sub_sample_factor=(2, 2, 2)):
        super(_GridAttentionBlockND, self).__init__()

        assert dimension in [2, 3]
        assert mode in ['concatenation', 'concatenation_debug', 'concatenation_residual']

        # Downsampling rate for the input featuremap
        if isinstance(sub_sample_factor, tuple):
            self.sub_sample_factor = sub_sample_factor
        elif isinstance(sub_sample_factor, list):
            self.sub_sample_factor = tuple(sub_sample_factor)
        else:
            self.sub_sample_factor = tuple([sub_sample_factor]) * dimension

        # Default parameter set
        self.mode = mode
        self.dimension = dimension
        # theta uses the sub-sample factor both as kernel size and stride
        self.sub_sample_kernel_size = self.sub_sample_factor

        # Number of channels (pixel dimensions)
        self.in_channels = in_channels
        self.gating_channels = gating_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        if dimension == 3:
            conv_nd = nn.Conv3d
            bn = nn.BatchNorm3d
            self.upsample_mode = 'trilinear'
        elif dimension == 2:
            conv_nd = nn.Conv2d
            bn = nn.BatchNorm2d
            self.upsample_mode = 'bilinear'
        else:
            # unreachable given the assert above; kept as a guard
            raise NotImplementedError

        # Output transform
        self.W = nn.Sequential(
            conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),
            bn(self.in_channels),
        )

        # Theta^T * x_ij + Phi^T * gating_signal + bias
        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                             kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)
        self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,
                           kernel_size=1, stride=1, padding=0, bias=True)
        self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)

        # Initialise weights
        for m in self.children():
            init_weights(m, init_type='kaiming')

        # Select the attention normalisation variant
        if mode == 'concatenation':
            self.operation_function = self._concatenation
        elif mode == 'concatenation_debug':
            self.operation_function = self._concatenation_debug
        elif mode == 'concatenation_residual':
            self.operation_function = self._concatenation_residual
        else:
            raise NotImplementedError('Unknown operation function.')

    def forward(self, x, g):
        '''
        :param x: input feature map, (b, c, t, h, w) in 3D / (b, c, h, w) in 2D
        :param g: gating signal from a coarser scale
        :return: (gated features, attention map)
        '''
        output = self.operation_function(x, g)
        return output

    def _concatenation(self, x, g):
        """Additive attention with sigmoid normalisation (default mode)."""
        input_size = x.size()
        batch_size = input_size[0]
        assert batch_size == g.size(0)

        # theta => (b, c, t, h, w) -> (b, i_c, t/s1, h/s2, w/s3)
        theta_x = self.theta(x)
        theta_x_size = theta_x.size()

        # g (b, c, t', h', w') -> phi_g (b, i_c, t/s1, h/s2, w/s3)
        # relu(theta_x + phi_g + bias); F.interpolate replaces deprecated
        # F.upsample with identical semantics
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.relu(theta_x + phi_g, inplace=True)

        # psi^T * f -> (b, 1, t/s1, h/s2, w/s3); torch.sigmoid replaces
        # the deprecated F.sigmoid
        sigm_psi_f = torch.sigmoid(self.psi(f))

        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)

        return W_y, sigm_psi_f

    def _concatenation_debug(self, x, g):
        """Softplus variant of :meth:`_concatenation` (debugging mode)."""
        input_size = x.size()
        batch_size = input_size[0]
        assert batch_size == g.size(0)

        theta_x = self.theta(x)
        theta_x_size = theta_x.size()

        # softplus(theta_x + phi_g + bias) instead of relu
        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.softplus(theta_x + phi_g)

        sigm_psi_f = torch.sigmoid(self.psi(f))

        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)

        return W_y, sigm_psi_f

    def _concatenation_residual(self, x, g):
        """Softmax-normalised variant: attention sums to 1 over positions."""
        input_size = x.size()
        batch_size = input_size[0]
        assert batch_size == g.size(0)

        theta_x = self.theta(x)
        theta_x_size = theta_x.size()

        phi_g = F.interpolate(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)
        f = F.relu(theta_x + phi_g, inplace=True)

        # softmax over all spatial positions
        f = self.psi(f).view(batch_size, 1, -1)
        sigm_psi_f = F.softmax(f, dim=2).view(batch_size, 1, *theta_x.size()[2:])

        # upsample the attentions and multiply
        sigm_psi_f = F.interpolate(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)

        return W_y, sigm_psi_f
class GridAttentionBlock2D(_GridAttentionBlockND):
    """2D grid attention gate.

    The default ``sub_sample_factor`` is now two-dimensional: the original
    (2, 2, 2) default was forwarded unchanged as a 3-element kernel
    size/stride to ``nn.Conv2d`` and could never construct successfully.
    Callers passing an explicit factor are unaffected.
    """

    def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
                 sub_sample_factor=(2, 2)):
        super(GridAttentionBlock2D, self).__init__(in_channels,
                                                   inter_channels=inter_channels,
                                                   gating_channels=gating_channels,
                                                   dimension=2, mode=mode,
                                                   sub_sample_factor=sub_sample_factor,
                                                   )
class GridAttentionBlock3D(_GridAttentionBlockND):
    """3D grid attention gate: thin wrapper fixing ``dimension=3``."""

    def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
                 sub_sample_factor=(2, 2, 2)):
        super(GridAttentionBlock3D, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            gating_channels=gating_channels,
            dimension=3,
            mode=mode,
            sub_sample_factor=sub_sample_factor,
        )
class _GridAttentionBlockND_TORR(nn.Module):
    """Grid attention gate variant with switchable sub-modules and several
    attention-normalisation modes.

    Unlike ``_GridAttentionBlockND``, each transform (W, theta, phi, psi,
    non-linearity) can be disabled via a flag, in which case it defaults
    to the identity function. ``mode`` selects how the raw compatibility
    scores are normalised into an attention map.

    NOTE(review): uses deprecated APIs (``F.upsample``, ``F.sigmoid``,
    ``nn.init.constant``, ``raise NotImplemented``); consider migrating to
    ``F.interpolate`` / ``torch.sigmoid`` / ``nn.init.constant_`` when
    updating PyTorch.
    """

    def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',
                 sub_sample_factor=(1,1,1), bn_layer=True, use_W=True, use_phi=True, use_theta=True, use_psi=True, nonlinearity1='relu'):
        super(_GridAttentionBlockND_TORR, self).__init__()

        assert dimension in [2, 3]
        assert mode in ['concatenation', 'concatenation_softmax',
                        'concatenation_sigmoid', 'concatenation_mean',
                        'concatenation_range_normalise', 'concatenation_mean_flow']

        # Default parameter set
        self.mode = mode
        self.dimension = dimension
        # scalar factors are broadcast to one entry per spatial dimension
        self.sub_sample_factor = sub_sample_factor if isinstance(sub_sample_factor, tuple) else tuple([sub_sample_factor])*dimension
        self.sub_sample_kernel_size = self.sub_sample_factor

        # Number of channels (pixel dimensions)
        self.in_channels = in_channels
        self.gating_channels = gating_channels
        self.inter_channels = inter_channels

        if self.inter_channels is None:
            self.inter_channels = in_channels // 2
            if self.inter_channels == 0:
                self.inter_channels = 1

        if dimension == 3:
            conv_nd = nn.Conv3d
            bn = nn.BatchNorm3d
            self.upsample_mode = 'trilinear'
        elif dimension == 2:
            conv_nd = nn.Conv2d
            bn = nn.BatchNorm2d
            self.upsample_mode = 'bilinear'
        else:
            # unreachable given the assert above
            # NOTE(review): raising NotImplemented (the constant) produces
            # a TypeError; NotImplementedError was intended
            raise NotImplemented

        # initialise id functions: any disabled transform stays an identity
        # Theta^T * x_ij + Phi^T * gating_signal + bias
        self.W = lambda x: x
        self.theta = lambda x: x
        self.psi = lambda x: x
        self.phi = lambda x: x
        self.nl1 = lambda x: x

        if use_W:
            if bn_layer:
                self.W = nn.Sequential(
                    conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),
                    bn(self.in_channels),
                )
            else:
                self.W = conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)

        if use_theta:
            self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
                                 kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)

        if use_phi:
            self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,
                               kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)

        if use_psi:
            self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)

        if nonlinearity1:
            if nonlinearity1 == 'relu':
                self.nl1 = lambda x: F.relu(x, inplace=True)

        if 'concatenation' in mode:
            self.operation_function = self._concatenation
        else:
            raise NotImplementedError('Unknown operation function.')

        # Initialise weights
        for m in self.children():
            init_weights(m, init_type='kaiming')

        # bias the psi output so initial attention starts near-saturated
        # NOTE(review): nn.init.constant is deprecated (use constant_)
        if use_psi and self.mode == 'concatenation_sigmoid':
            nn.init.constant(self.psi.bias.data, 3.0)
        if use_psi and self.mode == 'concatenation_softmax':
            nn.init.constant(self.psi.bias.data, 10.0)

        # if use_psi and self.mode == 'concatenation_mean':
        #     nn.init.constant(self.psi.bias.data, 3.0)

        # if use_psi and self.mode == 'concatenation_range_normalise':
        #     nn.init.constant(self.psi.bias.data, 3.0)

        # hard-disabled DataParallel wrapping, kept for reference
        parallel = False
        if parallel:
            if use_W: self.W = nn.DataParallel(self.W)
            if use_phi: self.phi = nn.DataParallel(self.phi)
            if use_psi: self.psi = nn.DataParallel(self.psi)
            if use_theta: self.theta = nn.DataParallel(self.theta)

    def forward(self, x, g):
        '''
        :param x: input feature map (b, c, t, h, w)
        :param g: gating signal from a coarser scale
        :return: (gated features, attention map)
        '''
        output = self.operation_function(x, g)
        return output

    def _concatenation(self, x, g):
        """Compute compatibility scores and normalise them per ``self.mode``."""
        input_size = x.size()
        batch_size = input_size[0]
        assert batch_size == g.size(0)

        #############################
        # compute compatibility score

        # theta => (b, c, t, h, w) -> (b, i_c, t, h, w)
        # phi   => (b, c, t, h, w) -> (b, i_c, t, h, w)
        theta_x = self.theta(x)
        theta_x_size = theta_x.size()

        #  nl(theta.x + phi.g + bias) -> f = (b, i_c, t/s1, h/s2, w/s3)
        phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)

        f = theta_x + phi_g
        f = self.nl1(f)

        psi_f = self.psi(f)

        ############################################
        # normalisation -- scale compatibility score
        #  psi^T . f -> (b, 1, t/s1, h/s2, w/s3)
        if self.mode == 'concatenation_softmax':
            # softmax over all spatial positions
            sigm_psi_f = F.softmax(psi_f.view(batch_size, 1, -1), dim=2)
            sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
        elif self.mode == 'concatenation_mean':
            # normalise by the sum of scores (no clamping; can divide by 0)
            psi_f_flat = psi_f.view(batch_size, 1, -1)
            psi_f_sum = torch.sum(psi_f_flat, dim=2)#clamp(1e-6)
            psi_f_sum = psi_f_sum[:,:,None].expand_as(psi_f_flat)

            sigm_psi_f = psi_f_flat / psi_f_sum
            sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
        elif self.mode == 'concatenation_mean_flow':
            # shift scores so the minimum is 0, then normalise by the sum
            psi_f_flat = psi_f.view(batch_size, 1, -1)
            ss = psi_f_flat.shape
            psi_f_min = psi_f_flat.min(dim=2)[0].view(ss[0],ss[1],1)
            psi_f_flat = psi_f_flat - psi_f_min
            psi_f_sum = torch.sum(psi_f_flat, dim=2).view(ss[0],ss[1],1).expand_as(psi_f_flat)

            sigm_psi_f = psi_f_flat / psi_f_sum
            sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])
        elif self.mode == 'concatenation_range_normalise':
            # min-max rescaling of the scores into [0, 1]
            psi_f_flat = psi_f.view(batch_size, 1, -1)
            ss = psi_f_flat.shape
            psi_f_max = torch.max(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)
            psi_f_min = torch.min(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)

            sigm_psi_f = (psi_f_flat - psi_f_min) / (psi_f_max - psi_f_min).expand_as(psi_f_flat)
            sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])

        elif self.mode == 'concatenation_sigmoid':
            # NOTE(review): F.sigmoid is deprecated (torch.sigmoid)
            sigm_psi_f = F.sigmoid(psi_f)
        else:
            raise NotImplementedError

        # sigm_psi_f is attention map! upsample the attentions and multiply
        sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)
        y = sigm_psi_f.expand_as(x) * x
        W_y = self.W(y)

        return W_y, sigm_psi_f
class GridAttentionBlock2D_TORR(_GridAttentionBlockND_TORR):
    """2D TORR attention gate: thin wrapper fixing ``dimension=2``."""

    def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
                 sub_sample_factor=(1,1), bn_layer=True,
                 use_W=True, use_phi=True, use_theta=True, use_psi=True,
                 nonlinearity1='relu'):
        super(GridAttentionBlock2D_TORR, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            gating_channels=gating_channels,
            dimension=2,
            mode=mode,
            sub_sample_factor=sub_sample_factor,
            bn_layer=bn_layer,
            use_W=use_W,
            use_phi=use_phi,
            use_theta=use_theta,
            use_psi=use_psi,
            nonlinearity1=nonlinearity1,
        )
class GridAttentionBlock3D_TORR(_GridAttentionBlockND_TORR):
    """3D TORR attention gate: thin wrapper fixing ``dimension=3``."""

    def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',
                 sub_sample_factor=(1,1,1), bn_layer=True):
        super(GridAttentionBlock3D_TORR, self).__init__(
            in_channels,
            inter_channels=inter_channels,
            gating_channels=gating_channels,
            dimension=3,
            mode=mode,
            sub_sample_factor=sub_sample_factor,
            bn_layer=bn_layer,
        )
if __name__ == '__main__':
    # Smoke test: run one 3D attention gate on random tensors.
    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4, so plain tensors are used directly.
    mode_list = ['concatenation']

    for mode in mode_list:
        img = torch.rand(2, 16, 10, 10, 10)
        gat = torch.rand(2, 64, 4, 4, 4)
        net = GridAttentionBlock3D(in_channels=16, inter_channels=16,
                                   gating_channels=64, mode=mode,
                                   sub_sample_factor=(2, 2, 2))
        out, sigma = net(img, gat)
        print(out.size())
| 16,619 | 40.446384 | 137 | py |
SSL4MIS | SSL4MIS-master/code/networks/attention_unet.py | import torch.nn as nn
import torch
from networks.utils import UnetConv3, UnetUp3_CT, UnetGridGatingSignal3, UnetDsv3
import torch.nn.functional as F
from networks.networks_other import init_weights
from networks.grid_attention_layer import GridAttentionBlock3D
class Attention_UNet(nn.Module):
    """3D attention U-Net with grid-attention gates and deep supervision.

    A four-level 3D U-Net: skip connections at levels 2-4 are filtered through
    attention gates (driven by a gating signal derived from the bottleneck)
    before decoding; the outputs of all decoder levels are upsampled to full
    resolution and fused by a final 1x1x1 convolution (deep supervision).
    """

    def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3,
                 nonlocal_mode='concatenation', attention_dsample=(2,2,2), is_batchnorm=True):
        """Build encoder, gating signal, attention blocks and decoder.

        Args:
            feature_scale: divisor applied to the base widths [64,128,256,512,1024].
            n_classes: number of output segmentation classes.
            is_deconv: stored on the instance but not referenced again in this class.
            in_channels: channels of the input volume.
            nonlocal_mode: attention mode forwarded to GridAttentionBlock3D.
            attention_dsample: sub-sampling factor inside the attention gates.
            is_batchnorm: whether conv blocks include normalisation layers.
        """
        super(Attention_UNet, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale

        # Channel widths shrink by feature_scale (e.g. scale 4 -> [16,32,64,128,256]).
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # downsampling path: conv block + 2x2x2 max-pool per level
        self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        # bottleneck and 1x1x1 gating signal for the attention blocks
        self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.gating = UnetGridGatingSignal3(filters[4], filters[4], kernel_size=(1, 1, 1), is_batchnorm=self.is_batchnorm)

        # attention blocks filtering the skip connections of levels 2-4
        self.attentionblock2 = MultiAttentionBlock(in_size=filters[1], gate_size=filters[2], inter_size=filters[1],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)
        self.attentionblock3 = MultiAttentionBlock(in_size=filters[2], gate_size=filters[3], inter_size=filters[2],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)
        self.attentionblock4 = MultiAttentionBlock(in_size=filters[3], gate_size=filters[4], inter_size=filters[3],
                                                   nonlocal_mode=nonlocal_mode, sub_sample_factor= attention_dsample)

        # upsampling path (concat-then-conv blocks)
        self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
        self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
        self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
        self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)

        # deep supervision heads: project to n_classes and upsample to full size
        self.dsv4 = UnetDsv3(in_size=filters[3], out_size=n_classes, scale_factor=8)
        self.dsv3 = UnetDsv3(in_size=filters[2], out_size=n_classes, scale_factor=4)
        self.dsv2 = UnetDsv3(in_size=filters[1], out_size=n_classes, scale_factor=2)
        self.dsv1 = nn.Conv3d(in_channels=filters[0], out_channels=n_classes, kernel_size=1)

        # final conv fusing the four supervision maps (without any concat of features)
        self.final = nn.Conv3d(n_classes*4, n_classes, 1)

        # initialise weights
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm3d):
                init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Return per-voxel class logits of shape (N, n_classes, D, H, W)."""
        # Feature Extraction
        conv1 = self.conv1(inputs)
        maxpool1 = self.maxpool1(conv1)

        conv2 = self.conv2(maxpool1)
        maxpool2 = self.maxpool2(conv2)

        conv3 = self.conv3(maxpool2)
        maxpool3 = self.maxpool3(conv3)

        conv4 = self.conv4(maxpool3)
        maxpool4 = self.maxpool4(conv4)

        # Gating Signal Generation
        center = self.center(maxpool4)
        gating = self.gating(center)

        # Attention Mechanism
        # Upscaling Part (Decoder)
        # Level 4 is gated by the bottleneck signal; levels 3/2 are gated by
        # the decoder feature map of the level above.
        g_conv4, att4 = self.attentionblock4(conv4, gating)
        up4 = self.up_concat4(g_conv4, center)
        g_conv3, att3 = self.attentionblock3(conv3, up4)
        up3 = self.up_concat3(g_conv3, up4)
        g_conv2, att2 = self.attentionblock2(conv2, up3)
        up2 = self.up_concat2(g_conv2, up3)
        up1 = self.up_concat1(conv1, up2)

        # Deep Supervision: bring every decoder level to full resolution, then fuse
        dsv4 = self.dsv4(up4)
        dsv3 = self.dsv3(up3)
        dsv2 = self.dsv2(up2)
        dsv1 = self.dsv1(up1)
        final = self.final(torch.cat([dsv1,dsv2,dsv3,dsv4], dim=1))

        return final

    @staticmethod
    def apply_argmax_softmax(pred):
        # NOTE(review): despite the method name and the variable name 'log_p',
        # this returns plain softmax probabilities (F.softmax), not log-probs.
        log_p = F.softmax(pred, dim=1)

        return log_p
class MultiAttentionBlock(nn.Module):
    """Two parallel 3D grid-attention gates whose gated outputs are fused.

    Both gates see the same (input, gating_signal) pair; their outputs are
    concatenated channel-wise and fused by a 1x1x1 conv + norm + ReLU. The
    two attention maps are returned concatenated for inspection.
    """

    def __init__(self, in_size, gate_size, inter_size, nonlocal_mode, sub_sample_factor):
        super(MultiAttentionBlock, self).__init__()
        self.gate_block_1 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
                                                 inter_channels=inter_size, mode=nonlocal_mode,
                                                 sub_sample_factor= sub_sample_factor)
        self.gate_block_2 = GridAttentionBlock3D(in_channels=in_size, gating_channels=gate_size,
                                                 inter_channels=inter_size, mode=nonlocal_mode,
                                                 sub_sample_factor=sub_sample_factor)
        # Fuse the two gated feature maps (2*in_size channels) back to in_size.
        self.combine_gates = nn.Sequential(nn.Conv3d(in_size*2, in_size, kernel_size=1, stride=1, padding=0),
                                           nn.BatchNorm3d(in_size),
                                           nn.ReLU(inplace=True)
                                           )

        # initialise the blocks (the attention gates initialise themselves)
        for m in self.children():
            if m.__class__.__name__.find('GridAttentionBlock3D') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, input, gating_signal):
        """Return (fused gated features, concatenated attention maps)."""
        gate_1, attention_1 = self.gate_block_1(input, gating_signal)
        gate_2, attention_2 = self.gate_block_2(input, gating_signal)

        return self.combine_gates(torch.cat([gate_1, gate_2], 1)), torch.cat([attention_1, attention_2], 1)
SSL4MIS | SSL4MIS-master/code/networks/discriminator.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class FC3DDiscriminator(nn.Module):
    """3D discriminator over (segmentation map, image) pairs.

    The class-probability map and the raw image are embedded by separate
    stride-2 convolutions, summed, pushed through three further stride-2
    convolutions, average-pooled, and classified into 2 logits per sample.
    """

    def __init__(self, num_classes, ndf=64, n_channel=1):
        super(FC3DDiscriminator, self).__init__()
        # downsample 16
        self.conv0 = nn.Conv3d(
            num_classes, ndf, kernel_size=4, stride=2, padding=1)
        self.conv1 = nn.Conv3d(
            n_channel, ndf, kernel_size=4, stride=2, padding=1)

        self.conv2 = nn.Conv3d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
        self.conv3 = nn.Conv3d(
            ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
        self.conv4 = nn.Conv3d(
            ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
        # The Linear below expects exactly ndf*8 features, so this pool must
        # reduce the volume to 1x1x1 — i.e. the feature map entering it is
        # assumed to be 6x6x6 (input volumes of 192^3) — TODO confirm.
        self.avgpool = nn.AvgPool3d((6, 6, 6))  # (D/16, W/16, H/16)
        self.classifier = nn.Linear(ndf*8, 2)

        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.dropout = nn.Dropout3d(0.5)
        # NOTE(review): constructed without an explicit dim (deprecated) and
        # never used — the softmax call in forward() is commented out.
        self.Softmax = nn.Softmax()

    def forward(self, map, image):
        """Return raw 2-way logits of shape (batch, 2); no softmax applied."""
        batch_size = map.shape[0]
        map_feature = self.conv0(map)
        image_feature = self.conv1(image)
        # Fuse the two embeddings by element-wise addition.
        x = torch.add(map_feature, image_feature)
        x = self.leaky_relu(x)
        x = self.dropout(x)

        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.dropout(x)

        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.dropout(x)

        x = self.conv4(x)
        x = self.leaky_relu(x)

        x = self.avgpool(x)

        x = x.view(batch_size, -1)

        x = self.classifier(x)
        x = x.reshape((batch_size, 2))
        # x = self.Softmax(x)

        return x
class FCDiscriminator(nn.Module):
    """2D discriminator over (segmentation map, feature/image) pairs."""

    def __init__(self, num_classes, ndf=64, n_channel=1):
        super(FCDiscriminator, self).__init__()
        self.conv0 = nn.Conv2d(
            num_classes, ndf, kernel_size=4, stride=2, padding=1)
        self.conv1 = nn.Conv2d(
            n_channel, ndf, kernel_size=4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1)
        self.conv3 = nn.Conv2d(
            ndf*2, ndf*4, kernel_size=4, stride=2, padding=1)
        self.conv4 = nn.Conv2d(
            ndf*4, ndf*8, kernel_size=4, stride=2, padding=1)
        # ndf*32 = ndf*8 channels x 2x2 spatial — assumes the flattened
        # avgpool output is 2x2 spatially for the expected input size — TODO confirm.
        self.classifier = nn.Linear(ndf*32, 2)
        self.avgpool = nn.AvgPool2d((7, 7))
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.dropout = nn.Dropout2d(0.5)
        # self.up_sample = nn.Upsample(scale_factor=32, mode='bilinear')
        # self.sigmoid = nn.Sigmoid()

    def forward(self, map, feature):
        """Return raw 2-way logits; no sigmoid/upsampling applied (commented out)."""
        map_feature = self.conv0(map)
        image_feature = self.conv1(feature)
        x = torch.add(map_feature, image_feature)
        # NOTE(review): unlike FC3DDiscriminator.forward, no activation or
        # dropout is applied between the fused features and conv2 — confirm
        # this asymmetry is intentional.

        x = self.conv2(x)
        x = self.leaky_relu(x)
        x = self.dropout(x)

        x = self.conv3(x)
        x = self.leaky_relu(x)
        x = self.dropout(x)

        x = self.conv4(x)
        x = self.leaky_relu(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        # x = self.up_sample(x)
        # x = self.sigmoid(x)

        return x
| 3,133 | 30.029703 | 78 | py |
SSL4MIS | SSL4MIS-master/code/networks/encoder_tool.py | from typing import List
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import get_model_params, url_map
class EncoderMixin:
    """Mixin adding encoder conveniences:

    - exposes the per-stage output channel counts of the encoder features
    - patches the first convolution so the encoder accepts an arbitrary
      number of input channels
    """

    @property
    def out_channels(self) -> List:
        """Channel counts of the feature tensors, truncated to the active depth."""
        depth = self._depth
        return self._out_channels[:depth + 1]

    def set_in_channels(self, in_channels):
        """Re-wire the first convolution for ``in_channels`` input planes."""
        if in_channels == 3:
            # Encoders are already built for RGB input; nothing to change.
            return

        self._in_channels = in_channels
        if self._out_channels[0] == 3:
            # Keep the bookkeeping tuple in sync with the new input width.
            updated = [in_channels] + list(self._out_channels)[1:]
            self._out_channels = tuple(updated)

        patch_first_conv(model=self, in_channels=in_channels)
def patch_first_conv(model, in_channels):
    """Change first convolution layer input channels.

    In case:
        in_channels == 1 or in_channels == 2 -> reuse original weights
        in_channels > 3 -> make random kaiming normal initialization
    """
    # Locate the first Conv2d in module order; it is the layer to patch.
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            break

    module.in_channels = in_channels
    weight = module.weight.detach()
    needs_reset = False

    if in_channels == 1:
        # Grayscale: collapse the RGB filters into a single channel.
        weight = weight.sum(1, keepdim=True)
    elif in_channels == 2:
        # Two channels: keep the first two RGB filters, rescaled so the
        # expected activation magnitude is preserved.
        weight = weight[:, :2] * (3.0 / 2.0)
    else:
        # Any other width: allocate a fresh tensor and re-initialise below.
        needs_reset = True
        weight = torch.Tensor(
            module.out_channels,
            module.in_channels // module.groups,
            *module.kernel_size
        )

    module.weight = nn.parameter.Parameter(weight)
    if needs_reset:
        module.reset_parameters()
class EfficientNetEncoder(EfficientNet, EncoderMixin):
    """EfficientNet backbone adapted to emit multi-scale encoder features.

    The classification head is removed and ``forward`` returns one feature
    map per encoder stage up to ``self._depth`` (at most 6 tensors, the
    first being the raw input).
    """

    def __init__(self, stage_idxs, out_channels, model_name, depth=5):
        blocks_args, global_params = get_model_params(model_name, override_params=None)
        super().__init__(blocks_args, global_params)

        # Indices of the blocks after which a skip feature is captured; the
        # final block index is appended so the last stage is always emitted.
        self._stage_idxs = list(stage_idxs) + [len(self._blocks)]
        self._out_channels = out_channels
        self._depth = depth
        self._in_channels = 3

        # The classifier head is never used for feature extraction.
        del self._fc

    def forward(self, x):
        """Return [input, stem, stage1, ...] feature maps up to self._depth."""
        features = [x]

        if self._depth > 0:
            x = self._swish(self._bn0(self._conv_stem(x)))
            features.append(x)

        if self._depth > 1:
            skip_connection_idx = 0
            for idx, block in enumerate(self._blocks):
                # Scale drop-connect linearly with block depth, as in the
                # reference EfficientNet implementation.
                drop_connect_rate = self._global_params.drop_connect_rate
                if drop_connect_rate:
                    drop_connect_rate *= float(idx) / len(self._blocks)
                x = block(x, drop_connect_rate=drop_connect_rate)

                if idx == self._stage_idxs[skip_connection_idx] - 1:
                    skip_connection_idx += 1
                    features.append(x)
                    # Stop early once enough stages have been collected.
                    if skip_connection_idx + 1 == self._depth:
                        break

        return features

    def load_state_dict(self, state_dict, **kwargs):
        # Pretrained checkpoints include the classifier we deleted; drop it.
        state_dict.pop("_fc.bias")
        state_dict.pop("_fc.weight")
        super().load_state_dict(state_dict, **kwargs)
def _get_pretrained_settings(encoder):
    """Build the ImageNet pretrained-weights metadata for an encoder name."""
    imagenet = {
        "mean": [0.485, 0.456, 0.406],
        "std": [0.229, 0.224, 0.225],
        "url": url_map[encoder],
        "input_space": "RGB",
        "input_range": [0, 1],
    }
    return {"imagenet": imagenet}
# Registry mapping encoder names to their class, pretrained-weight metadata,
# and constructor parameters (per-stage output channels and the block indices
# at which skip features are captured).
efficient_net_encoders = {
    "efficientnet-b0": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b0"),
        "params": {
            "out_channels": (3, 32, 24, 40, 112, 320),
            "stage_idxs": (3, 5, 9),
            "model_name": "efficientnet-b0",
        },
    },
    "efficientnet-b1": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b1"),
        "params": {
            "out_channels": (3, 32, 24, 40, 112, 320),
            "stage_idxs": (5, 8, 16),
            "model_name": "efficientnet-b1",
        },
    },
    "efficientnet-b2": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b2"),
        "params": {
            "out_channels": (3, 32, 24, 48, 120, 352),
            "stage_idxs": (5, 8, 16),
            "model_name": "efficientnet-b2",
        },
    },
    "efficientnet-b3": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b3"),
        "params": {
            "out_channels": (3, 40, 32, 48, 136, 384),
            "stage_idxs": (5, 8, 18),
            "model_name": "efficientnet-b3",
        },
    },
    "efficientnet-b4": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b4"),
        "params": {
            "out_channels": (3, 48, 32, 56, 160, 448),
            "stage_idxs": (6, 10, 22),
            "model_name": "efficientnet-b4",
        },
    },
    "efficientnet-b5": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b5"),
        "params": {
            "out_channels": (3, 48, 40, 64, 176, 512),
            "stage_idxs": (8, 13, 27),
            "model_name": "efficientnet-b5",
        },
    },
    "efficientnet-b6": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b6"),
        "params": {
            "out_channels": (3, 56, 40, 72, 200, 576),
            "stage_idxs": (9, 15, 31),
            "model_name": "efficientnet-b6",
        },
    },
    "efficientnet-b7": {
        "encoder": EfficientNetEncoder,
        "pretrained_settings": _get_pretrained_settings("efficientnet-b7"),
        "params": {
            "out_channels": (3, 64, 48, 80, 224, 640),
            "stage_idxs": (11, 18, 38),
            "model_name": "efficientnet-b7",
        },
    },
}

# Global registry consumed by get_encoder(); currently only EfficientNets.
encoders = {}
encoders.update(efficient_net_encoders)
def get_encoder(name, in_channels=3, depth=5, weights=None):
    """Instantiate an encoder by name, optionally loading pretrained weights.

    Args:
        name: key into the ``encoders`` registry (e.g. ``"efficientnet-b0"``).
        in_channels: number of input channels; the first conv is patched if
            this differs from 3.
        depth: number of encoder stages to keep.
        weights: pretrained-settings key (e.g. ``"imagenet"``) or None.

    Returns:
        The constructed encoder module.
    """
    Encoder = encoders[name]["encoder"]
    # Copy before updating: the original code mutated the shared registry
    # entry in place, so a previous call's ``depth`` leaked into later calls.
    params = dict(encoders[name]["params"])
    params.update(depth=depth)
    encoder = Encoder(**params)

    if weights is not None:
        settings = encoders[name]["pretrained_settings"][weights]
        encoder.load_state_dict(model_zoo.load_url(settings["url"]))

    encoder.set_in_channels(in_channels)

    return encoder
| 6,765 | 30.765258 | 87 | py |
SSL4MIS | SSL4MIS-master/code/networks/utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.networks_other import init_weights
class conv2DBatchNorm(nn.Module):
    """Conv2d followed by BatchNorm2d (no activation)."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(conv2DBatchNorm, self).__init__()

        conv = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                         padding=padding, stride=stride, bias=bias)
        norm = nn.BatchNorm2d(int(n_filters))
        self.cb_unit = nn.Sequential(conv, norm)

    def forward(self, inputs):
        """Apply conv + batch-norm to ``inputs``."""
        return self.cb_unit(inputs)
class deconv2DBatchNorm(nn.Module):
    """ConvTranspose2d followed by BatchNorm2d (no activation)."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchNorm, self).__init__()

        deconv = nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                    padding=padding, stride=stride, bias=bias)
        norm = nn.BatchNorm2d(int(n_filters))
        self.dcb_unit = nn.Sequential(deconv, norm)

    def forward(self, inputs):
        """Apply transposed conv + batch-norm to ``inputs``."""
        return self.dcb_unit(inputs)
class conv2DBatchNormRelu(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(conv2DBatchNormRelu, self).__init__()

        conv = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size,
                         padding=padding, stride=stride, bias=bias)
        norm = nn.BatchNorm2d(int(n_filters))
        act = nn.ReLU(inplace=True)
        self.cbr_unit = nn.Sequential(conv, norm, act)

    def forward(self, inputs):
        """Apply conv + batch-norm + ReLU to ``inputs``."""
        return self.cbr_unit(inputs)
class deconv2DBatchNormRelu(nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(deconv2DBatchNormRelu, self).__init__()

        deconv = nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                                    padding=padding, stride=stride, bias=bias)
        norm = nn.BatchNorm2d(int(n_filters))
        act = nn.ReLU(inplace=True)
        self.dcbr_unit = nn.Sequential(deconv, norm, act)

    def forward(self, inputs):
        """Apply transposed conv + batch-norm + ReLU to ``inputs``."""
        return self.dcbr_unit(inputs)
class unetConv2(nn.Module):
    """Stack of ``n`` Conv2d(+BatchNorm)+ReLU stages registered as conv1..convn."""

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        super(unetConv2, self).__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        s = stride
        p = padding
        if is_batchnorm:
            for i in range(1, n+1):
                conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
                                     nn.BatchNorm2d(out_size),
                                     nn.ReLU(inplace=True),)
                # Register each stage as an attribute so the state_dict keys
                # are conv1..convn and forward() can fetch them by name.
                setattr(self, 'conv%d'%i, conv)
                in_size = out_size

        else:
            for i in range(1, n+1):
                conv = nn.Sequential(nn.Conv2d(in_size, out_size, ks, s, p),
                                     nn.ReLU(inplace=True),)
                setattr(self, 'conv%d'%i, conv)
                in_size = out_size

        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Apply the conv stages in order conv1 .. convn."""
        x = inputs
        for i in range(1, self.n+1):
            conv = getattr(self, 'conv%d'%i)
            x = conv(x)
        return x
class UnetConv3(nn.Module):
    """Two stacked 3D conv stages, each Conv3d(+InstanceNorm3d)+ReLU.

    Note: despite the ``is_batchnorm`` flag name, the normalisation layer
    used is InstanceNorm3d.
    """

    def __init__(self, in_size, out_size, is_batchnorm, kernel_size=(3,3,1), padding_size=(1,1,0), init_stride=(1,1,1)):
        super(UnetConv3, self).__init__()

        def make_stage(cin, stride):
            # Build one conv stage; the norm layer is optional.
            layers = [nn.Conv3d(cin, out_size, kernel_size, stride, padding_size)]
            if is_batchnorm:
                layers.append(nn.InstanceNorm3d(out_size))
            layers.append(nn.ReLU(inplace=True))
            return nn.Sequential(*layers)

        # The first stage may downsample via init_stride; the second is stride 1.
        self.conv1 = make_stage(in_size, init_stride)
        self.conv2 = make_stage(out_size, 1)

        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Run both conv stages in sequence."""
        return self.conv2(self.conv1(inputs))
class FCNConv3(nn.Module):
    """Three stacked 3D conv stages (UnetConv3 with one extra conv).

    Each stage is Conv3d(+InstanceNorm3d)+ReLU; only the first stage may
    downsample via ``init_stride``.
    """

    def __init__(self, in_size, out_size, is_batchnorm, kernel_size=(3,3,1), padding_size=(1,1,0), init_stride=(1,1,1)):
        super(FCNConv3, self).__init__()
        if is_batchnorm:
            self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
                                       nn.InstanceNorm3d(out_size),
                                       nn.ReLU(inplace=True),)
            self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                                       nn.InstanceNorm3d(out_size),
                                       nn.ReLU(inplace=True),)
            self.conv3 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                                       nn.InstanceNorm3d(out_size),
                                       nn.ReLU(inplace=True),)
        else:
            self.conv1 = nn.Sequential(nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
                                       nn.ReLU(inplace=True),)
            self.conv2 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                                       nn.ReLU(inplace=True),)
            self.conv3 = nn.Sequential(nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                                       nn.ReLU(inplace=True),)

        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Run the three conv stages in sequence."""
        outputs = self.conv1(inputs)
        outputs = self.conv2(outputs)
        outputs = self.conv3(outputs)
        return outputs
class UnetGatingSignal3(nn.Module):
    """Gating-signal head: halve the channels, pool to a fixed 4x4x4 map,
    then project the flattened features to ``out_size`` with a Linear layer."""

    def __init__(self, in_size, out_size, is_batchnorm):
        super(UnetGatingSignal3, self).__init__()
        # Fixed spatial size after adaptive pooling; determines fc1's input width.
        self.fmap_size = (4, 4, 4)

        if is_batchnorm:
            self.conv1 = nn.Sequential(nn.Conv3d(in_size, in_size//2, (1,1,1), (1,1,1), (0,0,0)),
                                       nn.InstanceNorm3d(in_size//2),
                                       nn.ReLU(inplace=True),
                                       nn.AdaptiveAvgPool3d(output_size=self.fmap_size),
                                       )
            self.fc1 = nn.Linear(in_features=(in_size//2) * self.fmap_size[0] * self.fmap_size[1] * self.fmap_size[2],
                                 out_features=out_size, bias=True)
        else:
            self.conv1 = nn.Sequential(nn.Conv3d(in_size, in_size//2, (1,1,1), (1,1,1), (0,0,0)),
                                       nn.ReLU(inplace=True),
                                       nn.AdaptiveAvgPool3d(output_size=self.fmap_size),
                                       )
            self.fc1 = nn.Linear(in_features=(in_size//2) * self.fmap_size[0] * self.fmap_size[1] * self.fmap_size[2],
                                 out_features=out_size, bias=True)

        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Return a (batch, out_size) gating vector."""
        batch_size = inputs.size(0)
        outputs = self.conv1(inputs)
        outputs = outputs.view(batch_size, -1)
        outputs = self.fc1(outputs)
        return outputs
class UnetGridGatingSignal3(nn.Module):
    """Conv(+InstanceNorm3d)+ReLU producing the grid gating signal.

    Note: the normalisation used is InstanceNorm3d despite the flag name.
    """

    def __init__(self, in_size, out_size, kernel_size=(1,1,1), is_batchnorm=True):
        super(UnetGridGatingSignal3, self).__init__()

        layers = [nn.Conv3d(in_size, out_size, kernel_size, (1,1,1), (0,0,0))]
        if is_batchnorm:
            layers.append(nn.InstanceNorm3d(out_size))
        layers.append(nn.ReLU(inplace=True))
        self.conv1 = nn.Sequential(*layers)

        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        """Apply the gating convolution stack."""
        return self.conv1(inputs)
class unetUp(nn.Module):
    """2D decoder step: upsample the deep features, pad the skip to match,
    concatenate, and run a double conv."""

    def __init__(self, in_size, out_size, is_deconv):
        super(unetUp, self).__init__()
        self.conv = unetConv2(in_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)

        # initialise the blocks (the inner unetConv2 initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs1, inputs2):
        """inputs1: skip features; inputs2: deeper features to upsample."""
        outputs2 = self.up(inputs2)
        # NOTE(review): the offset is computed from dim 2 (H) only and applied
        # symmetrically to both H and W — assumes the two maps differ by the
        # same even amount in each spatial dimension; confirm for odd sizes.
        offset = outputs2.size()[2] - inputs1.size()[2]
        padding = 2 * [offset // 2, offset // 2]
        outputs1 = F.pad(inputs1, padding)
        return self.conv(torch.cat([outputs1, outputs2], 1))
class UnetUp3(nn.Module):
    """3D decoder step with (2,2,1) upsampling: upsample the deep features,
    pad the skip, concatenate, double conv."""

    def __init__(self, in_size, out_size, is_deconv, is_batchnorm=True):
        super(UnetUp3, self).__init__()
        if is_deconv:
            self.conv = UnetConv3(in_size, out_size, is_batchnorm)
            self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=(4,4,1), stride=(2,2,1), padding=(1,1,0))
        else:
            self.conv = UnetConv3(in_size+out_size, out_size, is_batchnorm)
            self.up = nn.Upsample(scale_factor=(2, 2, 1), mode='trilinear')

        # initialise the blocks (the inner UnetConv3 initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('UnetConv3') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs1, inputs2):
        """inputs1: skip features; inputs2: deeper features to upsample."""
        outputs2 = self.up(inputs2)
        # NOTE(review): offset is measured on dim 2 only, and the repeated
        # F.pad list 2*[o//2, o//2, 0] distributes padding unevenly across the
        # last three dims (F.pad pads the last dimension first) — confirm the
        # intended alignment for inputs whose spatial sizes differ.
        offset = outputs2.size()[2] - inputs1.size()[2]
        padding = 2 * [offset // 2, offset // 2, 0]
        outputs1 = F.pad(inputs1, padding)
        return self.conv(torch.cat([outputs1, outputs2], 1))
class UnetUp3_CT(nn.Module):
    """3D decoder step with isotropic (2,2,2) trilinear upsampling,
    skip-concatenation, and a double 3x3x3 conv."""

    def __init__(self, in_size, out_size, is_batchnorm=True):
        super(UnetUp3_CT, self).__init__()
        self.conv = UnetConv3(in_size + out_size, out_size, is_batchnorm, kernel_size=(3,3,3), padding_size=(1,1,1))
        self.up = nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear')

        # initialise the blocks (the inner UnetConv3 initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('UnetConv3') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs1, inputs2):
        """inputs1: skip features; inputs2: deeper features to upsample."""
        outputs2 = self.up(inputs2)
        # NOTE(review): same single-dimension offset/pad pattern as UnetUp3;
        # it is a no-op when the upsampled map already matches the skip size.
        offset = outputs2.size()[2] - inputs1.size()[2]
        padding = 2 * [offset // 2, offset // 2, 0]
        outputs1 = F.pad(inputs1, padding)
        return self.conv(torch.cat([outputs1, outputs2], 1))
# Squeeze-and-Excitation Network
class SqEx(nn.Module):
    """Squeeze-and-Excitation block for 3D feature maps.

    Globally average-pools the spatial dimensions, passes the channel vector
    through a two-layer bottleneck (ratio ``reduction``) ending in a sigmoid
    gate, and rescales the input channels by the resulting weights.

    Raises:
        ValueError: if ``n_features`` is not divisible by ``reduction``.
    """

    def __init__(self, n_features, reduction=6):
        super(SqEx, self).__init__()

        if n_features % reduction != 0:
            # Fixed: the original message claimed "default = 4" although the
            # actual default reduction is 6; report the real values instead.
            raise ValueError(
                'n_features must be divisible by reduction (got n_features=%d, reduction=%d)'
                % (n_features, reduction))

        self.linear1 = nn.Linear(n_features, n_features // reduction, bias=False)
        self.nonlin1 = nn.ReLU(inplace=True)
        self.linear2 = nn.Linear(n_features // reduction, n_features, bias=False)
        self.nonlin2 = nn.Sigmoid()

    def forward(self, x):
        """Return ``x`` with its channels rescaled by learned attention weights."""
        # Squeeze: global average pool over (D, H, W) -> (N, C, 1, 1, 1).
        y = F.avg_pool3d(x, kernel_size=x.size()[2:5])
        # Move channels last so the Linear layers act on the channel axis.
        y = y.permute(0, 2, 3, 4, 1)
        y = self.nonlin1(self.linear1(y))
        y = self.nonlin2(self.linear2(y))
        # Restore (N, C, 1, 1, 1) and excite: channel-wise rescale of x.
        y = y.permute(0, 4, 1, 2, 3)
        y = x * y
        return y
class UnetUp3_SqEx(nn.Module):
    """3D decoder step with a Squeeze-and-Excitation gate applied to the
    concatenated (skip, upsampled) features before the double conv."""

    def __init__(self, in_size, out_size, is_deconv, is_batchnorm):
        super(UnetUp3_SqEx, self).__init__()
        if is_deconv:
            self.sqex = SqEx(n_features=in_size+out_size)
            self.conv = UnetConv3(in_size, out_size, is_batchnorm)
            self.up = nn.ConvTranspose3d(in_size, out_size, kernel_size=(4,4,1), stride=(2,2,1), padding=(1,1,0))
        else:
            self.sqex = SqEx(n_features=in_size+out_size)
            self.conv = UnetConv3(in_size+out_size, out_size, is_batchnorm)
            self.up = nn.Upsample(scale_factor=(2, 2, 1), mode='trilinear')

        # initialise the blocks (the inner UnetConv3 initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('UnetConv3') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs1, inputs2):
        """inputs1: skip features; inputs2: deeper features to upsample."""
        outputs2 = self.up(inputs2)
        # Same single-dimension offset/pad pattern as the other Up blocks.
        offset = outputs2.size()[2] - inputs1.size()[2]
        padding = 2 * [offset // 2, offset // 2, 0]
        outputs1 = F.pad(inputs1, padding)
        concat = torch.cat([outputs1, outputs2], 1)
        # Channel-attention gate on the fused features, then the double conv.
        gated  = self.sqex(concat)
        return self.conv(gated)
class residualBlock(nn.Module):
    """Basic residual block: (conv-bn-relu, conv-bn) plus identity skip."""
    expansion = 1

    def __init__(self, in_channels, n_filters, stride=1, downsample=None):
        super(residualBlock, self).__init__()

        self.convbnrelu1 = conv2DBatchNormRelu(in_channels, n_filters, 3, stride, 1, bias=False)
        self.convbn2 = conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False)
        self.downsample = downsample
        self.stride = stride
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Main path + (optionally projected) identity, then ReLU."""
        # Project the identity path when the main path changes shape.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.convbn2(self.convbnrelu1(x))
        out += identity
        return self.relu(out)
class residualBottleneck(nn.Module):
    """Bottleneck residual block (1x1 reduce, 3x3, 1x1 expand x4) + skip.

    Bug fix: the original referenced ``nn.Conv2DBatchNorm``, which does not
    exist in ``torch.nn``, so this class crashed on instantiation. It now
    uses the local ``conv2DBatchNorm`` helper defined in this module, with
    the same kernel/stride/padding/bias configuration the calls implied.
    """
    expansion = 4

    def __init__(self, in_channels, n_filters, stride=1, downsample=None):
        super(residualBottleneck, self).__init__()
        # 1x1 channel reduction.
        self.convbn1 = conv2DBatchNorm(in_channels, n_filters, k_size=1, stride=1, padding=0, bias=False)
        # 3x3 spatial conv (carries the stride).
        self.convbn2 = conv2DBatchNorm(n_filters, n_filters, k_size=3, stride=stride, padding=1, bias=False)
        # 1x1 expansion by the bottleneck factor.
        self.convbn3 = conv2DBatchNorm(n_filters, n_filters * 4, k_size=1, stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Main path + (optionally projected) identity, then ReLU."""
        residual = x
        out = self.convbn1(x)
        out = self.convbn2(out)
        out = self.convbn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out
class SeqModelFeatureExtractor(nn.Module):
    """Run a sequential submodule and collect intermediate activations.

    Returns the activations of every named layer listed in
    ``extracted_layers``, followed by the final output.
    """

    def __init__(self, submodule, extracted_layers):
        super(SeqModelFeatureExtractor, self).__init__()
        self.submodule = submodule
        self.extracted_layers = extracted_layers

    def forward(self, x):
        collected = []
        for name, layer in self.submodule._modules.items():
            x = layer(x)
            if name in self.extracted_layers:
                collected.append(x)
        # The final activation is always appended last.
        return collected + [x]
class HookBasedFeatureExtractor(nn.Module):
    """Capture the input and output tensors of a named sub-layer via forward hooks.

    Bug fixes relative to the original:
      * ``get_input_array`` read ``self.input`` (nonexistent attribute)
        instead of ``self.inputs`` on the non-tuple branch, which raised
        AttributeError.
      * ``rescale_output_array`` called ``.data()`` — ``.data`` is a tensor
        attribute, not a callable — which raised TypeError whenever
        ``upscale=True``.
    """

    def __init__(self, submodule, layername, upscale=False):
        super(HookBasedFeatureExtractor, self).__init__()

        self.submodule = submodule
        self.submodule.eval()
        self.layername = layername
        self.outputs_size = None
        self.outputs = None
        self.inputs = None
        self.inputs_size = None
        self.upscale = upscale

    def get_input_array(self, m, i, o):
        """Forward hook: snapshot the hooked layer's input tensor(s)."""
        if isinstance(i, tuple):
            self.inputs = [i[index].data.clone() for index in range(len(i))]
            self.inputs_size = [input.size() for input in self.inputs]
        else:
            self.inputs = i.data.clone()
            self.inputs_size = self.inputs.size()  # was: self.input.size()
        print('Input Array Size: ', self.inputs_size)

    def get_output_array(self, m, i, o):
        """Forward hook: snapshot the hooked layer's output tensor(s)."""
        if isinstance(o, tuple):
            self.outputs = [o[index].data.clone() for index in range(len(o))]
            self.outputs_size = [output.size() for output in self.outputs]
        else:
            self.outputs = o.data.clone()
            self.outputs_size = self.outputs.size()
        print('Output Array Size: ', self.outputs_size)

    def rescale_output_array(self, newsize):
        """Bilinearly upsample the captured outputs to the input's spatial size."""
        us = nn.Upsample(size=newsize[2:], mode='bilinear')
        if isinstance(self.outputs, list):
            for index in range(len(self.outputs)):
                self.outputs[index] = us(self.outputs[index]).data  # was: .data()
        else:
            self.outputs = us(self.outputs).data  # was: .data()

    def forward(self, x):
        target_layer = self.submodule._modules.get(self.layername)

        # Register hooks, run one forward pass, then detach the hooks.
        h_inp = target_layer.register_forward_hook(self.get_input_array)
        h_out = target_layer.register_forward_hook(self.get_output_array)
        self.submodule(x)
        h_inp.remove()
        h_out.remove()

        # Rescale the feature-map if it's required
        if self.upscale:
            self.rescale_output_array(x.size())

        return self.inputs, self.outputs
class UnetDsv3(nn.Module):
    """Deep-supervision head: 1x1x1 conv to class logits, then trilinear upsampling."""

    def __init__(self, in_size, out_size, scale_factor):
        super(UnetDsv3, self).__init__()
        projection = nn.Conv3d(in_size, out_size, kernel_size=1, stride=1, padding=0)
        upsample = nn.Upsample(scale_factor=scale_factor, mode='trilinear')
        self.dsv = nn.Sequential(projection, upsample)

    def forward(self, input):
        """Project then upsample ``input``."""
        return self.dsv(input)
| 18,130 | 38.159827 | 120 | py |
SSL4MIS | SSL4MIS-master/code/networks/neural_network.py | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from torch.cuda.amp import autocast
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
from torch import nn
import torch
from scipy.ndimage.filters import gaussian_filter
from typing import Union, Tuple, List
class no_op(object):
    """Context manager that does nothing.

    Used as a drop-in replacement for ``torch.cuda.amp.autocast`` when mixed
    precision is disabled (the autocast import is commented out above).
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
def maybe_to_torch(d):
    """Recursively convert numpy data to float32 torch tensors.

    Lists are processed element-wise; tensors pass through unchanged;
    anything else is assumed to be numpy-compatible and converted via
    ``torch.from_numpy(...).float()``.
    """
    if isinstance(d, list):
        return [item if isinstance(item, torch.Tensor) else maybe_to_torch(item)
                for item in d]
    if isinstance(d, torch.Tensor):
        return d
    return torch.from_numpy(d).float()
def to_cuda(data, non_blocking=True, gpu_id=0):
    """Move a tensor (or list of tensors) to the given CUDA device."""
    if isinstance(data, list):
        return [item.cuda(gpu_id, non_blocking=non_blocking) for item in data]
    return data.cuda(gpu_id, non_blocking=non_blocking)
class NeuralNetwork(nn.Module):
    """Minimal nn.Module base exposing device bookkeeping helpers."""

    def __init__(self):
        super(NeuralNetwork, self).__init__()

    def get_device(self):
        """Return "cpu" or the CUDA device index the parameters live on."""
        device = next(self.parameters()).device
        if device == "cpu":
            return "cpu"
        return device.index

    def set_device(self, device):
        """Move the network to "cpu" or to the CUDA device with index ``device``."""
        if device == "cpu":
            self.cpu()
        else:
            self.cuda(device)

    def forward(self, x):
        # Subclasses must implement the actual computation.
        raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
    def __init__(self):
        """Initialise inference bookkeeping attributes; subclasses fill them in."""
        # NOTE(review): super(NeuralNetwork, self) skips NeuralNetwork in the
        # MRO and goes straight to nn.Module.__init__ — harmless here because
        # NeuralNetwork.__init__ only delegates, but confirm it is intentional.
        super(NeuralNetwork, self).__init__()

        # if we have 5 pooling then our patch size must be divisible by 2**5
        # for example in a 2d network that does 5 pool in x and 6 pool
        self.input_shape_must_be_divisible_by = None
        # in y this would be (32, 64)

        # we need to know this because we need to know if we are a 2d or a 3d network
        self.conv_op = None  # nn.Conv2d or nn.Conv3d

        # this tells us how many channels we have in the output. Important for preallocation in inference
        self.num_classes = None  # number of channels in the output

        # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions
        # during inference, we need to apply the nonlinearity, however. So it is important to let the network know what
        # to apply in inference. For the most part this will be softmax
        self.inference_apply_nonlin = lambda x: x  # softmax_helper

        # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the
        # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians
        # can be expensive, so it makes sense to save and reuse them.
        self._gaussian_3d = self._patch_size_for_gaussian_3d = None
        self._gaussian_2d = self._patch_size_for_gaussian_2d = None
    def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),
                   use_sliding_window: bool = False,
                   step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,
                   use_gaussian: bool = False, pad_border_mode: str = "constant",
                   pad_kwargs: dict = None, all_in_gpu: bool = False,
                   verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will
        detect that automatically and run the appropriate code.
        When running predictions, you need to specify whether you want to run fully convolutional of sliding window
        based inference. We very strongly recommend you use sliding window with the default settings.
        It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
        the network is not in eval mode it will print a warning.
        :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).
        :param do_mirroring: If True, use test time data augmentation in the form of mirroring
        :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
        axes
        :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
        :param step_size: When running sliding window prediction, the step size determines the distance between adjacent
        predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
        as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
        predictions. step_size cannot be larger than 1!
        :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
        this will either crash or give potentially less accurate segmentations
        :param regions_class_order: Fabian only
        :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
        to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
        behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
        :param pad_border_mode: leave this alone
        :param pad_kwargs: leave this alone
        :param all_in_gpu: experimental. You probably want to leave this as is it
        :param verbose: Do you want a wall of text? If yes then set this to True
        :param mixed_precision: if True, will run inference in mixed precision with autocast()
        :return:
        """
        # free cached GPU memory before allocating (potentially large) buffers
        torch.cuda.empty_cache()
        assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \
                               'predictions'
        if verbose:
            print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
        assert self.get_device() != "cpu", "CPU not implemented"
        if pad_kwargs is None:
            pad_kwargs = {'constant_values': 0}
        # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old
        # code that uses this convention
        if len(mirror_axes):
            if self.conv_op == nn.Conv2d:
                if max(mirror_axes) > 1:
                    raise ValueError("mirror axes. duh")
            if self.conv_op == nn.Conv3d:
                if max(mirror_axes) > 2:
                    raise ValueError("mirror axes. duh")
        if self.training:
            print(
                'WARNING! Network is in train mode during inference. This may be intended, or not...')
        assert len(x.shape) == 4, "data must have shape (c,x,y,z)"
        # run under autocast for mixed precision, or a no-op context otherwise
        if mixed_precision:
            context = autocast
        else:
            context = no_op
        with context():
            with torch.no_grad():
                # dispatch on network dimensionality (conv_op) and inference mode
                if self.conv_op == nn.Conv3d:
                    if use_sliding_window:
                        res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
                                                                     regions_class_order, use_gaussian, pad_border_mode,
                                                                     pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
                                                                     verbose=verbose)
                    else:
                        res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                               pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)
                elif self.conv_op == nn.Conv2d:
                    # 2d network on a 3d volume: predict slice by slice
                    if use_sliding_window:
                        res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,
                                                                     regions_class_order, use_gaussian, pad_border_mode,
                                                                     pad_kwargs, all_in_gpu, False)
                    else:
                        res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                               pad_border_mode, pad_kwargs, all_in_gpu, False)
                else:
                    raise RuntimeError(
                        "Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
        return res
def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,
step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,
use_gaussian: bool = False, pad_border_mode: str = "constant",
pad_kwargs: dict = None, all_in_gpu: bool = False,
verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""
Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D
image with that (you dummy).
When running predictions, you need to specify whether you want to run fully convolutional of sliding window
based inference. We very strongly recommend you use sliding window with the default settings.
It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
the network is not in eval mode it will print a warning.
:param x: Your input data. Must be a nd.ndarray of shape (c, x, y).
:param do_mirroring: If True, use test time data augmentation in the form of mirroring
:param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
axes
:param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
:param step_size: When running sliding window prediction, the step size determines the distance between adjacent
predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
predictions. step_size cannot be larger than 1!
:param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
this will either crash or give potentially less accurate segmentations
:param regions_class_order: Fabian only
:param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
:param pad_border_mode: leave this alone
:param pad_kwargs: leave this alone
:param all_in_gpu: experimental. You probably want to leave this as is it
:param verbose: Do you want a wall of text? If yes then set this to True
:return:
"""
torch.cuda.empty_cache()
assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \
'predictions'
if self.conv_op == nn.Conv3d:
raise RuntimeError(
"Cannot predict 2d if the network is 3d. Dummy.")
if verbose:
print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)
assert self.get_device() != "cpu", "CPU not implemented"
if pad_kwargs is None:
pad_kwargs = {'constant_values': 0}
# A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old
# code that uses this convention
if len(mirror_axes):
if max(mirror_axes) > 1:
raise ValueError("mirror axes. duh")
if self.training:
print(
'WARNING! Network is in train mode during inference. This may be intended, or not...')
assert len(x.shape) == 3, "data must have shape (c,x,y)"
if mixed_precision:
context = autocast
else:
context = no_op
with context():
with torch.no_grad():
if self.conv_op == nn.Conv2d:
if use_sliding_window:
res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
regions_class_order, use_gaussian, pad_border_mode,
pad_kwargs, all_in_gpu, verbose)
else:
res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
pad_border_mode, pad_kwargs, verbose)
else:
raise RuntimeError(
"Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")
return res
@staticmethod
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
tmp = np.zeros(patch_size)
center_coords = [i // 2 for i in patch_size]
sigmas = [i * sigma_scale for i in patch_size]
tmp[tuple(center_coords)] = 1
gaussian_importance_map = gaussian_filter(
tmp, sigmas, 0, mode='constant', cval=0)
gaussian_importance_map = gaussian_importance_map / \
np.max(gaussian_importance_map) * 1
gaussian_importance_map = gaussian_importance_map.astype(np.float32)
# gaussian_importance_map cannot be 0, otherwise we may end up with nans!
gaussian_importance_map[gaussian_importance_map == 0] = np.min(
gaussian_importance_map[gaussian_importance_map != 0])
return gaussian_importance_map
@staticmethod
def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:
assert [i >= j for i, j in zip(
image_size, patch_size)], "image size must be as large or larger than patch_size"
assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'
# our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of
# 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46
target_step_sizes_in_voxels = [i * step_size for i in patch_size]
num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j,
k in zip(image_size, target_step_sizes_in_voxels, patch_size)]
steps = []
for dim in range(len(patch_size)):
# the highest step value for this dimension is
max_step_value = image_size[dim] - patch_size[dim]
if num_steps[dim] > 1:
actual_step_size = max_step_value / (num_steps[dim] - 1)
else:
# does not matter because there is only one step at 0
actual_step_size = 99999999999
steps_here = [int(np.round(actual_step_size * i))
for i in range(num_steps[dim])]
steps.append(steps_here)
return steps
    def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
                                          patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
                                          pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
                                          verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Sliding-window 3d inference: tile the (padded) volume, predict each
        patch (optionally with mirroring TTA and Gaussian weighting), then
        aggregate the weighted patch predictions into a full-volume result.

        :return: (predicted_segmentation, class_probabilities)
        """
        # better safe than sorry
        assert len(x.shape) == 4, "x must be (c, x, y, z)"
        assert self.get_device() != "cpu"
        if verbose:
            print("step_size:", step_size)
        if verbose:
            print("do mirror:", do_mirroring)
        assert patch_size is not None, "patch_size cannot be None for tiled prediction"
        # for sliding window inference the image must at least be as large as the patch size. It does not matter
        # whether the shape is divisible by 2**num_pool as long as the patch size is
        data, slicer = pad_nd_image(
            x, patch_size, pad_border_mode, pad_kwargs, True, None)
        data_shape = data.shape  # still c, x, y, z
        # compute the steps for sliding window
        steps = self._compute_steps_for_sliding_window(
            patch_size, data_shape[1:], step_size)
        num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])
        if verbose:
            print("data shape:", data_shape)
            print("patch size:", patch_size)
            print("steps (x, y, and z):", steps)
            print("number of tiles:", num_tiles)
        # we only need to compute that once. It can take a while to compute this due to the large sigma in
        # gaussian_filter
        if use_gaussian and num_tiles > 1:
            # rebuild the cached Gaussian only if the patch size changed
            if self._gaussian_3d is None or not all(
                    [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):
                if verbose:
                    print('computing Gaussian')
                gaussian_importance_map = self._get_gaussian(
                    patch_size, sigma_scale=1. / 8)
                self._gaussian_3d = gaussian_importance_map
                self._patch_size_for_gaussian_3d = patch_size
            else:
                if verbose:
                    print("using precomputed Gaussian")
                gaussian_importance_map = self._gaussian_3d
            gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
                                                                                     non_blocking=True)
        else:
            gaussian_importance_map = None
        if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
            # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
            if use_gaussian and num_tiles > 1:
                # half precision for the outputs should be good enough. If the outputs here are half, the
                # gaussian_importance_map should be as well
                gaussian_importance_map = gaussian_importance_map.half()
                # make sure we did not round anything to 0
                gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
                    gaussian_importance_map != 0].min()
                add_for_nb_of_preds = gaussian_importance_map
            else:
                add_for_nb_of_preds = torch.ones(
                    data.shape[1:], device=self.get_device())
            if verbose:
                print("initializing result array (on GPU)")
            aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                             device=self.get_device())
            if verbose:
                print("moving data to GPU")
            data = torch.from_numpy(data).cuda(
                self.get_device(), non_blocking=True)
            if verbose:
                print("initializing result_numsamples (on GPU)")
            aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                                       device=self.get_device())
        else:
            # CPU-side aggregation buffers (numpy)
            if use_gaussian and num_tiles > 1:
                add_for_nb_of_preds = self._gaussian_3d
            else:
                add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
            aggregated_results = np.zeros(
                [self.num_classes] + list(data.shape[1:]), dtype=np.float32)
            aggregated_nb_of_predictions = np.zeros(
                [self.num_classes] + list(data.shape[1:]), dtype=np.float32)
        # NOTE(review): the loop variable x shadows the input parameter x from
        # here on; the input has already been copied into `data`, so this is
        # harmless but confusing.
        for x in steps[0]:
            lb_x = x
            ub_x = x + patch_size[0]
            for y in steps[1]:
                lb_y = y
                ub_y = y + patch_size[1]
                for z in steps[2]:
                    lb_z = z
                    ub_z = z + patch_size[2]
                    predicted_patch = self._internal_maybe_mirror_and_pred_3D(
                        data[None, :, lb_x:ub_x, lb_y:ub_y,
                             lb_z:ub_z], mirror_axes, do_mirroring,
                        gaussian_importance_map)[0]
                    if all_in_gpu:
                        predicted_patch = predicted_patch.half()
                    else:
                        predicted_patch = predicted_patch.cpu().numpy()
                    # accumulate weighted predictions and the per-voxel weights
                    aggregated_results[:, lb_x:ub_x,
                                       lb_y:ub_y, lb_z:ub_z] += predicted_patch
                    aggregated_nb_of_predictions[:, lb_x:ub_x,
                                                 lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds
        # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size
        slicer = tuple(
            [slice(0, aggregated_results.shape[i]) for i in
             range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
        aggregated_results = aggregated_results[slicer]
        aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
        # computing the class_probabilities by dividing the aggregated result with result_numsamples
        class_probabilities = aggregated_results / aggregated_nb_of_predictions
        if regions_class_order is None:
            predicted_segmentation = class_probabilities.argmax(0)
        else:
            # region-based training: threshold each channel independently
            if all_in_gpu:
                class_probabilities_here = class_probabilities.detach().cpu().numpy()
            else:
                class_probabilities_here = class_probabilities
            predicted_segmentation = np.zeros(
                class_probabilities_here.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[class_probabilities_here[i] > 0.5] = c
        if all_in_gpu:
            if verbose:
                print("copying results to CPU")
            if regions_class_order is None:
                predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            class_probabilities = class_probabilities.detach().cpu().numpy()
        if verbose:
            print("prediction done")
        return predicted_segmentation, class_probabilities
    def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                    mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                    pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                    verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        This one does fully convolutional inference. No sliding window

        The input is padded to ``min_size`` and to a multiple of
        ``input_shape_must_be_divisible_by``, predicted in one pass (with
        optional mirroring TTA), and the padding is then cropped off again.
        :return: (predicted_segmentation, predicted_probabilities), both numpy
        """
        assert len(x.shape) == 3, "x must be (c, x, y)"
        assert self.get_device() != "cpu"
        assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                                  'run _internal_predict_2D_2Dconv'
        if verbose:
            print("do mirror:", do_mirroring)
        data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                    self.input_shape_must_be_divisible_by)
        predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,
                                                                          None)[0]
        # crop away the padding introduced above
        slicer = tuple(
            [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
                                                                       (len(slicer) - 1))] + slicer[1:])
        predicted_probabilities = predicted_probabilities[slicer]
        if regions_class_order is None:
            predicted_segmentation = predicted_probabilities.argmax(0)
            predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
        else:
            # region-based training: threshold each channel independently
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
            predicted_segmentation = np.zeros(
                predicted_probabilities.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[predicted_probabilities[i] > 0.5] = c
        return predicted_segmentation, predicted_probabilities
    def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,
                                    mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                    pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                    verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        This one does fully convolutional inference. No sliding window

        3d analogue of ``_internal_predict_2D_2Dconv``: pad, predict the whole
        volume in one pass (with optional mirroring TTA), crop the padding off.
        :return: (predicted_segmentation, predicted_probabilities), both numpy
        """
        assert len(x.shape) == 4, "x must be (c, x, y, z)"
        assert self.get_device() != "cpu"
        assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                                  'run _internal_predict_3D_3Dconv'
        if verbose:
            print("do mirror:", do_mirroring)
        data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                    self.input_shape_must_be_divisible_by)
        predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,
                                                                          None)[0]
        # crop away the padding introduced above
        slicer = tuple(
            [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
                                                                       (len(slicer) - 1))] + slicer[1:])
        predicted_probabilities = predicted_probabilities[slicer]
        if regions_class_order is None:
            predicted_segmentation = predicted_probabilities.argmax(0)
            predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
        else:
            # region-based training: threshold each channel independently
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
            predicted_segmentation = np.zeros(
                predicted_probabilities.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[predicted_probabilities[i] > 0.5] = c
        return predicted_segmentation, predicted_probabilities
    def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.Tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: Union[np.ndarray, torch.Tensor] = None) -> torch.Tensor:
        """Run the network on ``x``, averaging predictions over mirror flips.

        ``mirror_axes`` values 0/1/2 correspond to tensor axes 2/3/4 of the
        (b, c, x, y, z) input. ``m`` enumerates the 8 flip combinations; only
        those whose axes are all in ``mirror_axes`` are executed, each
        contributing 1/num_results to the average. ``mult`` (e.g. a Gaussian
        importance map) is multiplied onto the result if given.
        """
        assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'
        # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
        # we now return a cuda tensor! Not numpy array!
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),
                                   dtype=torch.float).cuda(self.get_device(), non_blocking=True)
        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
        if do_mirroring:
            mirror_idx = 8
            num_results = 2 ** len(mirror_axes)
        else:
            # only the un-mirrored forward pass (m == 0) is executed
            mirror_idx = 1
            num_results = 1
        for m in range(mirror_idx):
            if m == 0:
                pred = self.inference_apply_nonlin(self(x))
                result_torch += 1 / num_results * pred
            if m == 1 and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))
                result_torch += 1 / num_results * torch.flip(pred, (4,))
            if m == 2 and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
                result_torch += 1 / num_results * torch.flip(pred, (3,))
            if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 3))
            if m == 4 and (0 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
                result_torch += 1 / num_results * torch.flip(pred, (2,))
            if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 2))
            if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (3, 2))
            if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(
                    self(torch.flip(x, (4, 3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))
        if mult is not None:
            # apply the importance weighting (e.g. Gaussian map) in place
            result_torch[:, :] *= mult
        return result_torch
    def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.Tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: Union[np.ndarray, torch.Tensor] = None) -> torch.Tensor:
        """2d analogue of ``_internal_maybe_mirror_and_pred_3D``.

        ``mirror_axes`` values 0/1 correspond to tensor axes 2/3 of the
        (b, c, x, y) input; the 4 flip combinations are averaged with weight
        1/num_results each, and ``mult`` is applied at the end if given.
        """
        # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
        # we now return a cuda tensor! Not numpy array!
        assert len(x.shape) == 4, 'x must be (b, c, x, y)'
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),
                                   dtype=torch.float).cuda(self.get_device(), non_blocking=True)
        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())
        if do_mirroring:
            mirror_idx = 4
            num_results = 2 ** len(mirror_axes)
        else:
            # only the un-mirrored forward pass (m == 0) is executed
            mirror_idx = 1
            num_results = 1
        for m in range(mirror_idx):
            if m == 0:
                pred = self.inference_apply_nonlin(self(x))
                result_torch += 1 / num_results * pred
            if m == 1 and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
                result_torch += 1 / num_results * torch.flip(pred, (3, ))
            if m == 2 and (0 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
                result_torch += 1 / num_results * torch.flip(pred, (2, ))
            if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (3, 2))
        if mult is not None:
            # apply the importance weighting (e.g. Gaussian map) in place
            result_torch[:, :] *= mult
        return result_torch
    def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
                                          patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
                                          pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
                                          verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Sliding-window 2d inference: 2d analogue of
        ``_internal_predict_3D_3Dconv_tiled`` (tile, predict each patch with
        optional mirroring TTA and Gaussian weighting, aggregate).

        :return: (predicted_segmentation, class_probabilities)
        """
        # better safe than sorry
        assert len(x.shape) == 3, "x must be (c, x, y)"
        assert self.get_device() != "cpu"
        if verbose:
            print("step_size:", step_size)
        if verbose:
            print("do mirror:", do_mirroring)
        assert patch_size is not None, "patch_size cannot be None for tiled prediction"
        # for sliding window inference the image must at least be as large as the patch size. It does not matter
        # whether the shape is divisible by 2**num_pool as long as the patch size is
        data, slicer = pad_nd_image(
            x, patch_size, pad_border_mode, pad_kwargs, True, None)
        data_shape = data.shape  # still c, x, y
        # compute the steps for sliding window
        steps = self._compute_steps_for_sliding_window(
            patch_size, data_shape[1:], step_size)
        num_tiles = len(steps[0]) * len(steps[1])
        if verbose:
            print("data shape:", data_shape)
            print("patch size:", patch_size)
            print("steps (x, y, and z):", steps)
            print("number of tiles:", num_tiles)
        # we only need to compute that once. It can take a while to compute this due to the large sigma in
        # gaussian_filter
        if use_gaussian and num_tiles > 1:
            # rebuild the cached Gaussian only if the patch size changed
            if self._gaussian_2d is None or not all(
                    [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):
                if verbose:
                    print('computing Gaussian')
                gaussian_importance_map = self._get_gaussian(
                    patch_size, sigma_scale=1. / 8)
                self._gaussian_2d = gaussian_importance_map
                self._patch_size_for_gaussian_2d = patch_size
            else:
                if verbose:
                    print("using precomputed Gaussian")
                gaussian_importance_map = self._gaussian_2d
            gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
                                                                                     non_blocking=True)
        else:
            gaussian_importance_map = None
        if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
            # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
            if use_gaussian and num_tiles > 1:
                # half precision for the outputs should be good enough. If the outputs here are half, the
                # gaussian_importance_map should be as well
                gaussian_importance_map = gaussian_importance_map.half()
                # make sure we did not round anything to 0
                gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
                    gaussian_importance_map != 0].min()
                add_for_nb_of_preds = gaussian_importance_map
            else:
                add_for_nb_of_preds = torch.ones(
                    data.shape[1:], device=self.get_device())
            if verbose:
                print("initializing result array (on GPU)")
            aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                             device=self.get_device())
            if verbose:
                print("moving data to GPU")
            data = torch.from_numpy(data).cuda(
                self.get_device(), non_blocking=True)
            if verbose:
                print("initializing result_numsamples (on GPU)")
            aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                                       device=self.get_device())
        else:
            # CPU-side aggregation buffers (numpy)
            if use_gaussian and num_tiles > 1:
                add_for_nb_of_preds = self._gaussian_2d
            else:
                add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
            aggregated_results = np.zeros(
                [self.num_classes] + list(data.shape[1:]), dtype=np.float32)
            aggregated_nb_of_predictions = np.zeros(
                [self.num_classes] + list(data.shape[1:]), dtype=np.float32)
        # NOTE(review): the loop variable x shadows the input parameter x; the
        # input has already been copied into `data`, so this is harmless.
        for x in steps[0]:
            lb_x = x
            ub_x = x + patch_size[0]
            for y in steps[1]:
                lb_y = y
                ub_y = y + patch_size[1]
                predicted_patch = self._internal_maybe_mirror_and_pred_2D(
                    data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,
                    gaussian_importance_map)[0]
                if all_in_gpu:
                    predicted_patch = predicted_patch.half()
                else:
                    predicted_patch = predicted_patch.cpu().numpy()
                # accumulate weighted predictions and the per-pixel weights
                aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch
                aggregated_nb_of_predictions[:, lb_x:ub_x,
                                             lb_y:ub_y] += add_for_nb_of_preds
        # we reverse the padding here (remeber that we padded the input to be at least as large as the patch size
        slicer = tuple(
            [slice(0, aggregated_results.shape[i]) for i in
             range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
        aggregated_results = aggregated_results[slicer]
        aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
        # computing the class_probabilities by dividing the aggregated result with result_numsamples
        class_probabilities = aggregated_results / aggregated_nb_of_predictions
        if regions_class_order is None:
            predicted_segmentation = class_probabilities.argmax(0)
        else:
            # region-based training: threshold each channel independently
            if all_in_gpu:
                class_probabilities_here = class_probabilities.detach().cpu().numpy()
            else:
                class_probabilities_here = class_probabilities
            predicted_segmentation = np.zeros(
                class_probabilities_here.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[class_probabilities_here[i] > 0.5] = c
        if all_in_gpu:
            if verbose:
                print("copying results to CPU")
            if regions_class_order is None:
                predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            class_probabilities = class_probabilities.detach().cpu().numpy()
        if verbose:
            print("prediction done")
        return predicted_segmentation, class_probabilities
def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(
x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
    def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                   mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
                                   pseudo3D_slices: int = 5, all_in_gpu: bool = False,
                                   pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                   verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """Pseudo-3d prediction with a 2d network: each slice is predicted
        together with its neighbouring slices stacked into the channel
        dimension, giving the 2d network limited 3d context.

        ``pseudo3D_slices`` must be odd; the volume is zero-padded along the
        slice axis so border slices have enough neighbours.
        :return: (predicted_segmentation, softmax_pred) as numpy arrays
        """
        if all_in_gpu:
            raise NotImplementedError
        assert len(x.shape) == 4, "data must be c, x, y, z"
        assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd"
        extra_slices = (pseudo3D_slices - 1) // 2
        # zero-pad along the slice axis so border slices have neighbours
        shp_for_pad = np.array(x.shape)
        shp_for_pad[1] = extra_slices
        pad = np.zeros(shp_for_pad, dtype=np.float32)
        data = np.concatenate((pad, x, pad), 1)
        predicted_segmentation = []
        softmax_pred = []
        for s in range(extra_slices, data.shape[1] - extra_slices):
            # fold the neighbouring slices into the channel dimension
            d = data[:, (s - extra_slices):(s + extra_slices + 1)]
            d = d.reshape((-1, d.shape[-2], d.shape[-1]))
            pred_seg, softmax_pres = \
                self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,
                                                 regions_class_order, pad_border_mode, pad_kwargs, verbose)
            predicted_segmentation.append(pred_seg[None])
            softmax_pred.append(softmax_pres[None])
        predicted_segmentation = np.vstack(predicted_segmentation)
        softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
        return predicted_segmentation, softmax_pred
def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), step_size: float = 0.5,
regions_class_order: tuple = None, use_gaussian: bool = False,
pad_border_mode: str = "edge", pad_kwargs: dict = None,
all_in_gpu: bool = False,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(
x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs, all_in_gpu, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
if __name__ == '__main__':
    # smoke test for the sliding-window step computation; prints the step
    # coordinates for a range of patch-size / image-size / step-size combos
    _cases = [
        ((30, 224, 224), (162, 529, 529), 0.5),
        ((30, 224, 224), (162, 529, 529), 1),
        ((30, 224, 224), (162, 529, 529), 0.1),
        ((30, 224, 224), (60, 448, 224), 1),
        ((30, 224, 224), (60, 448, 224), 0.5),
        ((30, 224, 224), (30, 224, 224), 1),
        ((30, 224, 224), (30, 224, 224), 0.125),
        ((123, 54, 123), (246, 162, 369), 0.25),
    ]
    for _patch, _image, _step in _cases:
        print(SegmentationNetwork._compute_steps_for_sliding_window(
            _patch, _image, _step))
| 45,370 | 49.189159 | 137 | py |
SSL4MIS | SSL4MIS-master/code/networks/VoxResNet.py | # -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
class SEBlock(nn.Module):
    """Squeeze-and-excitation style block for 3d feature maps.

    A channel response is computed from globally average-pooled features and
    the reweighted features are added back onto the input (residual form):
    ``out = se(x) * x + x``.

    :param in_channels: number of input/output channels
    :param r: channel reduction ratio for the bottleneck
    """

    def __init__(self, in_channels, r):
        super(SEBlock, self).__init__()
        reduced = int(in_channels / r)
        self.se_layers = nn.Sequential(
            nn.AdaptiveAvgPool3d(1),
            nn.Conv3d(in_channels, reduced, kernel_size=1, padding=0),
            nn.ReLU(),
            nn.Conv3d(reduced, in_channels, kernel_size=1, padding=0),
            nn.ReLU())

    def forward(self, x):
        attention = self.se_layers(x)
        return attention * x + x
class VoxRex(nn.Module):
    """Pre-activation VoxRes residual unit.

    Two (InstanceNorm -> ReLU -> 3x3x3 conv, no bias) stages whose
    output is added to the identity input; channel count is preserved.
    """
    def __init__(self, in_channels):
        super(VoxRex, self).__init__()
        self.block = nn.Sequential(
            nn.InstanceNorm3d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels, in_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.InstanceNorm3d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels, in_channels,
                      kernel_size=3, padding=1, bias=False)
        )
    def forward(self, x):
        residual = self.block(x)
        return residual + x
class ConvBlock(nn.Module):
    """Two pre-activation 3D convolutions.

    Each stage is InstanceNorm -> ReLU -> 3x3x3 conv (no bias); the
    first stage maps ``in_channels`` to ``out_channels``, the second
    keeps ``out_channels``.
    """
    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.conv_conv = nn.Sequential(
            nn.InstanceNorm3d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(in_channels, out_channels,
                      kernel_size=3, padding=1, bias=False),
            nn.InstanceNorm3d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(out_channels, out_channels,
                      kernel_size=3, padding=1, bias=False)
        )
    def forward(self, x):
        features = self.conv_conv(x)
        return features
class UpBlock(nn.Module):
    """Trilinear 2x upsampling of the deep feature map, concatenation
    with the skip connection, then a ConvBlock fusion."""
    def __init__(self, in_channels, out_channels):
        super(UpBlock, self).__init__()
        self.up = nn.Upsample(
            scale_factor=2, mode='trilinear', align_corners=True)
        self.conv = ConvBlock(in_channels, out_channels)
    def forward(self, x1, x2):
        # x1: coarse features to upsample; x2: skip features at target resolution
        upsampled = self.up(x1)
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class VoxResNet(nn.Module):
    """VoxResNet-style 3D encoder-decoder segmentation network.

    The encoder stacks six VoxRes residual units with max-pooling
    between stages (1/2 -> 1/4 -> 1/8 resolution); the decoder fuses
    the two intermediate skip connections with UpBlocks and a final
    trilinear upsample restores the input resolution before the
    per-voxel classification conv.
    """
    def __init__(self, in_chns=1, feature_chns=64, class_num=2):
        super(VoxResNet, self).__init__()
        self.in_chns = in_chns
        self.ft_chns = feature_chns
        self.n_class = class_num
        self.conv1 = nn.Conv3d(in_chns, feature_chns, kernel_size=3, padding=1)
        self.res1 = VoxRex(feature_chns)
        self.res2 = VoxRex(feature_chns)
        self.res3 = VoxRex(feature_chns)
        self.res4 = VoxRex(feature_chns)
        self.res5 = VoxRex(feature_chns)
        self.res6 = VoxRex(feature_chns)
        self.up1 = UpBlock(feature_chns * 2, feature_chns)
        self.up2 = UpBlock(feature_chns * 2, feature_chns)
        self.out = nn.Conv3d(feature_chns, self.n_class, kernel_size=1)
        self.maxpool = nn.MaxPool3d(2)
        self.upsample = nn.Upsample(
            scale_factor=2, mode='trilinear', align_corners=True)
    def forward(self, x):
        stem = self.maxpool(self.conv1(x))                        # 1/2 resolution
        skip_quarter = self.maxpool(self.res2(self.res1(stem)))   # 1/4 resolution
        deep = self.maxpool(self.res4(self.res3(skip_quarter)))   # 1/8 resolution
        deep = self.res6(self.res5(deep))
        decoded = self.up2(self.up1(deep, skip_quarter), stem)
        return self.out(self.upsample(decoded))
| 3,637 | 30.094017 | 79 | py |
SSL4MIS | SSL4MIS-master/code/networks/vision_transformer.py | # coding=utf-8
# This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
from networks.swin_transformer_unet_skip_expand_decoder_sys import SwinTransformerSys
logger = logging.getLogger(__name__)
class SwinUnet(nn.Module):
    """U-shaped segmentation network built on a Swin Transformer backbone.

    Thin wrapper around ``SwinTransformerSys`` that adapts grayscale
    inputs to three channels and can load pretrained Swin encoder
    weights from a checkpoint.
    """
    def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
        super(SwinUnet, self).__init__()
        self.num_classes = num_classes
        self.zero_head = zero_head
        self.config = config
        # all architecture hyper-parameters come from the config object;
        # note img_size/vis arguments are not forwarded here
        self.swin_unet = SwinTransformerSys(img_size=config.DATA.IMG_SIZE,
                                patch_size=config.MODEL.SWIN.PATCH_SIZE,
                                in_chans=config.MODEL.SWIN.IN_CHANS,
                                num_classes=self.num_classes,
                                embed_dim=config.MODEL.SWIN.EMBED_DIM,
                                depths=config.MODEL.SWIN.DEPTHS,
                                num_heads=config.MODEL.SWIN.NUM_HEADS,
                                window_size=config.MODEL.SWIN.WINDOW_SIZE,
                                mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
                                qkv_bias=config.MODEL.SWIN.QKV_BIAS,
                                qk_scale=config.MODEL.SWIN.QK_SCALE,
                                drop_rate=config.MODEL.DROP_RATE,
                                drop_path_rate=config.MODEL.DROP_PATH_RATE,
                                ape=config.MODEL.SWIN.APE,
                                patch_norm=config.MODEL.SWIN.PATCH_NORM,
                                use_checkpoint=config.TRAIN.USE_CHECKPOINT)
    def forward(self, x):
        """Segment ``x``; single-channel inputs are repeated to 3 channels."""
        if x.size()[1] == 1:
            x = x.repeat(1,3,1,1)
        logits = self.swin_unet(x)
        return logits
    def load_from(self, config):
        """Load pretrained weights from ``config.MODEL.PRETRAIN_CKPT``.

        Two checkpoint layouts are handled:
        * a raw state dict (no "model" key): keys are loaded after
          stripping a 17-character prefix and dropping "output" heads;
        * a dict with a "model" entry: encoder weights are loaded and
          each encoder stage is additionally mirrored into the decoder
          ("layers_up") branch; keys whose shapes do not match the
          current model are dropped.
        """
        pretrained_path = config.MODEL.PRETRAIN_CKPT
        if pretrained_path is not None:
            print("pretrained_path:{}".format(pretrained_path))
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            pretrained_dict = torch.load(pretrained_path, map_location=device)
            if "model" not in pretrained_dict:
                print("---start load pretrained modle by splitting---")
                # drop the (assumed) 17-char module prefix from every key
                # -- NOTE(review): prefix length is hard-coded; verify it
                # matches the checkpoint's actual key layout
                pretrained_dict = {k[17:]:v for k,v in pretrained_dict.items()}
                for k in list(pretrained_dict.keys()):
                    if "output" in k:
                        print("delete key:{}".format(k))
                        del pretrained_dict[k]
                msg = self.swin_unet.load_state_dict(pretrained_dict,strict=False)
                # print(msg)
                return
            pretrained_dict = pretrained_dict['model']
            print("---start load pretrained modle of swin encoder---")
            model_dict = self.swin_unet.state_dict()
            full_dict = copy.deepcopy(pretrained_dict)
            # mirror each encoder stage into the corresponding decoder stage
            for k, v in pretrained_dict.items():
                if "layers." in k:
                    current_layer_num = 3-int(k[7:8])
                    current_k = "layers_up." + str(current_layer_num) + k[8:]
                    full_dict.update({current_k:v})
            # drop keys whose shapes disagree with the current model
            for k in list(full_dict.keys()):
                if k in model_dict:
                    if full_dict[k].shape != model_dict[k].shape:
                        print("delete:{};shape pretrain:{};shape model:{}".format(k,v.shape,model_dict[k].shape))
                        del full_dict[k]
            msg = self.swin_unet.load_state_dict(full_dict, strict=False)
            # print(msg)
        else:
            print("none pretrain")
| 3,981 | 43.244444 | 113 | py |
SSL4MIS | SSL4MIS-master/code/networks/swin_transformer_unet_skip_expand_decoder_sys.py | # This file borrowed from Swin-UNet: https://github.com/HuCaoFighting/Swin-Unet
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
    """Transformer feed-forward block: Linear -> activation -> dropout
    -> Linear -> dropout.

    Hidden and output widths default to the input width when omitted.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a (B, H, W, C) tensor into non-overlapping square windows.

    Args:
        x: tensor of shape (B, H, W, C); H and W must be divisible by
            ``window_size``.
        window_size (int): side length of each square window.

    Returns:
        Tensor of shape (num_windows * B, window_size, window_size, C),
        windows ordered row-major within each batch element.
    """
    B, H, W, C = x.shape
    grid_h = H // window_size
    grid_w = W // window_size
    tiled = x.view(B, grid_h, window_size, grid_w, window_size, C)
    return tiled.permute(0, 1, 3, 2, 4, 5).contiguous().view(
        -1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Reassemble windows produced by ``window_partition`` into images.

    Args:
        windows: tensor of shape (num_windows * B, window_size, window_size, C).
        window_size (int): side length of each square window.
        H (int): image height.
        W (int): image width.

    Returns:
        Tensor of shape (B, H, W, C).
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size,
                        window_size, window_size, -1)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """
    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # define a parameter table of relative position bias
        # (one learnable bias per head for each possible 2D offset)
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - \
            coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(
            1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        # shift offsets to be non-negative, then flatten (row, col) into
        # a single table index
        relative_coords[:, :, 0] += self.window_size[0] - \
            1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index",
                             relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C //
                                  self.num_heads).permute(2, 0, 3, 1, 4)
        # make torchscript happy (cannot use tensor as tuple)
        q, k, v = qkv[0], qkv[1], qkv[2]
        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))
        # add the learned relative position bias to every attention map
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            # mask is shared across the batch: reshape to broadcast it
            # over the batch and head dimensions
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N,
                             N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        #  x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops
class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(
            drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim,
                       act_layer=act_layer, drop=drop)
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA: tokens that come from
            # different image regions after the cyclic shift must not
            # attend to each other, so each region gets a distinct id and
            # cross-region pairs are masked with -100
            H, W = self.input_resolution
            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
            h_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            w_slices = (slice(0, -self.window_size),
                        slice(-self.window_size, -self.shift_size),
                        slice(-self.shift_size, None))
            cnt = 0
            for h in h_slices:
                for w in w_slices:
                    img_mask[:, h, w, :] = cnt
                    cnt += 1
            # nW, window_size, window_size, 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1,
                                             self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(
                attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def forward(self, x):
        """Apply (shifted-)window attention and MLP, both with residual
        connections; x is (B, H*W, C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        # nW*B, window_size, window_size, C
        x_windows = window_partition(shifted_x, self.window_size)
        # nW*B, window_size*window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
        # W-MSA/SW-MSA
        # nW*B, window_size*window_size, C
        attn_windows = self.attn(x_windows, mask=self.attn_mask)
        # merge windows
        attn_windows = attn_windows.view(-1,
                                         self.window_size, self.window_size, C)
        shifted_x = window_reverse(
            attn_windows, self.window_size, H, W)  # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(
                self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
            f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops
class PatchMerging(nn.Module):
    r"""Patch merging (downsampling) layer.

    Concatenates each 2x2 neighbourhood of tokens along the channel
    axis (C -> 4C), normalises, and linearly reduces to 2C; spatial
    resolution is halved in both dimensions.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)
    def forward(self, x):
        """x: (B, H*W, C) -> (B, H/2*W/2, 2C)."""
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        x = x.view(B, H, W, C)
        # gather the four pixels of every 2x2 neighbourhood along channels
        corners = [x[:, dh::2, dw::2, :]
                   for dh, dw in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(corners, -1).view(B, -1, 4 * C)  # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))
    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"
    def flops(self):
        H, W = self.input_resolution
        norm_flops = H * W * self.dim
        reduction_flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return norm_flops + reduction_flops
class PatchExpand(nn.Module):
    """Patch expanding (upsampling) layer.

    For ``dim_scale == 2``: a linear layer maps C -> 2C, then a
    pixel-shuffle-style rearrangement turns every token into a 2x2
    block of tokens, so H and W double while channels drop to C/2.
    """
    def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # identity when dim_scale != 2 (no channel expansion)
        self.expand = nn.Linear(
            dim, 2*dim, bias=False) if dim_scale == 2 else nn.Identity()
        self.norm = norm_layer(dim // dim_scale)
    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        x = self.expand(x)
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        x = x.view(B, H, W, C)
        # distribute channels into a 2x2 spatial block per token
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
                      p1=2, p2=2, c=C//4)
        x = x.view(B, -1, C//4)
        x = self.norm(x)
        return x
class FinalPatchExpand_X4(nn.Module):
    """Final 4x patch expansion before the segmentation head.

    A linear layer maps C -> 16C, then the channels are rearranged into
    a ``dim_scale`` x ``dim_scale`` (default 4x4) spatial block per
    token, so H and W grow by ``dim_scale`` while the channel count
    returns to ``dim``.
    """
    def __init__(self, input_resolution, dim, dim_scale=4, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.dim_scale = dim_scale
        self.expand = nn.Linear(dim, 16*dim, bias=False)
        self.output_dim = dim
        self.norm = norm_layer(self.output_dim)
    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        x = self.expand(x)
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        x = x.view(B, H, W, C)
        # distribute channels into a dim_scale x dim_scale spatial block
        x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c',
                      p1=self.dim_scale, p2=self.dim_scale, c=C//(self.dim_scale**2))
        x = x.view(B, -1, self.output_dim)
        x = self.norm(x)
        return x
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even blocks use regular windows, odd blocks use
        # shifted windows (shift = window_size // 2)
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (
                                     i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(
                                     drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None
    def forward(self, x):
        """Run all blocks (optionally gradient-checkpointed), then the
        optional downsample."""
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x = self.downsample(x)
        return x
    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops
class BasicLayer_up(nn.Module):
    """ A basic Swin Transformer decoder layer for one stage (blocks
    followed by an optional PatchExpand upsample).
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        upsample (nn.Module | None, optional): Upsample layer at the end of the layer. Default: None.
            NOTE(review): only used as an on/off flag -- PatchExpand is
            always instantiated regardless of which class is passed.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, upsample=None, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # build blocks: even blocks use regular windows, odd blocks use
        # shifted windows (shift = window_size // 2)
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (
                                     i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(
                                     drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])
        # patch expanding layer (see NOTE above about the upsample arg)
        if upsample is not None:
            self.upsample = PatchExpand(
                input_resolution, dim=dim, dim_scale=2, norm_layer=norm_layer)
        else:
            self.upsample = None
    def forward(self, x):
        """Run all blocks (optionally gradient-checkpointed), then the
        optional upsample."""
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.upsample is not None:
            x = self.upsample(x)
        return x
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding
    Args:
        img_size (int): Image size.  Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_resolution = [img_size[0] //
                              patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = patches_resolution
        self.num_patches = patches_resolution[0] * patches_resolution[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        # non-overlapping patch projection: stride == kernel == patch_size
        self.proj = nn.Conv2d(in_chans, embed_dim,
                              kernel_size=patch_size, stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None
    def forward(self, x):
        """Project (B, C, H, W) images to (B, num_patches, embed_dim)."""
        B, C, H, W = x.shape
        # FIXME look at relaxing size constraints
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x).flatten(2).transpose(1, 2)  # B Ph*Pw C
        if self.norm is not None:
            x = self.norm(x)
        return x
    def flops(self):
        Ho, Wo = self.patches_resolution
        flops = Ho * Wo * self.embed_dim * self.in_chans * \
            (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            flops += Ho * Wo * self.embed_dim
        return flops
class SwinTransformerSys(nn.Module):
    r""" Swin Transformer encoder-decoder (Swin-UNet) system.
        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
          https://arxiv.org/pdf/2103.14030
    Args:
        img_size (int | tuple(int)): Input image size. Default 224
        patch_size (int | tuple(int)): Patch size. Default: 4
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
                 embed_dim=96, depths=[2, 2, 2, 2], depths_decoder=[1, 2, 2, 2], num_heads=[3, 6, 12, 24],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, final_upsample="expand_first", **kwargs):
        super().__init__()
        print("SwinTransformerSys expand initial----depths:{};depths_decoder:{};drop_path_rate:{};num_classes:{}".format(depths,
                                                                                                                        depths_decoder, drop_path_rate, num_classes))
        self.num_classes = num_classes
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        # channel width at the bottleneck / at the first decoder stage
        self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
        self.num_features_up = int(embed_dim * 2)
        self.mlp_ratio = mlp_ratio
        self.final_upsample = final_upsample
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
                                                sum(depths))]  # stochastic depth decay rule
        # build encoder and bottleneck layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
                               input_resolution=(patches_resolution[0] // (2 ** i_layer),
                                                 patches_resolution[1] // (2 ** i_layer)),
                               depth=depths[i_layer],
                               num_heads=num_heads[i_layer],
                               window_size=window_size,
                               mlp_ratio=self.mlp_ratio,
                               qkv_bias=qkv_bias, qk_scale=qk_scale,
                               drop=drop_rate, attn_drop=attn_drop_rate,
                               drop_path=dpr[sum(depths[:i_layer]):sum(
                                   depths[:i_layer + 1])],
                               norm_layer=norm_layer,
                               downsample=PatchMerging if (
                                   i_layer < self.num_layers - 1) else None,
                               use_checkpoint=use_checkpoint)
            self.layers.append(layer)
        # build decoder layers (mirror of the encoder; skip features are
        # concatenated and projected back down by concat_back_dim)
        self.layers_up = nn.ModuleList()
        self.concat_back_dim = nn.ModuleList()
        for i_layer in range(self.num_layers):
            concat_linear = nn.Linear(2*int(embed_dim*2**(self.num_layers-1-i_layer)),
                                      int(embed_dim*2**(self.num_layers-1-i_layer))) if i_layer > 0 else nn.Identity()
            if i_layer == 0:
                layer_up = PatchExpand(input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
                                                         patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))), dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)), dim_scale=2, norm_layer=norm_layer)
            else:
                layer_up = BasicLayer_up(dim=int(embed_dim * 2 ** (self.num_layers-1-i_layer)),
                                         input_resolution=(patches_resolution[0] // (2 ** (self.num_layers-1-i_layer)),
                                                           patches_resolution[1] // (2 ** (self.num_layers-1-i_layer))),
                                         depth=depths[(
                                             self.num_layers-1-i_layer)],
                                         num_heads=num_heads[(
                                             self.num_layers-1-i_layer)],
                                         window_size=window_size,
                                         mlp_ratio=self.mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop_rate, attn_drop=attn_drop_rate,
                                         drop_path=dpr[sum(depths[:(
                                             self.num_layers-1-i_layer)]):sum(depths[:(self.num_layers-1-i_layer) + 1])],
                                         norm_layer=norm_layer,
                                         upsample=PatchExpand if (
                                             i_layer < self.num_layers - 1) else None,
                                         use_checkpoint=use_checkpoint)
            self.layers_up.append(layer_up)
            self.concat_back_dim.append(concat_linear)
        self.norm = norm_layer(self.num_features)
        self.norm_up = norm_layer(self.embed_dim)
        if self.final_upsample == "expand_first":
            print("---final upsample expand_first---")
            self.up = FinalPatchExpand_X4(input_resolution=(
                img_size//patch_size, img_size//patch_size), dim_scale=4, dim=embed_dim)
            self.output = nn.Conv2d(
                in_channels=embed_dim, out_channels=self.num_classes, kernel_size=1, bias=False)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # truncated-normal init for linear weights, standard LayerNorm init
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
    def no_weight_decay(self):
        # parameters excluded from weight decay by the optimizer setup
        return {'absolute_pos_embed'}
    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}
    #Encoder and Bottleneck
    def forward_features(self, x):
        """Encode ``x``; returns the bottleneck tokens and the per-stage
        inputs (skip connections) for the decoder."""
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        x_downsample = []
        for layer in self.layers:
            x_downsample.append(x)
            x = layer(x)
        x = self.norm(x)  # B L C
        return x, x_downsample
    # Dencoder and Skip connection
    def forward_up_features(self, x, x_downsample):
        """Decode, concatenating each stage with its mirrored encoder skip."""
        for inx, layer_up in enumerate(self.layers_up):
            if inx == 0:
                x = layer_up(x)
            else:
                # skip connection from the mirrored encoder stage
                x = torch.cat([x, x_downsample[3-inx]], -1)
                x = self.concat_back_dim[inx](x)
                x = layer_up(x)
        x = self.norm_up(x)  # B L C
        return x
    def up_x4(self, x):
        """Final 4x upsample and 1x1 conv to per-pixel class logits."""
        H, W = self.patches_resolution
        B, L, C = x.shape
        assert L == H*W, "input features has wrong size"
        if self.final_upsample == "expand_first":
            x = self.up(x)
            x = x.view(B, 4*H, 4*W, -1)
            x = x.permute(0, 3, 1, 2)  # B,C,H,W
            x = self.output(x)
        return x
    def forward(self, x):
        x, x_downsample = self.forward_features(x)
        x = self.forward_up_features(x, x_downsample)
        x = self.up_x4(x)
        return x
    def flops(self):
        flops = 0
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += self.num_features * \
            self.patches_resolution[0] * \
            self.patches_resolution[1] // (2 ** self.num_layers)
        flops += self.num_features * self.num_classes
        return flops
| 33,208 | 40.253416 | 209 | py |
SSL4MIS | SSL4MIS-master/code/networks/unet.py | # -*- coding: utf-8 -*-
"""
The implementation is borrowed from: https://github.com/HiLab-git/PyMIC
"""
from __future__ import division, print_function
import numpy as np
import torch
import torch.nn as nn
from torch.distributions.uniform import Uniform
def kaiming_normal_init_weight(model):
    """Re-initialise a 3D network in place.

    Conv3d weights get Kaiming-normal initialisation; BatchNorm3d
    layers get unit weight and zero bias.  Other module types are left
    untouched.

    Args:
        model: the network to initialise (modified in place).
    Returns:
        The same model, for call-chaining.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv3d):
            torch.nn.init.kaiming_normal_(module.weight)
        elif isinstance(module, nn.BatchNorm3d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
    return model
def sparse_init_weight(model):
    """Sparse-initialize Conv3d weights and reset BatchNorm3d to identity.

    Modifies ``model`` in place and returns it.
    NOTE(review): ``torch.nn.init.sparse_`` only accepts 2-D tensors, so a
    real Conv3d weight (5-D) would raise here — confirm intended usage.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv3d):
            torch.nn.init.sparse_(module.weight, sparsity=0.1)
        elif isinstance(module, nn.BatchNorm3d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
    return model
class ConvBlock(nn.Module):
    """Two 3x3 conv layers, each followed by BatchNorm and LeakyReLU,
    with dropout inserted after the first activation."""

    def __init__(self, in_channels, out_channels, dropout_p):
        super(ConvBlock, self).__init__()
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(),
            nn.Dropout(dropout_p),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(),
        ]
        self.conv_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv_conv(x)
class DownBlock(nn.Module):
    """2x max-pool downsampling followed by a ConvBlock."""

    def __init__(self, in_channels, out_channels, dropout_p):
        super(DownBlock, self).__init__()
        # halve spatial resolution first, then convolve
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            ConvBlock(in_channels, out_channels, dropout_p),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class UpBlock(nn.Module):
    """Upsampling (bilinear or transposed conv) followed by a ConvBlock.

    With ``bilinear=True`` a 1x1 conv first maps ``in_channels1`` down to
    ``in_channels2`` so the upsampled tensor matches the skip connection's
    channel count before concatenation.
    """

    def __init__(self, in_channels1, in_channels2, out_channels, dropout_p,
                 bilinear=True):
        super(UpBlock, self).__init__()
        self.bilinear = bilinear
        if self.bilinear:
            self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1)
            self.up = nn.Upsample(
                scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(
                in_channels1, in_channels2, kernel_size=2, stride=2)
        # concatenated skip doubles the channel count going into the convs
        self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p)

    def forward(self, x1, x2):
        if self.bilinear:
            x1 = self.conv1x1(x1)
        x1 = self.up(x1)
        return self.conv(torch.cat([x2, x1], dim=1))
class Encoder(nn.Module):
    """UNet encoder: an input ConvBlock followed by four DownBlocks.

    ``params`` must provide 'in_chns', 'feature_chns' (length 5), 'dropout'
    (length 5), 'class_num' and 'bilinear'.
    """

    def __init__(self, params):
        super(Encoder, self).__init__()
        self.params = params
        self.in_chns = self.params['in_chns']
        self.ft_chns = self.params['feature_chns']
        self.n_class = self.params['class_num']
        self.bilinear = self.params['bilinear']
        self.dropout = self.params['dropout']
        assert (len(self.ft_chns) == 5)
        chns, drops = self.ft_chns, self.dropout
        self.in_conv = ConvBlock(self.in_chns, chns[0], drops[0])
        self.down1 = DownBlock(chns[0], chns[1], drops[1])
        self.down2 = DownBlock(chns[1], chns[2], drops[2])
        self.down3 = DownBlock(chns[2], chns[3], drops[3])
        self.down4 = DownBlock(chns[3], chns[4], drops[4])

    def forward(self, x):
        """Return the feature pyramid [x0, x1, x2, x3, x4], coarsest last."""
        feats = [self.in_conv(x)]
        for stage in (self.down1, self.down2, self.down3, self.down4):
            feats.append(stage(feats[-1]))
        return feats
class Decoder(nn.Module):
    """UNet decoder: four UpBlocks followed by a 3x3 output conv.

    NOTE(review): the UpBlocks are created with their default
    ``bilinear=True``; the 'bilinear' entry of ``params`` is stored but not
    forwarded — confirm whether that is intended.
    """

    def __init__(self, params):
        super(Decoder, self).__init__()
        self.params = params
        self.in_chns = self.params['in_chns']
        self.ft_chns = self.params['feature_chns']
        self.n_class = self.params['class_num']
        self.bilinear = self.params['bilinear']
        assert (len(self.ft_chns) == 5)
        chns = self.ft_chns
        self.up1 = UpBlock(chns[4], chns[3], chns[3], dropout_p=0.0)
        self.up2 = UpBlock(chns[3], chns[2], chns[2], dropout_p=0.0)
        self.up3 = UpBlock(chns[2], chns[1], chns[1], dropout_p=0.0)
        self.up4 = UpBlock(chns[1], chns[0], chns[0], dropout_p=0.0)
        self.out_conv = nn.Conv2d(chns[0], self.n_class,
                                  kernel_size=3, padding=1)

    def forward(self, feature):
        """``feature`` is the encoder's pyramid [x0..x4], coarsest last."""
        x = self.up1(feature[4], feature[3])
        x = self.up2(x, feature[2])
        x = self.up3(x, feature[1])
        x = self.up4(x, feature[0])
        return self.out_conv(x)
class Decoder_DS(nn.Module):
    """UNet decoder with deep supervision: emits an auxiliary prediction
    after each upsampling stage, each interpolated to ``shape``."""

    def __init__(self, params):
        super(Decoder_DS, self).__init__()
        self.params = params
        self.in_chns = self.params['in_chns']
        self.ft_chns = self.params['feature_chns']
        self.n_class = self.params['class_num']
        self.bilinear = self.params['bilinear']
        assert (len(self.ft_chns) == 5)
        chns = self.ft_chns
        self.up1 = UpBlock(chns[4], chns[3], chns[3], dropout_p=0.0)
        self.up2 = UpBlock(chns[3], chns[2], chns[2], dropout_p=0.0)
        self.up3 = UpBlock(chns[2], chns[1], chns[1], dropout_p=0.0)
        self.up4 = UpBlock(chns[1], chns[0], chns[0], dropout_p=0.0)
        self.out_conv = nn.Conv2d(chns[0], self.n_class,
                                  kernel_size=3, padding=1)
        # auxiliary heads (dp4 is constructed but not used in forward)
        self.out_conv_dp4 = nn.Conv2d(chns[4], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp3 = nn.Conv2d(chns[3], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp2 = nn.Conv2d(chns[2], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp1 = nn.Conv2d(chns[1], self.n_class,
                                      kernel_size=3, padding=1)

    def forward(self, feature, shape):
        """Return (main, aux1, aux2, aux3) predictions, aux ones resized to ``shape``."""
        x = self.up1(feature[4], feature[3])
        dp3_out_seg = torch.nn.functional.interpolate(self.out_conv_dp3(x), shape)
        x = self.up2(x, feature[2])
        dp2_out_seg = torch.nn.functional.interpolate(self.out_conv_dp2(x), shape)
        x = self.up3(x, feature[1])
        dp1_out_seg = torch.nn.functional.interpolate(self.out_conv_dp1(x), shape)
        x = self.up4(x, feature[0])
        dp0_out_seg = self.out_conv(x)
        return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg
class Decoder_URPC(nn.Module):
    """UNet decoder for uncertainty-rectified pyramid consistency (URPC).

    During training each auxiliary head sees a differently perturbed
    feature map (dropout / attention-guided feature dropout / uniform
    feature noise); at inference the unperturbed features are used.
    """

    def __init__(self, params):
        super(Decoder_URPC, self).__init__()
        self.params = params
        self.in_chns = self.params['in_chns']
        self.ft_chns = self.params['feature_chns']
        self.n_class = self.params['class_num']
        self.bilinear = self.params['bilinear']
        assert (len(self.ft_chns) == 5)
        chns = self.ft_chns
        self.up1 = UpBlock(chns[4], chns[3], chns[3], dropout_p=0.0)
        self.up2 = UpBlock(chns[3], chns[2], chns[2], dropout_p=0.0)
        self.up3 = UpBlock(chns[2], chns[1], chns[1], dropout_p=0.0)
        self.up4 = UpBlock(chns[1], chns[0], chns[0], dropout_p=0.0)
        self.out_conv = nn.Conv2d(chns[0], self.n_class,
                                  kernel_size=3, padding=1)
        # auxiliary heads (dp4 is constructed but not used in forward)
        self.out_conv_dp4 = nn.Conv2d(chns[4], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp3 = nn.Conv2d(chns[3], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp2 = nn.Conv2d(chns[2], self.n_class,
                                      kernel_size=3, padding=1)
        self.out_conv_dp1 = nn.Conv2d(chns[1], self.n_class,
                                      kernel_size=3, padding=1)
        self.feature_noise = FeatureNoise()

    def forward(self, feature, shape):
        """Return (main, aux1, aux2, aux3) predictions, aux ones resized to ``shape``."""
        x = self.up1(feature[4], feature[3])
        # aux head 3: standard dropout perturbation during training only
        dp3_in = Dropout(x, p=0.5) if self.training else x
        dp3_out_seg = torch.nn.functional.interpolate(self.out_conv_dp3(dp3_in), shape)
        x = self.up2(x, feature[2])
        # aux head 2: attention-guided feature dropout during training only
        dp2_in = FeatureDropout(x) if self.training else x
        dp2_out_seg = torch.nn.functional.interpolate(self.out_conv_dp2(dp2_in), shape)
        x = self.up3(x, feature[1])
        # aux head 1: multiplicative uniform noise during training only
        dp1_in = self.feature_noise(x) if self.training else x
        dp1_out_seg = torch.nn.functional.interpolate(self.out_conv_dp1(dp1_in), shape)
        x = self.up4(x, feature[0])
        dp0_out_seg = self.out_conv(x)
        return dp0_out_seg, dp1_out_seg, dp2_out_seg, dp3_out_seg
def Dropout(x, p=0.3):
    """Apply standard (element) dropout with probability ``p``.

    Uses the functional form, which defaults to training mode, so elements
    are always dropped regardless of any enclosing module's mode.
    """
    return torch.nn.functional.dropout(x, p)
def FeatureDropout(x):
    """Zero out the most-attended spatial positions of ``x``.

    The channel-mean attention map is thresholded at a random fraction
    (0.7-0.9) of its per-sample maximum; positions at or above the
    threshold are dropped (multiplied by zero).
    """
    attention = torch.mean(x, dim=1, keepdim=True)
    max_val, _ = torch.max(attention.view(x.size(0), -1), dim=1, keepdim=True)
    threshold = max_val * np.random.uniform(0.7, 0.9)
    threshold = threshold.view(x.size(0), 1, 1, 1).expand_as(attention)
    keep_mask = (attention < threshold).float()
    return x.mul(keep_mask)
class FeatureNoise(nn.Module):
    """Multiply features by (1 + U(-r, r)) noise, resampled per forward pass."""

    def __init__(self, uniform_range=0.3):
        super(FeatureNoise, self).__init__()
        self.uni_dist = Uniform(-uniform_range, uniform_range)

    def feature_based_noise(self, x):
        # one noise value per (C, H, W) position, broadcast over the batch
        noise = self.uni_dist.sample(x.shape[1:]).to(x.device).unsqueeze(0)
        return x + x.mul(noise)

    def forward(self, x):
        return self.feature_based_noise(x)
class UNet(nn.Module):
    """Standard 2D UNet: one encoder feeding one decoder."""

    def __init__(self, in_chns, class_num):
        super(UNet, self).__init__()
        params = {'in_chns': in_chns,
                  'feature_chns': [16, 32, 64, 128, 256],
                  'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
                  'class_num': class_num,
                  'bilinear': False,
                  'acti_func': 'relu'}
        self.encoder = Encoder(params)
        self.decoder = Decoder(params)

    def forward(self, x):
        return self.decoder(self.encoder(x))
class UNet_CCT(nn.Module):
    """UNet for cross-consistency training: one main decoder plus three
    auxiliary decoders fed differently perturbed encoder features."""

    def __init__(self, in_chns, class_num):
        super(UNet_CCT, self).__init__()
        params = {'in_chns': in_chns,
                  'feature_chns': [16, 32, 64, 128, 256],
                  'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
                  'class_num': class_num,
                  'bilinear': False,
                  'acti_func': 'relu'}
        self.encoder = Encoder(params)
        self.main_decoder = Decoder(params)
        self.aux_decoder1 = Decoder(params)
        self.aux_decoder2 = Decoder(params)
        self.aux_decoder3 = Decoder(params)

    def forward(self, x):
        feature = self.encoder(x)
        main_seg = self.main_decoder(feature)
        # each aux branch perturbs every level of the feature pyramid
        aux_seg1 = self.aux_decoder1([FeatureNoise()(f) for f in feature])
        aux_seg2 = self.aux_decoder2([Dropout(f) for f in feature])
        aux_seg3 = self.aux_decoder3([FeatureDropout(f) for f in feature])
        return main_seg, aux_seg1, aux_seg2, aux_seg3
class UNet_URPC(nn.Module):
    """UNet paired with the URPC multi-scale deep-supervision decoder."""

    def __init__(self, in_chns, class_num):
        super(UNet_URPC, self).__init__()
        params = {'in_chns': in_chns,
                  'feature_chns': [16, 32, 64, 128, 256],
                  'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
                  'class_num': class_num,
                  'bilinear': False,
                  'acti_func': 'relu'}
        self.encoder = Encoder(params)
        self.decoder = Decoder_URPC(params)

    def forward(self, x):
        # all auxiliary outputs are resized to the input's spatial shape
        target_shape = x.shape[2:]
        return self.decoder(self.encoder(x), target_shape)
class UNet_DS(nn.Module):
    """UNet paired with the deep-supervision decoder (Decoder_DS)."""

    def __init__(self, in_chns, class_num):
        super(UNet_DS, self).__init__()
        params = {'in_chns': in_chns,
                  'feature_chns': [16, 32, 64, 128, 256],
                  'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],
                  'class_num': class_num,
                  'bilinear': False,
                  'acti_func': 'relu'}
        self.encoder = Encoder(params)
        self.decoder = Decoder_DS(params)

    def forward(self, x):
        # all auxiliary outputs are resized to the input's spatial shape
        target_shape = x.shape[2:]
        return self.decoder(self.encoder(x), target_shape)
| 13,801 | 34.030457 | 79 | py |
SSL4MIS | SSL4MIS-master/code/networks/efficientunet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from networks.attention import *
from networks.efficient_encoder import get_encoder
def initialize_decoder(module):
    """Initialize decoder weights in place.

    Conv2d: Kaiming-uniform (fan-in, relu). BatchNorm2d: weight 1, bias 0.
    Linear: Xavier-uniform. Biases are zeroed where present.
    """
    for layer in module.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_uniform_(layer.weight, mode="fan_in", nonlinearity="relu")
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)
        elif isinstance(layer, nn.BatchNorm2d):
            # identity affine transform
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
        elif isinstance(layer, nn.Linear):
            nn.init.xavier_uniform_(layer.weight)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)
class DecoderBlock(nn.Module):
    """Unet decoder stage: 2x nearest upsample, optional skip concat,
    attention, two Conv2dReLU layers, then attention again."""

    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        attention_type=None,
    ):
        super().__init__()
        merged_channels = in_channels + skip_channels
        self.conv1 = Conv2dReLU(
            merged_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention1 = Attention(attention_type, in_channels=merged_channels)
        self.conv2 = Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention2 = Attention(attention_type, in_channels=out_channels)

    def forward(self, x, skip=None):
        x = F.interpolate(x, scale_factor=2, mode="nearest")
        if skip is not None:
            x = torch.cat([x, skip], dim=1)
        x = self.conv2(self.conv1(self.attention1(x)))
        return self.attention2(x)
class CenterBlock(nn.Sequential):
    """Two stacked Conv2dReLU layers used at the bottleneck
    (enabled for VGG-style encoders)."""

    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        super().__init__(
            Conv2dReLU(
                in_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                use_batchnorm=use_batchnorm,
            ),
            Conv2dReLU(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                use_batchnorm=use_batchnorm,
            ),
        )
class UnetDecoder(nn.Module):
    """Chain of DecoderBlocks consuming the encoder features deepest-first."""

    def __init__(
        self,
        encoder_channels,
        decoder_channels,
        n_blocks=5,
        use_batchnorm=True,
        attention_type=None,
        center=False,
    ):
        super().__init__()
        if n_blocks != len(decoder_channels):
            raise ValueError(
                "Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
                    n_blocks, len(decoder_channels)
                )
            )
        # drop the full-resolution skip, then reverse to deepest-first
        encoder_channels = encoder_channels[1:][::-1]
        head_channels = encoder_channels[0]
        in_channels = [head_channels] + list(decoder_channels[:-1])
        skip_channels = list(encoder_channels[1:]) + [0]
        if center:
            self.center = CenterBlock(
                head_channels, head_channels, use_batchnorm=use_batchnorm
            )
        else:
            self.center = nn.Identity()
        kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
        self.blocks = nn.ModuleList(
            [DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
             for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, decoder_channels)]
        )

    def forward(self, *features):
        features = features[1:][::-1]  # drop first skip, deepest-first
        x = self.center(features[0])
        skips = features[1:]
        for i, decoder_block in enumerate(self.blocks):
            x = decoder_block(x, skips[i] if i < len(skips) else None)
        return x
class Effi_UNet(nn.Module):
    """Unet-style segmentation model with a pretrained classification encoder.

    Args:
        encoder_name: name of the backbone used as the feature extractor
            (e.g. an EfficientNet or ResNet variant).
        encoder_depth: number of encoder stages used; the decoder gets one
            block per stage.
        encoder_weights: ``None`` (random init) or ``"imagenet"``.
        decoder_use_batchnorm: BatchNorm setting for decoder convs; one of
            ``True``, ``False``, ``'inplace'``.
        decoder_channels: Conv2d filter counts for the decoder blocks.
        decoder_attention_type: decoder attention module, ``None`` or ``"scse"``.
        in_channels: number of input image channels (default 3).
        classes: number of output classes; output shape is
            ``(batch, classes, h, w)``.

    Reference (Unet): https://arxiv.org/pdf/1505.04597
    """

    def __init__(
            self,
            encoder_name: str = "resnet34",
            encoder_depth: int = 5,
            encoder_weights: str = "imagenet",
            decoder_use_batchnorm=True,
            decoder_channels=(256, 128, 64, 32, 16),
            decoder_attention_type=None,
            in_channels: int = 3,
            classes: int = 1):
        super().__init__()
        self.encoder = get_encoder(
            encoder_name,
            in_channels=in_channels,
            depth=encoder_depth,
            weights=encoder_weights,
        )
        self.decoder = UnetDecoder(
            encoder_channels=self.encoder.out_channels,
            decoder_channels=decoder_channels,
            n_blocks=encoder_depth,
            use_batchnorm=decoder_use_batchnorm,
            # only VGG-style encoders need a bottleneck center block
            center=encoder_name.startswith("vgg"),
            attention_type=decoder_attention_type,
        )
        initialize_decoder(self.decoder)
        # 1x1 conv head mapping decoder output to class logits
        self.classifier = nn.Conv2d(decoder_channels[-1], classes, 1)

    def forward(self, x):
        """Sequentially pass ``x`` through the encoder, decoder, and head."""
        features = self.encoder(x)
        return self.classifier(self.decoder(*features))
# unet = UNet('efficientnet-b3', encoder_weights='imagenet', in_channels=1, classes=1, decoder_attention_type="scse")
# t = torch.rand(2, 1, 224, 224)
# print(unet)
# print(unet(t).shape)
| 7,930 | 34.725225 | 117 | py |
SSL4MIS | SSL4MIS-master/code/networks/nnunet.py | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import torch.nn.functional as F
from torch import nn
import torch
import numpy as np
from networks.neural_network import SegmentationNetwork
import torch.nn.functional
def softmax_helper(x):
    """Softmax over the channel dimension (dim 1)."""
    return F.softmax(x, 1)
class InitWeights_He(object):
    """Callable weight initializer for use with ``nn.Module.apply``.

    Applies He (Kaiming) normal init to conv / transposed-conv weights with
    negative slope ``neg_slope`` and zeros any bias.
    """

    def __init__(self, neg_slope=1e-2):
        self.neg_slope = neg_slope

    def __call__(self, module):
        conv_types = (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)
        if isinstance(module, conv_types):
            module.weight = nn.init.kaiming_normal_(
                module.weight, a=self.neg_slope)
            if module.bias is not None:
                module.bias = nn.init.constant_(module.bias, 0)
class ConvDropoutNormNonlin(nn.Module):
    """conv -> (optional dropout) -> norm -> nonlinearity.

    All four operators and their kwargs are injectable; per the original
    note, this variant applies the configured ``nonlin`` rather than
    unconditionally using LeakyReLU.
    """

    def __init__(self, input_channels, output_channels,
                 conv_op=nn.Conv2d, conv_kwargs=None,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None):
        super(ConvDropoutNormNonlin, self).__init__()
        # fill in nnU-Net's standard defaults for any unspecified kwargs
        if nonlin_kwargs is None:
            nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
        if conv_kwargs is None:
            conv_kwargs = {'kernel_size': 3, 'stride': 1,
                           'padding': 1, 'dilation': 1, 'bias': True}
        # keep every config on the instance so subclasses can reuse it
        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.conv = self.conv_op(
            input_channels, output_channels, **self.conv_kwargs)
        # dropout is only instantiated for a positive, non-None probability
        wants_dropout = (self.dropout_op is not None
                         and self.dropout_op_kwargs['p'] is not None
                         and self.dropout_op_kwargs['p'] > 0)
        self.dropout = self.dropout_op(**self.dropout_op_kwargs) if wants_dropout else None
        self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
        self.lrelu = self.nonlin(**self.nonlin_kwargs)

    def forward(self, x):
        x = self.conv(x)
        if self.dropout is not None:
            x = self.dropout(x)
        return self.lrelu(self.instnorm(x))
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
    """Same layers as ConvDropoutNormNonlin, but the nonlinearity is applied
    before normalization (conv -> dropout -> nonlin -> norm)."""

    def forward(self, x):
        x = self.conv(x)
        if self.dropout is not None:
            x = self.dropout(x)
        return self.instnorm(self.lrelu(x))
class StackedConvLayers(nn.Module):
    """A stack of ``num_convs`` conv blocks.

    ``first_stride`` (if given) is applied only to the first block, which
    also performs the change from ``input_feature_channels`` to
    ``output_feature_channels``; all remaining blocks keep the channel
    count fixed.
    """

    def __init__(self, input_feature_channels, output_feature_channels, num_convs,
                 conv_op=nn.Conv2d, conv_kwargs=None,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
        # plain-value attributes may be set before Module.__init__ (kept as
        # in the original); channel counts are read by Generic_UNet
        self.input_channels = input_feature_channels
        self.output_channels = output_feature_channels
        # fill in nnU-Net's standard defaults for any unspecified kwargs
        if nonlin_kwargs is None:
            nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
        if conv_kwargs is None:
            conv_kwargs = {'kernel_size': 3, 'stride': 1,
                           'padding': 1, 'dilation': 1, 'bias': True}
        self.nonlin_kwargs = nonlin_kwargs
        self.nonlin = nonlin
        self.dropout_op = dropout_op
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.conv_kwargs = conv_kwargs
        self.conv_op = conv_op
        self.norm_op = norm_op
        # only the first conv in the stack may stride (for downsampling)
        if first_stride is not None:
            self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
            self.conv_kwargs_first_conv['stride'] = first_stride
        else:
            self.conv_kwargs_first_conv = conv_kwargs
        super(StackedConvLayers, self).__init__()
        first_block = basic_block(input_feature_channels, output_feature_channels,
                                  self.conv_op, self.conv_kwargs_first_conv,
                                  self.norm_op, self.norm_op_kwargs,
                                  self.dropout_op, self.dropout_op_kwargs,
                                  self.nonlin, self.nonlin_kwargs)
        rest = [basic_block(output_feature_channels, output_feature_channels,
                            self.conv_op, self.conv_kwargs,
                            self.norm_op, self.norm_op_kwargs,
                            self.dropout_op, self.dropout_op_kwargs,
                            self.nonlin, self.nonlin_kwargs)
                for _ in range(num_convs - 1)]
        self.blocks = nn.Sequential(first_block, *rest)

    def forward(self, x):
        return self.blocks(x)
def print_module_training_status(module):
    """Debug helper: print the module and its ``training`` flag for common
    conv / dropout / norm layer types; other modules are ignored."""
    watched = (nn.Conv2d, nn.Conv3d, nn.Dropout3d, nn.Dropout2d, nn.Dropout,
               nn.InstanceNorm3d, nn.InstanceNorm2d, nn.InstanceNorm1d,
               nn.BatchNorm2d, nn.BatchNorm3d, nn.BatchNorm1d)
    if isinstance(module, watched):
        print(str(module), module.training)
class Upsample(nn.Module):
    """Module wrapper around ``nn.functional.interpolate`` so the resize
    settings are stored once at construction.

    NOTE(review): ``align_corners`` defaults to False (not None), which
    F.interpolate rejects for the 'nearest'/'area' modes; callers in this
    file use bilinear/trilinear — confirm before using other modes.
    """

    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
        super(Upsample, self).__init__()
        self.align_corners = align_corners
        self.mode = mode
        self.scale_factor = scale_factor
        self.size = size

    def forward(self, x):
        return nn.functional.interpolate(
            x, size=self.size, scale_factor=self.scale_factor,
            mode=self.mode, align_corners=self.align_corners)
class Generic_UNet(SegmentationNetwork):
    """Configurable 2D/3D U-Net as used by nnU-Net.

    The conv/norm/dropout/nonlinearity operators, per-stage kernel sizes,
    pooling kernels, and the down-/upsampling strategies are all injected
    through the constructor, so this one class builds both 2D and 3D nets.
    """

    # defaults and hard limits used by nnU-Net's experiment planning (3D)
    DEFAULT_BATCH_SIZE_3D = 2
    DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
    SPACING_FACTOR_BETWEEN_STAGES = 2
    BASE_NUM_FEATURES_3D = 30
    MAX_NUMPOOL_3D = 999
    MAX_NUM_FILTERS_3D = 320

    # defaults and hard limits (2D)
    DEFAULT_PATCH_SIZE_2D = (256, 256)
    BASE_NUM_FEATURES_2D = 30
    DEFAULT_BATCH_SIZE_2D = 50
    MAX_NUMPOOL_2D = 999
    MAX_FILTERS_2D = 480

    # reference constants for VRAM-proportional batch-size estimation
    use_this_for_batch_size_computation_2D = 19739648
    use_this_for_batch_size_computation_3D = 520000000  # 505789440

    def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
                 feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
                 norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
                 final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
                 conv_kernel_sizes=None,
                 upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
                 max_num_features=None, basic_block=ConvDropoutNormNonlin,
                 seg_output_use_bias=False):
        """Build the encoder ("context"), bottleneck, and decoder
        ("localization") pathways plus one segmentation head per decoder
        stage (for deep supervision).

        NOTE(review): ``weightInitializer=InitWeights_He(1e-2)`` is a
        mutable-looking default evaluated once at def time; the instance is
        shared across constructions (it holds only ``neg_slope``).
        """
        super(Generic_UNet, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        # fill in standard defaults for any unspecified op kwargs
        if nonlin_kwargs is None:
            nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
        # kernel_size/padding are filled in per stage below
        self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision
        # pick 2D vs 3D companions for the chosen conv operator
        if conv_op == nn.Conv2d:
            upsample_mode = 'bilinear'
            pool_op = nn.MaxPool2d
            transpconv = nn.ConvTranspose2d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
        elif conv_op == nn.Conv3d:
            upsample_mode = 'trilinear'
            pool_op = nn.MaxPool3d
            transpconv = nn.ConvTranspose3d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
        else:
            raise ValueError(
                "unknown convolution dimensionality, conv op: %s" % str(conv_op))
        # input spatial size must be divisible by the product of all pools
        self.input_shape_must_be_divisible_by = np.prod(
            pool_op_kernel_sizes, 0, dtype=np.int64)
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes
        # 'same' padding: 1 for kernel size 3, 0 for kernel size 1
        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])
        if max_num_features is None:
            if self.conv_op == nn.Conv3d:
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features
        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.td = []  # downsampling ops (pooling), unused if conv pooling
        self.tu = []  # upsampling ops (interp or transposed conv)
        self.seg_outputs = []
        output_features = base_num_features
        input_features = input_channels
        # ----- encoder ("context") pathway -----
        for d in range(num_pool):
            # determine the first stride
            if d != 0 and self.convolutional_pooling:
                first_stride = pool_op_kernel_sizes[d - 1]
            else:
                first_stride = None
            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
            # add convolutions
            self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
                                                              self.conv_op, self.conv_kwargs, self.norm_op,
                                                              self.norm_op_kwargs, self.dropout_op,
                                                              self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
                                                              first_stride, basic_block=basic_block))
            if not self.convolutional_pooling:
                self.td.append(pool_op(pool_op_kernel_sizes[d]))
            input_features = output_features
            # feature count grows per stage but is capped at max_num_features
            output_features = int(
                np.round(output_features * feat_map_mul_on_downscale))
            output_features = min(output_features, self.max_num_features)
        # ----- bottleneck -----
        # determine the first stride
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[-1]
        else:
            first_stride = None
        # the output of the last conv must match the number of features from the skip connection if we are not using
        # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
        # done by the transposed conv
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[-1].output_channels
        self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(nn.Sequential(
            StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
                              self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                              self.nonlin_kwargs, first_stride, basic_block=basic_block),
            StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
                              self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
                              self.nonlin_kwargs, basic_block=basic_block)))
        # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
        if not dropout_in_localization:
            old_dropout_p = self.dropout_op_kwargs['p']
            self.dropout_op_kwargs['p'] = 0.0
        # ----- decoder ("localization") pathway -----
        for u in range(num_pool):
            nfeatures_from_down = final_num_features
            nfeatures_from_skip = self.conv_blocks_context[
                -(2 + u)].output_channels  # self.conv_blocks_context[-1] is bottleneck, so start with -2
            n_features_after_tu_and_concat = nfeatures_from_skip * 2
            # the first conv reduces the number of features to match those of skip
            # the following convs work on that number of features
            # if not convolutional upsampling then the final conv reduces the num of features again
            if u != num_pool - 1 and not self.convolutional_upsampling:
                final_num_features = self.conv_blocks_context[-(
                    3 + u)].output_channels
            else:
                final_num_features = nfeatures_from_skip
            if not self.convolutional_upsampling:
                self.tu.append(
                    Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
            else:
                self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
                                          pool_op_kernel_sizes[-(u + 1)], bias=False))
            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
            self.conv_blocks_localization.append(nn.Sequential(
                StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
                                  self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
                                  self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
                StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
                                  self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
                                  self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
            ))
        # one 1x1 segmentation head per decoder stage (deep supervision)
        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
                                            1, 1, 0, 1, 1, seg_output_use_bias))
        # ops that upscale the auxiliary logits to full resolution
        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(
            np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
        for usl in range(num_pool - 1):
            if self.upscale_logits:
                self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
                                                        mode=upsample_mode))
            else:
                self.upscale_logits_ops.append(lambda x: x)
        if not dropout_in_localization:
            self.dropout_op_kwargs['p'] = old_dropout_p
        # register all modules properly
        self.conv_blocks_localization = nn.ModuleList(
            self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.td = nn.ModuleList(self.td)
        self.tu = nn.ModuleList(self.tu)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            self.upscale_logits_ops = nn.ModuleList(
                self.upscale_logits_ops)  # lambda x:x is not a Module so we need to distinguish here
        if self.weightInitializer is not None:
            self.apply(self.weightInitializer)
            # self.apply(print_module_training_status)

    def forward(self, x):
        """Encoder -> bottleneck -> decoder with skip connections.

        Returns a single segmentation map, or (with deep supervision on)
        a tuple of maps, full-resolution first.
        """
        skips = []
        seg_outputs = []
        # encoder: save each stage output as a skip, downsample between stages
        for d in range(len(self.conv_blocks_context) - 1):
            x = self.conv_blocks_context[d](x)
            skips.append(x)
            if not self.convolutional_pooling:
                x = self.td[d](x)
        x = self.conv_blocks_context[-1](x)
        # decoder: upsample, concat the matching skip, convolve, predict
        for u in range(len(self.tu)):
            x = self.tu[u](x)
            x = torch.cat((x, skips[-(u + 1)]), dim=1)
            x = self.conv_blocks_localization[u](x)
            seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))
        if self._deep_supervision and self.do_ds:
            # full-resolution output first, then upscaled auxiliary outputs
            return tuple([seg_outputs[-1]] + [i(j) for i, j in
                                              zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
        else:
            return seg_outputs[-1]

    @staticmethod
    def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
                                        num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
                                        conv_per_stage=2):
        """Return a constant roughly proportional to VRAM consumption.

        Only valid for ``num_conv_per_stage`` with ``convolutional_upsampling=True``;
        this is not real memory usage, just a planner heuristic
        (parameter storage adds a constant offset on top).

        :param deep_supervision: add auxiliary-output feature maps
        :param patch_size: spatial input size
        :param num_pool_per_axis: pooling steps per spatial axis
        :param base_num_features: features at the first stage
        :param max_num_features: feature-count cap per stage
        :param num_modalities: input channels
        :param num_classes: output channels
        :param pool_op_kernel_sizes: per-stage pooling kernels
        :return: approximate, unitless consumption estimate
        """
        if not isinstance(num_pool_per_axis, np.ndarray):
            num_pool_per_axis = np.array(num_pool_per_axis)
        npool = len(pool_op_kernel_sizes)
        map_size = np.array(patch_size)
        # full-resolution stage: conv blocks + input + output maps
        tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
                       num_modalities * np.prod(map_size, dtype=np.int64) +
                       num_classes * np.prod(map_size, dtype=np.int64))
        num_feat = base_num_features
        for p in range(npool):
            for pi in range(len(num_pool_per_axis)):
                map_size[pi] /= pool_op_kernel_sizes[p][pi]
            num_feat = min(num_feat * 2, max_num_features)
            # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
            num_blocks = (conv_per_stage * 2 +
                          1) if p < (npool - 1) else conv_per_stage
            tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
            if deep_supervision and p < (npool - 2):
                tmp += np.prod(map_size, dtype=np.int64) * num_classes
            # print(p, map_size, num_feat, tmp)
        return tmp
# Default architecture/training configuration consumed by initialize_network
# below (nnU-Net-style Generic_UNet hyperparameters).
# NOTE(review): several lr_* / epoch keys are not read anywhere in this view —
# presumably used by an external trainer; confirm before removing.
default_dict = {
    "base_num_features": 16,    # channels of the first U-Net stage
    "conv_per_stage": 2,        # convolutions per resolution stage
    "initial_lr": 0.01,
    "lr_scheduler": None,
    "lr_scheduler_eps": 0.001,
    "lr_scheduler_patience": 30,
    "lr_threshold": 1e-06,
    "max_num_epochs": 1000,
    # per-stage conv kernel sizes (one entry per stage, z/y/x order assumed —
    # TODO confirm against Generic_UNet)
    "net_conv_kernel_sizes": [[1, 3, 3], [1, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]],
    # pooling kernel per downsampling step; anisotropic first axis
    "net_num_pool_op_kernel_sizes": [[1, 2, 2], [1, 2, 2], [2, 2, 2], [2, 2, 2], [1, 2, 2], [1, 2, 2]],
    "net_pool_per_axis": [2, 6, 6],
    "num_batches_per_epoch": 250,
    "num_classes": 3,           # overwritten by initialize_network(num_classes=...)
    "num_input_channels": 1,
    "transpose_backward": [0, 1, 2],
    "transpose_forward": [0, 1, 2],
}
def initialize_network(threeD=True, num_classes=2):
    """Build a Generic_UNet from ``default_dict`` and return it on the GPU.

    This is specific to the U-Net and must be adapted for other network
    architectures.

    :param threeD: use 3D conv/dropout/instance-norm ops when True, else 2D.
    :param num_classes: number of segmentation classes. NOTE: this also
        mutates the module-level ``default_dict["num_classes"]`` (kept for
        backward compatibility).
    :return: the initialized network, moved to CUDA.
    """
    if threeD:
        conv_op = nn.Conv3d
        dropout_op = nn.Dropout3d
        norm_op = nn.InstanceNorm3d
    else:
        conv_op = nn.Conv2d
        dropout_op = nn.Dropout2d
        norm_op = nn.InstanceNorm2d
    default_dict["num_classes"] = num_classes
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}   # p=0: dropout effectively disabled
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
    network = Generic_UNet(default_dict["num_input_channels"], default_dict["base_num_features"],
                           default_dict["num_classes"], len(default_dict["net_num_pool_op_kernel_sizes"]),
                           default_dict["conv_per_stage"], 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
                           dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, False, False, lambda x: x,
                           InitWeights_He(1e-2), default_dict["net_num_pool_op_kernel_sizes"],
                           default_dict["net_conv_kernel_sizes"], False, True, True)
    # BUG fix: message previously read "nnUNet have {} paramerters in total"
    print("nnUNet has {} parameters in total".format(
        sum(x.numel() for x in network.parameters())))
    return network.cuda()
# input = torch.FloatTensor(1, 1, 32, 192, 192)
# input_var = input.cuda()
# model = initialize_network(threeD=True)
# out = model(input_var)
# print(out.size()) | 23,919 | 43.71028 | 177 | py |
SSL4MIS | SSL4MIS-master/code/networks/networks_other.py | import functools
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import init
from torch.optim import lr_scheduler
###############################################################################
# Functions
###############################################################################
def weights_init_normal(m):
    """Initialize a module GAN-style: N(0, 0.02) for Conv/Linear weights,
    N(1, 0.02) weights and zero bias for BatchNorm.

    Meant to be used via ``net.apply(weights_init_normal)``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # use the in-place ``_`` initializers: init.normal/init.constant are
        # deprecated (consistent with weights_init_kaiming in this file)
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('Linear') != -1:
        init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_xavier(m):
    """Xavier (Glorot) normal init for Conv/Linear weights; N(1, 0.02)
    weights and zero bias for BatchNorm. Use via ``net.apply(...)``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # use the in-place ``_`` initializers: init.xavier_normal/init.normal/
        # init.constant are deprecated (consistent with weights_init_kaiming)
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.xavier_normal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_kaiming(m):
    """Kaiming (He) fan-in init for Conv/Linear weights; N(1, 0.02) weights
    and zero bias for BatchNorm. Use via ``net.apply(weights_init_kaiming)``.
    """
    classname = m.__class__.__name__
    if 'Conv' in classname:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'Linear' in classname:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
    elif 'BatchNorm' in classname:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_orthogonal(m):
    """Orthogonal init for Conv/Linear weights; N(1, 0.02) weights and zero
    bias for BatchNorm. Use via ``net.apply(weights_init_orthogonal)``.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        # use the in-place ``_`` initializers: init.orthogonal/init.normal/
        # init.constant are deprecated (consistent with weights_init_kaiming)
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
    """Apply one of the named weight-initialization schemes to every submodule
    of ``net`` (normal / xavier / kaiming / orthogonal)."""
    initializers = {
        'normal': weights_init_normal,
        'xavier': weights_init_xavier,
        'kaiming': weights_init_kaiming,
        'orthogonal': weights_init_orthogonal,
    }
    if init_type not in initializers:
        raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
    net.apply(initializers[init_type])
def get_norm_layer(norm_type='instance'):
    """Return a 2D normalization-layer constructor for the given name.

    'batch' -> affine BatchNorm2d, 'instance' -> non-affine InstanceNorm2d,
    'none' -> None. Any other name raises NotImplementedError.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False)
    if norm_type == 'none':
        return None
    raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
def adjust_learning_rate(optimizer, lr):
    """Set the learning rate of every parameter group to the fixed value ``lr``."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def get_scheduler(optimizer, opt):
    """Build a learning-rate scheduler for ``optimizer`` from ``opt.lr_policy``.

    Supported policies: 'lambda' (linear decay using opt.epoch_count /
    opt.niter / opt.niter_decay), 'step'/'step2' (StepLR with
    opt.lr_decay_iters and gamma 0.5/0.1), 'plateau'/'plateau2'
    (ReduceLROnPlateau, factor 0.1/0.2), and the warm-start step schedules
    'step_warmstart'/'step_warmstart2'.

    :raises NotImplementedError: for an unknown ``opt.lr_policy``.
    """
    print('opt.lr_policy = [{}]'.format(opt.lr_policy))
    if opt.lr_policy == 'lambda':
        def lambda_rule(epoch):
            # linear decay from 1 to 0 over the final ``niter_decay`` epochs
            lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
    elif opt.lr_policy == 'step2':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        print('schedular=plateau')
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, threshold=0.01, patience=5)
    elif opt.lr_policy == 'plateau2':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'step_warmstart':
        def lambda_rule(epoch):
            # warm start at 0.1x, full rate to epoch 100, then decay
            if epoch < 5:
                lr_l = 0.1
            elif 5 <= epoch < 100:
                lr_l = 1
            elif 100 <= epoch < 200:
                lr_l = 0.1
            else:
                lr_l = 0.01
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step_warmstart2':
        def lambda_rule(epoch):
            # warm start at 0.1x, full rate to epoch 50, then decay
            if epoch < 5:
                lr_l = 0.1
            elif 5 <= epoch < 50:
                lr_l = 1
            elif 50 <= epoch < 100:
                lr_l = 0.1
            else:
                lr_l = 0.01
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    else:
        # BUG fix: the original *returned* a NotImplementedError instance
        # (and never %-formatted the message) instead of raising it.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[]):
    """Construct a generator, optionally move it to a GPU, and initialize it.

    Supported ``which_model_netG`` values: 'resnet_9blocks', 'resnet_6blocks',
    'unet_128', 'unet_256'. Anything else raises NotImplementedError.
    """
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    builders = {
        'resnet_9blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                                                  use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids),
        'resnet_6blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer,
                                                  use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids),
        'unet_128': lambda: UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer,
                                          use_dropout=use_dropout, gpu_ids=gpu_ids),
        'unet_256': lambda: UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer,
                                          use_dropout=use_dropout, gpu_ids=gpu_ids),
    }
    if which_model_netG not in builders:
        raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
    netG = builders[which_model_netG]()
    if use_gpu:
        netG.cuda(gpu_ids[0])
    init_weights(netG, init_type=init_type)
    return netG
def define_D(input_nc, ndf, which_model_netD,
             n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
    """Construct a PatchGAN discriminator, optionally on GPU, and initialize it.

    'basic' is a fixed 3-layer NLayerDiscriminator; 'n_layers' uses
    ``n_layers_D`` layers. Anything else raises NotImplementedError.
    """
    use_gpu = len(gpu_ids) > 0
    norm_layer = get_norm_layer(norm_type=norm)
    if use_gpu:
        assert(torch.cuda.is_available())
    if which_model_netD not in ('basic', 'n_layers'):
        raise NotImplementedError('Discriminator model name [%s] is not recognized' %
                                  which_model_netD)
    depth = 3 if which_model_netD == 'basic' else n_layers_D
    netD = NLayerDiscriminator(input_nc, ndf, n_layers=depth, norm_layer=norm_layer,
                               use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    if use_gpu:
        netD.cuda(gpu_ids[0])
    init_weights(netD, init_type=init_type)
    return netD
def print_network(net):
    """Print the module structure of ``net`` followed by its parameter count."""
    total = sum(param.numel() for param in net.parameters())
    print(net)
    print('Total number of parameters: %d' % total)
def get_n_parameters(net):
    """Return the total number of parameters in ``net``."""
    return sum(param.numel() for param in net.parameters())
def measure_fp_bp_time(model, x, y):
    """Time one forward and one backward pass on the GPU, in seconds.

    ``y`` is unused (kept for interface compatibility); the backward pass is
    driven by the summed prediction instead. Returns (forward_s, backward_s).
    """
    # Forward: synchronize around the call so all GPU work is accounted for.
    torch.cuda.synchronize()
    start = time.time()
    y_pred = model(x)
    torch.cuda.synchronize()
    elapsed_fp = time.time() - start

    # Reduce the prediction(s) to a scalar we can backprop through.
    if isinstance(y_pred, tuple):
        y_pred = sum(y_p.sum() for y_p in y_pred)
    else:
        y_pred = y_pred.sum()

    # Backward: clear gradients first, then time the backward pass.
    model.zero_grad()
    start = time.time()
    # y_pred.backward(y)
    y_pred.backward()
    torch.cuda.synchronize()
    elapsed_bp = time.time() - start
    return elapsed_fp, elapsed_bp
def benchmark_fp_bp_time(model, x, y, n_trial=1000):
    """Benchmark mean forward/backward times of ``model`` over ``n_trial`` runs.

    The model is moved to the GPU and warmed up with 10 dry runs first.
    Returns (mean_forward_s, mean_backward_s).
    """
    model.cuda()
    # Warm-up so lazy initialization / autotuning doesn't pollute the timings.
    for _ in range(10):
        measure_fp_bp_time(model, x, y)
    print('DONE WITH DRY RUNS, NOW BENCHMARKING')

    fp_times = []
    bp_times = []
    print('trial: {}'.format(n_trial))
    for _ in range(n_trial):
        fp, bp = measure_fp_bp_time(model, x, y)
        fp_times.append(fp)
        bp_times.append(bp)

    # free memory
    del model
    return np.mean(fp_times), np.mean(bp_times)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
    """GAN objective that hides target-tensor bookkeeping.

    With ``use_lsgan`` the criterion is MSE (LSGAN); otherwise BCE. Label
    tensors filled with the real/fake value are created lazily and cached
    until the input size changes.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_var = None
        self.fake_label_var = None
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a cached label tensor shaped like ``input``, rebuilt on size change."""
        if target_is_real:
            if self.real_label_var is None or self.real_label_var.numel() != input.numel():
                filled = self.Tensor(input.size()).fill_(self.real_label)
                self.real_label_var = Variable(filled, requires_grad=False)
            return self.real_label_var
        if self.fake_label_var is None or self.fake_label_var.numel() != input.numel():
            filled = self.Tensor(input.size()).fill_(self.fake_label)
            self.fake_label_var = Variable(filled, requires_grad=False)
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        return self.loss(input, self.get_target_tensor(input, target_is_real))
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
    """ResNet-based generator: downsample, ResNet blocks, upsample, Tanh.

    Code and idea originally from Justin Johnson's fast-neural-style
    architecture (https://github.com/jcjohnson/fast-neural-style/).
    Maps ``input_nc``-channel images to ``output_nc``-channel images.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect'):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # Convolutions only need their own bias when the norm is InstanceNorm
        # (which has no affine shift here).
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # Stem: reflection-pad + 7x7 conv to ngf channels.
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
                           bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]
        n_downsampling = 2
        # Downsampling: each stage halves resolution and doubles channels.
        for i in range(n_downsampling):
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]
        mult = 2**n_downsampling
        # Residual blocks at the bottleneck resolution.
        for i in range(n_blocks):
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        # Upsampling: mirror of the downsampling path.
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        # Head: reflection-pad + 7x7 conv to output channels, Tanh range.
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input):
        # Split the batch across GPUs when several are configured and the
        # input already lives on the GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
    """Residual block: ``x + conv_block(x)`` with configurable padding, norm
    and optional dropout between the two 3x3 convolutions."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Assemble pad -> conv -> norm -> ReLU [-> dropout] -> pad -> conv -> norm."""
        def padding():
            # Returns (explicit padding layers, conv padding amount).
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers, p = padding()
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim),
                   nn.ReLU(True)]
        if use_dropout:
            layers += [nn.Dropout(0.5)]
        extra, p = padding()
        layers += extra
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut around the conv stack.
        return x + self.conv_block(x)
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
    """U-Net generator built from nested UnetSkipConnectionBlocks.

    ``num_downs`` is the number of downsamplings: e.g. with num_downs == 7 a
    128x128 image becomes 1x1 at the bottleneck. The network is assembled
    from the innermost block outward.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        super(UnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # construct unet structure, starting with the innermost block
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # intermediate blocks at the deepest (ngf*8) width
        for i in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # progressively wider outer blocks down to ngf, then the outermost
        # block that maps to the requested output channels
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)
        self.model = unet_block

    def forward(self, input):
        # Split the batch across GPUs when several are configured and the
        # input already lives on the GPU.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
    """One U-Net level with a skip connection.

    X -------------------identity---------------------- X
      |-- downsampling -- |submodule| -- upsampling --|

    The outermost block maps to the output channels with Tanh and no skip
    concatenation; the innermost block has no submodule. All other blocks
    concatenate their input with the upsampled result along channels.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # Convolutions only need their own bias when the norm is InstanceNorm
        # (which has no affine shift here).
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # final level: no norm after upconv, Tanh output, no skip concat
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # bottleneck level: no submodule, so upconv input is not doubled
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # middle level: upconv input is doubled by the child's skip concat
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # skip connection: concatenate input with the processed output
            return torch.cat([x, self.model(x)], 1)
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: a stack of stride-2 4x4 convs whose channel
    count doubles (capped at 8x ndf), ending in a 1-channel score map."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        # Convolutions only need their own bias when the norm is InstanceNorm.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw, padw = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True),
        ]
        nf_mult = 1
        # Stride-2 stages; the channel multiplier doubles but is capped at 8.
        for n in range(1, n_layers):
            nf_mult_prev, nf_mult = nf_mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True),
            ]
        # One stride-1 stage, then the 1-channel patch-score conv.
        nf_mult_prev, nf_mult = nf_mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw),
        ]
        if use_sigmoid:
            layers += [nn.Sigmoid()]
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        # Split the batch across GPUs when several are configured.
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
| 20,202 | 37.118868 | 151 | py |
SSL4MIS | SSL4MIS-master/code/networks/vnet.py | import torch
from torch import nn
import torch.nn.functional as F
class ConvBlock(nn.Module):
    """``n_stages`` repetitions of Conv3d(3x3x3, padding 1) -> normalization
    -> ReLU. ``normalization`` is one of 'none', 'batchnorm', 'groupnorm'
    (16 groups) or 'instancenorm'."""

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ConvBlock, self).__init__()
        layers = []
        for stage in range(n_stages):
            # only the first conv sees the external channel count
            in_channels = n_filters_in if stage == 0 else n_filters_out
            layers.append(nn.Conv3d(in_channels, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                layers.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                layers.append(nn.InstanceNorm3d(n_filters_out))
            elif normalization != 'none':
                assert False
            layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class ResidualConvBlock(nn.Module):
    """Conv stack with an identity shortcut: ``ReLU(convs(x) + x)``.

    The last stage omits its ReLU so the activation is applied after the
    residual addition. The additive shortcut requires
    ``n_filters_in == n_filters_out``.
    """

    def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
        super(ResidualConvBlock, self).__init__()
        layers = []
        for stage in range(n_stages):
            in_channels = n_filters_in if stage == 0 else n_filters_out
            layers.append(nn.Conv3d(in_channels, n_filters_out, 3, padding=1))
            if normalization == 'batchnorm':
                layers.append(nn.BatchNorm3d(n_filters_out))
            elif normalization == 'groupnorm':
                layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
            elif normalization == 'instancenorm':
                layers.append(nn.InstanceNorm3d(n_filters_out))
            elif normalization != 'none':
                assert False
            # no ReLU after the final stage: it comes after the shortcut add
            if stage != n_stages - 1:
                layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.conv(x) + x)
class DownsamplingConvBlock(nn.Module):
    """Strided Conv3d (kernel == stride, no padding) that downsamples by
    ``stride``, followed by optional normalization and ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(DownsamplingConvBlock, self).__init__()
        layers = [nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            layers.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            layers.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            assert False
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class UpsamplingDeconvBlock(nn.Module):
    """ConvTranspose3d (kernel == stride, no padding) that upsamples by
    ``stride``, followed by optional normalization and ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(UpsamplingDeconvBlock, self).__init__()
        layers = [nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride)]
        if normalization == 'batchnorm':
            layers.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            layers.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            assert False
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class Upsampling(nn.Module):
    """Trilinear upsampling by ``stride`` followed by Conv3d(3x3x3, padding 1),
    optional normalization, and ReLU."""

    def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
        super(Upsampling, self).__init__()
        layers = [
            nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False),
            nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1),
        ]
        if normalization == 'batchnorm':
            layers.append(nn.BatchNorm3d(n_filters_out))
        elif normalization == 'groupnorm':
            layers.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
        elif normalization == 'instancenorm':
            layers.append(nn.InstanceNorm3d(n_filters_out))
        elif normalization != 'none':
            assert False
        layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class VNet(nn.Module):
    """V-Net style 3D encoder-decoder for volumetric segmentation.

    Five encoder stages (ConvBlock + strided downsampling that doubles the
    channel count) followed by a mirrored decoder (deconv upsampling +
    ConvBlock) with additive skip connections, ending in a 1x1x1 conv that
    produces ``n_classes`` logit maps.

    :param n_channels: number of input channels.
    :param n_classes: number of output segmentation classes.
    :param n_filters: base channel count, doubled at each encoder stage.
    :param normalization: 'none' | 'batchnorm' | 'groupnorm' | 'instancenorm'.
    :param has_dropout: when True, apply Dropout3d(0.5) at the bottleneck and
        before the output conv.
    """

    def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False):
        super(VNet, self).__init__()
        self.has_dropout = has_dropout
        # Encoder: ConvBlock stages interleaved with downsampling convs.
        self.block_one = ConvBlock(1, n_channels, n_filters, normalization=normalization)
        self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
        self.block_two = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
        self.block_three = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
        self.block_four = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
        self.block_five = ConvBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
        # Decoder: mirrored deconv upsampling and ConvBlock stages.
        self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
        self.block_six = ConvBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
        self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
        self.block_seven = ConvBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
        self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
        self.block_eight = ConvBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
        self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
        self.block_nine = ConvBlock(1, n_filters, n_filters, normalization=normalization)
        self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
        self.dropout = nn.Dropout3d(p=0.5, inplace=False)
        # self.__init_weight()

    def encoder(self, input):
        """Run the encoder path; returns [x1..x4] skips plus the bottleneck x5."""
        x1 = self.block_one(input)
        x1_dw = self.block_one_dw(x1)
        x2 = self.block_two(x1_dw)
        x2_dw = self.block_two_dw(x2)
        x3 = self.block_three(x2_dw)
        x3_dw = self.block_three_dw(x3)
        x4 = self.block_four(x3_dw)
        x4_dw = self.block_four_dw(x4)
        x5 = self.block_five(x4_dw)
        # x5 = F.dropout3d(x5, p=0.5, training=True)
        if self.has_dropout:
            x5 = self.dropout(x5)
        res = [x1, x2, x3, x4, x5]
        return res

    def decoder(self, features):
        """Run the decoder over encoder outputs; returns class logits."""
        x1 = features[0]
        x2 = features[1]
        x3 = features[2]
        x4 = features[3]
        x5 = features[4]
        x5_up = self.block_five_up(x5)
        x5_up = x5_up + x4  # additive skip connection
        x6 = self.block_six(x5_up)
        x6_up = self.block_six_up(x6)
        x6_up = x6_up + x3
        x7 = self.block_seven(x6_up)
        x7_up = self.block_seven_up(x7)
        x7_up = x7_up + x2
        x8 = self.block_eight(x7_up)
        x8_up = self.block_eight_up(x8)
        x8_up = x8_up + x1
        x9 = self.block_nine(x8_up)
        # x9 = F.dropout3d(x9, p=0.5, training=True)
        if self.has_dropout:
            x9 = self.dropout(x9)
        out = self.out_conv(x9)
        return out

    def forward(self, input, turnoff_drop=False):
        """Full forward pass; ``turnoff_drop`` temporarily disables dropout."""
        if turnoff_drop:
            has_dropout = self.has_dropout
            self.has_dropout = False
        features = self.encoder(input)
        out = self.decoder(features)
        if turnoff_drop:
            # restore the configured dropout setting
            self.has_dropout = has_dropout
        return out

    # def __init_weight(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv3d):
    #             torch.nn.init.kaiming_normal_(m.weight)
    #         elif isinstance(m, nn.BatchNorm3d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
if __name__ == '__main__':
    # compute FLOPS & PARAMETERS of a default VNet with a typical 3D input
    from thop import profile
    from thop import clever_format
    model = VNet(n_channels=1, n_classes=2)
    # renamed from ``input`` to avoid shadowing the builtin
    dummy_input = torch.randn(4, 1, 112, 112, 80)
    flops, params = profile(model, inputs=(dummy_input,))
    print(flops, params)
    macs, params = clever_format([flops, params], "%.3f")
    print(macs, params)
    # BUG fix: message previously read "VNet have {} paramerters in total"
    print("VNet has {} parameters in total".format(sum(x.numel() for x in model.parameters())))
SSL4MIS | SSL4MIS-master/code/networks/attention.py | import torch.nn as nn
try:
from inplace_abn import InPlaceABN
except ImportError:
InPlaceABN = None
class Conv2dReLU(nn.Sequential):
    """Conv2d followed by (optional) normalization and ReLU.

    ``use_batchnorm`` may be True, False, or the string "inplace", which
    substitutes an InPlaceABN layer (fusing norm and activation) and drops
    the separate ReLU. The conv carries a bias only when normalization is
    disabled.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        stride=1,
        use_batchnorm=True,
    ):
        # keep this check (and its short-circuit order) first: InPlaceABN is
        # only required when the "inplace" mode is actually requested
        if use_batchnorm == "inplace" and InPlaceABN is None:
            raise RuntimeError(
                "In order to use `use_batchnorm='inplace'` inplace_abn package must be installed. "
                + "To install see: https://github.com/mapillary/inplace_abn"
            )

        super().__init__()
        conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=not (use_batchnorm),
        )
        if use_batchnorm == "inplace":
            # InPlaceABN fuses normalization and activation -> no extra ReLU
            norm = InPlaceABN(out_channels, activation="leaky_relu", activation_param=0.0)
            act = nn.Identity()
        elif use_batchnorm:
            norm = nn.BatchNorm2d(out_channels)
            act = nn.ReLU(inplace=True)
        else:
            norm = nn.Identity()
            act = nn.ReLU(inplace=True)
        super(Conv2dReLU, self).__init__(conv, norm, act)
class SCSEModule(nn.Module):
    """Concurrent spatial and channel squeeze-excitation (scSE) attention."""

    def __init__(self, in_channels, reduction=16):
        super().__init__()
        # channel SE: global average pool -> 1x1 bottleneck MLP -> sigmoid gate
        self.cSE = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, in_channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels // reduction, in_channels, 1),
            nn.Sigmoid(),
        )
        # spatial SE: 1x1 conv -> sigmoid gate per spatial position
        self.sSE = nn.Sequential(nn.Conv2d(in_channels, 1, 1), nn.Sigmoid())

    def forward(self, x):
        channel_gated = x * self.cSE(x)
        spatial_gated = x * self.sSE(x)
        return channel_gated + spatial_gated
class Activation(nn.Module):
    """Wrap a named (or callable) activation as a module.

    Accepted names: None/'identity', 'sigmoid', 'softmax2d' (softmax over
    dim 1), 'softmax', 'logsoftmax', or any callable constructed with
    ``**params``.
    """

    def __init__(self, name, **params):
        super().__init__()
        if name is None or name == 'identity':
            act = nn.Identity(**params)
        elif name == 'sigmoid':
            act = nn.Sigmoid()
        elif name == 'softmax2d':
            # softmax across the channel dimension of NCHW input
            act = nn.Softmax(dim=1, **params)
        elif name == 'softmax':
            act = nn.Softmax(**params)
        elif name == 'logsoftmax':
            act = nn.LogSoftmax(**params)
        elif callable(name):
            act = name(**params)
        else:
            raise ValueError('Activation should be callable/sigmoid/softmax/logsoftmax/None; got {}'.format(name))
        self.activation = act

    def forward(self, x):
        return self.activation(x)
class Attention(nn.Module):
    """Optional attention wrapper: identity when ``name`` is None, or an
    SCSEModule when ``name == 'scse'``."""

    def __init__(self, name, **params):
        super().__init__()
        if name is None:
            attn = nn.Identity(**params)
        elif name == 'scse':
            attn = SCSEModule(**params)
        else:
            raise ValueError("Attention {} is not implemented".format(name))
        self.attention = attn

    def forward(self, x):
        return self.attention(x)
class Flatten(nn.Module):
    """Flatten every dimension after the batch dimension."""

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)
| 3,104 | 26.972973 | 114 | py |
SSL4MIS | SSL4MIS-master/code/networks/enet.py | import torch.nn as nn
import torch
class InitialBlock(nn.Module):
    """ENet initial block: parallel strided conv + max-pool, concatenated.

    The main branch is a 3x3/stride-2 convolution producing
    ``out_channels - in_channels`` feature maps, while the extension branch
    max-pools the raw input, so concatenating both yields exactly
    ``out_channels`` maps. Batch norm and the activation are applied after
    the concatenation.

    Keyword arguments:
    - in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels.
    - bias (bool, optional): adds a learnable bias to the main-branch conv
      if ``True``. Default: False.
    - relu (bool, optional): when ``True`` ReLU is used as the activation
      function; otherwise, PReLU is used. Default: True.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 bias=False,
                 relu=True):
        super().__init__()

        activation = nn.ReLU if relu else nn.PReLU

        # Main branch: strided conv producing all but ``in_channels`` maps,
        # since the remaining channels come from the pooling branch.
        self.main_branch = nn.Conv2d(
            in_channels,
            out_channels - in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=bias)

        # Extension branch: max-pool the raw input.
        self.ext_branch = nn.MaxPool2d(3, stride=2, padding=1)

        # Batch norm applied after concatenating the two branches.
        self.batch_norm = nn.BatchNorm2d(out_channels)

        self.out_activation = activation()

    def forward(self, x):
        main = self.main_branch(x)
        ext = self.ext_branch(x)
        merged = torch.cat((main, ext), 1)
        return self.out_activation(self.batch_norm(merged))
class RegularBottleneck(nn.Module):
    """Regular bottlenecks are the main building block of ENet.
    Main branch:
    1. Shortcut connection.
    Extension branch:
    1. 1x1 convolution which decreases the number of channels by
    ``internal_ratio``, also called a projection;
    2. regular, dilated or asymmetric convolution;
    3. 1x1 convolution which increases the number of channels back to
    ``channels``, also called an expansion;
    4. dropout as a regularizer.
    Keyword arguments:
    - channels (int): the number of input and output channels.
    - internal_ratio (int, optional): a scale factor applied to
    ``channels`` used to compute the number of
    channels after the projection. eg. given ``channels`` equal to 128 and
    internal_ratio equal to 2 the number of channels after the projection
    is 64. Default: 4.
    - kernel_size (int, optional): the kernel size of the filters used in
    the convolution layer described above in item 2 of the extension
    branch. Default: 3.
    - padding (int, optional): zero-padding added to both sides of the
    input. Default: 0.
    - dilation (int, optional): spacing between kernel elements for the
    convolution described in item 2 of the extension branch. Default: 1.
    - asymmetric (bool, optional): flags if the convolution described in
    item 2 of the extension branch is asymmetric or not. Default: False.
    - dropout_prob (float, optional): probability of an element to be
    zeroed. Default: 0 (no dropout).
    - bias (bool, optional): Adds a learnable bias to the output if
    ``True``. Default: False.
    - relu (bool, optional): When ``True`` ReLU is used as the activation
    function; otherwise, PReLU is used. Default: True.
    """

    def __init__(self,
                 channels,
                 internal_ratio=4,
                 kernel_size=3,
                 padding=0,
                 dilation=1,
                 asymmetric=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        # Check that the internal_scale parameter is within the expected
        # range [1, channels]
        if internal_ratio <= 1 or internal_ratio > channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_scale={1}."
                               .format(channels, internal_ratio))

        internal_channels = channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - shortcut connection (no layers; identity in forward)

        # Extension branch - 1x1 convolution, followed by a regular, dilated or
        # asymmetric convolution, followed by another 1x1 convolution, and,
        # finally, a regularizer (spatial dropout). Number of channels is constant.

        # 1x1 projection convolution
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                channels,
                internal_channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # If the convolution is asymmetric we split the main convolution in
        # two. Eg. for a 5x5 asymmetric convolution we have two convolution:
        # the first is 5x1 and the second is 1x5.
        if asymmetric:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=(kernel_size, 1),
                    stride=1,
                    padding=(padding, 0),
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation(),
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=(1, kernel_size),
                    stride=1,
                    padding=(0, padding),
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation())
        else:
            self.ext_conv2 = nn.Sequential(
                nn.Conv2d(
                    internal_channels,
                    internal_channels,
                    kernel_size=kernel_size,
                    stride=1,
                    padding=padding,
                    dilation=dilation,
                    bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # 1x1 expansion convolution
        self.ext_conv3 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(channels), activation())

        # Spatial dropout regularizer on the extension branch.
        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after adding the branches
        self.out_activation = activation()

    def forward(self, x):
        # Main branch shortcut (identity)
        main = x

        # Extension branch: project -> conv (regular/dilated/asymmetric)
        # -> expand -> dropout.
        ext = self.ext_conv1(x)
        ext = self.ext_conv2(ext)
        ext = self.ext_conv3(ext)
        ext = self.ext_regul(ext)

        # Add main and extension branches (residual sum, same shape).
        out = main + ext

        return self.out_activation(out)
class DownsamplingBottleneck(nn.Module):
    """Downsampling bottleneck: halves the spatial size and widens channels.

    Main branch:
    1. max pooling with stride 2; indices are saved for later unpooling
    (only when ``return_indices`` is ``True``).
    Extension branch:
    1. 2x2 convolution with stride 2 that decreases the number of channels
    by ``internal_ratio``, also called a projection;
    2. regular convolution (by default, 3x3);
    3. 1x1 convolution which increases the number of channels to
    ``out_channels``, also called an expansion;
    4. dropout as a regularizer.
    Keyword arguments:
    - in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels. The pooled main
    branch is zero-padded up to this count before the branches are summed.
    - internal_ratio (int, optional): a scale factor applied to ``channels``
    used to compute the number of channels after the projection. eg. given
    ``channels`` equal to 128 and internal_ratio equal to 2 the number of
    channels after the projection is 64. Default: 4.
    - return_indices (bool, optional): if ``True``, will return the max
    indices along with the outputs. Useful when unpooling later. When
    ``False``, ``None`` is returned in place of the indices.
    - dropout_prob (float, optional): probability of an element to be
    zeroed. Default: 0 (no dropout).
    - bias (bool, optional): Adds a learnable bias to the output if
    ``True``. Default: False.
    - relu (bool, optional): When ``True`` ReLU is used as the activation
    function; otherwise, PReLU is used. Default: True.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 return_indices=False,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        # Needed in forward() to know whether pooling yields indices.
        self.return_indices = return_indices

        # Check that the internal_scale parameter is within the expected
        # range [1, in_channels].
        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_scale={1}. "
                               .format(in_channels, internal_ratio))

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - max pooling followed by feature map (channels) padding
        self.main_max1 = nn.MaxPool2d(
            2,
            stride=2,
            return_indices=return_indices)

        # Extension branch - 2x2 projection convolution with stride 2,
        # followed by a regular convolution and a 1x1 expansion.
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels,
                internal_channels,
                kernel_size=2,
                stride=2,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # Regular 3x3 convolution.
        self.ext_conv2 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                internal_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=bias), nn.BatchNorm2d(internal_channels), activation())

        # 1x1 expansion convolution up to out_channels.
        self.ext_conv3 = nn.Sequential(
            nn.Conv2d(
                internal_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                bias=bias), nn.BatchNorm2d(out_channels), activation())

        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after concatenating the branches.
        self.out_activation = activation()

    def forward(self, x):
        # Main branch shortcut. BUGFIX: when return_indices is False the
        # original code left ``max_indices`` unbound and raised NameError
        # at the return statement; bind it to None explicitly.
        if self.return_indices:
            main, max_indices = self.main_max1(x)
        else:
            main = self.main_max1(x)
            max_indices = None

        # Extension branch
        ext = self.ext_conv1(x)
        ext = self.ext_conv2(ext)
        ext = self.ext_conv3(ext)
        ext = self.ext_regul(ext)

        # Zero-pad the pooled main branch up to the extension branch's
        # channel count. Allocating directly with the input's device and
        # dtype also supports non-CUDA accelerators (the previous code only
        # special-cased ``.is_cuda``).
        n, ch_ext, h, w = ext.size()
        ch_main = main.size(1)
        padding = torch.zeros(
            n, ch_ext - ch_main, h, w, dtype=main.dtype, device=main.device)

        # Concatenate, then sum the branches.
        main = torch.cat((main, padding), 1)
        out = main + ext

        return self.out_activation(out), max_indices
class UpsamplingBottleneck(nn.Module):
    """The upsampling bottlenecks upsample the feature map resolution using max
    pooling indices stored from the corresponding downsampling bottleneck.
    Main branch:
    1. 1x1 convolution with stride 1 that decreases the number of channels by
    ``internal_ratio``, also called a projection;
    2. max unpool layer using the max pool indices from the corresponding
    downsampling max pool layer.
    Extension branch:
    1. 1x1 convolution with stride 1 that decreases the number of channels by
    ``internal_ratio``, also called a projection;
    2. transposed convolution (by default, 3x3);
    3. 1x1 convolution which increases the number of channels to
    ``out_channels``, also called an expansion;
    4. dropout as a regularizer.
    Keyword arguments:
    - in_channels (int): the number of input channels.
    - out_channels (int): the number of output channels.
    - internal_ratio (int, optional): a scale factor applied to ``in_channels``
    used to compute the number of channels after the projection. eg. given
    ``in_channels`` equal to 128 and ``internal_ratio`` equal to 2 the number
    of channels after the projection is 64. Default: 4.
    - dropout_prob (float, optional): probability of an element to be zeroed.
    Default: 0 (no dropout).
    - bias (bool, optional): Adds a learnable bias to the output if ``True``.
    Default: False.
    - relu (bool, optional): When ``True`` ReLU is used as the activation
    function; otherwise, PReLU is used. Default: True.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 internal_ratio=4,
                 dropout_prob=0,
                 bias=False,
                 relu=True):
        super().__init__()

        # Check that the internal_scale parameter is within the expected
        # range [1, in_channels].
        if internal_ratio <= 1 or internal_ratio > in_channels:
            raise RuntimeError("Value out of range. Expected value in the "
                               "interval [1, {0}], got internal_scale={1}. "
                               .format(in_channels, internal_ratio))

        internal_channels = in_channels // internal_ratio

        if relu:
            activation = nn.ReLU
        else:
            activation = nn.PReLU

        # Main branch - 1x1 channel reduction, then max unpooling
        self.main_conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(out_channels))

        # Remember that the stride is the same as the kernel_size, just like
        # the max pooling layers
        self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2)

        # Extension branch - 1x1 convolution, followed by a transposed
        # convolution, followed by another 1x1 convolution.

        # 1x1 projection convolution with stride 1
        self.ext_conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels, internal_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(internal_channels), activation())

        # Transposed convolution (kept outside nn.Sequential because forward
        # must pass output_size to it explicitly).
        self.ext_tconv1 = nn.ConvTranspose2d(
            internal_channels,
            internal_channels,
            kernel_size=2,
            stride=2,
            bias=bias)
        self.ext_tconv1_bnorm = nn.BatchNorm2d(internal_channels)
        self.ext_tconv1_activation = activation()

        # 1x1 expansion convolution
        self.ext_conv2 = nn.Sequential(
            nn.Conv2d(
                internal_channels, out_channels, kernel_size=1, bias=bias),
            nn.BatchNorm2d(out_channels), activation())

        self.ext_regul = nn.Dropout2d(p=dropout_prob)

        # PReLU/ReLU layer to apply after adding the branches
        self.out_activation = activation()

    def forward(self, x, max_indices, output_size):
        # Main branch shortcut: reduce channels, then unpool with the
        # indices recorded by the matching downsampling bottleneck.
        main = self.main_conv1(x)
        main = self.main_unpool1(
            main, max_indices, output_size=output_size)

        # Extension branch: project, learned upsampling, expand, dropout.
        ext = self.ext_conv1(x)
        ext = self.ext_tconv1(ext, output_size=output_size)
        ext = self.ext_tconv1_bnorm(ext)
        ext = self.ext_tconv1_activation(ext)
        ext = self.ext_conv2(ext)
        ext = self.ext_regul(ext)

        # Add main and extension branches
        out = main + ext

        return self.out_activation(out)
class ENet(nn.Module):
    """Generate the ENet model.

    Encoder (stages 1-3) progressively downsamples with bottleneck blocks;
    decoder (stages 4-5) restores resolution using the max-pooling indices
    saved by the downsampling stages, then a final transposed convolution
    maps to ``num_classes`` channels at the input resolution.

    Keyword arguments:
    - in_channels (int): the number of input channels.
    - num_classes (int): the number of classes to segment.
    - encoder_relu (bool, optional): When ``True`` ReLU is used as the
    activation function in the encoder blocks/layers; otherwise, PReLU
    is used. Default: False.
    - decoder_relu (bool, optional): When ``True`` ReLU is used as the
    activation function in the decoder blocks/layers; otherwise, PReLU
    is used. Default: True.
    """

    def __init__(self, in_channels, num_classes, encoder_relu=False, decoder_relu=True):
        super().__init__()

        self.initial_block = InitialBlock(in_channels, 16, relu=encoder_relu)

        # Stage 1 - Encoder (16 -> 64 channels, 1/4 resolution)
        self.downsample1_0 = DownsamplingBottleneck(
            16,
            64,
            return_indices=True,
            dropout_prob=0.01,
            relu=encoder_relu)
        self.regular1_1 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_2 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_3 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)
        self.regular1_4 = RegularBottleneck(
            64, padding=1, dropout_prob=0.01, relu=encoder_relu)

        # Stage 2 - Encoder (64 -> 128 channels, 1/8 resolution; mixes
        # regular, dilated and asymmetric bottlenecks)
        self.downsample2_0 = DownsamplingBottleneck(
            64,
            128,
            return_indices=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.regular2_1 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated2_2 = RegularBottleneck(
            128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric2_3 = RegularBottleneck(
            128,
            kernel_size=5,
            padding=2,
            asymmetric=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated2_4 = RegularBottleneck(
            128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
        self.regular2_5 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated2_6 = RegularBottleneck(
            128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric2_7 = RegularBottleneck(
            128,
            kernel_size=5,
            asymmetric=True,
            padding=2,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated2_8 = RegularBottleneck(
            128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)

        # Stage 3 - Encoder (same layout as stage 2, no downsampling)
        self.regular3_0 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated3_1 = RegularBottleneck(
            128, dilation=2, padding=2, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric3_2 = RegularBottleneck(
            128,
            kernel_size=5,
            padding=2,
            asymmetric=True,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated3_3 = RegularBottleneck(
            128, dilation=4, padding=4, dropout_prob=0.1, relu=encoder_relu)
        self.regular3_4 = RegularBottleneck(
            128, padding=1, dropout_prob=0.1, relu=encoder_relu)
        self.dilated3_5 = RegularBottleneck(
            128, dilation=8, padding=8, dropout_prob=0.1, relu=encoder_relu)
        self.asymmetric3_6 = RegularBottleneck(
            128,
            kernel_size=5,
            asymmetric=True,
            padding=2,
            dropout_prob=0.1,
            relu=encoder_relu)
        self.dilated3_7 = RegularBottleneck(
            128, dilation=16, padding=16, dropout_prob=0.1, relu=encoder_relu)

        # Stage 4 - Decoder (128 -> 64 channels, back to 1/4 resolution)
        self.upsample4_0 = UpsamplingBottleneck(
            128, 64, dropout_prob=0.1, relu=decoder_relu)
        self.regular4_1 = RegularBottleneck(
            64, padding=1, dropout_prob=0.1, relu=decoder_relu)
        self.regular4_2 = RegularBottleneck(
            64, padding=1, dropout_prob=0.1, relu=decoder_relu)

        # Stage 5 - Decoder (64 -> 16 channels, back to 1/2 resolution)
        self.upsample5_0 = UpsamplingBottleneck(
            64, 16, dropout_prob=0.1, relu=decoder_relu)
        self.regular5_1 = RegularBottleneck(
            16, padding=1, dropout_prob=0.1, relu=decoder_relu)

        # Final learned upsampling to num_classes at input resolution;
        # forward() passes output_size=input_size to pin the exact shape.
        self.transposed_conv = nn.ConvTranspose2d(
            16,
            num_classes,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)

    def forward(self, x):
        # Initial block; remember sizes so the decoder can unpool exactly.
        input_size = x.size()
        x = self.initial_block(x)

        # Stage 1 - Encoder
        stage1_input_size = x.size()
        x, max_indices1_0 = self.downsample1_0(x)
        x = self.regular1_1(x)
        x = self.regular1_2(x)
        x = self.regular1_3(x)
        x = self.regular1_4(x)

        # Stage 2 - Encoder
        stage2_input_size = x.size()
        x, max_indices2_0 = self.downsample2_0(x)
        x = self.regular2_1(x)
        x = self.dilated2_2(x)
        x = self.asymmetric2_3(x)
        x = self.dilated2_4(x)
        x = self.regular2_5(x)
        x = self.dilated2_6(x)
        x = self.asymmetric2_7(x)
        x = self.dilated2_8(x)

        # Stage 3 - Encoder
        x = self.regular3_0(x)
        x = self.dilated3_1(x)
        x = self.asymmetric3_2(x)
        x = self.dilated3_3(x)
        x = self.regular3_4(x)
        x = self.dilated3_5(x)
        x = self.asymmetric3_6(x)
        x = self.dilated3_7(x)

        # Stage 4 - Decoder (unpool with stage-2 indices)
        x = self.upsample4_0(x, max_indices2_0, output_size=stage2_input_size)
        x = self.regular4_1(x)
        x = self.regular4_2(x)

        # Stage 5 - Decoder (unpool with stage-1 indices)
        x = self.upsample5_0(x, max_indices1_0, output_size=stage1_input_size)
        x = self.regular5_1(x)
        x = self.transposed_conv(x, output_size=input_size)

        return x
| 22,927 | 36.281301 | 88 | py |
SSL4MIS | SSL4MIS-master/code/networks/unet_3D_dv_semi.py | """
This file is adapted from https://github.com/ozan-oktay/Attention-Gated-Networks
"""
import math
import torch
import torch.nn as nn
from networks.utils import UnetConv3, UnetUp3, UnetUp3_CT, UnetDsv3
import torch.nn.functional as F
from networks.networks_other import init_weights
class unet_3D_dv_semi(nn.Module):
    """3D U-Net with deep supervision for semi-supervised segmentation.

    Adapted from https://github.com/ozan-oktay/Attention-Gated-Networks.
    forward() returns four logits tensors (dsv1..dsv4), one per decoder
    level, each upsampled to full resolution by its UnetDsv3 head.

    Keyword arguments:
    - feature_scale (int): divisor applied to the base channel widths
      [64, 128, 256, 512, 1024]. Default: 4.
    - n_classes (int): number of output classes per supervision head.
    - is_deconv (bool): stored but not read in this class body —
      presumably consumed by the Unet* helper blocks; verify there.
    - in_channels (int): number of input channels.
    - is_batchnorm (bool): passed through to the Unet* conv blocks.
    """

    def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(unet_3D_dv_semi, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale

        # Base widths shrunk by feature_scale (e.g. scale 4 -> [16, 32, 64, 128, 256]).
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # downsampling path: conv block + 2x2x2 max pool at each level
        self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))

        # upsampling path: concat skip connection + conv block
        self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
        self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
        self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
        self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)

        # deep supervision heads: map each decoder level to n_classes and
        # upsample to full resolution (scale 8/4/2, top level needs none)
        self.dsv4 = UnetDsv3(
            in_size=filters[3], out_size=n_classes, scale_factor=8)
        self.dsv3 = UnetDsv3(
            in_size=filters[2], out_size=n_classes, scale_factor=4)
        self.dsv2 = UnetDsv3(
            in_size=filters[1], out_size=n_classes, scale_factor=2)
        self.dsv1 = nn.Conv3d(
            in_channels=filters[0], out_channels=n_classes, kernel_size=1)

        # dropout rates decrease toward the output resolution
        self.dropout1 = nn.Dropout3d(p=0.5)
        self.dropout2 = nn.Dropout3d(p=0.3)
        self.dropout3 = nn.Dropout3d(p=0.2)
        self.dropout4 = nn.Dropout3d(p=0.1)

        # initialise weights (Kaiming for convs and batch norms)
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm3d):
                init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        # Encoder
        conv1 = self.conv1(inputs)
        maxpool1 = self.maxpool1(conv1)

        conv2 = self.conv2(maxpool1)
        maxpool2 = self.maxpool2(conv2)

        conv3 = self.conv3(maxpool2)
        maxpool3 = self.maxpool3(conv3)

        conv4 = self.conv4(maxpool3)
        maxpool4 = self.maxpool4(conv4)

        center = self.center(maxpool4)

        # Decoder with per-level dropout
        up4 = self.up_concat4(conv4, center)
        up4 = self.dropout1(up4)
        up3 = self.up_concat3(conv3, up4)
        up3 = self.dropout2(up3)
        up2 = self.up_concat2(conv2, up3)
        up2 = self.dropout3(up2)
        up1 = self.up_concat1(conv1, up2)
        up1 = self.dropout4(up1)

        # Deep Supervision: one prediction per decoder level
        dsv4 = self.dsv4(up4)
        dsv3 = self.dsv3(up3)
        dsv2 = self.dsv2(up2)
        dsv1 = self.dsv1(up1)

        return dsv1, dsv2, dsv3, dsv4

    @staticmethod
    def apply_argmax_softmax(pred):
        # Softmax over the class dimension (name is historical; no argmax here).
        log_p = F.softmax(pred, dim=1)
        return log_p
| 3,865 | 33.212389 | 104 | py |
SSL4MIS | SSL4MIS-master/code/networks/unet_3D.py | # -*- coding: utf-8 -*-
"""
An implementation of the 3D U-Net paper:
Özgün Çiçek, Ahmed Abdulkadir, Soeren S. Lienkamp, Thomas Brox, Olaf Ronneberger:
3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation.
MICCAI (2) 2016: 424-432
Note that there are some modifications from the original paper, such as
the use of batch normalization, dropout, and leaky relu here.
The implementation is borrowed from: https://github.com/ozan-oktay/Attention-Gated-Networks
"""
import math
import torch.nn as nn
import torch.nn.functional as F
from networks.networks_other import init_weights
from networks.utils import UnetConv3, UnetUp3, UnetUp3_CT
class unet_3D(nn.Module):
    """3D U-Net (Çiçek et al., MICCAI 2016) with batch norm and dropout.

    Borrowed from https://github.com/ozan-oktay/Attention-Gated-Networks.
    forward() returns a single logits tensor with n_classes channels.

    Keyword arguments:
    - feature_scale (int): divisor applied to the base channel widths
      [64, 128, 256, 512, 1024]. Default: 4.
    - n_classes (int): number of output classes.
    - is_deconv (bool): stored but not read in this class body —
      presumably consumed by the Unet* helper blocks; verify there.
    - in_channels (int): number of input channels.
    - is_batchnorm (bool): passed through to the Unet* conv blocks.
    """

    def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
        super(unet_3D, self).__init__()
        self.is_deconv = is_deconv
        self.in_channels = in_channels
        self.is_batchnorm = is_batchnorm
        self.feature_scale = feature_scale

        # Base widths shrunk by feature_scale.
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / self.feature_scale) for x in filters]

        # downsampling path
        self.conv1 = UnetConv3(self.in_channels, filters[0], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool1 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv2 = UnetConv3(filters[0], filters[1], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool2 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv3 = UnetConv3(filters[1], filters[2], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool3 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.conv4 = UnetConv3(filters[2], filters[3], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))
        self.maxpool4 = nn.MaxPool3d(kernel_size=(2, 2, 2))

        self.center = UnetConv3(filters[3], filters[4], self.is_batchnorm, kernel_size=(
            3, 3, 3), padding_size=(1, 1, 1))

        # upsampling path with skip connections
        self.up_concat4 = UnetUp3_CT(filters[4], filters[3], is_batchnorm)
        self.up_concat3 = UnetUp3_CT(filters[3], filters[2], is_batchnorm)
        self.up_concat2 = UnetUp3_CT(filters[2], filters[1], is_batchnorm)
        self.up_concat1 = UnetUp3_CT(filters[1], filters[0], is_batchnorm)

        # final conv (without any concat)
        self.final = nn.Conv3d(filters[0], n_classes, 1)

        # dropout at the bottleneck and before the final projection
        self.dropout1 = nn.Dropout(p=0.3)
        self.dropout2 = nn.Dropout(p=0.3)

        # initialise weights (Kaiming for convs and batch norms)
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                init_weights(m, init_type='kaiming')
            elif isinstance(m, nn.BatchNorm3d):
                init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        # Encoder
        conv1 = self.conv1(inputs)
        maxpool1 = self.maxpool1(conv1)

        conv2 = self.conv2(maxpool1)
        maxpool2 = self.maxpool2(conv2)

        conv3 = self.conv3(maxpool2)
        maxpool3 = self.maxpool3(conv3)

        conv4 = self.conv4(maxpool3)
        maxpool4 = self.maxpool4(conv4)

        center = self.center(maxpool4)
        center = self.dropout1(center)

        # Decoder
        up4 = self.up_concat4(conv4, center)
        up3 = self.up_concat3(conv3, up4)
        up2 = self.up_concat2(conv2, up3)
        up1 = self.up_concat1(conv1, up2)
        up1 = self.dropout2(up1)

        final = self.final(up1)

        return final

    @staticmethod
    def apply_argmax_softmax(pred):
        # Softmax over the class dimension (name is historical; no argmax here).
        log_p = F.softmax(pred, dim=1)
        return log_p
| 3,617 | 34.821782 | 104 | py |
SSL4MIS | SSL4MIS-master/code/augmentations/ctaugment.py | # https://raw.githubusercontent.com/google-research/fixmatch/master/libml/ctaugment.py
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control Theory based self-augmentation, modified from https://github.com/vfdev-5/FixMatch-pytorch"""
import random
import torch
from collections import namedtuple
import numpy as np
from scipy.ndimage.interpolation import zoom
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
# Registry mapping op name -> OP(f=callable, bins=per-parameter bin counts).
# Registration order matters: CTAugment.policy() splits this dict by position
# into "strong" and "weak" ops.
OPS = {}
OP = namedtuple("OP", ("f", "bins"))
Sample = namedtuple("Sample", ("train", "probe"))


def register(*bins):
    """Decorator factory: register the op in OPS under its function name,
    recording one bin count per augmentation parameter."""
    def wrap(f):
        OPS[f.__name__] = OP(f, bins)
        return f

    return wrap
class CTAugment(object):
    """Control-theory augmentation (CTA) from FixMatch.

    Keeps a per-op, per-parameter table of bin weights (``self.rates``) that
    is updated online from model feedback via :meth:`update_rates`;
    :meth:`policy` samples augmentation parameters from those weights.

    Keyword arguments:
    - depth (int): number of ops chained per sampled policy. Default: 2.
    - th (float): weight threshold below which a bin's probability is
      zeroed. Default: 0.85.
    - decay (float): exponential-moving-average factor for rate updates.
      Default: 0.99.
    """

    def __init__(self, depth=2, th=0.85, decay=0.99):
        self.decay = decay
        self.depth = depth
        self.th = th
        self.rates = {}
        # One weight vector per op parameter, initialised to all-ones.
        for k, op in OPS.items():
            self.rates[k] = tuple([np.ones(x, "f") for x in op.bins])

    def rate_to_p(self, rate):
        # Convert raw rates to (unnormalised) probabilities; the (1 - decay)
        # floor avoids an all-zero vector, and bins below th are disabled.
        p = rate + (1 - self.decay)  # Avoid to have all zero.
        p = p / p.max()
        p[p < self.th] = 0
        return p

    def policy(self, probe, weak):
        # The first 11 registered ops are "strong", the rest "weak"
        # (matches the "# weak after here" marker in the op definitions).
        num_strong_ops = 11
        kl_weak = list(OPS.keys())[num_strong_ops:]
        kl_strong = list(OPS.keys())[:num_strong_ops]

        if weak:
            kl = kl_weak
        else:
            kl = kl_strong
        v = []
        if probe:
            # Probe policies sample parameters uniformly (used to measure
            # the model's response, not to train).
            for _ in range(self.depth):
                k = random.choice(kl)
                bins = self.rates[k]
                rnd = np.random.uniform(0, 1, len(bins))
                v.append(OP(k, rnd.tolist()))
            return v
        for _ in range(self.depth):
            vt = []
            k = random.choice(kl)
            bins = self.rates[k]
            rnd = np.random.uniform(0, 1, len(bins))
            for r, bin in zip(rnd, bins):
                # Pick a bin according to the learned weights, then jitter
                # within the bin so values cover [0, 1) continuously.
                p = self.rate_to_p(bin)
                value = np.random.choice(p.shape[0], p=p / p.sum())
                vt.append((value + r) / p.shape[0])
            v.append(OP(k, vt))
        return v

    def update_rates(self, policy, proximity):
        # EMA-update the weight of each bin used by the policy toward the
        # observed proximity (how close the model's prediction stayed).
        for k, bins in policy:
            for p, rate in zip(bins, self.rates[k]):
                p = int(p * len(rate) * 0.999)
                rate[p] = rate[p] * self.decay + proximity * (1 - self.decay)
        print(f"\t {k} weights updated")

    def stats(self):
        # Human-readable dump of the thresholded probabilities per op.
        return "\n".join(
            "%-16s %s"
            % (
                k,
                " / ".join(
                    " ".join("%.2f" % x for x in self.rate_to_p(rate))
                    for rate in self.rates[k]
                ),
            )
            for k in sorted(OPS.keys())
        )
def _enhance(x, op, level):
    # Map level in [0, 1] to a PIL enhancement factor in [0.1, 2.0].
    return op(x).enhance(0.1 + 1.9 * level)


def _imageop(x, op, level):
    # Blend the original with op(x); level is the mix weight.
    return Image.blend(x, op(x), level)


def _filter(x, op, level):
    # Blend the original with a filtered copy; level is the mix weight.
    return Image.blend(x, x.filter(op), level)


@register(17)
def autocontrast(x, level):
    return _imageop(x, ImageOps.autocontrast, level)


@register(17)
def brightness(x, brightness):
    return _enhance(x, ImageEnhance.Brightness, brightness)


@register(17)
def color(x, color):
    return _enhance(x, ImageEnhance.Color, color)


@register(17)
def contrast(x, contrast):
    return _enhance(x, ImageEnhance.Contrast, contrast)


@register(17)
def equalize(x, level):
    return _imageop(x, ImageOps.equalize, level)


@register(17)
def invert(x, level):
    return _imageop(x, ImageOps.invert, level)


@register(8)
def posterize(x, level):
    # Quantise level into 1..8 bits kept per channel.
    level = 1 + int(level * 7.999)
    return ImageOps.posterize(x, level)


@register(17)
def solarize(x, th):
    # Quantise th into a 0..255 pixel threshold.
    th = int(th * 255.999)
    return ImageOps.solarize(x, th)


@register(17)
def smooth(x, level):
    return _filter(x, ImageFilter.SMOOTH, level)


@register(17)
def blur(x, level):
    return _filter(x, ImageFilter.BLUR, level)


@register(17)
def sharpness(x, sharpness):
    return _enhance(x, ImageEnhance.Sharpness, sharpness)


# weak after here


@register(17)
def cutout(x, level):
    """Apply cutout to pil_img at the specified level."""
    # NOTE(review): PIL's Image.size is (width, height), so the names
    # img_height/img_width below are swapped unless the image is square;
    # kept as-is to match the upstream FixMatch implementation.
    size = 1 + int(level * min(x.size) * 0.499)
    img_height, img_width = x.size
    height_loc = np.random.randint(low=img_height // 2, high=img_height)
    width_loc = np.random.randint(low=img_height // 2, high=img_width)
    upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
    lower_coord = (
        min(img_height, height_loc + size // 2),
        min(img_width, width_loc + size // 2),
    )
    pixels = x.load()  # create the pixel map
    for i in range(upper_coord[0], lower_coord[0]):  # for every col:
        for j in range(upper_coord[1], lower_coord[1]):  # For every row
            x.putpixel((i, j), 0)  # set the color accordingly
    return x


@register()
def identity(x):
    # No-op augmentation (takes no level parameter, hence empty bins).
    return x


@register(17, 6)
def rescale(x, scale, method):
    # Crop a centred window of relative size (1 - 2*scale) and resize it
    # back to the original size with one of six resampling filters.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10; this
    # requires an older Pillow (or Image.LANCZOS) — confirm pinned version.
    s = x.size
    scale *= 0.25
    crop = (scale * s[0], scale * s[1], s[0] * (1 - scale), s[1] * (1 - scale))
    methods = (
        Image.ANTIALIAS,
        Image.BICUBIC,
        Image.BILINEAR,
        Image.BOX,
        Image.HAMMING,
        Image.NEAREST,
    )
    method = methods[int(method * 5.99)]
    return x.crop(crop).resize(x.size, method)


@register(17)
def rotate(x, angle):
    # Map angle in [0, 1] to a rotation in [-45, 45] degrees.
    angle = int(np.round((2 * angle - 1) * 45))
    return x.rotate(angle)


@register(17)
def shear_x(x, shear):
    # Map shear in [0, 1] to a horizontal shear factor in [-0.3, 0.3].
    shear = (2 * shear - 1) * 0.3
    return x.transform(x.size, Image.AFFINE, (1, shear, 0, 0, 1, 0))


@register(17)
def shear_y(x, shear):
    # Map shear in [0, 1] to a vertical shear factor in [-0.3, 0.3].
    shear = (2 * shear - 1) * 0.3
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, shear, 1, 0))


@register(17)
def translate_x(x, delta):
    # Map delta in [0, 1] to a horizontal shift in [-0.3, 0.3] pixels.
    delta = (2 * delta - 1) * 0.3
    return x.transform(x.size, Image.AFFINE, (1, 0, delta, 0, 1, 0))


@register(17)
def translate_y(x, delta):
    # Map delta in [0, 1] to a vertical shift in [-0.3, 0.3] pixels.
    delta = (2 * delta - 1) * 0.3
    return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, delta))
| 6,431 | 25.146341 | 103 | py |
SSL4MIS | SSL4MIS-master/code/dataloaders/brats2019.py | import os
import torch
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
import itertools
from torch.utils.data.sampler import Sampler
class BraTS2019(Dataset):
    """BraTS2019 dataset reading pre-converted HDF5 volumes.

    Expects ``base_dir`` to contain ``train.txt`` / ``val.txt`` listing case
    names (one per line, optionally comma-separated with extra fields) and a
    ``data/`` directory with ``<name>.h5`` files holding 'image' and 'label'
    datasets.

    Keyword arguments:
    - base_dir (str): dataset root directory.
    - split (str): 'train' reads train.txt, 'test' reads val.txt.
      NOTE(review): any other value leaves self.image_list unset and
      __len__/__getitem__ will raise AttributeError.
    - num (int, optional): if given, truncate to the first ``num`` cases.
    - transform (callable, optional): applied to each sample dict.
    """

    def __init__(self, base_dir=None, split='train', num=None, transform=None):
        self._base_dir = base_dir
        self.transform = transform
        self.sample_list = []

        train_path = self._base_dir+'/train.txt'
        test_path = self._base_dir+'/val.txt'

        if split == 'train':
            with open(train_path, 'r') as f:
                self.image_list = f.readlines()

        elif split == 'test':
            with open(test_path, 'r') as f:
                self.image_list = f.readlines()

        # Strip newlines and keep only the first comma-separated field.
        self.image_list = [item.replace('\n', '').split(",")[0] for item in self.image_list]
        if num is not None:
            self.image_list = self.image_list[:num]
        print("total {} samples".format(len(self.image_list)))

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, idx):
        image_name = self.image_list[idx]
        h5f = h5py.File(self._base_dir + "/data/{}.h5".format(image_name), 'r')
        image = h5f['image'][:]
        label = h5f['label'][:]
        sample = {'image': image, 'label': label.astype(np.uint8)}
        if self.transform:
            sample = self.transform(sample)
        return sample
class CenterCrop(object):
    """Deterministically crop the central ``output_size`` patch of a 3D
    image/label pair, zero-padding first when a dimension is too small.

    Args:
        output_size (tuple of int): target (W, H, D) patch size.
    """

    def __init__(self, output_size):
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        tw, th, td = self.output_size

        # Zero-pad symmetrically (with a 3-voxel margin) whenever any axis
        # is not strictly larger than the requested crop.
        if label.shape[0] <= tw or label.shape[1] <= th or label.shape[2] <= td:
            pads = [
                (max((tw - label.shape[0]) // 2 + 3, 0),) * 2,
                (max((th - label.shape[1]) // 2 + 3, 0),) * 2,
                (max((td - label.shape[2]) // 2 + 3, 0),) * 2,
            ]
            image = np.pad(image, pads, mode='constant', constant_values=0)
            label = np.pad(label, pads, mode='constant', constant_values=0)

        # Centre offsets, then slice the same window out of both volumes.
        w, h, d = image.shape
        w1 = int(round((w - tw) / 2.))
        h1 = int(round((h - th) / 2.))
        d1 = int(round((d - td) / 2.))
        window = (slice(w1, w1 + tw), slice(h1, h1 + th), slice(d1, d1 + td))

        return {'image': image[window], 'label': label[window]}
class RandomCrop(object):
    """Crop a randomly-located ``output_size`` patch from a 3D sample,
    zero-padding first when a dimension is too small.

    Args:
        output_size (tuple of int): target (W, H, D) patch size.
        with_sdf (bool): if True, the sample carries an 'sdf' volume that
            is padded and cropped with the same window.
    """

    def __init__(self, output_size, with_sdf=False):
        self.output_size = output_size
        self.with_sdf = with_sdf

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        sdf = sample['sdf'] if self.with_sdf else None
        ow, oh, od = self.output_size

        # Zero-pad symmetrically (with a 3-voxel margin) whenever any axis
        # is not strictly larger than the requested crop.
        if label.shape[0] <= ow or label.shape[1] <= oh or label.shape[2] <= od:
            pw = max((ow - label.shape[0]) // 2 + 3, 0)
            ph = max((oh - label.shape[1]) // 2 + 3, 0)
            pd = max((od - label.shape[2]) // 2 + 3, 0)
            pads = [(pw, pw), (ph, ph), (pd, pd)]
            image = np.pad(image, pads, mode='constant', constant_values=0)
            label = np.pad(label, pads, mode='constant', constant_values=0)
            if self.with_sdf:
                sdf = np.pad(sdf, pads, mode='constant', constant_values=0)

        # Uniformly random corner, one draw per axis.
        w, h, d = image.shape
        w1 = np.random.randint(0, w - ow)
        h1 = np.random.randint(0, h - oh)
        d1 = np.random.randint(0, d - od)
        window = (slice(w1, w1 + ow), slice(h1, h1 + oh), slice(d1, d1 + od))

        if self.with_sdf:
            return {'image': image[window], 'label': label[window],
                    'sdf': sdf[window]}
        return {'image': image[window], 'label': label[window]}
class RandomRotFlip(object):
"""
Crop randomly flip the dataset in a sample
Args:
output_size (int): Desired output size
"""
def __call__(self, sample):
image, label = sample['image'], sample['label']
k = np.random.randint(0, 4)
image = np.rot90(image, k)
label = np.rot90(label, k)
axis = np.random.randint(0, 2)
image = np.flip(image, axis=axis).copy()
label = np.flip(label, axis=axis).copy()
return {'image': image, 'label': label}
class RandomNoise(object):
def __init__(self, mu=0, sigma=0.1):
self.mu = mu
self.sigma = sigma
def __call__(self, sample):
image, label = sample['image'], sample['label']
noise = np.clip(self.sigma * np.random.randn(
image.shape[0], image.shape[1], image.shape[2]), -2*self.sigma, 2*self.sigma)
noise = noise + self.mu
image = image + noise
return {'image': image, 'label': label}
class CreateOnehotLabel(object):
    """Add a float32 one-hot encoding of 'label' (shape C x W x H x D) to the sample."""
    def __init__(self, num_classes):
        self.num_classes = num_classes
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        planes = [(label == cls).astype(np.float32)
                  for cls in range(self.num_classes)]
        onehot_label = np.stack(planes, axis=0).astype(np.float32)
        return {'image': image, 'label': label, 'onehot_label': onehot_label}
class ToTensor(object):
    """Convert a sample's ndarrays to torch Tensors.

    The image gains a leading channel axis (1, W, H, D) as float32; the label
    (and 'onehot_label' when present) become long tensors.
    """
    def __call__(self, sample):
        vol = sample['image']
        vol = vol.reshape(1, vol.shape[0], vol.shape[1], vol.shape[2]).astype(np.float32)
        out = {'image': torch.from_numpy(vol),
               'label': torch.from_numpy(sample['label']).long()}
        if 'onehot_label' in sample:
            out['onehot_label'] = torch.from_numpy(sample['onehot_label']).long()
        return out
class TwoStreamBatchSampler(Sampler):
    """Yield batches that mix indices from two disjoint index sets.

    An 'epoch' is a single pass over the primary indices; the secondary
    indices are re-shuffled and recycled for as long as needed.
    """
    def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
        self.primary_indices = primary_indices
        self.secondary_indices = secondary_indices
        self.secondary_batch_size = secondary_batch_size
        self.primary_batch_size = batch_size - secondary_batch_size
        assert len(self.primary_indices) >= self.primary_batch_size > 0
        assert len(self.secondary_indices) >= self.secondary_batch_size > 0
    def __iter__(self):
        primary_batches = grouper(iterate_once(self.primary_indices),
                                  self.primary_batch_size)
        secondary_batches = grouper(iterate_eternally(self.secondary_indices),
                                    self.secondary_batch_size)
        # each yielded batch is a tuple: primary chunk followed by secondary chunk
        return (p + s for p, s in zip(primary_batches, secondary_batches))
    def __len__(self):
        return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
    """Return one random permutation of *iterable* (a single epoch's order)."""
    shuffled = np.random.permutation(iterable)
    return shuffled
def iterate_eternally(indices):
    """Yield *indices* forever, re-shuffled on every pass."""
    def reshuffled_forever():
        while True:
            yield np.random.permutation(indices)
    return itertools.chain.from_iterable(reshuffled_forever())
def grouper(iterable, n):
    """Collect data into fixed-length chunks or blocks, dropping the tail.

    grouper('ABCDEFG', 3) --> ('A','B','C') ('D','E','F')

    Works by handing zip() n references to one shared iterator, so each
    output tuple consumes n consecutive items; an incomplete final chunk
    is discarded (zip stops at the shortest iterator).
    """
    # Fix: the original return line carried garbled trailing text glued on by
    # a data-extraction step, which made it a syntax error.
    args = [iter(iterable)] * n
    return zip(*args)
SSL4MIS | SSL4MIS-master/code/dataloaders/utils.py | import os
import torch
import numpy as np
import torch.nn as nn
# import matplotlib.pyplot as plt
from skimage import measure
import scipy.ndimage as nd
def recursive_glob(rootdir='.', suffix=''):
    """Performs recursive glob with given suffix and rootdir
    :param rootdir is the root directory
    :param suffix is the suffix to be searched
    """
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        for name in filenames:
            if name.endswith(suffix):
                matches.append(os.path.join(dirpath, name))
    return matches
def get_cityscapes_labels():
    """Return the 19 Cityscapes train-class colours as a (19, 3) int array."""
    palette = [
        (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
        (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
        (107, 142, 35), (152, 251, 152), (0, 130, 180), (220, 20, 60),
        (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
        (0, 80, 100), (0, 0, 230), (119, 11, 32),
    ]
    return np.array(palette)
def get_pascal_labels():
    """Load the mapping that associates pascal classes with label colors
    Returns:
        np.ndarray with dimensions (21, 3)
    """
    colour_map = [
        (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0),
        (0, 0, 128), (128, 0, 128), (0, 128, 128), (128, 128, 128),
        (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0),
        (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
        (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0),
        (0, 64, 128),
    ]
    return np.asarray(colour_map)
def encode_segmap(mask):
    """Encode an RGB pascal label image into a class-index map.

    Args:
        mask (np.ndarray): (M, N, 3) image whose pixel colours are pascal
            class colours.
    Returns:
        (np.ndarray): (M, N) int map; each value is a class index.
    """
    mask = mask.astype(int)
    label_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.int16)
    for idx, colour in enumerate(get_pascal_labels()):
        hit = np.all(mask == colour, axis=-1)
        label_mask[np.where(hit)[:2]] = idx
    return label_mask.astype(int)
def decode_seg_map_sequence(label_masks, dataset='pascal'):
    """Decode a batch of label maps to a (B, 3, H, W) float tensor of RGB images."""
    decoded = [decode_segmap(mask, dataset) for mask in label_masks]
    stacked = np.array(decoded).transpose([0, 3, 1, 2])
    return torch.from_numpy(stacked)
def decode_segmap(label_mask, dataset, plot=False):
    """Decode segmentation class labels into a color image

    Args:
        label_mask (np.ndarray): an (M,N) array of integer values denoting
        the class label at each spatial location.
        dataset (str): 'pascal' or 'cityscapes'; selects the colour palette.
        plot (bool, optional): whether to show the resulting color image
        in a figure (in that case nothing is returned).
    Returns:
        (np.ndarray, optional): the resulting decoded color image, values
        normalised to [0, 1]; only returned when plot is False.
    Raises:
        NotImplementedError: for an unknown dataset name.
    """
    if dataset == 'pascal':
        n_classes = 21
        label_colours = get_pascal_labels()
    elif dataset == 'cityscapes':
        n_classes = 19
        label_colours = get_cityscapes_labels()
    else:
        raise NotImplementedError
    r = label_mask.copy()
    g = label_mask.copy()
    b = label_mask.copy()
    for ll in range(0, n_classes):
        r[label_mask == ll] = label_colours[ll, 0]
        g[label_mask == ll] = label_colours[ll, 1]
        b[label_mask == ll] = label_colours[ll, 2]
    rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))
    rgb[:, :, 0] = r / 255.0
    rgb[:, :, 1] = g / 255.0
    rgb[:, :, 2] = b / 255.0
    if plot:
        # Fix: the module-level matplotlib import is commented out, so `plt`
        # was an unresolved name and plot=True raised NameError. Import
        # locally so the plotting path works without a hard module dependency.
        import matplotlib.pyplot as plt
        plt.imshow(rgb)
        plt.show()
    else:
        return rgb
def generate_param_report(logfile, param):
    """Write str(param) to *logfile*, overwriting any existing content."""
    # for key, val in param.items():
    #     log_file.write(key + ':' + str(val) + '\n')
    with open(logfile, 'w') as log_file:
        log_file.write(str(param))
def cross_entropy2d(logit, target, ignore_index=255, weight=None, size_average=True, batch_average=True):
    """2-D cross entropy over (N, C, H, W) logits and (N, 1, H, W) targets.

    The raw loss is summed over all pixels, then optionally divided by the
    number of pixels (size_average) and by the batch size (batch_average).

    Args:
        logit: (N, C, H, W) raw class scores.
        target: (N, 1, H, W) integer labels; squeezed to (N, H, W).
        ignore_index: label value excluded from the loss.
        weight: optional per-class weight list/array (moved to CUDA, as the
            original training setup expects GPU tensors in that branch).
        size_average: divide by H*W when True.
        batch_average: divide by N when True.
    Returns:
        scalar loss tensor.
    """
    n, c, h, w = logit.size()
    target = target.squeeze(1)
    # Fix: `size_average=False` was removed from nn.CrossEntropyLoss in
    # modern PyTorch; `reduction='sum'` is the supported equivalent.
    if weight is None:
        criterion = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='sum')
    else:
        criterion = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(weight)).float().cuda(),
                                        ignore_index=ignore_index, reduction='sum')
    loss = criterion(logit, target.long())
    if size_average:
        loss /= (h * w)
    if batch_average:
        loss /= n
    return loss
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
    """Polynomial learning-rate schedule: base_lr * (1 - iter/max_iter) ** power."""
    fraction_left = 1 - float(iter_) / max_iter
    return base_lr * fraction_left ** power
def get_iou(pred, gt, n_classes=21):
    """Sum over the batch of each image's mean IoU across classes with a
    non-empty union.

    Args:
        pred: iterable of per-image predicted label tensors.
        gt: iterable of per-image ground-truth label tensors (same shapes).
        n_classes: number of classes to evaluate.
    Returns:
        float: sum of per-image mean IoUs (NOT averaged over the batch).
    """
    total_iou = 0.0
    for i in range(len(pred)):
        pred_tmp = pred[i]
        gt_tmp = gt[i]
        intersect = [0] * n_classes
        union = [0] * n_classes
        for j in range(n_classes):
            # NOTE(review): adding two comparison results relies on the old
            # uint8 tensor semantics ("match == 2" marks pixels where both
            # agree); on newer torch `==` yields Bool tensors whose `+`
            # behaves differently — verify against the torch version in use.
            match = (pred_tmp == j) + (gt_tmp == j)
            it = torch.sum(match == 2).item()
            un = torch.sum(match > 0).item()
            intersect[j] += it
            union[j] += un
        iou = []
        for k in range(n_classes):
            if union[k] == 0:
                continue  # class absent from both maps: skipped, not counted as 1
            iou.append(intersect[k] / union[k])
        img_iou = (sum(iou) / len(iou))
        total_iou += img_iou
    return total_iou
def get_dice(pred, gt):
    """Sum of per-image dice coefficients over the batch.

    Args:
        pred: batch of predicted binary label tensors.
        gt: batch of ground-truth binary label tensors (same shapes).
    Returns:
        float: sum (not mean) of per-image dice values; each dice uses a
        +1 smoothing term in the denominator.
    """
    total_dice = 0.0
    pred = pred.long()
    gt = gt.long()
    for i in range(len(pred)):
        pred_tmp = pred[i]
        gt_tmp = gt[i]
        dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
        # Fix: removed a leftover debug print(dice) that spammed stdout on
        # every image during evaluation.
        total_dice += dice
    return total_dice
def get_mc_dice(pred, gt, num=2):
    # num is the total number of classes, include the background
    """Per-class dice sums over the batch, excluding the background class.

    Args:
        pred: batch of predicted label tensors.
        gt: batch of ground-truth label tensors (same shapes).
        num: total number of classes including background.
    Returns:
        np.ndarray of length num-1; entry j-1 is the summed dice of class j.
    """
    total_dice = np.zeros(num-1)
    pred = pred.long()
    gt = gt.long()
    for i in range(len(pred)):
        for j in range(1, num):
            # NOTE(review): pred_tmp/gt_tmp are Bool tensors here, and the
            # `** 2` below relies on torch supporting pow on Bool — this
            # varies across torch versions; confirm on the pinned version.
            pred_tmp = (pred[i]==j)
            gt_tmp = (gt[i]==j)
            dice = 2.0*torch.sum(pred_tmp*gt_tmp).item()/(1.0+torch.sum(pred_tmp**2)+torch.sum(gt_tmp**2)).item()
            total_dice[j-1] +=dice
    return total_dice
def post_processing(prediction):
    """Clean a binary prediction mask: fill holes, then remove connected
    components smaller than 20% of the total foreground volume.

    Args:
        prediction: binary (0/1) ndarray mask.
    Returns:
        the cleaned binary mask (same shape).
    """
    prediction = nd.binary_fill_holes(prediction)
    label_cc, num_cc = measure.label(prediction,return_num=True)
    total_cc = np.sum(prediction)
    # NOTE(review): the result of regionprops is discarded — presumably a
    # leftover; confirm before removing.
    measure.regionprops(label_cc)
    for cc in range(1,num_cc+1):
        single_cc = (label_cc==cc)
        single_vol = np.sum(single_cc)
        # drop components contributing less than 20% of the foreground
        if single_vol/total_cc<0.2:
            prediction[single_cc]=0
    return prediction
| 6,731 | 30.311628 | 144 | py |
SSL4MIS | SSL4MIS-master/code/dataloaders/dataset.py | import os
import cv2
import torch
import random
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
from scipy.ndimage.interpolation import zoom
from torchvision import transforms
import itertools
from scipy import ndimage
from torch.utils.data.sampler import Sampler
import augmentations
from augmentations.ctaugment import OPS
import matplotlib.pyplot as plt
from PIL import Image
class BaseDataSets(Dataset):
    """ACDC-style slice dataset read from HDF5 files under *base_dir*.

    Train samples are 2-D slices listed in train_slices.list; val samples are
    full volumes listed in val.list. Optional CTAugment policies (ops_weak /
    ops_strong) are forwarded to the transform when both are given.
    """
    def __init__(
        self,
        base_dir=None,
        split="train",
        num=None,
        transform=None,
        ops_weak=None,
        ops_strong=None,
    ):
        self._base_dir = base_dir
        self.sample_list = []
        self.split = split
        self.transform = transform
        self.ops_weak = ops_weak
        self.ops_strong = ops_strong
        # either both CTAugment policies are supplied or neither is
        assert bool(ops_weak) == bool(
            ops_strong
        ), "For using CTAugment learned policies, provide both weak and strong batch augmentation policy"
        if self.split == "train":
            with open(self._base_dir + "/train_slices.list", "r") as f1:
                self.sample_list = f1.readlines()
            self.sample_list = [item.replace("\n", "") for item in self.sample_list]
        elif self.split == "val":
            with open(self._base_dir + "/val.list", "r") as f:
                self.sample_list = f.readlines()
            self.sample_list = [item.replace("\n", "") for item in self.sample_list]
        # optionally limit the number of (labeled) training samples
        if num is not None and self.split == "train":
            self.sample_list = self.sample_list[:num]
        print("total {} samples".format(len(self.sample_list)))
    def __len__(self):
        return len(self.sample_list)
    def __getitem__(self, idx):
        case = self.sample_list[idx]
        # train cases live under data/slices/, val cases directly under data/
        if self.split == "train":
            h5f = h5py.File(self._base_dir + "/data/slices/{}.h5".format(case), "r")
        else:
            h5f = h5py.File(self._base_dir + "/data/{}.h5".format(case), "r")
        image = h5f["image"][:]
        label = h5f["label"][:]
        sample = {"image": image, "label": label}
        if self.split == "train":
            # CTAugment path takes the two learned policies; otherwise the
            # transform is a plain callable on the sample dict
            if None not in (self.ops_weak, self.ops_strong):
                sample = self.transform(sample, self.ops_weak, self.ops_strong)
            else:
                sample = self.transform(sample)
        sample["idx"] = idx
        return sample
def random_rot_flip(image, label=None):
k = np.random.randint(0, 4)
image = np.rot90(image, k)
axis = np.random.randint(0, 2)
image = np.flip(image, axis=axis).copy()
if label is not None:
label = np.rot90(label, k)
label = np.flip(label, axis=axis).copy()
return image, label
else:
return image
def random_rotate(image, label):
angle = np.random.randint(-20, 20)
image = ndimage.rotate(image, angle, order=0, reshape=False)
label = ndimage.rotate(label, angle, order=0, reshape=False)
return image, label
def color_jitter(image):
    """Apply a strong random colour jitter; non-tensor input is converted first."""
    if not torch.is_tensor(image):
        image = transforms.ToTensor()(image)
    # s is the strength of color distortion.
    s = 1.0
    jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    return jitter(image)
class CTATransform(object):
    """FixMatch/CTAugment transform: resizes a sample and produces a weakly
    and a strongly augmented image plus the weakly-augmented label.

    Args:
        output_size: (H, W) target size for image and label.
        cta: CTAugment controller (stored; policies arrive per call).
    """
    def __init__(self, output_size, cta):
        self.output_size = output_size
        self.cta = cta
    def __call__(self, sample, ops_weak, ops_strong):
        image, label = sample["image"], sample["label"]
        image = self.resize(image)
        label = self.resize(label)
        to_tensor = transforms.ToTensor()
        # fix dimensions
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
        label = torch.from_numpy(label.astype(np.uint8))
        # apply augmentations
        # strong ops are stacked on top of the weak image, not the original
        image_weak = augmentations.cta_apply(transforms.ToPILImage()(image), ops_weak)
        image_strong = augmentations.cta_apply(image_weak, ops_strong)
        label_aug = augmentations.cta_apply(transforms.ToPILImage()(label), ops_weak)
        label_aug = to_tensor(label_aug).squeeze(0)
        # ToTensor rescales to [0,1]; restore integer class ids
        label_aug = torch.round(255 * label_aug).int()
        sample = {
            "image_weak": to_tensor(image_weak),
            "image_strong": to_tensor(image_strong),
            "label_aug": label_aug,
        }
        return sample
    def cta_apply(self, pil_img, ops):
        # apply a CTAugment op list to a PIL image; None means identity
        if ops is None:
            return pil_img
        for op, args in ops:
            pil_img = OPS[op].f(pil_img, *args)
        return pil_img
    def resize(self, image):
        # nearest-neighbour zoom to output_size (order=0 keeps labels discrete)
        x, y = image.shape
        return zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
class RandomGenerator(object):
    """Standard train-time augmentation: random rot/flip or rotation, then
    resize to *output_size* and convert to tensors.
    """
    def __init__(self, output_size):
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample["image"], sample["label"]
        # ind = random.randrange(0, img.shape[0])
        # image = img[ind, ...]
        # label = lab[ind, ...]
        # NOTE(review): the elif draws random.random() a second time, so
        # rotation happens with probability 0.25 overall and only when the
        # rot/flip branch was skipped — confirm this is intended.
        if random.random() > 0.5:
            image, label = random_rot_flip(image, label)
        elif random.random() > 0.5:
            image, label = random_rotate(image, label)
        x, y = image.shape
        # nearest-neighbour zoom keeps label values discrete
        image = zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        label = zoom(label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
        label = torch.from_numpy(label.astype(np.uint8))
        sample = {"image": image, "label": label}
        return sample
class WeakStrongAugment(object):
    """returns weakly and strongly augmented images
    Args:
        object (tuple): output size of network
    """
    def __init__(self, output_size):
        self.output_size = output_size
    def __call__(self, sample):
        image, label = sample["image"], sample["label"]
        image = self.resize(image)
        label = self.resize(label)
        # weak augmentation is rotation / flip
        image_weak, label = random_rot_flip(image, label)
        # strong augmentation is color jitter
        image_strong = color_jitter(image_weak).type("torch.FloatTensor")
        # fix dimensions
        image = torch.from_numpy(image.astype(np.float32)).unsqueeze(0)
        image_weak = torch.from_numpy(image_weak.astype(np.float32)).unsqueeze(0)
        label = torch.from_numpy(label.astype(np.uint8))
        # note: the returned label corresponds to the WEAK augmentation;
        # 'image' is the un-augmented (resized only) input
        sample = {
            "image": image,
            "image_weak": image_weak,
            "image_strong": image_strong,
            "label_aug": label,
        }
        return sample
    def resize(self, image):
        # nearest-neighbour zoom to output_size (order=0 keeps labels discrete)
        x, y = image.shape
        return zoom(image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
class TwoStreamBatchSampler(Sampler):
    """Iterate two sets of indices
    An 'epoch' is one iteration through the primary indices.
    During the epoch, the secondary indices are iterated through
    as many times as needed.

    Each yielded batch concatenates primary_batch_size primary indices with
    secondary_batch_size secondary indices (batch_size total).
    """
    def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
        self.primary_indices = primary_indices
        self.secondary_indices = secondary_indices
        self.secondary_batch_size = secondary_batch_size
        self.primary_batch_size = batch_size - secondary_batch_size
        # both streams must be able to fill their share of a batch
        assert len(self.primary_indices) >= self.primary_batch_size > 0
        assert len(self.secondary_indices) >= self.secondary_batch_size > 0
    def __iter__(self):
        primary_iter = iterate_once(self.primary_indices)
        secondary_iter = iterate_eternally(self.secondary_indices)
        # zip stops when the primary stream is exhausted (one epoch)
        return (
            primary_batch + secondary_batch
            for (primary_batch, secondary_batch) in zip(
                grouper(primary_iter, self.primary_batch_size),
                grouper(secondary_iter, self.secondary_batch_size),
            )
        )
    def __len__(self):
        return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
    """Return a single random permutation of *iterable*."""
    permuted = np.random.permutation(iterable)
    return permuted
def iterate_eternally(indices):
    """Yield *indices* endlessly, re-shuffling on every full pass."""
    def endless_permutations():
        while True:
            yield np.random.permutation(indices)
    return itertools.chain.from_iterable(endless_permutations())
def grouper(iterable, n):
    """Collect data into fixed-length chunks or blocks, dropping any tail.

    grouper('ABCDEFG', 3) --> ('A','B','C') ('D','E','F')
    """
    pieces = [iter(iterable)] * n
    return zip(*pieces)
| 8,440 | 32.232283 | 105 | py |
SSL4MIS | SSL4MIS-master/code/utils/losses.py | import torch
from torch.nn import functional as F
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
def dice_loss(score, target):
    """Soft dice loss with squared-term denominator; returns 1 - dice."""
    target = target.float()
    eps = 1e-5
    overlap = torch.sum(score * target)
    denom = torch.sum(score * score) + torch.sum(target * target)
    dice = (2 * overlap + eps) / (denom + eps)
    return 1 - dice
def dice_loss1(score, target):
    """Soft dice loss with plain-sum denominator; returns 1 - dice."""
    target = target.float()
    eps = 1e-5
    overlap = torch.sum(score * target)
    denom = torch.sum(score) + torch.sum(target)
    dice = (2 * overlap + eps) / (denom + eps)
    return 1 - dice
def entropy_loss(p, C=2):
    """Mean normalised entropy of probability maps p (class dim 1).

    Normalised by log(C) so the result lies in [0, 1]; the normaliser is
    placed on CUDA, so this path expects GPU tensors.
    """
    # p N*C*W*H*D
    normaliser = torch.tensor(np.log(C)).cuda()
    per_position = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1) / normaliser
    return torch.mean(per_position)
def softmax_dice_loss(input_logits, target_logits):
    """Mean per-channel dice loss between the softmax of two logit tensors.

    Both sides are softmaxed over dim 1 and dice_loss1 is averaged across
    channels; the targets are not detached here.
    """
    assert input_logits.size() == target_logits.size()
    input_softmax = F.softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)
    num_channels = input_logits.shape[1]
    total = 0
    for ch in range(num_channels):
        total += dice_loss1(input_softmax[:, ch], target_softmax[:, ch])
    return total / num_channels
def entropy_loss_map(p, C=2):
    """Per-position normalised entropy map of p (class dim 1, kept as size 1).

    Normalised by log(C); the normaliser is placed on CUDA, so this path
    expects GPU tensors.
    """
    raw = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True)
    return raw / torch.tensor(np.log(C)).cuda()
def softmax_mse_loss(input_logits, target_logits, sigmoid=False):
    """Elementwise squared difference between the softmax (or sigmoid) of
    two logit tensors.

    Returns the UNREDUCED per-element loss; callers reduce as needed.
    Gradients flow into both arguments (no detach is performed here).
    """
    assert input_logits.size() == target_logits.size()
    if sigmoid:
        probs_a = torch.sigmoid(input_logits)
        probs_b = torch.sigmoid(target_logits)
    else:
        probs_a = F.softmax(input_logits, dim=1)
        probs_b = F.softmax(target_logits, dim=1)
    return (probs_a - probs_b) ** 2
def softmax_kl_loss(input_logits, target_logits, sigmoid=False):
    """Mean-reduced KL divergence from the target to the input distribution.

    Input logits become log-probabilities, target logits probabilities
    (softmax over dim 1, or elementwise sigmoid when sigmoid=True).
    Gradients flow into both arguments (no detach is performed here).
    """
    assert input_logits.size() == target_logits.size()
    if sigmoid:
        input_log_softmax = torch.log(torch.sigmoid(input_logits))
        target_softmax = torch.sigmoid(target_logits)
    else:
        input_log_softmax = F.log_softmax(input_logits, dim=1)
        target_softmax = F.softmax(target_logits, dim=1)
    # return F.kl_div(input_log_softmax, target_softmax)
    # mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
    return F.kl_div(input_log_softmax, target_softmax, reduction='mean')
def symmetric_mse_loss(input1, input2):
    """Mean squared error that backpropagates into BOTH arguments
    (unlike F.mse_loss used with a detached target).
    """
    assert input1.size() == input2.size()
    diff = input1 - input2
    return torch.mean(diff * diff)
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al.): -(1-pt)^gamma * log(pt).

    Args:
        gamma: focusing exponent; 0 reduces to cross entropy.
        alpha: optional class balance — a scalar becomes [alpha, 1-alpha]
            (binary case), a list becomes a per-class weight tensor.
        size_average: mean over elements when True, sum otherwise.
    """
    def __init__(self, gamma=2, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1-alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average
    def forward(self, input, target):
        # flatten spatial dims so every pixel becomes a row of class scores
        if input.dim() > 2:
            # N,C,H,W => N,C,H*W
            input = input.view(input.size(0), input.size(1), -1)
            input = input.transpose(1, 2)  # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))  # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)
        logpt = F.log_softmax(input, dim=1)
        # pick the log-probability of each row's true class
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        # pt is detached via .data, so the (1-pt)^gamma factor carries no gradient
        pt = Variable(logpt.data.exp())
        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * Variable(at)
        loss = -1 * (1-pt)**self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
class DiceLoss(nn.Module):
    """Multi-class soft dice loss averaged over classes.

    The target label map is one-hot encoded internally; pass softmax=True to
    apply a softmax over dim 1 of the inputs first. Optional per-class
    weights scale each class's dice term.
    """
    def __init__(self, n_classes):
        super(DiceLoss, self).__init__()
        self.n_classes = n_classes
    def _one_hot_encoder(self, input_tensor):
        # one channel per class: channel c is 1.0 where label == c
        channels = [input_tensor == c * torch.ones_like(input_tensor)
                    for c in range(self.n_classes)]
        return torch.cat(channels, dim=1).float()
    def _dice_loss(self, score, target):
        target = target.float()
        eps = 1e-5
        overlap = torch.sum(score * target)
        denom = torch.sum(score * score) + torch.sum(target * target)
        return 1 - (2 * overlap + eps) / (denom + eps)
    def forward(self, inputs, target, weight=None, softmax=False):
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict & target shape do not match'
        class_wise_dice = []
        loss = 0.0
        for c in range(self.n_classes):
            dice = self._dice_loss(inputs[:, c], target[:, c])
            class_wise_dice.append(1.0 - dice.item())
            loss += dice * weight[c]
        return loss / self.n_classes
def entropy_minmization(p):
    """Mean over all positions of the entropy of probability maps p (class dim 1)."""
    per_position = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1)
    return torch.mean(per_position)
def entropy_map(p):
    """Per-position entropy of probability maps p, keeping the class dim as size 1."""
    return -1 * torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True)
def compute_kl_loss(p, q):
    """Symmetric KL divergence between the softmax distributions of logits
    p and q, each direction mean-reduced over every element.
    """
    kl_pq = F.kl_div(F.log_softmax(p, dim=-1),
                     F.softmax(q, dim=-1), reduction='none').mean()
    kl_qp = F.kl_div(F.log_softmax(q, dim=-1),
                     F.softmax(p, dim=-1), reduction='none').mean()
    # Using function "sum" and "mean" are depending on your task
    return (kl_pq + kl_qp) / 2
| 6,990 | 30.777273 | 85 | py |
SSL4MIS | SSL4MIS-master/code/utils/util.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import numpy as np
import re
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
import torch
from torch.utils.data.sampler import Sampler
import torch.distributed as dist
import networks
# many issues with this function
# many issues with this function
def load_model(path):
    """Loads model and return it without DataParallel table.

    Returns the restored model, or None when *path* does not exist.

    NOTE(review): the call below references `models`, but this module
    imports `networks` (see file imports) — as written this raises
    NameError; confirm which registry was intended.
    """
    if os.path.isfile(path):
        print("=> loading checkpoint '{}'".format(path))
        checkpoint = torch.load(path)
        for key in checkpoint["state_dict"]:
            print(key)
        # size of the top layer
        N = checkpoint["state_dict"]["decoder.out_conv.bias"].size()
        # build skeleton of the model
        sob = "sobel.0.weight" in checkpoint["state_dict"].keys()
        model = models.__dict__[checkpoint["arch"]](sobel=sob, out=int(N[0]))
        # deal with a dataparallel table
        def rename_key(key):
            # strip the ".module" fragment DataParallel inserts into key names
            if not "module" in key:
                return key
            return "".join(key.split(".module"))
        checkpoint["state_dict"] = {
            rename_key(key): val for key, val in checkpoint["state_dict"].items()
        }
        # load weights
        model.load_state_dict(checkpoint["state_dict"])
        print("Loaded")
    else:
        model = None
        print("=> no checkpoint found at '{}'".format(path))
    return model
def load_checkpoint(path, model, optimizer, from_ddp=False):
    """loads previous checkpoint
    Args:
        path (str): path to checkpoint
        model (model): model to restore checkpoint to
        optimizer (optimizer): torch optimizer to load optimizer state_dict to
        from_ddp (bool, optional): load DistributedDataParallel checkpoint to regular model. Defaults to False.
    Returns:
        model, optimizer, epoch_num, loss
    """
    checkpoint = torch.load(path)
    # restore model weights and optimizer state in place
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    loss = checkpoint["loss"]
    return model, optimizer, checkpoint["epoch"], loss.item()
def restore_model(logger, snapshot_path, model_num=None):
    """wrapper function to read log dir and load restore a previous checkpoint
    Args:
        logger (Logger): logger object (for info output to console)
        snapshot_path (str): path to checkpoint directory
        model_num (str, optional): filename fragment selecting which model
            family to restore; defaults to "model_iter".
    Returns:
        model, optimizer, start_epoch, performance — or None (implicitly)
        when restoration fails.

    NOTE(review): `model` and `optimizer` are never defined in this scope,
    so the load_checkpoint call below raises NameError whenever a
    checkpoint is found; the intended signature presumably accepted them
    as parameters — verify against callers before relying on this.
    """
    try:
        # check if there is previous progress to be restored:
        logger.info(f"Snapshot path: {snapshot_path}")
        iter_num = []
        name = "model_iter"
        if model_num:
            name = model_num
        # collect the iteration numbers embedded in matching filenames
        for filename in os.listdir(snapshot_path):
            if name in filename:
                basename, extension = os.path.splitext(filename)
                iter_num.append(int(basename.split("_")[2]))
        iter_num = max(iter_num)
        # pick the filename that carries the largest iteration number
        # (substring match — e.g. "12" also matches "120"; fragile)
        for filename in os.listdir(snapshot_path):
            if name in filename and str(iter_num) in filename:
                model_checkpoint = filename
    except Exception as e:
        logger.warning(f"Error finding previous checkpoints: {e}")
    try:
        logger.info(f"Restoring model checkpoint: {model_checkpoint}")
        model, optimizer, start_epoch, performance = load_checkpoint(
            snapshot_path + "/" + model_checkpoint, model, optimizer
        )
        logger.info(f"Models restored from iteration {iter_num}")
        return model, optimizer, start_epoch, performance
    except Exception as e:
        logger.warning(f"Unable to restore model checkpoint: {e}, using new model")
def save_checkpoint(epoch, model, optimizer, loss, path):
    """Saves model as checkpoint: epoch, model/optimizer state dicts and loss."""
    payload = {
        "epoch": epoch,
        "state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "loss": loss,
    }
    torch.save(payload, path)
class UnifLabelSampler(Sampler):
    """Samples elements uniformely accross pseudolabels.
    Args:
        N (int): size of returned iterator.
        images_lists: dict of key (target), value (list of data with this target)
    """
    def __init__(self, N, images_lists):
        self.N = N
        self.images_lists = images_lists
        self.indexes = self.generate_indexes_epoch()
    def generate_indexes_epoch(self):
        # draw the same number of indices from every pseudolabel group,
        # sampling with replacement only when a group is too small
        per_label = int(self.N / len(self.images_lists)) + 1
        res = np.zeros(per_label * len(self.images_lists))
        for label, group in enumerate(self.images_lists):
            picked = np.random.choice(
                group,
                per_label,
                replace=(len(group) <= per_label),
            )
            res[label * per_label : (label + 1) * per_label] = picked
        np.random.shuffle(res)
        return res[: self.N].astype("int")
    def __iter__(self):
        return iter(self.indexes)
    def __len__(self):
        return self.N
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self):
        self.reset()
    def reset(self):
        # val: last reported value; sum/count accumulate for the mean
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def learning_rate_decay(optimizer, t, lr_0):
    """Inverse-sqrt decay applied in place per param group:
    lr = lr_0 / sqrt(1 + lr_0 * weight_decay * t).
    """
    for group in optimizer.param_groups:
        group["lr"] = lr_0 / np.sqrt(1 + lr_0 * group["weight_decay"] * t)
class Logger:
    """Class to update every epoch to keep trace of the results
    Methods:
        - log() log and save

    The full history is re-pickled to *path* on every call.
    """
    def __init__(self, path):
        self.path = path
        self.data = []
    def log(self, train_point):
        self.data.append(train_point)
        with open(self.path, "wb") as fp:
            pickle.dump(self.data, fp, -1)
def compute_sdf(img_gt, out_shape):
    """
    compute the signed distance map of binary mask
    input: segmentation, shape = (batch_size, x, y, z)
    output: the Signed Distance Map (SDM)
    sdf(x) = 0; x in segmentation boundary
             -inf|x-y|; x in segmentation
             +inf|x-y|; x out of segmentation
    normalize sdf to [-1,1]
    """
    img_gt = img_gt.astype(np.uint8)
    normalized_sdf = np.zeros(out_shape)
    for b in range(out_shape[0]):  # batch size
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        posmask = img_gt[b].astype(bool)
        if posmask.any():
            negmask = ~posmask
            # Euclidean distance to the mask boundary from each side
            posdis = distance(posmask)
            negdis = distance(negmask)
            boundary = skimage_seg.find_boundaries(posmask, mode="inner").astype(
                np.uint8
            )
            # min-max normalise each side, then combine: outside positive,
            # inside negative, boundary forced to exactly 0
            sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (
                posdis - np.min(posdis)
            ) / (np.max(posdis) - np.min(posdis))
            sdf[boundary == 1] = 0
            normalized_sdf[b] = sdf
            # assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
            # assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))
    return normalized_sdf
# set up process group for distributed computing
def distributed_setup(rank, world_size):
    """Configure a single-node rendezvous and join the NCCL process group."""
    os.environ.update({"MASTER_ADDR": "localhost", "MASTER_PORT": "12355"})
    print("setting up dist process group now")
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
def load_ddp_to_nddp(state_dict):
    """Convert a (Distributed)DataParallel state_dict for a plain model by
    stripping the "module." prefix from wrapped keys.

    Args:
        state_dict: mapping of parameter names to tensors.
    Returns:
        dict with every "module."-prefixed key renamed; other keys unchanged.

    Fixes over the original implementation:
    - `model_dict` was only created in the else-branch, so a dict whose keys
      all carried "module" raised UnboundLocalError (NameError);
    - the else-branch replaced the WHOLE result with the input dict,
      discarding already-renamed entries;
    - re.sub("module", "", k) left a leading "." on renamed keys (and could
      corrupt keys merely containing the substring "module").
    """
    prefix = "module."
    model_dict = {}
    for k, v in state_dict.items():
        if k.startswith(prefix):
            model_dict[k[len(prefix):]] = v
        else:
            model_dict[k] = v
    return model_dict
| 8,190 | 31.121569 | 111 | py |
MERL-LB | MERL-LB-main/mp_test_nn_load.py | import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
    """MLP scoring head: a stack of Linear layers (ReLU between, none after
    the last) whose final singleton dimension is squeezed away.

    param_num counts the flat number of weights+biases so that update() can
    load a single flat weight vector (evolution-style parameter injection).
    """
    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0
        for in_dim, out_dim in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(layers)
    def forward(self, x):
        *hidden, head = self.fc
        for layer in hidden:
            x = F.relu(layer(x))
        return torch.squeeze(head(x), dim=-1)
    def update(self, weights):
        """Copy a flat weight vector into the layers, weights first then bias."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                w_count = layer.in_features * layer.out_features
                layer.weight.data = flat[cursor:cursor + w_count].reshape(
                    layer.out_features, layer.in_features)
                cursor += w_count
                layer.bias.data = flat[cursor:cursor + layer.out_features]
                cursor += layer.out_features
    def predict(self, input, action_mask=None):
        """Return the argmax action index; masked-out actions get -1e8 added."""
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()
    def show(self):
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Wraps a job-scheduling Actor: builds the per-machine feature vector
    from an environment observation and asks the actor for an action.
    """
    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()
    def update(self, job_weights):
        # inject a flat weight vector into the actor (evolutionary updates)
        self.job_actor.update(job_weights)
    def select_action(self, obs):
        """Return the machine index chosen for the current job.

        obs unpacks to: per-resource job request rates, job run time,
        per-machine occupancy-rate tensors, per-machine run times, an
        unused field, and a boolean action mask.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # cluster-level summary: mean/std of occupancy across machines
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        # replicate the job features once per machine so each machine row
        # carries (job, own occupancy, own runtime, cluster summary)
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action
    def show(self):
        self.job_actor.show()
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Play one job sequence with a trained agent and dump metrics/plots.

    Loads the checkpoint named in ``args.checkpoint_path``, runs sequence
    ``seq_index`` until the env reports done, saves occupancy / finish-time /
    job-count traces (both at the stop point and after ``run_to_end``), plots
    them, and returns ``(std_fitness, runtime_fitness, env.job_num)``.
    """
    print("start test seq_index: ", seq_index)
    # checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
    # checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
    agent = Agent()
    # state_dict = torch.load("24_f_-342.436_-0.029.pth")
    # agent.load_state_dict(state_dict)
    # The checkpoint holds the actor's weights only, not the whole Agent.
    state_dict = torch.load(args.checkpoint_path)
    agent.job_actor.load_state_dict(state_dict)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # print mean std and mean run time
    # Balance objective: std across machines (scaled to absolute capacity units),
    # averaged over resources and time.
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data (let all queued jobs drain before re-recording)
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # One utilization plot per resource dimension.
    # NOTE(review): assumes exactly 4 resource dimensions — confirm against env config.
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    # Sweep the user load (via max_job_num) and evaluate the checkpointed
    # policy on args.job_seq_num sequences per load level, in parallel.
    args = parse_args()
    args.job_seq_num = 5
    args.method = "igd"
    args.tag = "user_load_test02"
    args.actual = True
    # args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
    # args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
    args.checkpoint_path = (
        "output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
    )
    job_num_list = range(2, 10)
    # user_sigam_list = [0]
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    result = []
    result2 = []
    for max_job_num in job_num_list:
        # Expected cluster load for this job ceiling (mean req * mean len / capacity).
        user_load_rate = (
            max_job_num
            / 2
            * args.max_res_req
            / 2
            * args.max_job_len
            / 2
            / args.res_capacity
            / args.machine_num
        )
        if user_load_rate > 1.1:
            break
        print(f"Test user_load_rate {user_load_rate:.3f}")
        save_dir = os.path.join(
            root_dir,
            f"user_load_rate_{user_load_rate:.3f}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # save args
        args.max_job_num = max_job_num
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # Fan the job sequences out over a process pool.
        mutil_process = []
        pool = Pool(cpu_count())
        for i in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, i, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Collect per-sequence results and aggregate.
        fitness_record = []
        seq_job_nums = []  # FIX: was named `job_num_list`, shadowing the sweep range above
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            seq_job_nums.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            result2.append((user_load_rate, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        print(seq_job_nums)
        # FIX: save all per-sequence job counts (previously only the last `job_num`).
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(seq_job_nums))
        # FIX: populate `result`; it was never appended to, so the summary CSV
        # below was always empty. Column order matches the DataFrame columns.
        result.append(
            (
                user_load_rate,
                mean_fitness[0],
                mean_fitness[1],
                std_fitness[0],
                std_fitness[1],
            )
        )
        print(
            "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result,
        columns=[
            "user_load_rate",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, f"{args.method}_user_load_exp.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "user_load_rate",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, f"{args.method}_user_load_exp2.csv"))
| 11,123 | 32.506024 | 131 | py |
MERL-LB | MERL-LB-main/mp.py | import torch
from torchvision import datasets, transforms
from tqdm import tqdm
device_ids = [0, 1, 2, 3]  # available GPUs for DataParallel
BATCH_SIZE = 64
transform = transforms.Compose([transforms.ToTensor()])
# MNIST train/test sets; download on first run.
data_train = datasets.MNIST(root="./data/", transform=transform, train=True, download=True)
data_test = datasets.MNIST(root="./data/", transform=transform, train=False)
data_loader_train = torch.utils.data.DataLoader(
    dataset=data_train,
    # per-GPU batch size * number of GPUs
    batch_size=BATCH_SIZE * len(device_ids),
    shuffle=True,
    num_workers=2,
)
data_loader_test = torch.utils.data.DataLoader(
    dataset=data_test, batch_size=BATCH_SIZE * len(device_ids), shuffle=True, num_workers=2
)
class Model(torch.nn.Module):
    """Small MNIST CNN: two 3x3 conv layers + max-pool, then a 2-layer classifier."""

    def __init__(self):
        super(Model, self).__init__()
        # Feature extractor: 1x28x28 -> 128x14x14.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(stride=2, kernel_size=2),
        )
        # Classifier head over the flattened 14*14*128 feature map.
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(14 * 14 * 128, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(1024, 10),
        )

    def forward(self, x):
        features = self.conv1(x)
        flattened = features.view(-1, 14 * 14 * 128)
        return self.dense(flattened)
model = Model()
# Wrap the model for multi-GPU data parallelism across device_ids.
model = torch.nn.DataParallel(model, device_ids=device_ids)
# Move the master copy of the model to GPU 0.
model = model.cuda()
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
n_epochs = 50
for epoch in range(n_epochs):
    running_loss = 0.0
    running_correct = 0
    print("Epoch {}/{}".format(epoch, n_epochs))
    print("-" * 10)
    for data in tqdm(data_loader_train):
        X_train, y_train = data
        # Inputs go to GPU 0; DataParallel scatters them across replicas.
        X_train, y_train = X_train.cuda(), y_train.cuda()
        outputs = model(X_train)
        _, pred = torch.max(outputs.data, 1)
        optimizer.zero_grad()
        loss = cost(outputs, y_train)
        loss.backward()
        optimizer.step()
        running_loss += loss.data.item()
        running_correct += torch.sum(pred == y_train.data)
    testing_correct = 0
    for data in data_loader_test:
        X_test, y_test = data
        # NOTE(review): the original comment said "device 1", but this moves the
        # batch to the default CUDA device (0) just like training — confirm intent.
        X_test, y_test = X_test.cuda(), y_test.cuda()
        outputs = model(X_test)
        _, pred = torch.max(outputs.data, 1)
        testing_correct += torch.sum(pred == y_test.data)
    print(
        "Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy is:{:.4f}".format(
            torch.true_divide(running_loss, len(data_train)),
            torch.true_divide(100 * running_correct, len(data_train)),
            torch.true_divide(100 * testing_correct, len(data_test)),
        )
    )
# Saves DataParallel state_dict (keys are prefixed with "module.").
torch.save(model.state_dict(), "model_parameter.pkl")
| 2,903 | 30.225806 | 91 | py |
MERL-LB | MERL-LB-main/mp_train_nn_nsga2_one.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP scorer whose parameters can be (de)serialized as one flat vector.

    The flat layout is, per layer: weight (out*in values, row-major) followed
    by bias (out values); ``param_num`` is the total length of that vector.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0
        for in_dim, out_dim in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(layers)

    def forward(self, x):
        # ReLU after every layer except the last, then drop the trailing dim of 1.
        for hidden in self.fc[:-1]:
            x = F.relu(hidden(x))
        scores = self.fc[-1](x)
        return torch.squeeze(scores, dim=-1)

    def update(self, weights):
        """Overwrite all layer weights/biases from one flat vector (gene)."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                w_len = layer.in_features * layer.out_features
                layer.weight.data = flat[cursor : cursor + w_len].reshape(
                    layer.out_features, layer.in_features
                )
                cursor += w_len
                layer.bias.data = flat[cursor : cursor + layer.out_features]
                cursor += layer.out_features

    def predict(self, input, action_mask=None):
        """Score the candidates and return the argmax index as a Python int.

        Entries whose mask is False get -1e8 added so they can never win.
        """
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Print every parameter tensor, flattened, to stdout."""
        with torch.no_grad():
            for tensor in self.parameters():
                print(tensor.numpy().flatten())
class Agent(nn.Module):
    """Thin wrapper that turns an env observation into an Actor decision."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Load a flat gene vector into the underlying actor."""
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Assemble per-machine features from ``obs`` and return a machine index."""
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Batch everything with a leading dimension of 1.
        job_feat = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # occupancy: B*n*t*r (batch, machine, time, resource)
        B, n, t, r = occupancy.shape
        # Cluster-level summary: mean/std of occupancy across machines.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        cluster_summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Tile the job features so every machine slot sees the same job.
        job_feat = job_feat.reshape(B, 1, -1).repeat(1, n, 1)
        actor_input = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                cluster_summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        return self.job_actor.predict(actor_input, mask)

    def show(self):
        """Print the actor's parameters."""
        self.job_actor.show()
class Individual:
    """A single GA genome together with the agent used to evaluate it."""

    def __init__(self, job_genes=None):
        # Fitness bookkeeping, filled in by the GA loop.
        self.train_fitness = None
        self.eval_fitness = None
        self.std_fitness = np.inf
        self.steps = 0
        # The evaluation agent; its actor defines how long a genome must be.
        self.agent = Agent()
        self.param_num = self.agent.job_actor.param_num
        self.job_genes = job_genes

    def init(self):
        """Sample a brand-new genome uniformly from [-1, 1)."""
        self.job_genes = np.random.uniform(-1, 1, self.param_num)

    def update(self):
        """Copy the genome into the agent's network weights."""
        self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
    """Evaluate one genome on job sequence ``seq_index``; return ``(id, fitness)``.

    ``fitness`` depends on ``args.ga_fitness_type``:
      * ``"std"``     -> scalar, negated load-balance penalty
      * ``"runtime"`` -> scalar, negated remaining-runtime penalty
      * ``"double"``  -> array ``[-runtime, -balance]`` for NSGA-II
    NOTE(review): any other value leaves ``fitness`` unbound -> NameError.
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    env.reset()
    individual = Individual(genes)
    individual.update()
    # NOTE(review): second reset() of the same env — presumably redundant;
    # confirm it does not advance or reshuffle the job sequence.
    obs = env.reset()
    done = False
    action_list = []
    reward_list = []
    while not done:
        action = individual.agent.choose_action(obs)
        obs, reward, done, _ = env.step(action)
        action_list.append(action)
        reward_list.append(reward)
    if args.ga_fitness_type == "std":
        # Balance objective: std across machines, averaged over resources, summed over time.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.sum(machines_occupancy_mean_std)
        fitness = -std_fitness
    elif args.ga_fitness_type == "runtime":
        # Runtime objective; divided by 60 to keep the magnitude small.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)
        fitness = -runtime_fitness
    elif args.ga_fitness_type == "double":
        # Two objectives: balance (std in absolute capacity units) and runtime,
        # both averaged rather than summed to keep magnitudes comparable.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
    return id, fitness
class GA:
    """NSGA-II style genetic algorithm over Individual genomes.

    Parents and children are evaluated in parallel, ranked by fast
    non-dominated sorting plus crowding distance, and the top ``p_size``
    survive as the elite of the next generation.
    """

    def __init__(self, args):
        self.args = args
        self.p_size = args.ga_parent_size
        self.c_size = args.ga_children_size
        self.job_genes_len = 0
        self.mutate_rate = args.ga_mutate_rate
        self.mutate_scale = args.ga_mutate_scale
        self.population: List[Individual] = []
        self.elitism_population: List[Individual] = []
        self.avg_fitness = 0
        self.seq_index = 0
        self.seq_num = args.job_seq_num
        self.generation = 0

    def setup_seed(self):
        """Seed all RNGs for reproducibility."""
        # FIX: read the seed from self.args instead of the module-level global
        # `args` (the old code only worked because __main__ happened to define one).
        seed = self.args.seed
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True

    def generate_ancestor(self):
        """Create the initial parent population with random genomes."""
        for _ in range(self.p_size):
            individual = Individual()
            individual.init()
            self.population.append(individual)
        self.job_genes_len = individual.param_num

    def inherit_ancestor(self):
        """Load genes (nn model parameters) from saved model files."""
        for i in range(self.p_size):
            pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
            nn = torch.load(pth)
            genes = []
            with torch.no_grad():
                for parameters in nn.parameters():
                    genes.extend(parameters.numpy().flatten())
            self.population.append(Individual(np.array(genes)))

    def crossover(self, c1_genes, c2_genes):
        """Single point crossover, in place on both children."""
        p1_genes = c1_genes.copy()
        p2_genes = c2_genes.copy()
        point = np.random.randint(0, (self.job_genes_len))
        c1_genes[: point + 1] = p2_genes[: point + 1]
        c2_genes[: point + 1] = p1_genes[: point + 1]

    def mutate(self, c_genes):
        """Gaussian mutation with scale, applied in place with prob. mutate_rate."""
        mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
        mutation = np.random.normal(size=c_genes.shape)
        mutation[mutation_array] *= self.mutate_scale
        c_genes[mutation_array] += mutation[mutation_array]

    def elitism_selection(self):
        """NSGA-II survivor selection; returns how many elites changed."""
        # Min-max normalize the raw fitness values (per objective).
        fitness_list = []
        for individual in self.population:
            fitness_list.append(individual.train_fitness)
        fitness_list = np.array(fitness_list)
        norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
            np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
        )
        # Non-dominated sorting treats smaller as better, so negate the
        # (negative) fitness values back to positive costs.
        fm_fitness_list = -np.array(fitness_list).T
        front_list = self.fast_non_dominated_sort(fm_fitness_list)
        # Crowding distance per front.
        crowded_distance_list = []
        for front in front_list:
            front_values = fm_fitness_list[:, front]
            crowded_distance = self.crowded_distance(front_values)
            crowded_distance_list.append(crowded_distance)
        # Elite selection: fill up front by front, best fronts first.
        elitism_index = []
        save_best_front = False
        for front, crowded_distance in zip(front_list, crowded_distance_list):
            # Remember the first (best) Pareto front.
            if not save_best_front:
                best_front_population = []
                for index in front:
                    best_front_population.append(self.population[index])
                self.best_front_population = best_front_population
                save_best_front = True
            # Within a front, prefer larger crowding distance (more diversity).
            front = np.array(front)
            sorted_index = np.argsort(crowded_distance)  # ascending
            sorted_front = front[sorted_index[::-1]]  # descending: spread-out first
            for index in sorted_front:
                if len(elitism_index) < self.p_size:
                    elitism_index.append(index)
                else:
                    break
        # Weighted-sum scalarization, used by roulette-wheel selection.
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        elitism_population = [self.population[index] for index in elitism_index]
        # Count how many elites are new compared with the previous generation.
        elite_change_num = len(elitism_population)
        for elite in elitism_population:
            if elite in self.elitism_population:
                elite_change_num -= 1
        self.elitism_population = elitism_population
        self.fitness_list = fitness_list
        self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
        self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
        return elite_change_num

    def roulette_wheel_selection(self, size) -> List[Individual]:
        """Fitness-proportionate parent selection (larger value -> more likely)."""
        selection = []
        wheel = sum(self.elitism_norm_fitness_list)
        for _ in range(size):
            pick = np.random.uniform(0, wheel)
            current = 0
            for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
                current += individual_fitness
                if current > pick:
                    selection.append(self.elitism_population[i])
                    break
        return selection

    def random_select_parent(self, size):
        """Uniformly sample ``size`` distinct parents from the elite."""
        selection = random.sample(self.elitism_population, size)
        return selection

    def generate_children(self):
        """Produce c_size children via crossover + mutation of elite parents."""
        children_population = []
        while len(children_population) < self.c_size:
            # p1, p2 = self.roulette_wheel_selection(2)
            p1, p2 = self.random_select_parent(2)
            c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
            self.crossover(c1_genes, c2_genes)
            self.mutate(c1_genes)
            self.mutate(c2_genes)
            c1 = Individual(c1_genes)
            c2 = Individual(c2_genes)
            children_population.extend([c1, c2])
        self.children_population = children_population

    def save_population(self, population: list[Individual], label=""):
        """Save each individual's actor weights and the fitness table; return it."""
        save_dir = os.path.join(
            self.args.save_path,
            self.args.method,
            self.args.tag,
            label,
            f"g{self.generation}_{self.seq_index}",
        )
        os.makedirs(save_dir, exist_ok=True)
        mean_fitness_list = []
        for id, individual in enumerate(population):
            mean_fitness = np.array(individual.train_fitness)
            mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
            # File name encodes both objective values (assumes 2 objectives).
            model_save_path = os.path.join(
                save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
            )
            individual.update()
            torch.save(individual.agent.job_actor.state_dict(), model_save_path)
        mean_fitness_list = np.array(mean_fitness_list)
        np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
        return mean_fitness_list

    def evolve(self):
        """Run one generation: evaluate, select elites, breed children."""
        # Evaluate unevaluated individuals in parallel (elites keep their fitness).
        population_num = self.args.ga_parent_size + self.args.ga_children_size
        pool_num = min(cpu_count(), population_num)
        print(f"use {pool_num} cpu cores")  # FIX: message typo ("cup core")
        pool = Pool(pool_num)
        mutil_process = []
        for id, individual in enumerate(self.population):
            if individual.train_fitness is not None:
                continue
            one_process = pool.apply_async(
                run_individual_in_env,
                args=(
                    id,
                    self.args,
                    individual.job_genes,
                    self.seq_index,
                ),
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Collect the worker results.
        for one_process in mutil_process:
            id, fitness = one_process.get()
            self.population[id].train_fitness = fitness
        # Persist everything, then select and persist the elite.
        self.save_population(self.population, "all")
        elite_change_num = self.elitism_selection()
        elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # Next generation = elites + freshly bred children.
        self.generate_children()
        new_population = []
        new_population.extend(self.elitism_population)
        new_population.extend(self.children_population)
        self.population = new_population
        self.seq_index = (self.seq_index + 1) % self.seq_num
        self.generation += 1
        return elite_change_num, elite_fitness_list

    def sort_by_values(self, values):
        """Return, per objective row, the indices that sort it ascending."""
        sorted_index_list = []
        for value in values:
            sorted_index = np.argsort(value)
            sorted_index_list.append(sorted_index)
        return sorted_index_list

    def crowded_distance(self, values):
        """Crowding distance per solution; boundary points get a huge value (1e5).

        NOTE(review): divides by (max - min) per objective — a front where all
        values of one objective are equal would divide by zero.
        """
        distances = []
        sorted_index_list = self.sort_by_values(values)  # ascending per objective
        for value, sorted_index in zip(values, sorted_index_list):
            distance = np.ones(len(sorted_index)) * 1e5
            for i in range(1, len(sorted_index) - 1):
                pre_index = sorted_index[i - 1]
                curr_index = sorted_index[i]
                after_index = sorted_index[i + 1]
                distance[curr_index] = (value[after_index] - value[pre_index]) / (
                    max(value) - min(value)
                )
            distances.append(distance)
        distances = np.array(distances)
        distance = np.sum(distances, axis=0)
        return distance

    def fast_non_dominated_sort(self, values):
        """Fast non-dominated sort (smaller objective values are better).

        ``values`` is objectives x solutions. Returns a list of fronts, each a
        list of solution indices, e.g. ``[[1], [9], [0, 8], ...]``.
        """
        values11 = values[0]  # solutions of the first objective (just for the count)
        S = [[] for _ in range(0, len(values11))]  # per solution: set it dominates
        front = [[]]  # fronts; one list per rank
        n = [0 for _ in range(0, len(values11))]  # per solution: count dominating it
        rank = [np.inf for _ in range(0, len(values11))]  # per solution: front rank
        # For every pair (p, q), decide domination on all objectives.
        for p in range(0, len(values11)):
            S[p] = []
            n[p] = 0
            for q in range(0, len(values11)):
                less = 0  # objectives where q beats p
                equal = 0  # objectives where they tie
                greater = 0  # objectives where p beats q
                for k in range(len(values)):
                    if values[k][p] > values[k][q]:
                        less = less + 1
                    if values[k][p] == values[k][q]:
                        equal = equal + 1
                    if values[k][p] < values[k][q]:
                        greater = greater + 1
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q
            # Pareto-optimal solutions (dominated by nobody) form front 0.
            if n[p] == 0:
                rank[p] = 0
                if p not in front[0]:
                    front[0].append(p)
        # Peel off successive fronts.
        i = 0
        while front[i] != []:
            Q = []
            for p in front[i]:
                for q in S[p]:
                    n[q] = n[q] - 1
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)
            i = i + 1
            front.append(Q)
        del front[len(front) - 1]  # drop the trailing empty front
        return front
if __name__ == "__main__":
    # Train NSGA-II on a single job sequence and log Pareto fronts to TensorBoard.
    args = parse_args()
    args.method = "nsga"
    args.job_seq_num = 1
    args.tag = "run05"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    ga = GA(args)
    ga.setup_seed()
    # Either start from random genomes or resume from saved model files.
    if args.ga_choice == "generate":
        ga.generate_ancestor()
    else:
        ga.inherit_ancestor()
    # NOTE(review): fitness_list and mean_best_fitness are never used below.
    fitness_list = []
    mean_best_fitness = [-np.inf] * args.ga_fitness_num
    # Intentionally endless training loop; stop with Ctrl-C.
    while True:
        print("=" * 100)
        print(f"evolve generation {ga.generation}")
        elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorbord
        writer.add_scalar("Elite change num", elite_change_num, ga.generation)
        # Last two columns are the (negated) objectives; flip sign for plotting.
        elite_fitness_list = np.array(elite_fitness_list)
        elite_fitness_list = -elite_fitness_list[:, -2:]
        y = elite_fitness_list[:, 0]
        x = elite_fitness_list[:, 1]
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        # Fixed baseline points for reference heuristics (lc / lg / wsga).
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Target distribution", figure, ga.generation)
        plt.close()
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Balance fitness max", max_elite_fitness[1], ga.generation)
        writer.add_scalar("Duration fitness max", max_elite_fitness[0], ga.generation)
        writer.add_scalar("Balance fitness min", min_elite_fitness[1], ga.generation)
        writer.add_scalar("Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,461 | 34.299342 | 96 | py |
MERL-LB | MERL-LB-main/mp_train_nn_nsga2.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP scorer whose parameters can be (de)serialized as one flat vector.

    Flat layout per layer: weight (out*in values, row-major) then bias
    (out values); ``param_num`` is the total length of that vector.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0
        for in_dim, out_dim in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(layers)

    def forward(self, x):
        # ReLU after every layer except the last, then drop the trailing dim of 1.
        for hidden in self.fc[:-1]:
            x = F.relu(hidden(x))
        scores = self.fc[-1](x)
        return torch.squeeze(scores, dim=-1)

    def update(self, weights):
        """Overwrite all layer weights/biases from one flat vector (gene)."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                w_len = layer.in_features * layer.out_features
                layer.weight.data = flat[cursor : cursor + w_len].reshape(
                    layer.out_features, layer.in_features
                )
                cursor += w_len
                layer.bias.data = flat[cursor : cursor + layer.out_features]
                cursor += layer.out_features

    def predict(self, input, action_mask=None):
        """Score the candidates and return the argmax index as a Python int.

        Entries whose mask is False get -1e8 added so they can never win.
        """
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Print every parameter tensor, flattened, to stdout."""
        with torch.no_grad():
            for tensor in self.parameters():
                print(tensor.numpy().flatten())
class Agent(nn.Module):
    """Thin wrapper that turns an env observation into an Actor decision."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Load a flat gene vector into the underlying actor."""
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Assemble per-machine features from ``obs`` and return a machine index."""
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Batch everything with a leading dimension of 1.
        job_feat = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # occupancy: B*n*t*r (batch, machine, time, resource)
        B, n, t, r = occupancy.shape
        # Cluster-level summary: mean/std of occupancy across machines.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        cluster_summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Tile the job features so every machine slot sees the same job.
        job_feat = job_feat.reshape(B, 1, -1).repeat(1, n, 1)
        actor_input = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                cluster_summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        return self.job_actor.predict(actor_input, mask)

    def show(self):
        """Print the actor's parameters."""
        self.job_actor.show()
class Individual:
    """A single GA genome together with the agent used to evaluate it."""

    def __init__(self, job_genes=None):
        # Fitness bookkeeping, filled in by the GA loop.
        self.train_fitness = None
        self.eval_fitness = None
        self.std_fitness = np.inf
        self.steps = 0
        # The evaluation agent; its actor defines how long a genome must be.
        self.agent = Agent()
        self.param_num = self.agent.job_actor.param_num
        self.job_genes = job_genes

    def init(self):
        """Sample a brand-new genome uniformly from [-1, 1)."""
        self.job_genes = np.random.uniform(-1, 1, self.param_num)

    def update(self):
        """Copy the genome into the agent's network weights."""
        self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
    """Evaluate one genome on job sequence ``seq_index``; return ``(id, fitness)``.

    ``fitness`` depends on ``args.ga_fitness_type``:
      * ``"std"``     -> scalar, negated load-balance penalty
      * ``"runtime"`` -> scalar, negated remaining-runtime penalty
      * ``"double"``  -> array ``[-runtime, -balance]`` for NSGA-II
    NOTE(review): any other value leaves ``fitness`` unbound -> NameError.
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    env.reset()
    individual = Individual(genes)
    individual.update()
    # NOTE(review): second reset() of the same env — presumably redundant;
    # confirm it does not advance or reshuffle the job sequence.
    obs = env.reset()
    done = False
    action_list = []
    reward_list = []
    while not done:
        action = individual.agent.choose_action(obs)
        obs, reward, done, _ = env.step(action)
        action_list.append(action)
        reward_list.append(reward)
    if args.ga_fitness_type == "std":
        # Balance objective: std across machines, averaged over resources, summed over time.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.sum(machines_occupancy_mean_std)
        fitness = -std_fitness
    elif args.ga_fitness_type == "runtime":
        # Runtime objective; divided by 60 to keep the magnitude small.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)
        fitness = -runtime_fitness
    elif args.ga_fitness_type == "double":
        # Two objectives: balance (std in absolute capacity units) and runtime,
        # both averaged rather than summed to keep magnitudes comparable.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
    return id, fitness
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
# # 归一化
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
# # 权重相加排序
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
# population_sorted_index = np.argsort(norm_fitness_list) # 升序取后面几位
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
    def elitism_selection(self):
        """NSGA-II style elitist selection over the evaluated population.

        Ranks individuals by fast non-dominated sorting, breaks ties with
        crowding distance, keeps the best ``p_size`` individuals as elites
        and remembers the first Pareto front. Returns the number of elites
        that changed relative to the previous generation.
        """
        # Normalize each objective to [0, 1] (used later for roulette wheel).
        fitness_list = []
        for individual in self.population:
            fitness_list.append(individual.train_fitness)
        fitness_list = np.array(fitness_list)
        norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
            np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
        )
        # The non-dominated sort assumes smaller-is-better, so negate the
        # fitness and transpose to objective-major layout.
        fm_fitness_list = -np.array(fitness_list).T
        # Fast non-dominated sort into Pareto fronts.
        front_list = self.fast_non_dominated_sort(fm_fitness_list)
        # Crowding distance within each front.
        crowded_distance_list = []
        for front in front_list:
            front_values = fm_fitness_list[:, front]
            crowded_distance = self.crowded_distance(front_values)
            crowded_distance_list.append(crowded_distance)
        # Elite selection, walking the fronts best-first.
        elitism_index = []
        save_best_front = False
        for front, crowded_distance in zip(front_list, crowded_distance_list):
            # Remember the individuals on the first (best) Pareto front.
            if not save_best_front:
                best_front_population = []
                for index in front:
                    best_front_population.append(self.population[index])
                self.best_front_population = best_front_population
                save_best_front = True
            # Order the front by crowding distance.
            front = np.array(front)
            sorted_index = np.argsort(crowded_distance)  # ascending
            sorted_front = front[sorted_index[::-1]]  # descending: prefer larger distance
            # Take elites from this front until p_size is reached.
            for index in sorted_front:
                if len(elitism_index) < self.p_size:
                    elitism_index.append(index)
                else:
                    break
        # Weighted sum of normalized objectives (e.g. weights [0.5, 0.5]).
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        elitism_population = [self.population[index] for index in elitism_index]
        # Count how many elites differ from the previous generation.
        elite_change_num = len(elitism_population)
        for elite in elitism_population:
            if elite in self.elitism_population:
                elite_change_num -= 1
        self.elitism_population = elitism_population
        self.fitness_list = fitness_list
        self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
        self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
        return elite_change_num
# 轮盘赌选择子代
def roulette_wheel_selection(self, size) -> List[Individual]:
# 值越大被取到的概率就越大
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
# 随机选择
def random_select_parent(self, size):
# 随机选择两个父代
selection = random.sample(self.elitism_population, size)
return selection
# 产生子代
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
p1, p2 = self.random_select_parent(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
    def save_population(self, population: list[Individual], label=""):
        """Persist a population's actor weights and fitness records.

        Writes one ``.pth`` per individual plus ``mean_fitness_record.npy``
        under ``<save_path>/<method>/<tag>/<label>/g<generation>_<seq_index>``.
        Returns the records as an array of ``[generation, id, fitness...]``
        rows.
        """
        save_dir = os.path.join(
            self.args.save_path,
            self.args.method,
            self.args.tag,
            label,
            f"g{self.generation}_{self.seq_index}",
        )
        os.makedirs(save_dir, exist_ok=True)
        mean_fitness_list = []
        for id, individual in enumerate(population):
            mean_fitness = np.array(individual.train_fitness)
            mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
            # File name encodes the two objective fitness values
            # (format assumes exactly two objectives).
            model_save_path = os.path.join(
                save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
            )
            # Sync the flat gene vector into the actor network before saving.
            individual.update()
            torch.save(individual.agent.job_actor.state_dict(), model_save_path)
        mean_fitness_list = np.array(mean_fitness_list)
        np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
        return mean_fitness_list
    # One generation of evolution.
    def evolve(self):
        """Run one generation: evaluate, select elites, breed children.

        Evaluates the whole population in parallel worker processes, performs
        elitist selection, saves both the full population and the elites,
        then forms the next population as elites + freshly bred children.
        Returns ``(elite_change_num, elite_fitness_list)``.
        """
        # Serial evaluation loop, kept for debugging:
        # population = []
        # for individual in self.population:
        #     individual = run_individual_in_env(
        #         self.args,
        #         individual.job_genes,
        #         self.seq_index,
        #     )
        #     population.append(individual)
        # Parallel evaluation: one task per individual.
        population_num = self.args.ga_parent_size + self.args.ga_children_size
        pool_num = min(cpu_count(), population_num)
        print(f"use {pool_num} cup core")
        pool = Pool(pool_num)
        mutil_process = []
        for id, individual in enumerate(self.population):
            # Run the individual in the environment to obtain its fitness.
            one_process = pool.apply_async(
                run_individual_in_env,
                args=(
                    id,
                    self.args,
                    individual.job_genes,
                    self.seq_index,
                ),
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Collect the per-process results (id keeps ordering stable).
        for one_process in mutil_process:
            id, fitness = one_process.get()
            self.population[id].train_fitness = fitness
        # Persist the full evaluated population.
        self.save_population(self.population, "all")
        # Elitist (NSGA-II) selection.
        elite_change_num = self.elitism_selection()
        # Persist the elites.
        elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # Breed children and form the next generation.
        self.generate_children()
        new_population = []
        new_population.extend(self.elitism_population)
        new_population.extend(self.children_population)
        self.population = new_population
        # Rotate the training job sequence and advance the generation counter.
        self.seq_index = (self.seq_index + 1) % self.seq_num
        self.generation += 1
        return elite_change_num, elite_fitness_list
# 值排序
def sort_by_values(self, values):
# 升序排序
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
# 拥挤度计算
def crowded_distance(self, values):
distances = []
sorted_index_list = self.sort_by_values(values) # 升序排序
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
    # Fast non-dominated sort (NSGA-II); smaller objective values are better.
    def fast_non_dominated_sort(self, values):
        """Partition solutions into Pareto fronts.

        ``values`` is objective-major: ``values[k][p]`` is objective k of
        solution p. Returns a list of fronts (best first), each a list of
        solution indices, e.g. ``[[1],[9],[0, 8],[7, 6],[3, 5],[2, 4]]``.
        """
        values11 = values[0]  # objective-0 slice; only its length (solution count) is used
        S = [[] for _ in range(0, len(values11))]  # per solution: set of solutions it dominates
        front = [[]]  # list of fronts, one list per rank
        n = [0 for _ in range(0, len(values11))]  # per solution: how many solutions dominate it
        rank = [np.inf for _ in range(0, len(values11))]  # per solution: Pareto rank
        # For every solution p, find whom it dominates and who dominates it.
        for p in range(0, len(values11)):
            S[p] = []  # solutions dominated by p
            n[p] = 0  # number of solutions dominating p
            for q in range(0, len(values11)):  # compare against every solution q
                less = 0  # objectives where q beats p (q's value smaller)
                equal = 0  # objectives where p and q tie
                greater = 0  # objectives where p beats q
                for k in range(len(values)):  # over every objective
                    if values[k][p] > values[k][q]:  # q smaller on objective k
                        less = less + 1  # q better than p here
                    if values[k][p] == values[k][q]:  # tie on objective k
                        equal = equal + 1
                    if values[k][p] < values[k][q]:  # q larger on objective k
                        greater = greater + 1  # q worse than p here
                if (less + equal == len(values)) and (equal != len(values)):
                    n[p] = n[p] + 1  # q dominates p
                elif (greater + equal == len(values)) and (equal != len(values)):
                    S[p].append(q)  # p dominates q
            # Solutions dominated by nobody form the first Pareto front.
            if n[p] == 0:
                rank[p] = 0  # rank 0 = Pareto-optimal
                if p not in front[0]:
                    # Append p to front 0 exactly once.
                    front[0].append(p)
        # Peel off the subsequent fronts.
        i = 0
        while front[i] != []:  # while the current front is non-empty
            Q = []
            for p in front[i]:  # each member p of the current front
                for q in S[p]:  # each solution q dominated by p
                    n[q] = n[q] - 1  # p is now accounted for
                    if n[q] == 0:
                        rank[q] = i + 1
                        if q not in Q:
                            Q.append(q)  # q belongs to front i+1
            i = i + 1  # advance to the next rank
            front.append(Q)
        del front[len(front) - 1]  # drop the trailing empty front created by the loop
        return front  # list of fronts, best first
if __name__ == "__main__":
    # NSGA training driver: evolves the GA population forever, logging elite
    # fitness and a Pareto scatter plot to TensorBoard each generation.
    args = parse_args()
    args.method = "nsga"
    args.job_seq_num = 1
    args.tag = "run01"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    ga = GA(args)
    ga.setup_seed()
    # Either fresh random ancestors or weights inherited from disk.
    if args.ga_choice == "generate":
        ga.generate_ancestor()
    else:
        ga.inherit_ancestor()
    fitness_list = []  # NOTE(review): never appended to below — appears dead
    mean_best_fitness = [-np.inf] * args.ga_fitness_num  # NOTE(review): unused
    while True:
        print("=" * 100)
        print(f"evolve generation {ga.generation}")
        elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorbord
        writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
        # Last two columns are the objectives; negate back to positive costs.
        elite_fitness_list = np.array(elite_fitness_list)
        elite_fitness_list = -elite_fitness_list[:, -2:]
        y = elite_fitness_list[:, 0]
        x = elite_fitness_list[:, 1]
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        # Fixed reference points from baseline methods.
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Train/Target distribution", figure, ga.generation)
        plt.close()
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
        writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,419 | 34.346535 | 96 | py |
MERL-LB | MERL-LB-main/mp_test_nn_nsga.py | import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
    """MLP scorer: maps per-machine feature vectors to a scalar score.

    The network is a stack of Linear layers with ReLU on the hidden ones;
    ``param_num`` counts all weights and biases (used as the GA gene length).
    """

    def __init__(self, dim_list=None):
        super().__init__()
        # BUGFIX: the mutable list default ([126, 32, 1]) was replaced by
        # None to avoid the shared-mutable-default-argument pitfall;
        # behavior for all existing callers is unchanged.
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.dim_list = dim_list
        fc = []
        self.param_num = 0  # total weight + bias count over all layers
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """Apply ReLU-activated hidden layers, a linear output layer, then
        squeeze the trailing dimension."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Copy a flat gene vector into layer weights and biases, in order
        (weights first, then bias, layer by layer)."""
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Score actions and return the argmax over dim 1.

        Masked-out actions (mask False) are pushed to -1e8 so they are never
        chosen. Expects a batch of size 1 (``.item()``).
        """
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        return torch.argmax(predict, dim=1).cpu().item()

    def show(self):
        """Print every parameter tensor flattened (debug helper)."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Dispatch agent: assembles per-machine features from an observation
    and delegates the action choice to the job Actor."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Load a flat gene/weight vector into the job actor."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Build the actor input from ``obs`` and return the chosen machine.

        ``obs`` unpacks to the job's resource-request rates and runtime, the
        per-machine occupancy-rate window, per-machine runtimes, an unused
        slot, and a boolean action mask (True = machine selectable).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (a batch dimension of 1 is wrapped around machine inputs)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Cluster-level summary: mean/std of occupancy across machines.
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        # Repeat the job state so every machine row sees the same job features.
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine feature row: job ++ own occupancy ++ own runtime ++ cluster summary.
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        self.job_actor.show()
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Evaluate one checkpoint on one job sequence and dump metrics/figures.

    Runs the environment to completion with the loaded agent, saves
    occupancy/finish-time/job-count traces (both at the scheduling horizon
    and after running remaining jobs to the end) as ``.npy`` files plus
    utilization and remaining-time plots, and returns
    ``(std_fitness, runtime_fitness, job_num)``.
    """
    print("start test seq_index: ", seq_index)
    # checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
    # checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
    agent = Agent()
    # state_dict = torch.load("24_f_-342.436_-0.029.pth")
    # agent.load_state_dict(state_dict)
    # Checkpoints store the job actor's state dict only.
    state_dict = torch.load(args.checkpoint_path)
    agent.job_actor.load_state_dict(state_dict)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # Balance objective: time-mean of the per-resource std across machines
    # (occupancy rescaled to absolute capacity first). Duration objective:
    # mean of the remaining finish times over the run.
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # One utilization plot per resource dimension (4 resources assumed here).
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    # Batch-evaluate every checkpoint of one elite generation, ordered by
    # its training-time balance score, aggregating per-checkpoint results
    # into mean/std and per-sequence CSVs.
    args = parse_args()
    args.method = "wsga"
    args.tag = "t30_wsga_run05_g13440"
    args.max_time = 30 * 60
    args.job_seq_num = 5
    args.actual = True
    root_path = "output/train/wsga/run05/elite/g13440_0"
    file_names = os.listdir(root_path)
    file_names.remove("mean_fitness_record.npy")
    # Checkpoint names look like "<id>_-<duration>_-<balance>.pth": strip the
    # extension, drop the minus signs, parse the two fitness numbers.
    scores = [item.split(".pth")[0] for item in file_names]
    scores = ["".join(item.split("-")) for item in scores]
    scores = [list(map(float, item.split("_")[1:])) for item in scores]
    b_score = np.array(scores)[:, 1]
    index = np.argsort(b_score)  # evaluate in ascending balance order
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    result1 = []  # one aggregated row per checkpoint
    result2 = []  # one row per (checkpoint, sequence)
    for i in index:
        file_name = file_names[i]
        args.checkpoint_path = os.path.join(root_path, file_name)
        score = scores[i]
        print(f"Test b{score[1]:.3f} d{score[0]:.3f}")
        save_dir = os.path.join(
            root_dir,
            f"b{score[1]:.3f}_d{score[0]:.3f}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # save args
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # Evaluate every job sequence in parallel.
        mutil_process = []
        pool = Pool(cpu_count())
        for j in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, j, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Collect per-sequence fitness values.
        fitness_record = []
        job_num_list = []
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            job_num_list.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            result2.append((i, score[1], score[0], std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        result1.append((i, score[1], score[0], *mean_fitness, *std_fitness))
        print(job_num_list)
        # BUGFIX: previously saved np.array(job_num) — only the last
        # sequence's job count. Save the whole per-sequence list instead.
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
        print(
            "mean balance fitness: {:.4f} mean duration fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std balance fitness: {:.4f} std duration fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result1,
        columns=[
            "id",
            "train_balance",
            "train_duration",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, f"{args.method}_mean_std.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "id",
            "train_balance",
            "train_duration",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, "all_data.csv"))
| 11,135 | 32.643505 | 131 | py |
MERL-LB | MERL-LB-main/mp_test_sigma.py | import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
# Restrict which GPU is visible to this process; inference below is
# nevertheless pinned to the CPU device.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = torch.device("cpu")
class RR:
    """Round-robin dispatcher over the machines, honoring the action mask."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num
        self.action_index = 0

    def select_action(self, obs):
        """Advance cyclically from the last choice to the next unmasked machine."""
        _, _, _, _, _, action_mask = obs
        candidate = self.action_index
        for _ in range(self.machine_num):
            candidate = (candidate + 1) % self.machine_num
            if action_mask[candidate] == True:
                self.action_index = candidate
                break
        return candidate
class RD:
    """Random dispatcher: uniform scores blended with the action mask so
    masked-out machines can never win the argmax."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num

    def select_action(self, obs):
        _, _, _, _, _, action_mask = obs
        scores = (np.random.random(self.machine_num) + action_mask) / 2
        return np.argmax(scores)
class LG:
    """Least-gap dispatcher: picks the machine whose remaining runtime is
    closest to the job's runtime (masked machines excluded)."""

    def select_action(self, obs):
        _, job_run_time, _, machines_run_time, _, action_mask = obs
        masked_gap = np.where(
            action_mask, np.abs(machines_run_time - job_run_time), 1e9
        )
        return np.argmin(masked_gap)
class LC:
    """Least-connections dispatcher: picks the machine running the fewest
    jobs. Note: mutates the ``jobs_num`` array from ``obs`` in place."""

    def select_action(self, obs):
        _, _, _, _, jobs_num, action_mask = obs
        np.putmask(jobs_num, action_mask == False, 1e9)
        return np.argmin(jobs_num)
class Actor(nn.Module):
    """MLP scorer over per-machine features.

    With ``absolute=True`` the action is chosen greedily (argmax); otherwise
    a categorical distribution is sampled and its log-probability is cached
    on ``self.action_logprobs``.
    """

    def __init__(self, absolute=True, dim_list=None):
        super().__init__()
        # BUGFIX: the mutable list default ([126, 32, 1]) was replaced by
        # None to avoid the shared-mutable-default-argument pitfall;
        # behavior for all existing callers is unchanged.
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.absolute = absolute
        self.dim_list = dim_list
        fc = []
        self.param_num = 0  # total weight + bias count (GA gene length)
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU hidden layers, linear output, squeeze the trailing dim."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Copy a flat gene vector into layer weights and biases, in order."""
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Score actions and pick one; masked-out actions get -1e8."""
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        if not self.absolute:
            # Stochastic mode: sample and remember the log-probability.
            action_prob = torch.softmax(predict, dim=-1)
            action_dist = Categorical(action_prob)
            action = action_dist.sample()
            self.action_logprobs = action_dist.log_prob(action).detach()
            action = action.cpu().item()
        else:
            action = torch.argmax(predict, dim=1).cpu().item()
        return action

    def show(self):
        """Print every parameter tensor flattened (debug helper)."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Dispatch agent: assembles per-machine features from an observation
    and delegates the action choice to the job Actor."""

    def __init__(self, absolute=True):
        super(Agent, self).__init__()
        self.job_actor = Actor(absolute=absolute)

    def update(self, job_weights):
        """Load a flat gene/weight vector into the job actor."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Build the actor input from ``obs`` and return the chosen machine.

        ``obs`` unpacks to the job's resource-request rates and runtime, the
        per-machine occupancy-rate window, per-machine runtimes, an unused
        slot, and a boolean action mask (True = machine selectable).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (a batch dimension of 1 is wrapped around machine inputs)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Cluster-level summary: mean/std of occupancy across machines.
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        # Repeat the job state so every machine row sees the same job features.
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine feature row: job ++ own occupancy ++ own runtime ++ cluster summary.
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        self.job_actor.show()
def get_agent(args):
    """Build the dispatch agent selected by ``args.method``.

    Heuristic baselines (rr/rd/lg/lc) need no weights; learned methods load
    ``args.checkpoint_path`` into the job actor and switch it to eval mode.

    Raises:
        ValueError: if ``args.method`` names no known agent (previously this
        fell through to an ``UnboundLocalError`` at the return).
    """
    method = args.method
    if method == "rr":
        agent = RR(args.machine_num)
    elif method == "rd":
        agent = RD(args.machine_num)
    elif method == "lg":
        agent = LG()
    elif method == "lc":
        agent = LC()
    elif method in ["nsga", "wsga", "deepjs", "igd", "nei_nsga", "ppo"]:
        # The two original branches ("ppo" vs the rest) were byte-identical,
        # so they are merged here.
        agent = Agent()
        state_dict = torch.load(args.checkpoint_path)
        agent.job_actor.load_state_dict(state_dict)
        agent.job_actor.eval()
    else:
        raise ValueError(f"unknown method: {method!r}")
    return agent
def set_seed(seed=0):
    """Seed every RNG in play (python, numpy, torch CPU and all GPUs) so
    runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # CPU RNG
    torch.cuda.manual_seed(seed)  # current GPU
    torch.cuda.manual_seed_all(seed)  # every GPU
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Evaluate the configured agent on one job sequence.

    Runs the environment to completion, saves occupancy/finish-time traces
    (both at the scheduling horizon and after running remaining jobs to the
    end) as ``.npy`` files plus utilization and remaining-time plots, and
    returns ``(std_fitness, runtime_fitness, job_num)``.
    """
    print("start test seq_index: ", seq_index)
    # init agent
    agent = get_agent(args)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action (inference only, no gradients)
        with torch.no_grad():
            action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # Balance objective: time-mean of the per-resource std across machines
    # (occupancy rescaled to absolute capacity first). Duration objective:
    # mean of the remaining finish times over the run.
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # One utilization plot per resource dimension (4 resources assumed here).
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    del agent
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    # Sweep the user-arrival sigma parameter and evaluate one fixed
    # checkpoint across all job sequences for each sigma value, writing
    # aggregated and per-sequence CSVs.
    args = parse_args()
    args.method = "igd"
    args.tag = "user_sigam_test02"
    args.actual = False
    # Alternative checkpoints from earlier experiments were removed here;
    # see version control history for the candidate list.
    args.checkpoint_path = (
        "output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
    )
    user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 6, dtype=np.int32)
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    result = []  # one aggregated (mean/std) row per sigma
    result2 = []  # one row per (sigma, sequence)
    for user_sigma in user_sigam_list:
        print(f"Test user_sigma {user_sigma}")
        save_dir = os.path.join(
            root_dir,
            f"user_sigma_{user_sigma}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # save args (the swept sigma is recorded on args first)
        args.user_sigma = user_sigma
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # Evaluate every job sequence in parallel.
        mutil_process = []
        pool = Pool(cpu_count())
        for i in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, i, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Collect per-sequence results.
        fitness_record = []
        job_num_list = []
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            job_num_list.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            # NOTE: the original "user_sigma // 5" key scaling is preserved.
            result2.append((user_sigma // 5, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        # BUGFIX: `result` was never populated, so mean_std.csv was always
        # empty. Append the aggregated row matching the declared columns.
        result.append((user_sigma, *mean_fitness, *std_fitness))
        print(job_num_list)
        # BUGFIX: previously saved only the last sequence's job count
        # (np.array(job_num)); save the whole list instead.
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
        print(
            "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result,
        columns=[
            "user_sigma",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, "mean_std.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "user_sigma",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, "all_data.csv"))
| 13,433 | 31.686131 | 97 | py |
MERL-LB | MERL-LB-main/mp_test_load.py | import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class RR:
    """Round-robin dispatcher over the machines, honoring the action mask."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num
        self.action_index = 0

    def select_action(self, obs):
        """Advance cyclically from the last choice to the next unmasked machine."""
        _, _, _, _, _, action_mask = obs
        candidate = self.action_index
        for _ in range(self.machine_num):
            candidate = (candidate + 1) % self.machine_num
            if action_mask[candidate] == True:
                self.action_index = candidate
                break
        return candidate
class RD:
    """Random dispatcher: uniform scores blended with the action mask so
    masked-out machines can never win the argmax."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num

    def select_action(self, obs):
        _, _, _, _, _, action_mask = obs
        scores = (np.random.random(self.machine_num) + action_mask) / 2
        return np.argmax(scores)
class LG:
    """Least-gap dispatcher: picks the machine whose remaining runtime is
    closest to the job's runtime (masked machines excluded)."""

    def select_action(self, obs):
        _, job_run_time, _, machines_run_time, _, action_mask = obs
        masked_gap = np.where(
            action_mask, np.abs(machines_run_time - job_run_time), 1e9
        )
        return np.argmin(masked_gap)
class LC:
    """Least-connections dispatcher: picks the machine running the fewest
    jobs. Note: mutates the ``jobs_num`` array from ``obs`` in place."""

    def select_action(self, obs):
        _, _, _, _, jobs_num, action_mask = obs
        np.putmask(jobs_num, action_mask == False, 1e9)
        return np.argmin(jobs_num)
class Actor(nn.Module):
    """MLP scorer over per-machine features.

    With ``absolute=True`` the action is chosen greedily (argmax); otherwise
    a categorical distribution is sampled and its log-probability is cached
    on ``self.action_logprobs``.
    """

    def __init__(self, absolute=True, dim_list=None):
        super().__init__()
        # BUGFIX: the mutable list default ([126, 32, 1]) was replaced by
        # None to avoid the shared-mutable-default-argument pitfall;
        # behavior for all existing callers is unchanged.
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.absolute = absolute
        self.dim_list = dim_list
        fc = []
        self.param_num = 0  # total weight + bias count (GA gene length)
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU hidden layers, linear output, squeeze the trailing dim."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Copy a flat gene vector into layer weights and biases, in order."""
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Score actions and pick one; masked-out actions get -1e8."""
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        if not self.absolute:
            # Stochastic mode: sample and remember the log-probability.
            action_prob = torch.softmax(predict, dim=-1)
            action_dist = Categorical(action_prob)
            action = action_dist.sample()
            self.action_logprobs = action_dist.log_prob(action).detach()
            action = action.cpu().item()
        else:
            action = torch.argmax(predict, dim=1).cpu().item()
        return action

    def show(self):
        """Print every parameter tensor flattened (debug helper)."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Dispatch agent: assembles per-machine features from an observation
    and delegates the action choice to the job Actor."""

    def __init__(self, absolute=True):
        super(Agent, self).__init__()
        self.job_actor = Actor(absolute=absolute)

    def update(self, job_weights):
        """Load a flat gene/weight vector into the job actor."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Build the actor input from ``obs`` and return the chosen machine.

        ``obs`` unpacks to the job's resource-request rates and runtime, the
        per-machine occupancy-rate window, per-machine runtimes, an unused
        slot, and a boolean action mask (True = machine selectable).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (a batch dimension of 1 is wrapped around machine inputs)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Cluster-level summary: mean/std of occupancy across machines.
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        # Repeat the job state so every machine row sees the same job features.
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine feature row: job ++ own occupancy ++ own runtime ++ cluster summary.
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        self.job_actor.show()
def get_agent(args):
    """Build the scheduling agent selected by ``args.method``.

    Heuristic methods (``rr``, ``rd``, ``lg``, ``lc``) need no checkpoint;
    learned methods load ``args.checkpoint_path`` into the job actor.

    Raises:
        ValueError: if ``args.method`` is not a recognized method name
            (previously this fell through to an ``UnboundLocalError``).
    """
    method = args.method
    if method == "rr":
        agent = RR(args.machine_num)
    elif method == "rd":
        agent = RD(args.machine_num)
    elif method == "lg":
        agent = LG()
    elif method == "lc":
        agent = LC()
    elif method in ["nsga", "wsga", "deepjs", "igd", "nei_nsga", "ppo"]:
        # All learned methods share the same Actor architecture, so they are
        # restored identically from a plain state dict (the two original
        # duplicate branches are merged here).
        agent = Agent()
        state_dict = torch.load(args.checkpoint_path)
        agent.job_actor.load_state_dict(state_dict)
    else:
        raise ValueError(f"unknown method: {method!r}")
    return agent
def set_seed(seed=0):
    """Make runs reproducible: seed Python, NumPy and Torch (CPU + all GPUs)."""
    for seeder in (
        random.seed,            # Python stdlib RNG
        np.random.seed,         # NumPy RNG
        torch.manual_seed,      # Torch CPU RNG
        torch.cuda.manual_seed,     # current GPU RNG (no-op without CUDA)
        torch.cuda.manual_seed_all,  # every GPU RNG (no-op without CUDA)
    ):
        seeder(seed)
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Run one job sequence to completion with the configured agent.

    Saves occupancy/finish-time traces (both before and after draining the
    environment with ``run_to_end``) as ``.npy`` files under
    ``data_save_path`` and utilization/remaining-time plots under
    ``fig_save_path``.

    Returns:
        (std_fitness, runtime_fitness, env.job_num): mean load-balance std,
        mean machine finish time, and the number of jobs processed.
    """
    print("start test seq_index: ", seq_index)
    # init agent
    agent = get_agent(args)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # print mean std and mean run time
    # balance objective: std of absolute resource usage across machines
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data (let remaining jobs finish without new arrivals)
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # one utilization plot per resource type (assumes 4 resource types — TODO confirm)
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    # Sweep the expected user load by raising the max jobs per step; for each
    # load rate, evaluate the checkpointed agent over all job sequences in
    # parallel and aggregate the two objectives (balance std, runtime).
    args = parse_args()
    args.method = "ppo"
    args.tag = "user_load_test02"
    args.actual = True
    # args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
    # args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
    # args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
    # args.checkpoint_path = (
    #     "output/train/ns_deepjs/run02_no_mask/models/e10000_s0_d401.1772_b15.8262"
    # )
    args.checkpoint_path = "output/train/ppo/run_0/model/e16679_s9_d376.1445_b18.8828_actor.pth"
    job_num_list = range(2, 10)
    # user_sigam_list = [0]
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    result = []   # per-load-rate aggregate rows: (rate, balance/duration mean, balance/duration std)
    result2 = []  # per-sequence rows: (rate, balance, duration)
    for max_job_num in job_num_list:
        # Expected load = E[jobs/step] * E[res req] * E[job len] / total capacity.
        user_load_rate = (
            max_job_num
            / 2
            * args.max_res_req
            / 2
            * args.max_job_len
            / 2
            / args.res_capacity
            / args.machine_num
        )
        if user_load_rate > 1.1:
            break
        print(f"Test user_load_rate {user_load_rate:.3f}")
        save_dir = os.path.join(
            root_dir,
            f"user_load_rate_{user_load_rate:.3f}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # save args
        args.max_job_num = max_job_num
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # run every job sequence in its own worker process
        mutil_process = []
        pool = Pool(10)
        # pool = Pool(cpu_count())
        for i in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, i, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # aggregate performance over sequences
        fitness_record = []
        # BUGFIX: renamed from job_num_list, which shadowed the outer sweep range
        finished_job_nums = []
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            finished_job_nums.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            result2.append((user_load_rate, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        print(finished_job_nums)
        # BUGFIX: save all sequences' job counts (previously only the last one)
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(finished_job_nums))
        # BUGFIX: collect the aggregate row; previously `result` stayed empty and
        # mean_std.csv was always written without data.
        result.append(
            (
                user_load_rate,
                mean_fitness[0],  # balance (std) mean
                mean_fitness[1],  # duration (runtime) mean
                std_fitness[0],   # balance std over sequences
                std_fitness[1],   # duration std over sequences
            )
        )
        print(
            "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result,
        columns=[
            "user_load_rate",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, f"mean_std.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "user_load_rate",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 13,101 | 31.59204 | 97 | py |
MERL-LB | MERL-LB-main/sp_train_nn_dqn.py | import os
import random
import numpy as np
import torch
from collections import namedtuple, deque
from itertools import count
from config.dqn import *
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from envs.datacenter_env.env import DatacenterEnv
from utils import *
# Restrict torch to GPU index 1 (must run before CUDA is initialized).
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from torch.utils.tensorboard import SummaryWriter
# One environment step as stored in the replay buffer.
Transition = namedtuple(
    "Transition",
    (
        "state",
        "action_mask",
        "action",
        "next_state",
        "next_action_mask",
        "reward",
        "done",
    ),
)
# Train on the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ReplayMemory(object):
    """Fixed-capacity FIFO buffer of ``Transition`` tuples for DQN training."""

    def __init__(self, capacity):
        # deque silently drops the oldest transition once capacity is exceeded
        self.memory = deque(maxlen=capacity)

    def push(self, *args):
        """Append one transition (fields in ``Transition`` declaration order)."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Return ``batch_size`` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class Actor(nn.Module):
    """Plain MLP Q-network: ReLU hidden layers, linear head, squeezed last dim."""

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.fc = nn.ModuleList(
            nn.Linear(d_in, d_out) for d_in, d_out in zip(dim_list[:-1], dim_list[1:])
        )

    def forward(self, x):
        *hidden, head = self.fc
        for layer in hidden:
            x = F.relu(layer(x))
        return torch.squeeze(head(x), dim=-1)
class EpsScheduler:
    """Linear epsilon decay from ``max`` down to ``mini`` over ``step`` updates.

    BUGFIX: the original ``update()`` decremented ``self.max`` itself (losing
    the configured ceiling) and never clamped, so epsilon kept falling below
    ``mini`` — eventually going negative. The current value now lives in
    ``self.curr`` (previously assigned but unused) and is floored at ``mini``.
    """

    def __init__(self, max, mini, step) -> None:
        self.max = max    # initial / ceiling epsilon (kept constant)
        self.mini = mini  # floor epsilon
        self.curr = max   # current epsilon value
        self.step = (max - mini) / step  # per-update decrement

    def update(self):
        """Advance the schedule one step, never dropping below the floor."""
        self.curr = builtins_max(self.curr - self.step, self.mini) if False else (
            self.curr - self.step if self.curr - self.step > self.mini else self.mini
        )

    @property
    def eps(self):
        return self.curr
class DoubelDQN:
    """Double-DQN style trainer (policy + target net) over the datacenter env.

    Note: the class name's spelling is kept as-is because external code
    instantiates ``DoubelDQN``.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.learn_step_counter = 0
        self.action_index = 0
        self.steps_done = 0
        self._build_net()
        # epsilon decays linearly over the whole training run
        self.eps = EpsScheduler(args.eps_start, args.eps_end, args.num_episodes)

    def _build_net(self):
        """Create policy/target networks, optimizer and replay buffer."""
        self.policy_net = Actor().to(device)
        self.target_net = Actor().to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=1e-3)
        self.memory = ReplayMemory(5000)

    def choose_action(self, obs, absolute=False):
        """Epsilon-greedy action; ``absolute=True`` disables exploration."""
        self.steps_done += 1
        state, action_mask = self.obs_format(obs)
        if not absolute and random.random() < self.eps.eps:
            # Random eligible machine: mask invalid ones with -1e9 before argmax.
            random_prob = torch.rand((1, self.args.machine_num)).to(device)
            random_prob[action_mask == False] += -1e9
            action = torch.argmax(random_prob, dim=-1).cpu().item()
        else:
            predict = self.policy_net(state)
            predict[action_mask == False] += -1e9
            action = torch.argmax(predict, dim=1).cpu().item()
        return action

    def obs_format(self, obs):
        """Convert a raw env observation into (state tensor, action mask).

        Shape letters follow the inline comments: B=batch, n=machines,
        t=time horizon, r=resource types.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine row = [job features | own occupancy | own run time | global summary]
        state = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        return state.to(device), action_mask.to(device)

    def remember(self, obs, action, next_obs, reward, done):
        """Format one step and push it into the replay buffer."""
        state, action_mask = self.obs_format(obs)
        if next_obs is None:
            # Terminal transition: reuse the current state as a stand-in so
            # the buffer stays homogeneous (it is masked out via `done` later).
            next_state, next_action_mask = state, action_mask
        else:
            next_state, next_action_mask = self.obs_format(next_obs)
        action = torch.tensor(np.array([[action]]), dtype=torch.int64).to(device)
        reward = torch.tensor(np.array([reward]), dtype=torch.float).to(device)
        done = torch.tensor(np.array([done]), dtype=torch.bool).to(device)
        self.memory.push(
            state,
            action_mask,
            action,
            next_state,
            next_action_mask,
            reward,
            done,
        )

    def update_target_net(self):
        """Hard-copy the policy net's weights into the target net."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def learn(self):
        """Run one optimization step on a sampled minibatch (no-op until full)."""
        if len(self.memory) < self.args.batch_size:
            return
        transitions = self.memory.sample(self.args.batch_size)
        batch = Transition(*zip(*transitions))
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)  # n*2
        # normalize each reward objective to zero mean / unit variance
        reward_batch = (reward_batch - torch.mean(reward_batch, dim=0)) / (
            torch.std(reward_batch, dim=0) + 1e-7
        )
        # scalar reward = mean of the two objectives
        reward_batch = torch.mean(reward_batch, dim=-1)
        # single objective (std or runtime) alternative:
        # reward_batch = reward_batch[:, 0]
        non_final_mask = torch.cat(batch.done) == False
        # BUGFIX: bootstrap from the NEXT states; the original used
        # ``batch.state`` here while the mask below already used
        # ``batch.next_action_mask``, so the target net evaluated the
        # current states instead of the successor states.
        non_final_next_states = torch.cat(batch.next_state)[non_final_mask]
        non_final_next_action_mask = torch.cat(batch.next_action_mask)[non_final_mask]
        # for each batch state according to policy_net
        policy_predict = self.policy_net(state_batch)
        state_action_values = policy_predict.gather(1, action_batch)
        # state value or 0 in case the state was final.
        next_state_values = torch.zeros(self.args.batch_size, device=device)
        # action mask: invalid machines can never contribute to the max
        target_predict = self.target_net(non_final_next_states)  # B*10
        target_predict[non_final_next_action_mask == False] = -torch.inf
        next_state_values[non_final_mask] = target_predict.max(1)[0].detach()
        # Compute the expected Q values
        expected_state_action_values = (next_state_values * self.args.gamma) + reward_batch
        # Compute Huber loss
        criterion = nn.SmoothL1Loss()
        loss = criterion(state_action_values, expected_state_action_values.unsqueeze(1))
        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        # for param in self.policy_net.parameters():
        #     param.grad.data.clamp_(-1, 1)
        self.optimizer.step()

    def save(self, save_path):
        """Persist both networks' state dicts next to ``save_path``."""
        torch.save(self.target_net.state_dict(), save_path + "_target_net.pth")
        torch.save(self.policy_net.state_dict(), save_path + "_policy_net.pth")
if __name__ == "__main__":
    # Train the double-DQN scheduler, logging per-episode fitness and the best
    # non-dominated front (EP) to TensorBoard, and checkpointing every episode.
    args = parse_args()
    args.method = "dqn"
    args.tag = "run_02"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    model_save_dir = os.path.join(save_dir, "model")
    os.makedirs(model_save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    env = DatacenterEnv(args)
    dqn = DoubelDQN(args)
    score_list = []
    fitness_list = []
    EP = []  # non-dominated (episode, fitness) archive
    for i_episode in range(args.num_episodes):
        print("i_episode: ", i_episode)
        # Initialize the environment and state
        seq_index = i_episode % args.job_seq_num
        env.seq_index = seq_index
        obs = env.reset()
        score = np.zeros(2)
        for t in count():
            # Select and perform an action
            action = dqn.choose_action(obs)
            next_obs, reward, done, info = env.step(action)
            score += reward
            if done:
                print("done")
            # Store the transition in memory
            dqn.remember(obs, action, next_obs, reward, done)
            # Move to the next state
            obs = next_obs
            # Perform one step of the optimization (on the policy network)
            dqn.learn()
            if done:
                dqn.eps.update()
                print("eps: ", dqn.eps.eps)
                break
        score_list.append(score)
        # collect fitness
        # balance objective: std of absolute resource usage across machines
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # duration objective: mean machine finish time
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([runtime_fitness, std_fitness])
        # log per-episode fitness
        writer.add_scalar("current/duration_score", fitness[0], i_episode)
        writer.add_scalar("current/balance_score", fitness[1], i_episode)
        print("train fitness", fitness)
        fitness_list.append(fitness)
        fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
        print("train mean fitness", fitness_mean)
        # maintain the best non-dominated front (smaller is better on both axes)
        d_n = 0
        remove_list = []
        for item in EP:
            _, item_fitness = item
            if np.all(fitness_mean < item_fitness):
                remove_list.append(item)
            if np.all(fitness_mean > item_fitness):
                d_n += 1
            if d_n != 0:
                break
        if d_n == 0:
            for item in remove_list:
                EP.remove(item)
            EP.append((i_episode, fitness_mean))
        # plot the front against fixed baseline points (lc/lg/wsga)
        EP_fitness = np.array([i[1] for i in EP])
        x = EP_fitness[:, 1]
        y = EP_fitness[:, 0]
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Target distribution", figure, i_episode)
        plt.close()
        # log the rolling-mean fitness
        writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
        writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
        # save a checkpoint named after the episode and fitness values
        model_save_path = os.path.join(
            model_save_dir,
            f"e{i_episode}_s{seq_index}_d{fitness_mean[0]:.4f}_b{fitness_mean[1]:.4f}",
        )
        dqn.save(model_save_path)
        if i_episode % args.target_update == 0:
            dqn.update_target_net()
    print("Complete")
| 12,168 | 31.97832 | 96 | py |
MERL-LB | MERL-LB-main/mp_train_nn_ga.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """Machine-scoring MLP whose flat parameter vector is evolved by the GA."""

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0  # length of the flat gene vector
        for d_in, d_out in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(d_in, d_out))
            self.param_num += d_in * d_out + d_out
        self.fc = nn.ModuleList(layers)

    def forward(self, x):
        *hidden, head = self.fc
        for layer in hidden:
            x = F.relu(layer(x))
        return torch.squeeze(head(x), dim=-1)

    def update(self, weights):
        """Copy a flat gene vector into the layer weights and biases."""
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                count = layer.in_features * layer.out_features
                layer.weight.data = weights[cursor:cursor + count].reshape(
                    layer.out_features, layer.in_features
                )
                cursor += count
                layer.bias.data = weights[cursor:cursor + layer.out_features]
                cursor += layer.out_features

    def predict(self, input, action_mask=None):
        """Greedy action: argmax score over machines, invalid ones masked out."""
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        with torch.no_grad():
            for p in self.parameters():
                print(p.numpy().flatten())
class Agent(nn.Module):
    """Wraps the greedy scoring ``Actor`` used by the GA individuals."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Load a flat gene/weight vector into the job actor."""
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Build the per-machine feature tensor from ``obs`` and return a machine index.

        Shape letters follow the inline comments: B=batch, n=machines,
        t=time horizon, r=resource types (taken from
        ``machines_all_occupancy_rate.shape``).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Mean/std across machines summarize the global load distribution.
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine row = [job features | own occupancy | own run time | global summary]
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        self.job_actor.show()
class Individual:
    """One GA genome: a flat weight vector plus its fitness bookkeeping."""

    def __init__(self, job_genes=None):
        self.agent = Agent()
        # length of the flat gene vector, dictated by the actor architecture
        self.param_num = self.agent.job_actor.param_num
        self.job_genes = job_genes       # flat weight vector (None until init/assigned)
        self.train_fitness = None        # fitness measured during evolution
        self.eval_fitness = None         # fitness measured during evaluation
        self.std_fitness = np.inf        # best (lowest) balance fitness seen
        self.steps = 0

    def init(self):
        """Draw random genes uniformly from [-1, 1)."""
        self.job_genes = np.random.uniform(-1, 1, self.param_num)

    def update(self):
        """Push a copy of the genes into the agent's network weights."""
        self.agent.update(self.job_genes.copy())
def run_individual_in_env(id, args, genes, seq_index):
    """Evaluate one genome on job sequence ``seq_index`` (worker-process entry).

    Builds a fresh env and individual, rolls the episode to completion, and
    scores it according to ``args.ga_fitness_type``. Larger fitness is better,
    so both objectives are negated.

    Returns:
        (id, fitness): the caller's individual index and either a scalar
        (``"std"``/``"runtime"``) or a 2-vector (``"double"``) fitness.
        NOTE(review): an unrecognized ``ga_fitness_type`` would leave
        ``fitness`` unbound — confirm the config validates it upstream.
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    env.reset()
    individual = Individual(genes)
    individual.update()
    obs = env.reset()
    done = False
    action_list = []
    reward_list = []
    while not done:
        action = individual.agent.choose_action(obs)
        obs, reward, done, _ = env.step(action)
        action_list.append(action)
        reward_list.append(reward)
    if args.ga_fitness_type == "std":
        # balance objective only: std of occupancy rates across machines
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.sum(machines_occupancy_mean_std)
        fitness = -std_fitness
    elif args.ga_fitness_type == "runtime":
        # duration objective only
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.sum(machines_finish_time_record / 60)  # keep magnitude small
        fitness = -runtime_fitness
    elif args.ga_fitness_type == "double":
        # both objectives: balance std (in absolute resource units) ...
        # machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # ... and mean machine run time
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
    return id, fitness
# def eval_individual_in_env(args, genes, seq_index):
# args.seed = 5
# env = DatacenterEnv(args)
# env.seq_index = seq_index
# env.reset()
# individual = Individual(genes)
# individual.update()
# obs = env.reset()
# done = False
# action_list = []
# reward_list = []
# while not done:
# action = individual.agent.choose_action(obs)
# obs, reward, done, _ = env.step(action)
# action_list.append(action)
# reward_list.append(reward)
# if args.ga_fitness_type == "std":
# # 计算标准差
# machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
# machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
# machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
# std_fitness = np.sum(machines_occupancy_mean_std)
# fitness = -std_fitness
# elif args.ga_fitness_type == "runtime":
# # 计算运行时长
# machines_finish_time_record = np.array(env.machines_finish_time_record)
# runtime_fitness = np.sum(machines_finish_time_record / 60) # 避免过大
# fitness = -runtime_fitness
# elif args.ga_fitness_type == "double":
# # 计算标准差
# machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
# machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
# machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
# std_fitness = np.mean(machines_occupancy_mean_std)
# # 计算运行时长
# machines_finish_time_record = np.array(env.machines_finish_time_record)
# runtime_fitness = np.mean(machines_finish_time_record) # 避免过大
# fitness = np.array([-runtime_fitness, -std_fitness])
# print("eval", fitness)
# individual.eval_fitness = fitness
# return individual
class GA:
    """Weighted-sum genetic algorithm over flat network-weight genomes.

    Elites are selected by a normalized weighted sum of the (two) fitness
    objectives; children are produced by roulette-wheel selection,
    single-point crossover and Gaussian mutation. Individuals are evaluated
    in parallel worker processes via ``run_individual_in_env``.
    """

    def __init__(self, args):
        self.args = args
        self.p_size = args.ga_parent_size
        self.c_size = args.ga_children_size
        self.job_genes_len = 0  # set once the first individual exists
        self.mutate_rate = args.ga_mutate_rate
        self.mutate_scale = args.ga_mutate_scale
        self.population: List[Individual] = []
        self.elitism_population: List[Individual] = []
        self.avg_fitness = 0
        self.seq_index = 0
        self.seq_num = args.job_seq_num
        self.generation = 0

    def setup_seed(self):
        """Seed all RNGs for reproducibility.

        BUGFIX: previously read the module-global ``args`` instead of
        ``self.args``, which only worked when run as the training script.
        """
        seed = self.args.seed
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True

    def generate_ancestor(self):
        """Create the initial parent population with random genes."""
        for _ in range(self.p_size):
            individual = Individual()
            individual.init()
            self.population.append(individual)
        self.job_genes_len = individual.param_num

    def inherit_ancestor(self):
        """Load genes(nn model parameters) from file."""
        for i in range(self.p_size):
            pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
            nn = torch.load(pth)
            genes = []
            with torch.no_grad():
                for parameters in nn.parameters():
                    genes.extend(parameters.numpy().flatten())
            self.population.append(Individual(np.array(genes)))

    def crossover(self, c1_genes, c2_genes):
        """Single point crossover (in place on both gene arrays)."""
        p1_genes = c1_genes.copy()
        p2_genes = c2_genes.copy()
        point = np.random.randint(0, (self.job_genes_len))
        c1_genes[: point + 1] = p2_genes[: point + 1]
        c2_genes[: point + 1] = p1_genes[: point + 1]

    def mutate(self, c_genes):
        """Gaussian mutation with scale (in place)."""
        mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
        mutation = np.random.normal(size=c_genes.shape)
        mutation[mutation_array] *= self.mutate_scale
        c_genes[mutation_array] += mutation[mutation_array]

    def elitism_selection(self):
        """Keep the top ``p_size`` individuals by weighted normalized fitness.

        Returns the number of elites that changed since the last generation.
        """
        # min-max normalize each objective across the population
        fitness_list = []
        for individual in self.population:
            fitness_list.append(individual.train_fitness)
        fitness_list = np.array(fitness_list)
        norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
            np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
        )
        # rank by the weighted sum of the normalized objectives
        norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
        population_sorted_index = np.argsort(norm_fitness_list)  # ascending; take the tail
        population_sorted_index = population_sorted_index[-self.p_size :]
        elitism_population = [self.population[index] for index in population_sorted_index]
        # count how many elites are new this generation
        elite_change_num = len(elitism_population)
        for elite in elitism_population:
            if elite in self.elitism_population:
                elite_change_num -= 1
        self.elitism_population = elitism_population
        self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
        self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
        return elite_change_num

    def roulette_wheel_selection(self, size) -> List[Individual]:
        """Draw ``size`` elites with probability proportional to their fitness."""
        selection = []
        wheel = sum(self.elitism_norm_fitness_list)
        for _ in range(size):
            pick = np.random.uniform(0, wheel)
            current = 0
            for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
                current += individual_fitness
                if current > pick:
                    selection.append(self.elitism_population[i])
                    break
        return selection

    def generate_children(self):
        """Fill ``self.children_population`` via selection, crossover, mutation."""
        children_population = []
        while len(children_population) < self.c_size:
            p1, p2 = self.roulette_wheel_selection(2)
            c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
            self.crossover(c1_genes, c2_genes)
            self.mutate(c1_genes)
            self.mutate(c2_genes)
            c1 = Individual(c1_genes)
            c2 = Individual(c2_genes)
            children_population.extend([c1, c2])
        self.children_population = children_population

    def save_population(self, population: list[Individual], label=""):
        """Save every individual's weights and a fitness table for this generation."""
        save_dir = os.path.join(
            self.args.save_path,
            self.args.method,
            self.args.tag,
            label,
            f"g{self.generation}_{self.seq_index}",
        )
        os.makedirs(save_dir, exist_ok=True)
        mean_fitness_list = []
        for id, individual in enumerate(population):
            mean_fitness = np.array(individual.train_fitness)
            mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
            model_save_path = os.path.join(
                save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
            )
            individual.update()
            torch.save(individual.agent.job_actor.state_dict(), model_save_path)
        mean_fitness_list = np.array(mean_fitness_list)
        np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
        return mean_fitness_list

    def evolve(self):
        """Run one generation: evaluate, select elites, breed children.

        Returns:
            (elite_change_num, elite_fitness_list)
        """
        # # plain sequential loop (debug alternative)
        # population = []
        # for individual in self.population:
        #     individual = run_individual_in_env(
        #         self.args,
        #         individual.job_genes,
        #         self.seq_index,
        #     )
        #     population.append(individual)
        # evaluate the population in parallel worker processes
        population_num = self.args.ga_parent_size + self.args.ga_children_size
        pool_num = min(cpu_count(), population_num)
        print(f"use {pool_num} cup core")
        pool = Pool(pool_num)
        mutil_process = []
        for id, individual in enumerate(self.population):
            # run the individual in the environment to obtain its fitness;
            # with a single fixed sequence, cached fitness stays valid
            if self.args.job_seq_num == 1 and individual.train_fitness is not None:
                continue
            one_process = pool.apply_async(
                run_individual_in_env,
                args=(
                    id,
                    self.args,
                    individual.job_genes,
                    self.seq_index,
                ),
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # collect worker results
        for one_process in mutil_process:
            id, fitness = one_process.get()
            self.population[id].train_fitness = fitness
        # save the whole population
        self.save_population(self.population, "all")
        # elitism selection
        elite_change_num = self.elitism_selection()
        # save the elites
        elite_fitness_list = self.save_population(self.elitism_population, "elite")
        # generate offspring and form the next population
        self.generate_children()
        new_population = []
        new_population.extend(self.elitism_population)
        new_population.extend(self.children_population)
        self.population = new_population
        self.seq_index = (self.seq_index + 1) % self.seq_num
        self.generation += 1
        return elite_change_num, elite_fitness_list
# def eval(self):
# # 多进程
# population_mp = []
# population_num = self.args.ga_parent_size + self.args.ga_children_size
# pool = Pool(min(cpu_count(), population_num))
# for individual in self.population:
# # 在坏境中运行个体获得个体适应度
# finish_individual = pool.apply_async(
# eval_individual_in_env,
# args=(
# self.args,
# individual.job_genes,
# self.seq_index,
# ),
# )
# population_mp.append(finish_individual)
# pool.close()
# pool.join()
if __name__ == "__main__":
    # Run the weighted-sum GA indefinitely, logging the elite front and its
    # extreme fitness values to TensorBoard each generation.
    args = parse_args()
    args.method = "wsga"
    args.job_seq_num = 1
    args.tag = "run06"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    ga = GA(args)
    ga.setup_seed()
    if args.ga_choice == "generate":
        ga.generate_ancestor()
    else:
        ga.inherit_ancestor()
    fitness_list = []
    mean_best_fitness = [-np.inf] * args.ga_fitness_num
    while True:
        print("=" * 100)
        print(f"evolve generation {ga.generation}")
        elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorboard
        writer.add_scalar("Elite change num", elite_change_num, ga.generation)
        elite_fitness_list = np.array(elite_fitness_list)
        # negate the (maximized) fitness back into raw objective values;
        # last two columns are (duration, balance)
        elite_fitness_list = -elite_fitness_list[:, -2:]
        # elite_fitness_list = -elite_fitness_list[:, -2:] * [[1, args.res_capacity**2]]
        y = elite_fitness_list[:, 0]
        x = elite_fitness_list[:, 1]
        # scatter the elites against fixed baseline points (lc/lg/wsga)
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Target distribution", figure, ga.generation)
        plt.close()
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Balance fitness max", max_elite_fitness[1], ga.generation)
        writer.add_scalar("Duration fitness max", max_elite_fitness[0], ga.generation)
        writer.add_scalar("Balance fitness min", min_elite_fitness[1], ga.generation)
        writer.add_scalar("Duration fitness min", min_elite_fitness[0], ga.generation)
| 18,403 | 34.460501 | 96 | py |
MERL-LB | MERL-LB-main/mp_train_nn_deepjs_no_mask.py | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """Per-machine scoring MLP.

    Applied row-wise: for an input of shape (B, n, feat) it produces one
    unnormalized score per machine, shape (B, n).
    """

    def __init__(self, dim_list=None):
        """Build an MLP from consecutive layer widths.

        Args:
            dim_list: layer sizes, defaults to [126, 32, 1].  A ``None``
                default replaces the original mutable-list default argument
                (shared-state pitfall).
        """
        super().__init__()
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.dim_list = dim_list
        fc = []
        # Total scalar parameter count (weights + biases); used by
        # GA-style flat-weight tooling elsewhere in the project.
        self.param_num = 0
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU MLP; the trailing size-1 output dimension is squeezed away."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def predict(self, input, action_mask=None, absolute=True):
        """Choose a machine index for a batch of size 1.

        ``action_mask`` is accepted for interface compatibility but is
        intentionally NOT applied — this is the "no mask" training variant.
        With ``absolute=True`` the argmax score wins; otherwise an index is
        sampled from the softmax distribution over scores.
        """
        predict = self(input)
        if absolute:
            action = torch.argmax(predict, dim=1).cpu().item()
        else:
            action_probs = torch.softmax(predict, dim=-1)
            action_probs = action_probs.detach().cpu().numpy()
            action_probs = action_probs[0]
            action_list = list(range(len(action_probs)))
            action = np.random.choice(action_list, p=action_probs)
        return action
class Agent(nn.Module):
    """Policy wrapper: converts a raw environment observation into the
    per-machine feature rows the job Actor scores, then returns the
    selected machine index."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def choose_action(self, obs, absolute=True):
        """Select a machine for the pending job.

        ``obs`` is the 6-tuple produced by DatacenterEnv: job resource
        request rates, job runtime, per-machine occupancy-rate window,
        per-machine remaining runtime, an element unused here, and the
        valid-action mask.  The mask is forwarded to the actor but the
        actor ignores it (see Actor.predict, the "no mask" variant).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Mean/std of occupancy across machines summarise cluster balance;
        # these are shared by (repeated onto) every machine row below.
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Each machine row = [job features | its occupancy window |
        # its remaining runtime | cluster mean/std summary].
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask, absolute)
        # action = self.job_actor.predict(job_input)
        return action
class JobShopDataset(Dataset):
    """Flattens per-rollout trajectories into one aligned list of
    (state, action_mask, action, advantage) samples for the policy update."""

    def __init__(self, obs_data, action_data, advantage_data) -> None:
        # Each argument is a list (per rollout) of lists (per step);
        # flatten them into parallel flat lists.
        self.obs_data = [i for item in obs_data for i in item]
        self.action_data = [i for item in action_data for i in item]
        self.advantage_data = [i for item in advantage_data for i in item]

    def __getitem__(self, index):
        obs = self.obs_data[index]
        action = self.action_data[index]
        advantage = self.advantage_data[index]
        state, action_mask = self.obs_format(obs)
        return state, action_mask, action, advantage

    def obs_format(self, obs):
        """Convert one raw env observation into (state, action_mask) tensors.

        Mirrors Agent.choose_action's feature construction: ``state`` is
        (1, n, feat) where each machine row concatenates the job features,
        that machine's occupancy window and remaining runtime, and the
        cluster-wide occupancy mean/std summary.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        state = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        return state, action_mask

    def __len__(self):
        return len(self.action_data)
class InputDrive:
    """Multi-process REINFORCE-style trainer.

    Rolls out the current stochastic policy in parallel worker processes,
    turns the two-objective rewards into baseline-subtracted advantages and
    applies a policy-gradient update.  ``train`` relies on the module-level
    ``args``, ``writer`` and ``model_save_dir`` created in ``__main__``.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.seq_index = 0
        self.seq_num = args.job_seq_num
        self.agent = Agent()
        # Exploration probability, decayed linearly over training
        # (forwarded to workers; see get_one_experience).
        self.prob = 0.8
        self.prob_step = 2 / self.args.epoch

    def set_seed(self, seed):
        """Seed python, numpy and torch (CPU + all GPUs) for reproducibility."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)  # CPU
        torch.cuda.manual_seed(seed)  # current GPU
        torch.cuda.manual_seed_all(seed)  # all GPUs

    def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
        """Roll out one full episode inside a worker process.

        Returns:
            trajectory: list of [obs, action, reward, next_obs, done].
            fitness: np.array([-runtime, -balance_std]); larger is better.
            end_time: the environment clock at episode end.
        """
        # Fresh environment per worker.
        env = DatacenterEnv(args)
        env.seq_index = seq_index
        env.reset()
        # Re-create the agent from the broadcast weights.
        agent = Agent()
        agent.load_state_dict(model_state_dict)
        # Per-worker seeding.
        self.set_seed(seed)
        # Collect a trajectory with the sampling (non-greedy) policy.
        obs = env.reset()
        done = False
        trajectory = []
        agent.eval()
        with torch.no_grad():
            while not done:
                action = agent.choose_action(obs, absolute=False)
                next_obs, reward, done, _ = env.step(action)
                trajectory.append([obs, action, reward, next_obs, done])
                obs = next_obs
        # Objective 1 (balance): mean std of resource load across machines.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # Objective 2 (duration): mean remaining finish time.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
        return trajectory, fitness, env.curr_time

    def get_discount_reward(self, trajectory, reward_index):
        """Discounted return of the (standardised) per-step rewards.

        Bug fix: the recursion now accumulates the discounted *return*
        G_t = r_t + gamma * G_{t+1}; the original only added the next raw
        reward and left the final step at zero.
        """
        reward = []
        for item in trajectory:
            reward.append(item[reward_index])
        # Standardise each objective (epsilon guards a zero std).
        norm_reward_batch = (reward - np.mean(reward, axis=0)) / (np.std(reward, axis=0) + 1e-7)
        # Equal weight for both objectives.
        mean_reward = np.sum(norm_reward_batch, axis=-1)
        trajectory_len = len(trajectory)
        discount_reward = np.zeros(trajectory_len)
        running = 0.0
        for index in reversed(range(trajectory_len)):
            running = mean_reward[index] + self.args.gamma * running
            discount_reward[index] = running
        return discount_reward

    def get_experience(self, seq_index):
        """Collect ``args.experience_num`` episodes in parallel processes."""
        pool = Pool(min(cpu_count(), self.args.experience_num))
        all_record = []
        for seed in range(self.args.experience_num):
            record = pool.apply_async(
                self.get_one_experience,
                args=(
                    self.args,
                    seed,
                    self.agent.state_dict(),
                    seq_index,
                    self.prob,
                ),
            )
            all_record.append(record)
        pool.close()
        pool.join()
        all_trajectory = []
        all_fitness = []
        end_time_list = []
        for record in all_record:
            trajectory, fitness, end_time = record.get()
            all_trajectory.append(trajectory)
            all_fitness.append(fitness)
            end_time_list.append(end_time)
        return all_trajectory, all_fitness, end_time_list

    def get_advantage(self, all_trajectory):
        """Per-step advantages: discounted return minus a step-wise baseline.

        Bug fix: as in get_discount_reward, the discounted return now uses
        the accumulated tail return instead of only the next step's reward.
        """
        all_reward = []
        all_reward_flat = []
        max_reward_len = 0
        for trajectory in all_trajectory:
            max_reward_len = max(max_reward_len, len(trajectory))
            reward = []
            for item in trajectory:
                reward.append(item[2])
                all_reward_flat.append(item[2])
            all_reward.append(reward)
        all_reward_flat = np.array(all_reward_flat)
        # Normalisation statistics are shared across all trajectories.
        reward_mean = np.mean(all_reward_flat, axis=0)
        reward_std = np.std(all_reward_flat, axis=0)
        all_discount_reward = []
        for reward in all_reward:
            norm_reward = (reward - reward_mean) / (reward_std + 1e-7)
            # Equal weighting of the two objectives.
            mean_reward = np.mean(norm_reward, axis=-1)
            reward_len = len(reward)
            discount_reward = np.zeros(reward_len)
            running = 0.0
            for index in reversed(range(reward_len)):
                running = mean_reward[index] + self.args.gamma * running
                discount_reward[index] = running
            all_discount_reward.append(discount_reward)
        # Right-pad with zeros so the baseline can be averaged step-wise.
        all_padded_discount_reward = [
            np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
            for discount_reward in all_discount_reward
        ]
        baseline = np.mean(all_padded_discount_reward, axis=0)
        all_advantage = [
            discount_reward - baseline[: len(discount_reward)]
            for discount_reward in all_discount_reward
        ]
        return all_advantage

    def train(self):
        """Main training loop over epochs and job sequences."""
        optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
        best_fitness = [np.array([np.inf, np.inf])] * self.args.job_seq_num
        i_episode = 0
        EP = []  # archive of non-dominated (episode, fitness) pairs
        fitness_list = []
        for epoch in range(self.args.epoch):
            for seq_index in range(self.args.job_seq_num):
                # Parallel rollouts with the current policy.
                all_trajectory, all_fitness, end_time_list = self.get_experience(seq_index)
                all_obs = []
                all_action = []
                for trajectory in all_trajectory:
                    _obs = []
                    _action = []
                    for item in trajectory:
                        _obs.append(item[0])
                        _action.append(item[1])
                    all_obs.append(_obs)
                    all_action.append(_action)
                # Aggregate fitness over workers (positive; lower is better).
                mean_fitness = -np.mean(all_fitness, axis=0)
                print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
                print("mean_fitness: ", mean_fitness)
                fitness_list.append(mean_fitness)
                writer.add_scalar("Train/End time max", max(end_time_list), i_episode)
                writer.add_scalar("Train/End time min", min(end_time_list), i_episode)
                writer.add_scalar("Train/End time mean", np.mean(end_time_list), i_episode)
                # Current-episode fitness.
                writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
                writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
                # Rolling mean over the last job_seq_num episodes.
                fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
                writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
                writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
                # Maintain the archive of non-dominated solutions.
                d_n = 0
                remove_list = []
                for item in EP:
                    _, item_fitness = item
                    if np.all(fitness_mean < item_fitness):
                        remove_list.append(item)
                    if np.all(fitness_mean > item_fitness):
                        d_n += 1
                    if d_n != 0:
                        break
                if d_n == 0:
                    for item in remove_list:
                        EP.remove(item)
                    EP.append((i_episode, fitness_mean))
                # Plot the current front against baseline schedulers.
                EP_fitness = np.array([i[1] for i in EP])
                x = EP_fitness[:, 1]
                y = EP_fitness[:, 0]
                figure = plt.figure(figsize=(8, 8), dpi=100)
                plt.scatter(x, y, label="train")
                plt.scatter(16.2658, 534.9209, label="lc")
                plt.scatter(66.8868, 349.5121, label="lg")
                plt.scatter(17.0905, 351.4006, label="wsga")
                plt.xlim((0, 250))
                plt.ylim((200, 600))
                plt.xlabel("balance")
                plt.ylabel("duration")
                plt.title("Target distribution")
                plt.legend()
                writer.add_figure("Target distribution", figure, i_episode)
                plt.close()
                # Checkpoint the actor.
                model_name = (
                    f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
                )
                model_save_path = os.path.join(model_save_dir, model_name)
                torch.save(self.agent.job_actor.state_dict(), model_save_path)
                # Advantages for the policy-gradient step.
                all_advantage = self.get_advantage(all_trajectory)
                dataset = JobShopDataset(
                    obs_data=all_obs,
                    action_data=all_action,
                    advantage_data=all_advantage,
                )
                dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
                optimizer.zero_grad()
                self.agent.train()
                # Accumulate gradients over all mini-batches, then step once.
                for batch in dataloader:
                    state, action_mask, action, advantage = batch
                    action_predict = self.agent.job_actor(state)
                    # NOTE: the action mask is deliberately not applied here
                    # ("no mask" variant); an in-place masked assignment
                    # would also break gradient flow.
                    action_predict = torch.squeeze(action_predict, dim=1)
                    action_probs = torch.softmax(action_predict, dim=-1)
                    action_dist = Categorical(action_probs)
                    action_logprobs = action_dist.log_prob(action)
                    # REINFORCE: positive advantage -> raise that action's
                    # log-probability (minimise -logprob * advantage).
                    loss = -action_logprobs * advantage
                    loss.mean().backward()
                optimizer.step()
                i_episode += 1
            # Decay the exploration probability.  Bug fix: the original
            # max(prob - step, prob) was a no-op and never decreased it.
            self.prob = max(self.prob - self.prob_step, 0.0)
if __name__ == "__main__":
    # Output layout: <save_path>/<method>/<tag>/{args.txt, log/, models/}
    args = parse_args()
    args.method = "ns_deepjs"
    args.tag = "run04_no_mask_no_absolute"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    model_save_dir = os.path.join(save_dir, "models")
    os.makedirs(model_save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    # NOTE: `writer`, `model_save_dir` and `args` are read as module-level
    # globals inside InputDrive.train; do not rename them.
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    inputdrive = InputDrive(args)
    inputdrive.train()
| 18,402 | 35.586481 | 98 | py |
MERL-LB | MERL-LB-main/mp_test.py | import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class RR:
    """Round-robin scheduler: cycles through machines, skipping masked ones."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num
        self.action_index = 0  # machine chosen on the previous call

    def select_action(self, obs):
        """Return the next valid machine index after the last one used."""
        _, _, _, _, _, action_mask = obs
        candidate = self.action_index
        for _ in range(self.machine_num):
            candidate = (candidate + 1) % self.machine_num
            if action_mask[candidate] == True:
                self.action_index = candidate
                break
        return candidate
class RD:
    """Random dispatcher: picks a machine uniformly among the valid ones."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num

    def select_action(self, obs):
        """Return a random unmasked machine index.

        Valid machines get +1 added to their noise score, so under argmax
        any unmasked machine always beats every masked one.
        """
        _, _, _, _, _, action_mask = obs
        noise = np.random.random(self.machine_num)
        scores = (noise + action_mask) / 2
        action = np.argmax(scores)
        return action
class LG:
    """Least-gap scheduler: choose the valid machine whose remaining
    runtime is closest to the incoming job's runtime."""

    def select_action(self, obs):
        """Return the unmasked machine minimising |remaining - job runtime|."""
        _, job_run_time, _, machines_run_time, _, action_mask = obs
        distance = np.abs(machines_run_time - job_run_time)
        distance[action_mask == False] = 1e9  # rule out masked machines
        return np.argmin(distance)
class LC:
    """Least-connections scheduler: choose the valid machine that is
    currently running the fewest jobs."""

    def select_action(self, obs):
        """Return the index of the unmasked machine with the fewest jobs.

        Bug fix: the original wrote 1e9 into the caller's ``jobs_num``
        array in place, corrupting the observation for later use; masking
        is now done without mutating the input via ``np.where``.
        """
        _, _, _, _, jobs_num, action_mask = obs
        masked_counts = np.where(np.asarray(action_mask), jobs_num, 1e9)
        action = np.argmin(masked_counts)
        return action
class Actor(nn.Module):
    """Per-machine scoring MLP whose weights can be loaded either from a
    state dict or from a flat parameter vector (GA-style ``update``)."""

    def __init__(self, absolute=True, dim_list=[126, 32, 1]):
        # NOTE(review): mutable default argument; harmless here because it
        # is never mutated, but a None default would be safer.
        super().__init__()
        # absolute=True -> greedy argmax action; False -> sample from softmax.
        self.absolute = absolute
        self.dim_list = dim_list
        fc = []
        # Total scalar parameter count; matches the flat vector length
        # expected by update().
        self.param_num = 0
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU MLP; squeezes the trailing size-1 output dimension."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Load a flat weight vector (as evolved by the GA) into the layers.

        Layout per layer: all weights (out_features*in_features, row-major)
        followed by all biases.
        """
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Return an action index for a batch of size 1.

        Masked machines receive a large negative score so they are never
        chosen.  When sampling (absolute=False), the chosen action's
        log-prob is cached in ``self.action_logprobs``.
        """
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        if not self.absolute:
            action_prob = torch.softmax(predict, dim=-1)
            action_dist = Categorical(action_prob)
            action = action_dist.sample()
            self.action_logprobs = action_dist.log_prob(action).detach()
            action = action.cpu().item()
        else:
            action = torch.argmax(predict, dim=1).cpu().item()
        return action

    def show(self):
        """Debug helper: print every parameter tensor, flattened."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Inference-time wrapper around the job Actor: converts a raw env
    observation into per-machine feature rows and returns the chosen
    machine index."""

    def __init__(self, absolute=True):
        super(Agent, self).__init__()
        self.job_actor = Actor(absolute=absolute)

    def update(self, job_weights):
        """Load flat GA weights into the actor (see Actor.update)."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Build the (B=1, n, feat) actor input and delegate to predict.

        Each machine row concatenates: the job features, that machine's
        occupancy window, its remaining runtime, and cluster-wide occupancy
        mean/std summaries (shared across rows).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        """Debug helper: print the actor's parameters."""
        self.job_actor.show()
def get_agent(args):
    """Instantiate the scheduler selected by ``args.method``.

    Heuristic schedulers (rr/rd/lg/lc) need no weights; learned policies
    load the actor state dict from ``args.checkpoint_path``.

    Raises:
        ValueError: if ``args.method`` names no known scheduler.  (The
            original fell through and crashed with UnboundLocalError.)
    """
    method = args.method
    if method == "rr":
        agent = RR(args.machine_num)
    elif method == "rd":
        agent = RD(args.machine_num)
    elif method == "lg":
        agent = LG()
    elif method == "lc":
        agent = LC()
    elif method in ["nsga", "wsga", "deepjs", "igd", "ppo"]:
        # All learned methods share the same greedy Agent wrapper.
        agent = Agent()
        state_dict = torch.load(args.checkpoint_path)
        agent.job_actor.load_state_dict(state_dict)
    else:
        raise ValueError(f"unknown scheduling method: {method!r}")
    return agent
def set_seed(seed=0):
    """Seed every RNG in use: python, numpy, torch CPU and all GPUs."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,  # CPU
        torch.cuda.manual_seed,  # current GPU
        torch.cuda.manual_seed_all,  # all GPUs
    )
    for seeder in seeders:
        seeder(seed)
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Evaluate one job sequence end-to-end and persist results.

    Runs the configured agent on sequence ``seq_index``, saves occupancy
    and finish-time records (both at job-arrival end and after draining
    the cluster via env.run_to_end), renders plots, and returns
    (std_fitness, runtime_fitness, job_num).  Designed to run inside a
    worker process.
    """
    print("start test seq_index: ", seq_index)
    # init agent
    agent = get_agent(args)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # print mean std and mean run time
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # One utilization plot per resource dimension (4 resources assumed here).
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    args = parse_args()
    # Method under evaluation; learned methods also require
    # args.checkpoint_path (see get_agent).
    args.method = "ppo"
    args.tag = "best_run01"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    model_save_path = os.path.join(save_dir, "models")
    fig_save_path = os.path.join(save_dir, "fig")
    data_save_path = os.path.join(save_dir, "data")
    os.makedirs(data_save_path, exist_ok=True)
    os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
    os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
    os.makedirs(model_save_path, exist_ok=True)
    os.makedirs(fig_save_path, exist_ok=True)
    set_seed()
    # Evaluate every job sequence in its own worker process.
    mutil_process = []
    pool = Pool(10)
    for i in range(args.job_seq_num):
        one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
        mutil_process.append(one_process)
    pool.close()
    pool.join()
    # Aggregate per-sequence fitness values.
    fitness_record = []
    job_num_list = []
    for p in mutil_process:
        std_fitness, runtime_fitness, job_num = p.get()
        job_num_list.append(job_num)
        fitness_record.append((std_fitness, runtime_fitness))
    fitness_record = np.array(fitness_record)
    mean_fitness = np.mean(fitness_record, axis=0)
    std_fitness = np.std(fitness_record, axis=0)
    print(job_num_list)
    # Bug fix: save the full per-sequence list; the original saved only
    # the last worker's scalar job_num.
    np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
    print(
        "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
            mean_fitness[0], mean_fitness[1]
        )
    )
    print(
        "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
    )
    result1 = [(*mean_fitness, *std_fitness)]
    df = pd.DataFrame(
        result1,
        columns=[
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(save_dir, "mean_std.csv"))
    df2 = pd.DataFrame(
        fitness_record,
        columns=[
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(save_dir, "all_data.csv"))
    print("done")
print("done")
| 12,458 | 32.312834 | 131 | py |
MERL-LB | MERL-LB-main/sp_train_nn_ppo.py | import os
import random
import numpy as np
import torch
from collections import namedtuple, deque
from itertools import count
from config.ppo import *
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from torch.utils.tensorboard import SummaryWriter
from envs.datacenter_env.env import DatacenterEnv
from utils import *
# Pin the process to GPU 0 before torch initialises CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# All tensors and models in this file are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# One stored environment step; field order matters for ReplayMemory.push.
Transition = namedtuple(
    "Transition",
    (
        "state",
        "action_mask",
        "action",
        "action_logprobs",
        "reward",
        "done",
    ),
)
class ReplayMemory(object):
    """Bounded FIFO buffer of Transition tuples backed by a deque."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.reset()

    def __len__(self):
        return len(self.memory)

    def reset(self):
        """Drop all stored transitions; the deque evicts the oldest entry
        automatically once it reaches ``capacity`` again."""
        self.memory = deque([], maxlen=self.capacity)

    def push(self, *args):
        """Store one transition built from the positional fields."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Uniformly sample ``batch_size`` transitions without replacement."""
        return random.sample(self.memory, batch_size)
class Actor(nn.Module):
    """Policy network: maps per-machine features to one score per machine,
    input (B, n, feat) -> output (B, n)."""

    def __init__(self, dim_list=None):
        """Build the MLP from consecutive layer widths.

        Args:
            dim_list: layer sizes, defaults to [126, 32, 1].  A ``None``
                default replaces the original mutable-list default argument.
        """
        super().__init__()
        if dim_list is None:
            dim_list = [126, 32, 1]
        fc = []
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU MLP over the last axis; squeezes the size-1 output dim."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x
class Critic(nn.Module):
    """Value network: scores each machine then sums over machines to one
    scalar state value per batch element, input (B, n, feat) -> (B,)."""

    def __init__(self, dim_list=None):
        """Build the MLP from consecutive layer widths.

        Args:
            dim_list: layer sizes, defaults to [126, 32, 1].  A ``None``
                default replaces the original mutable-list default argument.
        """
        super().__init__()
        if dim_list is None:
            dim_list = [126, 32, 1]
        fc = []
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU MLP; squeeze the size-1 output dim, then sum over machines."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        x = torch.sum(x, dim=-1)
        return x
class PPO:
def __init__(
self,
args,
) -> None:
self.args = args
self.learn_step_counter = 0
self.action_logprobs = None # 缓存
self._build_net()
def _build_net(self):
self.actor = Actor().to(device)
self.critic = Critic().to(device)
self.memory = ReplayMemory(5000)
self.optimizer = torch.optim.Adam(
[
{"params": self.actor.parameters(), "lr": args.ppo_actor_lr},
{"params": self.critic.parameters(), "lr": args.ppo_critic_lr},
]
)
self.critic_loss = nn.MSELoss()
def choose_action(self, obs, absolute=False):
state, action_mask = self.obs_format(obs)
predict = self.actor(state)
predict[action_mask == False] += -torch.inf
if not absolute:
action_prob = torch.softmax(predict, dim=-1)
action_dist = Categorical(action_prob)
action = action_dist.sample()
self.action_logprobs = action_dist.log_prob(action).detach()
action = action.cpu().item()
else:
action = torch.argmax(predict, dim=1).cpu().item()
return action
def obs_format(self, obs):
(
job_res_req_rate,
job_run_time,
machines_all_occupancy_rate,
machines_run_time,
_,
action_mask,
) = obs
job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
machines_all_occupancy_rate = torch.tensor(
np.array([machines_all_occupancy_rate]), dtype=torch.float
)
machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
# job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
B, n, t, r = machines_all_occupancy_rate.shape
machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1) # B*t*r
machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1) # B*t*r
job_state = job_state.reshape(B, 1, -1)
job_state = job_state.repeat(1, n, 1)
machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
machines_state_mean = torch.cat(
(
machines_occupancy_rate_mean,
machines_occupancy_rate_std,
),
dim=-1,
)
machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
machines_run_time = machines_run_time.reshape(B, n, -1)
machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
state = torch.cat(
(
job_state,
machines_occupancy_rate,
machines_run_time,
machines_state_mean_std_run_time,
),
dim=-1,
) # B*n*dim2
action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
return state.to(device), action_mask.to(device)
def remember(self, obs, action, reward, done):
state, action_mask = self.obs_format(obs)
action_logprobs = self.action_logprobs
action = torch.tensor(action, dtype=torch.int32)
self.memory.push(
state,
action_mask,
action,
action_logprobs,
reward,
done,
)
def learn(self):
if len(self.memory) < self.args.ppo_update_timestep:
return
transitions = self.memory.memory
batch = Transition(*zip(*transitions))
state_batch = torch.cat(batch.state, dim=0).to(device)
action_batch = torch.vstack(batch.action).to(device)
action_mask_batch = torch.cat(batch.action_mask, dim=0).to(device)
action_logprobs_batch = torch.vstack(batch.action_logprobs).to(device)
reward_batch = np.array(batch.reward)
done_batch = np.array(batch.done)
# reward 标准化
reward_batch = (reward_batch - np.mean(reward_batch, axis=0)) / (
np.std(reward_batch, axis=0) + 1e-7
)
# reward 缩放
# reward_batch = reward_batch * np.array([[0.001, 1]])
# # 归一化
# norm_reward_batch = (reward_batch - np.min(reward_batch, axis=0)) / (
# np.max(reward_batch, axis=0) - np.min(reward_batch, axis=0)
# )
# mean_reward_batch = np.mean(norm_reward_batch, axis=-1)
# 无归一化 或 标准化
# mean_reward_batch = np.sum(reward_batch, axis=-1)
# mean_reward_batch = reward_batch[:, 0]
# Monte Carlo estimate of returns
# cumulate_rewards = []
# discounted_reward = 0
# for reward, is_terminal in zip(
# reversed(mean_reward_batch), reversed(done_batch)
# ):
# if is_terminal:
# discounted_reward = 0
# discounted_reward = reward + (self.args.ppo_gamma * discounted_reward)
# cumulate_rewards.insert(0, discounted_reward)
# cumulate_rewards = torch.tensor(cumulate_rewards, dtype=torch.float32).to(
# device
# )
# 标准化
# cumulate_rewards = (cumulate_rewards - cumulate_rewards.mean()) / (
# cumulate_rewards.std() + 1e-7
# )
cumulate_rewards = []
discounted_reward = 0
for reward, is_terminal in zip(reversed(reward_batch), reversed(done_batch)):
if is_terminal:
discounted_reward = 0
discounted_reward = reward + (self.args.ppo_gamma * discounted_reward)
cumulate_rewards.insert(0, discounted_reward)
cumulate_rewards = torch.tensor(cumulate_rewards, dtype=torch.float32).to(device)
# 标准化
cumulate_rewards = (cumulate_rewards - cumulate_rewards.mean(dim=0)) / (
cumulate_rewards.std(dim=0) + 1e-7
)
# 合并两个目标的reward
cumulate_rewards = cumulate_rewards * torch.tensor([[0.5, 0.5]]).to(device)
cumulate_rewards = torch.sum(cumulate_rewards, dim=-1)
# cumulate_rewards = cumulate_rewards[:, 0]
# Optimize policy for K epochs
for epoch in range(self.args.ppo_epochs):
new_action_predict = self.actor(state_batch)
new_action_predict[action_mask_batch == False] += -torch.inf
new_action_probs = torch.softmax(new_action_predict, dim=-1)
new_action_dist = Categorical(new_action_probs)
new_action_entropy = new_action_dist.entropy()
new_action_logprobs = new_action_dist.log_prob(action_batch.reshape(-1))
state_values = self.critic(state_batch)
advantages = cumulate_rewards - state_values.detach()
ratios = torch.exp(new_action_logprobs - action_logprobs_batch.reshape(-1))
surr1 = ratios * advantages
surr2 = (
torch.clamp(ratios, 1 - self.args.ppo_eps_clip, 1 + self.args.ppo_eps_clip)
* advantages
)
# loss = -advantages
loss = (
-torch.min(surr1, surr2)
+ 0.5 * self.critic_loss(state_values, cumulate_rewards)
- 0.01 * new_action_entropy
)
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
self.learn_step_counter += 1
# TODO Copy new weights into old policy
# self.policy_old.load_state_dict(self.policy.state_dict()
# 清空缓冲区
self.memory.reset()
    def save(self, save_path):
        """Persist actor and critic weights to ``<save_path>_actor.pth`` and ``<save_path>_critic.pth``."""
        torch.save(self.actor.state_dict(), save_path + "_actor.pth")
        torch.save(self.critic.state_dict(), save_path + "_critic.pth")
if __name__ == "__main__":
    # PPO training driver: rolls out episodes, learns every step, tracks the
    # non-dominated (duration, balance) front, and checkpoints per episode.
    # Relies on parse_args / DatacenterEnv / PPO / SummaryWriter defined above.
    args = parse_args()
    args.method = "ppo"
    args.tag = "run_0"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    model_save_dir = os.path.join(save_dir, "model")
    os.makedirs(model_save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    env = DatacenterEnv(args)
    ppo = PPO(args)
    score_list = []
    fitness_list = []
    EP = []  # archive of non-dominated (episode, fitness) pairs
    for i_episode in range(args.num_episodes):
        print("i_episode: ", i_episode)
        # Initialize the environment and state
        seq_index = i_episode % args.job_seq_num
        env.seq_index = seq_index
        obs = env.reset()
        score = np.zeros(2)
        for t in count():
            # Select and perform an action
            action = ppo.choose_action(obs)
            next_obs, reward, done, info = env.step(action)
            score += reward
            if done:
                print("done")
            # Store the transition in memory
            ppo.remember(obs, action, reward, done)
            # Move to the next state
            obs = next_obs
            # Perform one step of the optimization (on the policy network)
            ppo.learn()
            if done:
                ppo.memory.reset()  # open question: should the buffer be cleared here?
                break
        score_list.append(score)
        # Collect fitness values
        # Balance objective: mean std of per-machine resource occupancy
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # Duration objective: mean remaining finish time
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([runtime_fitness, std_fitness])
        # Log per-episode fitness
        writer.add_scalar("current/duration_score", fitness[0], i_episode)
        writer.add_scalar("current/balance_score", fitness[1], i_episode)
        print("train fitness", fitness)
        fitness_list.append(fitness)
        fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
        print("train mean fitness", fitness_mean)
        # Maintain the best non-dominated front (EP): drop dominated entries,
        # add the new point only if nothing in EP dominates it
        d_n = 0
        remove_list = []
        for item in EP:
            _, item_fitness = item
            if np.all(fitness_mean < item_fitness):
                remove_list.append(item)
            if np.all(fitness_mean > item_fitness):
                d_n += 1
            if d_n != 0:
                break
        if d_n == 0:
            for item in remove_list:
                EP.remove(item)
            EP.append((i_episode, fitness_mean))
        # Plot the current front against fixed baseline points (lc/lg/wsga)
        EP_fitness = np.array([i[1] for i in EP])
        x = EP_fitness[:, 1]
        y = EP_fitness[:, 0]
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Target distribution", figure, i_episode)
        plt.close()
        # Log rolling-mean fitness
        writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
        writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
        # Save a checkpoint tagged with episode, sequence and mean fitness
        model_save_path = os.path.join(
            model_save_dir,
            f"e{i_episode}_s{seq_index}_d{fitness_mean[0]:.4f}_b{fitness_mean[1]:.4f}",
        )
        ppo.save(model_save_path)
    print("Complete")
| 14,034 | 32.023529 | 96 | py |
MERL-LB | MERL-LB-main/mp_test_nn_sigma.py | import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
    """Feed-forward scorer: maps each machine's feature vector to one scalar.

    The last tensor dimension is pushed through the ``dim_list`` linear
    layers (ReLU between hidden layers, none after the head) and squeezed,
    so a B*n*dim input becomes a B*n score matrix.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        dims = list(zip(dim_list[:-1], dim_list[1:]))
        # total scalar parameter count (weights + biases); evolutionary code
        # uses this to size flat genomes
        self.param_num = sum(i * o + o for i, o in dims)
        self.fc = nn.ModuleList(nn.Linear(i, o) for i, o in dims)

    def forward(self, x):
        *hidden, head = self.fc
        for layer in hidden:
            x = F.relu(layer(x))
        return torch.squeeze(head(x), dim=-1)

    def update(self, weights):
        """Load a flat weight vector (genome) into the layers, in declaration order."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                n_weights = layer.in_features * layer.out_features
                layer.weight.data = flat[cursor : cursor + n_weights].reshape(
                    layer.out_features, layer.in_features
                )
                cursor += n_weights
                layer.bias.data = flat[cursor : cursor + layer.out_features]
                cursor += layer.out_features

    def predict(self, input, action_mask=None):
        """Return the greedy machine index; masked-out machines are excluded."""
        scores = self(input)
        if action_mask is not None:
            # push disallowed machines far below any feasible score
            scores[~action_mask] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Debug helper: print every parameter tensor as a flat numpy array."""
        with torch.no_grad():
            for param in self.parameters():
                print(param.numpy().flatten())
class Agent(nn.Module):
    """Wraps a job-placement ``Actor`` and formats raw env observations into its input."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        # Load a flat genome vector into the actor (see Actor.update).
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Build the per-machine feature tensor from ``obs`` and return the chosen machine index.

        ``obs`` is the env observation tuple: (job_res_req_rate, job_run_time,
        machines_all_occupancy_rate, machines_run_time, _, action_mask).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (adds a leading batch dim of 1)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # cross-machine statistics, later broadcast to every machine row
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # per-machine row: [job features | own occupancy | own run time | global mean/std]
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Evaluate the checkpointed agent on job sequence ``seq_index``.

    Saves occupancy / finish-time / job-count traces (both before and after
    running the environment to completion) plus line plots, and returns
    ``(std_fitness, runtime_fitness, env.job_num)``.
    """
    print("start test seq_index: ", seq_index)
    # checkpoint_path = "output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth"
    # checkpoint_path = "output/one_job/ga/reward_sum/run02_m15/final_population/g_9796_f_-310.773_-0.026/24_f_-308.432_-0.024.pth"
    agent = Agent()
    # state_dict = torch.load("24_f_-342.436_-0.029.pth")
    # agent.load_state_dict(state_dict)
    state_dict = torch.load(args.checkpoint_path)
    agent.job_actor.load_state_dict(state_dict)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # save test result
    # save not run to end data
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # print mean std and mean run time
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # save run to end data
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    for i in range(4):
        # one utilization plot per resource dimension
        # (assumes 4 resource types -- TODO confirm against env config)
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    # Evaluation driver: run the checkpointed agent over every job sequence
    # for each user-sigma setting (in parallel), then write per-sequence and
    # per-sigma summary CSVs.
    args = parse_args()
    args.method = "nsga"
    args.tag = "nsga_run05_g20000_12"
    args.actual = True
    # args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
    # args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
    # args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
    args.checkpoint_path = "output/train/nsga/run05/elite/g20000_0/0_-455.58486_-12.92719.pth"
    # user_sigam_list = np.linspace(0, 7.5 * 60 // 3, 10, dtype=np.int32)
    user_sigam_list = [0]
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    # `result` gets one summary row per sigma; `result2` one row per sequence.
    result = []
    result2 = []
    for user_sigma in user_sigam_list:
        print(f"Test user sigma {user_sigma}")
        save_dir = os.path.join(
            root_dir,
            f"user_sigma_{user_sigma}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # save args
        args.user_sigma = user_sigma
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # evaluate each job sequence in its own worker process
        mutil_process = []
        pool = Pool(cpu_count())
        for i in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, i, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # aggregate per-sequence fitness
        fitness_record = []
        job_num_list = []
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            job_num_list.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            result2.append((user_sigma, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        # FIX: `result` was never populated before, so the per-sigma summary
        # CSV written below was always empty. Columns match the DataFrame:
        # (sigma, balance mean, duration mean, balance std, duration std).
        result.append(
            (user_sigma, mean_fitness[0], mean_fitness[1], std_fitness[0], std_fitness[1])
        )
        print(job_num_list)
        # FIX: previously saved only the last sequence's job count
        # (np.array(job_num)) instead of the whole list.
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
        print(
            "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result,
        columns=[
            "user_sigma",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "user_sigma",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, f"{ args.method}_user_sigma_exp2.csv"))
| 10,899 | 33.169279 | 131 | py |
MERL-LB | MERL-LB-main/mp_train_nn_deepjs_no_mask_ppo.py | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """Feed-forward policy head scoring each machine for the pending job.

    Input is B*n*dim (one feature row per machine); the head Linear maps to 1
    and the trailing dim is squeezed, so the output is a B*n score matrix.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        fc = []
        # total scalar parameter count (weights + biases)
        self.param_num = 0
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def predict(self, input, action_mask=None, absolute=True, return_log_prob=False):
        """Pick an action: greedy argmax when ``absolute``, else sampled.

        Returns the action index (python int); when ``return_log_prob`` is
        True, returns ``(action, log_prob)`` under the softmax policy.
        """
        predict = self(input)
        # NOTE: action masking deliberately disabled in this "no mask" variant.
        # if action_mask is not None:
        #     predict[action_mask == False] += -1e8
        action_probs = torch.softmax(predict, dim=-1)
        action_dist = Categorical(action_probs)
        if absolute:
            # FIX: the greedy branch previously never defined the log-prob,
            # so absolute=True with return_log_prob=True raised a NameError.
            # Evaluate the greedy action under the same distribution instead.
            action = torch.argmax(predict, dim=1)
        else:
            action = action_dist.sample()
        action_log_prob = action_dist.log_prob(action).cpu().item()
        action = action.cpu().item()
        if return_log_prob:
            return action, action_log_prob
        return action
class Agent(nn.Module):
    """Wraps the job-placement ``Actor`` and adapts raw environment
    observations into the per-machine feature tensor it consumes."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def choose_action(self, obs, absolute=True, return_log_prob=False):
        """Choose a machine for the pending job.

        ``obs`` is the env observation tuple: (job_res_req_rate, job_run_time,
        machines_all_occupancy_rate, machines_run_time, _, action_mask).
        Returns the machine index, or ``(index, log_prob)`` when
        ``return_log_prob`` is True.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (adds a leading batch dim of 1)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # per-machine row: [job features | own occupancy | own run time | global mean/std]
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        # FIX: predict() only returns an (action, log_prob) pair when
        # return_log_prob is True; the previous unconditional
        # `action, log_prob = ...` unpack raised a TypeError whenever
        # return_log_prob was False (the default). Forward its result as-is.
        return self.job_actor.predict(job_input, action_mask, absolute, return_log_prob)
class JobShopDataset(Dataset):
    """Flat, step-indexed view over per-episode PPO rollout data.

    Each constructor argument is a list of episodes (each episode a list of
    per-step entries); episodes are concatenated so a DataLoader can sample
    individual steps.
    """

    def __init__(self, obs_data, action_data, action_prob_data, advantage_data) -> None:
        self.obs_data = [step for episode in obs_data for step in episode]
        self.action_data = [step for episode in action_data for step in episode]
        self.action_prob_data = [step for episode in action_prob_data for step in episode]
        self.advantage_data = [step for episode in advantage_data for step in episode]

    def __getitem__(self, index):
        """Return (state, action_mask, old_log_prob, action, advantage) for one step."""
        state, action_mask = self.obs_format(self.obs_data[index])
        return (
            state,
            action_mask,
            self.action_prob_data[index],
            self.action_data[index],
            self.advantage_data[index],
        )

    def obs_format(self, obs):
        """Assemble the B*n*dim per-machine feature tensor from one raw observation."""
        job_req, job_time, occupancy, run_time, _, mask = obs
        job_vec = torch.tensor(np.array([*job_req, job_time]), dtype=torch.float)
        occupancy = torch.tensor(np.array([occupancy]), dtype=torch.float)
        run_time = torch.tensor(np.array([run_time]), dtype=torch.float)
        # occupancy: B*n*t*r (batch, machines, time, resources); B is 1 here
        B, n, t, r = occupancy.shape
        # cross-machine statistics, broadcast to every machine row
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        job_vec = job_vec.reshape(B, 1, -1).repeat(1, n, 1)
        per_machine = occupancy.reshape(B, n, -1)
        run_time = run_time.reshape(B, n, -1)
        # per-machine row: [job features | own occupancy | own run time | global mean/std]
        state = torch.cat((job_vec, per_machine, run_time, summary), dim=-1)  # B*n*dim2
        action_mask = torch.tensor(np.array([mask]), dtype=torch.bool)
        return state, action_mask

    def __len__(self):
        return len(self.action_data)
class InputDrive:
    """PPO-style trainer: collects rollouts in parallel worker processes,
    computes normalized multi-objective advantages, and updates the actor.

    NOTE(review): ``train`` reads module-level ``writer``, ``args`` and
    ``model_save_dir`` — it only runs from this file's ``__main__`` block.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.seq_index = 0
        self.seq_num = args.job_seq_num
        self.agent = Agent()
        # exploration probability and its per-epoch decay step
        self.prob = 0.8
        self.prob_step = 2 / self.args.epoch

    def set_seed(self, seed):
        """Seed python, numpy and torch RNGs for reproducible rollouts."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)  # seed the CPU RNG
        torch.cuda.manual_seed(seed)  # seed the current GPU
        torch.cuda.manual_seed_all(seed)  # seed all GPUs

    def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
        """Roll out one full episode (sampling actions) in a worker process.

        Returns (trajectory, fitness, end_time) where each trajectory entry is
        [obs, action, reward, next_obs, done, log_prob] and fitness is the
        negated (duration, balance) objective pair.
        """
        # build environment
        env = DatacenterEnv(args)
        env.seq_index = seq_index
        env.reset()
        # build agent from the broadcast state dict
        agent = Agent()
        agent.load_state_dict(model_state_dict)
        # seed RNGs (one seed per worker)
        self.set_seed(seed)
        # roll out one trajectory with sampled (non-greedy) actions
        obs = env.reset()
        done = False
        trajectory = []
        agent.eval()
        with torch.no_grad():
            while not done:
                action, log_prob = agent.choose_action(obs, absolute=False, return_log_prob=True)
                next_obs, reward, done, _ = env.step(action)
                trajectory.append([obs, action, reward, next_obs, done, log_prob])
                obs = next_obs
        # collect fitness
        # balance objective: mean std of per-machine occupancy
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # duration objective: mean remaining finish time
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
        return trajectory, fitness, env.curr_time

    # discounted cumulative reward
    def get_discount_reward(self, trajectory, reward_index):
        """Normalize per-step rewards and fold the two objectives into one
        discounted signal. NOTE(review): appears unused — train() uses
        get_advantage() instead; also divides by std without an epsilon.
        """
        # gather rewards
        reward = []
        for item in trajectory:
            reward.append(item[reward_index])
        # standardize rewards per objective
        norm_reward_batch = (reward - np.mean(reward, axis=0)) / (np.std(reward, axis=0))
        # min-max normalization (disabled alternative)
        # norm_reward_batch = (reward - np.min(reward, axis=0)) / (
        #     np.max(reward, axis=0) - np.min(reward, axis=0)
        # )
        # equal weight for both objectives
        mean_reward = np.sum(norm_reward_batch, axis=-1)
        # mean_reward = norm_reward_batch[:, 0]
        # mean_reward = np.sum(reward, axis=-1)
        # discounted cumulative reward
        # NOTE(review): this is a one-step lookahead (uses mean_reward[index+1],
        # not discount_reward[index+1]) and leaves the last entry at 0 —
        # confirm whether a full discounted return was intended.
        trajectory_len = len(trajectory)
        discount_reward = np.zeros(trajectory_len)
        for index in reversed(range(trajectory_len - 1)):
            discount_reward[index] = mean_reward[index] + self.args.gamma * mean_reward[index + 1]
        return discount_reward

    # collect experience
    def get_experience(self, seq_index):
        """Collect ``args.experience_num`` rollouts in parallel processes.

        Returns (all_trajectory, all_fitness, end_time_list).
        """
        # collect rollouts in parallel worker processes
        pool = Pool(min(cpu_count(), self.args.experience_num))
        all_record = []
        for seed in range(self.args.experience_num):
            record = pool.apply_async(
                self.get_one_experience,
                args=(
                    self.args,
                    seed,
                    self.agent.state_dict(),
                    seq_index,
                    self.prob,
                ),
            )
            all_record.append(record)
        pool.close()
        pool.join()
        all_trajectory = []
        all_fitness = []
        end_time_list = []
        for record in all_record:
            trajectory, fitness, end_time = record.get()
            all_trajectory.append(trajectory)
            all_fitness.append(fitness)
            end_time_list.append(end_time)
        return all_trajectory, all_fitness, end_time_list

    # compute baseline
    def get_advantage(self, all_trajectory):
        """Compute per-step advantages: normalized discounted rewards minus a
        time-step baseline averaged over all rollouts (padded to equal length).
        """
        # cumulative rewards
        all_reward = []
        all_reward_flat = []
        max_reward_len = 0
        for trajectory in all_trajectory:
            max_reward_len = max(max_reward_len, len(trajectory))
            reward = []
            for item in trajectory:
                reward.append(item[2])
                all_reward_flat.append(item[2])
            all_reward.append(reward)
        all_reward_flat = np.array(all_reward_flat)
        reward_mean = np.mean(all_reward_flat, axis=0)
        reward_std = np.std(all_reward_flat, axis=0)
        all_discount_reward = []
        for reward in all_reward:
            norm_reward = (reward - reward_mean) / (reward_std + 1e-7)
            # equal weighting of the two objectives (alternatives disabled)
            mean_reward = np.mean(norm_reward, axis=-1)
            # mean_reward = np.sum(norm_reward * [[0.2, 0.8]], axis=-1)
            # mean_reward = np.sum(norm_reward * [[0.8, 0.2]], axis=-1)
            # mean_reward = np.sum(norm_reward * [[1, 0]], axis=-1)
            # mean_reward = np.sum(norm_reward * [[0, 1]], axis=-1)
            # mean_reward = np.sum(np.array(reward) * np.array([[1 / 600, 1 / 50]]), axis=-1)
            # NOTE(review): same one-step lookahead pattern as
            # get_discount_reward — confirm intent.
            reward_len = len(reward)
            discount_reward = np.zeros(reward_len)
            for index in reversed(range(reward_len - 1)):
                discount_reward[index] = (
                    mean_reward[index] + self.args.gamma * mean_reward[index + 1]
                )
            all_discount_reward.append(discount_reward)
        # padding (zero-extend shorter rollouts so the baseline is per-step)
        all_padded_discount_reward = [
            np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
            for discount_reward in all_discount_reward
        ]
        # baseline: mean discounted reward at each time step
        baseline = np.mean(all_padded_discount_reward, axis=0)
        # advantage = discounted reward - baseline
        all_advantage = [
            discount_reward - baseline[: len(discount_reward)]
            for discount_reward in all_discount_reward
        ]
        return all_advantage

    def train(self):
        """Main training loop: per (epoch, sequence), collect experience, log
        metrics, checkpoint, and run one clipped-PPO update pass."""
        optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
        # NOTE(review): best_fitness is never used below
        best_fitness = [np.array([np.inf, np.inf])] * self.args.job_seq_num
        i_episode = 0
        EP = []  # archive of non-dominated (episode, fitness) pairs
        fitness_list = []
        for epoch in range(self.args.epoch):
            for seq_index in range(self.args.job_seq_num):
                # collect experience
                all_trajectory, all_fitness, end_time_list = self.get_experience(seq_index)
                all_obs = []
                all_action = []
                all_action_prob = []
                for trajectory in all_trajectory:
                    _obs = []
                    _action = []
                    _action_prob = []
                    for item in trajectory:
                        _obs.append(item[0])
                        _action.append(item[1])
                        _action_prob.append(item[-1])
                    all_obs.append(_obs)
                    all_action.append(_action)
                    all_action_prob.append(_action_prob)
                # aggregate results (negate back to positive objectives)
                mean_fitness = -np.mean(all_fitness, axis=0)
                print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
                print("mean_fitness: ", mean_fitness)
                # writer.add_scalar(
                #     "current/ws_score",
                #     mean_fitness[0] / 600 + mean_fitness[1] / 50,
                #     i_episode,
                # )
                fitness_list.append(mean_fitness)
                writer.add_scalar("Train/End time max", max(end_time_list), i_episode)
                writer.add_scalar("Train/End time min", min(end_time_list), i_episode)
                writer.add_scalar("Train/End time mean", np.mean(end_time_list), i_episode)
                # log per-episode fitness
                writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
                writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
                # log rolling-mean fitness
                fitness_mean = np.mean(fitness_list[-args.job_seq_num :], axis=0)
                writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
                writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
                # maintain the best non-dominated front
                d_n = 0
                remove_list = []
                for item in EP:
                    _, item_fitness = item
                    if np.all(fitness_mean < item_fitness):
                        remove_list.append(item)
                    if np.all(fitness_mean > item_fitness):
                        d_n += 1
                    if d_n != 0:
                        break
                if d_n == 0:
                    for item in remove_list:
                        EP.remove(item)
                    EP.append((i_episode, fitness_mean))
                # plot the front against fixed baselines
                EP_fitness = np.array([i[1] for i in EP])
                x = EP_fitness[:, 1]
                y = EP_fitness[:, 0]
                figure = plt.figure(figsize=(8, 8), dpi=100)
                plt.scatter(x, y, label="train")
                plt.scatter(16.2658, 534.9209, label="lc")
                # plt.scatter(x, y, lable="rr")
                plt.scatter(66.8868, 349.5121, label="lg")
                plt.scatter(17.0905, 351.4006, label="wsga")
                plt.xlim((0, 250))
                plt.ylim((200, 600))
                plt.xlabel("balance")
                plt.ylabel("duration")
                plt.title("Target distribution")
                plt.legend()
                writer.add_figure("Target distribution", figure, i_episode)
                plt.close()
                # save model checkpoint
                model_name = (
                    f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
                )
                model_save_path = os.path.join(model_save_dir, model_name)
                torch.save(self.agent.job_actor.state_dict(), model_save_path)
                # compute advantages
                all_advantage = self.get_advantage(all_trajectory)
                # train the model
                # build the dataloader over flattened rollout steps
                dataset = JobShopDataset(
                    obs_data=all_obs,
                    action_data=all_action,
                    action_prob_data=all_action_prob,
                    advantage_data=all_advantage,
                )
                dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
                # clear gradients once; they accumulate across minibatches
                optimizer.zero_grad()
                self.agent.train()
                # gradient accumulation over all minibatches
                for batch in dataloader:
                    state, action_mask, old_action_probs, action, advantage = batch
                    # new policy forward pass
                    action_predict = self.agent.job_actor(state)
                    # in-place masking here would break gradient flow
                    # TODO: how to apply the action mask here?
                    # action_predict[action_mask == False] += -1e9
                    action_predict = torch.squeeze(action_predict, dim=1)
                    action_probs = torch.softmax(action_predict, dim=-1)
                    action_dist = Categorical(action_probs)
                    action_entropy = action_dist.entropy()
                    action_logprobs = action_dist.log_prob(action)
                    """
                    The objective is to minimize the loss.
                    advantage > 0 means the action was good, so increase its
                    probability, i.e. decrease -action_logprobs * advantage.
                    """
                    # plain policy-gradient loss (disabled)
                    # loss = -action_logprobs * advantage
                    ratios = torch.exp(action_logprobs - old_action_probs)
                    surr1 = ratios * advantage
                    surr2 = torch.clamp(ratios, 1 - 0.2, 1 + 0.2) * advantage
                    loss = -torch.min(surr1, surr2) - 0.01 * action_entropy
                    # one backward pass per minibatch (grads accumulate)
                    loss.mean().backward()
                # apply the accumulated gradients
                optimizer.step()
                i_episode += 1
            # anneal exploration probability
            # NOTE(review): max(prob - step, prob) is always prob, so this
            # never decays — likely intended max(self.prob - self.prob_step, 0)
            self.prob = max(self.prob - self.prob_step, self.prob)
if __name__ == "__main__":
    # Entry point: set up output dirs, dump args, then run InputDrive training.
    args = parse_args()
    args.method = "ns_deepjs"
    args.tag = "run03_no_mask_ppo"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    model_save_dir = os.path.join(save_dir, "models")
    os.makedirs(model_save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    # writer / model_save_dir are read as globals by InputDrive.train()
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    inputdrive = InputDrive(args)
    inputdrive.train()
| 19,410 | 36.185824 | 100 | py |
MERL-LB | MERL-LB-main/mp_train_nn_moead.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.moead import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP scoring head: maps per-machine feature vectors to scalar scores.

    Hidden layers use ReLU; the final Linear maps to 1 and the trailing
    dimension is squeezed, turning a B*n*dim input into B*n scores.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        self.param_num = 0
        modules = []
        for in_dim, out_dim in zip(dim_list, dim_list[1:]):
            modules.append(nn.Linear(in_dim, out_dim))
            # running count of scalar parameters (weights + biases); the GA
            # code sizes flat genomes from this
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(modules)

    def forward(self, x):
        for layer in self.fc[:-1]:
            x = F.relu(layer(x))
        out = self.fc[-1](x)
        return torch.squeeze(out, dim=-1)

    def update(self, weights):
        """Copy a flat genome vector into the layer weights/biases in order."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            offset = 0
            for layer in self.fc:
                w_count = layer.in_features * layer.out_features
                layer.weight.data = flat[offset : offset + w_count].reshape(
                    layer.out_features, layer.in_features
                )
                offset += w_count
                layer.bias.data = flat[offset : offset + layer.out_features]
                offset += layer.out_features

    def predict(self, input, action_mask=None):
        """Return the greedy machine index, excluding masked-out machines."""
        scores = self(input)
        if action_mask is not None:
            # drop disallowed machines far below any feasible score
            scores[~action_mask] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Debug helper: print each parameter tensor as a flat numpy array."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Wraps a job-placement ``Actor`` and formats raw env observations into its input."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        # Load a flat genome vector into the actor (see Actor.update).
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Build the per-machine feature tensor from ``obs`` and return the chosen machine index.

        ``obs`` is the env observation tuple: (job_res_req_rate, job_run_time,
        machines_all_occupancy_rate, machines_run_time, _, action_mask).
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # to tensor (adds a leading batch dim of 1)
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # cross-machine statistics, later broadcast to every machine row
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # per-machine row: [job features | own occupancy | own run time | global mean/std]
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        return action

    def show(self):
        # Debug helper: dump the actor's parameters.
        self.job_actor.show()
class Individual:
    """One GA individual: a flat genome plus the Agent it decodes into,
    with cached fitness bookkeeping."""

    def __init__(self, job_genes=None):
        self.agent = Agent()
        # genome length is dictated by the actor architecture
        self.param_num = self.agent.job_actor.param_num
        self.job_genes = job_genes
        self.train_fitness = None
        self.eval_fitness = None
        self.std_fitness = np.inf
        self.steps = 0

    def init(self):
        """Draw a fresh genome uniformly from [-1, 1)."""
        self.job_genes = np.random.uniform(low=-1, high=1, size=self.param_num)

    def update(self):
        """Copy the genome into the agent's network weights."""
        self.agent.update(self.job_genes.copy())
def run_individual_in_env(id1, id2, args, genes, seq_index):
    """Run one genome through a full episode and score it.

    ``id1``/``id2`` are caller-supplied identifiers passed straight through so
    async pool results can be matched back. Returns (id1, id2, fitness) where
    fitness = [runtime_fitness, std_fitness] (both minimized).
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    env.reset()
    individual = Individual(genes)
    individual.update()
    obs = env.reset()
    done = False
    action_list = []
    reward_list = []
    while not done:
        action = individual.agent.choose_action(obs)
        obs, reward, done, _ = env.step(action)
        action_list.append(action)
        reward_list.append(reward)
    # balance objective: mean std of per-machine occupancy
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    # duration objective: mean remaining finish time
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    runtime_fitness = np.mean(machines_finish_time_record)  # mean keeps the value from blowing up
    fitness = np.array([runtime_fitness, std_fitness])
    return id1, id2, fitness
class MOEAD:
def __init__(self, args) -> None:
self.args = args
self.EP: List[Individual] = [] # 最优曲面
self.EP_N_ID = [] # 最优曲面
self.N = args.moead_n # 权重划分数量
self.M = args.moead_m # 目标个数
self.T = args.moead_t # 邻居个数
self.B = [] # 邻居下标 根据权重相似度计算
self.Z = [0, 0] # 理想点 最小值就是[0,0]所以理想点为0
self.population: List[Individual] = [] # 种群
self.generation = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
# 初始化
self.set_weight()
self.get_neighbor()
self.generate_ancestor()
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def set_weight(self):
# 划分权重
self.W = np.zeros((self.N, self.M))
W = np.linspace(0, 1, self.N)
self.W[:, 0] = W
self.W[:, 1] = 1 - W
def get_neighbor(self):
# 计算权重的T个邻居
for i in range(self.N):
bi = self.W[i]
distance = np.sum((self.W - bi) ** 2, axis=1)
neighbor = np.argsort(distance)
self.B.append(neighbor[1 : self.T + 1])
def generate_ancestor(self):
# 初代种群
for _ in range(self.N):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
if np.random.random() < self.args.mutate_rate * 2:
mutation_array = np.random.random(c_genes.shape) < self.args.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.args.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# 产生子代
def generate_children(self, p1: Individual, p2: Individual):
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
return c1, c2
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
# 进化
def evolve(self):
all_evaluate_list: list[list[individual]] = []
for pi in range(self.N):
Bi = self.B[pi] # 邻居集合
# 随机选择邻居进行交叉变异
k = random.randint(0, len(Bi) - 1)
l = random.randint(0, len(Bi) - 1)
ki = Bi[k]
li = Bi[l]
xp = self.population[pi]
xk = self.population[ki]
xl = self.population[li]
c1, c2 = self.generate_children(xp, xk)
c3, c4 = self.generate_children(xk, xl)
evaluate_list = [xp, xk, xl, c1, c2, c3, c4]
all_evaluate_list.append(evaluate_list)
# 评估这些模型
pool = Pool(cpu_count())
mutil_process = []
for id1 in range(self.N):
for id2, individual in enumerate(all_evaluate_list[id1]):
# 跳过已经评估过的个体 加速训练
if individual.train_fitness is not None:
continue
one_process = pool.apply_async(
run_individual_in_env,
args=(
id1,
id2,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
# 收集进程结果
for one_process in mutil_process:
id1, id2, fitness = one_process.get()
all_evaluate_list[id1][id2].train_fitness = fitness
# 根据结果进行迭代
elite_change_num = 0
for pi in range(self.N):
evaluate_list = all_evaluate_list[pi]
fitness_list = []
for individual in evaluate_list:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
tchebycheff_list = fitness_list * self.W[pi]
# 取最大值作为比较
tchebycheff_list = np.max(tchebycheff_list, axis=-1).reshape(-1)
best_i1 = np.argmin(tchebycheff_list[:3])
best_i2 = np.argmin(tchebycheff_list)
best_i = best_i2
# 以一定概率进行详细比较 避免陷入局部最优
mi = random.randint(0, self.M - 1)
if random.random() < 0.5:
if (
evaluate_list[best_i1].train_fitness[mi]
< evaluate_list[best_i2].train_fitness[mi]
):
best_i = best_i1
best_individual = evaluate_list[best_i]
# # 没有找到更好的解则跳过更新
# if best_i == 0:
# continue
# self.population[pi] = best_individual
# 更新邻居
for nj in self.B[pi]:
nei_individual = self.population[nj]
nei_tchebycheff = np.max(np.array(nei_individual.train_fitness) * self.W[pi])
cur_tchebycheff = np.max(np.array(best_individual.train_fitness) * self.W[pi])
if cur_tchebycheff < nei_tchebycheff:
self.population[nj] = best_individual
elite_change_num += 1
# 更新EP
if abs(tchebycheff_list[best_i2] - tchebycheff_list[0]) > 1:
remove_list = []
n = 0
for individual in self.EP:
if np.all(best_individual.train_fitness < individual.train_fitness):
remove_list.append(individual)
elif np.all(best_individual.train_fitness > individual.train_fitness):
n += 1
if n != 0:
break
if n == 0:
for individual in remove_list:
self.EP.remove(individual)
self.EP.append(best_individual)
# 保存前沿
self.save_population(self.EP, "elite")
self.save_population(self.population, "population")
self.generation += 1
self.seq_index = (self.seq_index + 1) % self.seq_num
elite_fitness_list = []
for individual in self.EP:
elite_fitness_list.append(individual.train_fitness)
population_fitness_list = []
for individual in self.population:
population_fitness_list.append(individual.train_fitness)
return elite_change_num, elite_fitness_list, population_fitness_list
if __name__ == "__main__":
    # Entry point: configure the MOEA/D run, persist the arguments, then
    # evolve forever (progress is checkpointed by MOEAD.save_population and
    # monitored through TensorBoard).
    args = parse_args()
    args.method = "moead"
    args.job_seq_num = 1
    args.tag = "run02"
    # Output layout: <save_path>/<method>/<tag>/{args.txt, log/, elite/, population/}
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # Persist the full run configuration for later inspection.
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args.__dict__.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))

    def log_target_distribution(tag, fitness_array, step):
        """Scatter fitness points (balance on x, duration on y) against fixed
        baseline schedulers (lc / lg / wsga) and log the figure to TensorBoard."""
        y = fitness_array[:, 0]  # duration objective
        x = fitness_array[:, 1]  # balance objective
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title(tag)
        plt.legend()
        writer.add_figure(tag, figure, step)
        plt.close()

    moead = MOEAD(args)
    moead.setup_seed()
    while True:
        print("=" * 100)
        print(f"evolve generation {moead.generation}")
        elite_change_num, elite_fitness_list, population_fitness_list = moead.evolve()
        # Log to TensorBoard (note: evolve() already advanced the generation).
        writer.add_scalar("Elite change num", elite_change_num, moead.generation)
        elite_fitness_list = np.array(elite_fitness_list)
        log_target_distribution("Elite Target Distribution", elite_fitness_list, moead.generation)
        log_target_distribution(
            "Population Target Distribution",
            np.array(population_fitness_list),
            moead.generation,
        )
        # Track the bounding box of the elite front.
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Balance fitness max", max_elite_fitness[1], moead.generation)
        writer.add_scalar("Duration fitness max", max_elite_fitness[0], moead.generation)
        writer.add_scalar("Balance fitness min", min_elite_fitness[1], moead.generation)
        writer.add_scalar("Duration fitness min", min_elite_fitness[0], moead.generation)
| 16,252 | 33.877682 | 96 | py |
MERL-LB | MERL-LB-main/mp_train_nn_deepjs.py | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.distributions import Categorical
from torch.utils.data import Dataset, DataLoader
from config.deepjs import *
from envs.datacenter_env.env import DatacenterEnv
from multiprocessing import Pool, cpu_count
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP that assigns a scalar score to each machine's feature vector.

    ``dim_list`` gives the layer widths; ``param_num`` counts every weight and
    bias so the network size can be reported externally.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0
        for in_dim, out_dim in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(layers)

    def forward(self, x):
        """ReLU between hidden layers; the trailing size-1 dim is squeezed."""
        for layer in self.fc[:-1]:
            x = F.relu(layer(x))
        return torch.squeeze(self.fc[-1](x), dim=-1)

    def predict(self, input, action_mask=None, absolute=True):
        """Score ``input`` (B*n*feat) and pick a machine index.

        Masked-out machines get a -1e8 penalty first. ``absolute`` selects
        greedy argmax (True) or sampling from the softmax (False).
        """
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        if absolute:
            return torch.argmax(scores, dim=1).cpu().item()
        action_probs = torch.softmax(scores, dim=-1)
        action_dist = Categorical(action_probs)
        return action_dist.sample().cpu().item()
class Agent(nn.Module):
    """Wraps the job Actor and turns raw env observations into its input."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def choose_action(self, obs, absolute=True):
        """Build the per-machine feature tensor from ``obs`` and pick a machine.

        ``absolute`` selects greedy argmax (True) or probabilistic sampling.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Tensor conversion; a batch dimension of 1 is added where needed.
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # occupancy: B*n*t*r — batch, machines, horizon, resources.
        B, n, t, r = occupancy.shape
        # Cluster-wide summary: mean/std over machines, tiled per machine.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Replicate the job features once per machine.
        job_feat = job_state.reshape(B, 1, -1).repeat(1, n, 1)
        per_machine = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        return self.job_actor.predict(per_machine, mask, absolute)
class JobShopDataset(Dataset):
    """Flattens per-trajectory (obs, action, advantage) lists into one dataset.

    Each item is ``(state, action_mask, action, advantage)`` where ``state``
    mirrors the feature construction used by ``Agent.choose_action``.
    """

    def __init__(self, obs_data, action_data, advantage_data) -> None:
        # Concatenate the per-trajectory lists into flat sample lists.
        self.obs_data = [obs for traj in obs_data for obs in traj]
        self.action_data = [act for traj in action_data for act in traj]
        self.advantage_data = [adv for traj in advantage_data for adv in traj]

    def __getitem__(self, index):
        state, action_mask = self.obs_format(self.obs_data[index])
        return state, action_mask, self.action_data[index], self.advantage_data[index]

    def obs_format(self, obs):
        """Convert one raw observation tuple into ``(state, action_mask)`` tensors."""
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        # occupancy: B*n*t*r — batch, machines, horizon, resources.
        B, n, t, r = occupancy.shape
        # Cluster-wide summary: mean/std over machines, tiled per machine.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Replicate the job features once per machine.
        job_feat = job_state.reshape(B, 1, -1).repeat(1, n, 1)
        state = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        return state, action_mask

    def __len__(self):
        return len(self.action_data)
class InputDrive:
    """REINFORCE-with-baseline trainer for the job-scheduling agent.

    Episodes are collected in parallel worker processes; advantages are the
    discounted returns minus a per-timestep mean baseline over the batch.
    """

    def __init__(self, args) -> None:
        self.args = args
        self.seq_index = 0
        self.seq_num = args.job_seq_num
        self.agent = Agent()
        # Exploration probability, decayed linearly over training (see train()).
        self.prob = 0.8
        self.prob_step = 2 / self.args.epoch

    def set_seed(self, seed):
        """Seed python / numpy / torch RNGs (CPU, current GPU and all GPUs)."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    def get_one_experience(self, args, seed, model_state_dict, seq_index, prob=0):
        """Roll out one full episode with a copy of the agent (worker process).

        ``prob`` is accepted for interface compatibility but currently unused.
        Returns ``(trajectory, fitness)`` where trajectory items are
        ``[obs, action, reward, next_obs, done]`` and fitness is
        ``[-mean runtime, -mean balance std]`` (higher is better).
        """
        env = DatacenterEnv(args)
        env.seq_index = seq_index
        env.reset()
        agent = Agent()
        agent.load_state_dict(model_state_dict)
        self.set_seed(seed)
        # Collect the trajectory with stochastic (sampled) actions.
        obs = env.reset()
        done = False
        trajectory = []
        agent.eval()
        with torch.no_grad():
            while not done:
                action = agent.choose_action(obs, absolute=False)
                next_obs, reward, done, _ = env.step(action)
                trajectory.append([obs, action, reward, next_obs, done])
                obs = next_obs
        # Balance objective: mean std of absolute resource usage across machines.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        # Duration objective: mean machine finish time.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
        return trajectory, fitness

    def get_discount_reward(self, trajectory, reward_index):
        """Return the discounted return G[t] for every step of ``trajectory``.

        The 2-objective reward vectors (found at ``reward_index`` in each
        trajectory item) are clipped to [-500, 0] x [-200, 0] and rescaled so
        both objectives contribute equally, then accumulated with
        ``G[t] = r[t] + gamma * G[t+1]``.

        Fixed: the recurrence previously added the *next raw reward* instead
        of the next discounted return (and dropped the last step), so the
        result was not a discounted cumulative sum.
        """
        reward = []
        for item in trajectory:
            reward.append(item[reward_index])
        # Equal weighting of the two objectives after clipping / rescaling.
        mean_reward = np.sum(
            np.clip(reward, a_min=[-500, -200], a_max=[0, 0]) / [-500, -200], axis=-1
        )
        trajectory_len = len(trajectory)
        discount_reward = np.zeros(trajectory_len)
        running = 0.0
        for index in reversed(range(trajectory_len)):
            running = mean_reward[index] + self.args.gamma * running
            discount_reward[index] = running
        return discount_reward

    def get_experience(self, seq_index):
        """Collect ``experience_num`` episodes in parallel worker processes.

        Returns ``(all_trajectory, all_fitness)``.
        """
        pool = Pool(min(cpu_count(), self.args.experience_num))
        all_record = []
        for seed in range(self.args.experience_num):
            record = pool.apply_async(
                self.get_one_experience,
                args=(
                    self.args,
                    seed,
                    self.agent.state_dict(),
                    seq_index,
                    self.prob,
                ),
            )
            all_record.append(record)
        pool.close()
        pool.join()
        all_trajectory = []
        all_fitness = []
        for record in all_record:
            trajectory, fitness = record.get()
            all_trajectory.append(trajectory)
            all_fitness.append(fitness)
        return all_trajectory, all_fitness

    def get_advantage(self, all_trajectory):
        """Compute per-step advantages for a batch of trajectories.

        Rewards are clipped to [-500, 0] x [-200, 0], rescaled to [0, 1] per
        objective and summed; the discounted returns are baselined by the
        per-timestep batch mean (zero-padded to the longest trajectory).

        Fixed: the discount recurrence previously added the next raw reward
        instead of the next discounted return.
        """
        all_reward = []
        max_reward_len = 0
        for trajectory in all_trajectory:
            max_reward_len = max(max_reward_len, len(trajectory))
            reward = []
            for item in trajectory:
                reward.append(item[2])
            all_reward.append(reward)
        all_discount_reward = []
        for reward in all_reward:
            # Clip and rescale each objective to [0, 1], then weight equally.
            mean_reward = np.sum(
                (np.clip(reward, a_min=[-500, -200], a_max=[0, 0]) - [-500, -200]) / [500, 200],
                axis=-1,
            )
            reward_len = len(reward)
            discount_reward = np.zeros(reward_len)
            running = 0.0
            for index in reversed(range(reward_len)):
                running = mean_reward[index] + self.args.gamma * running
                discount_reward[index] = running
            all_discount_reward.append(discount_reward)
        # Zero-pad so the baseline can be averaged per timestep.
        all_padded_discount_reward = [
            np.concatenate([discount_reward, np.zeros(max_reward_len - len(discount_reward))])
            for discount_reward in all_discount_reward
        ]
        baseline = np.mean(all_padded_discount_reward, axis=0)
        all_advantage = [
            discount_reward - baseline[: len(discount_reward)]
            for discount_reward in all_discount_reward
        ]
        return all_advantage

    def train(self):
        """Main training loop.

        NOTE(review): relies on the launching script defining module-level
        ``writer`` (SummaryWriter) and ``model_save_dir`` before calling this.
        """
        optimizer = optim.AdamW(self.agent.parameters(), lr=self.args.lr)
        i_episode = 0
        EP = []  # non-dominated front of (episode, mean fitness) pairs
        fitness_list = []
        for epoch in range(self.args.epoch):
            for seq_index in range(self.args.job_seq_num):
                # Collect a batch of episodes in parallel.
                all_trajectory, all_fitness = self.get_experience(seq_index)
                all_obs = []
                all_action = []
                for trajectory in all_trajectory:
                    _obs = []
                    _action = []
                    for item in trajectory:
                        _obs.append(item[0])
                        _action.append(item[1])
                    all_obs.append(_obs)
                    all_action.append(_action)
                # Aggregate results (negate back to positive costs).
                mean_fitness = -np.mean(all_fitness, axis=0)
                print(f"train epoch {epoch} seq_index {seq_index} i_episode {i_episode}")
                print("mean_fitness: ", mean_fitness)
                fitness_list.append(mean_fitness)
                writer.add_scalar("current/duration_score", mean_fitness[0], i_episode)
                writer.add_scalar("current/balance_score", mean_fitness[1], i_episode)
                # Moving mean over the last full sweep of job sequences
                # (fixed: read job_seq_num from self.args, not the global).
                fitness_mean = np.mean(fitness_list[-self.args.job_seq_num :], axis=0)
                writer.add_scalar("mean/duration_score", fitness_mean[0], i_episode)
                writer.add_scalar("mean/balance_score", fitness_mean[1], i_episode)
                # Maintain the non-dominated front of mean fitnesses.
                d_n = 0
                remove_list = []
                for item in EP:
                    _, item_fitness = item
                    if np.all(fitness_mean < item_fitness):
                        remove_list.append(item)
                    if np.all(fitness_mean > item_fitness):
                        d_n += 1
                    if d_n != 0:
                        break
                if d_n == 0:
                    for item in remove_list:
                        EP.remove(item)
                    EP.append((i_episode, fitness_mean))
                # Plot the current front against fixed baseline schedulers.
                EP_fitness = np.array([i[1] for i in EP])
                x = EP_fitness[:, 1]
                y = EP_fitness[:, 0]
                figure = plt.figure(figsize=(8, 8), dpi=100)
                plt.scatter(x, y, label="train")
                plt.scatter(16.2658, 534.9209, label="lc")
                plt.scatter(66.8868, 349.5121, label="lg")
                plt.scatter(17.0905, 351.4006, label="wsga")
                plt.xlim((0, 250))
                plt.ylim((200, 600))
                plt.xlabel("balance")
                plt.ylabel("duration")
                plt.title("Target distribution")
                plt.legend()
                writer.add_figure("Target distribution", figure, i_episode)
                plt.close()
                # Checkpoint the actor weights.
                model_name = (
                    f"e{i_episode}_s{seq_index}_d{mean_fitness[0]:.4f}_b{mean_fitness[1]:.4f}"
                )
                model_save_path = os.path.join(model_save_dir, model_name)
                torch.save(self.agent.job_actor.state_dict(), model_save_path)
                # Policy-gradient update on the collected batch.
                all_advantage = self.get_advantage(all_trajectory)
                dataset = JobShopDataset(
                    obs_data=all_obs,
                    action_data=all_action,
                    advantage_data=all_advantage,
                )
                dataloader = DataLoader(dataset, batch_size=512, shuffle=False, num_workers=10)
                optimizer.zero_grad()
                self.agent.train()
                # Accumulate gradients over all minibatches, then step once.
                for batch in dataloader:
                    state, action_mask, action, advantage = batch
                    action_predict = self.agent.job_actor(state)
                    # Masked machines get a huge negative score; a direct
                    # assignment would break gradient flow.
                    action_predict[action_mask == False] += -1e9
                    action_predict = torch.squeeze(action_predict, dim=1)
                    action_probs = torch.softmax(action_predict, dim=-1)
                    action_dist = Categorical(action_probs)
                    action_logprobs = action_dist.log_prob(action)
                    # REINFORCE loss: positive advantage -> raise the action's
                    # log-probability (minimise -logprob * advantage).
                    loss = -action_logprobs * advantage
                    loss.mean().backward()
                optimizer.step()
                i_episode += 1
            # Decay the exploration probability once per epoch
            # (fixed: ``max(prob - step, prob)`` was a no-op).
            self.prob = max(self.prob - self.prob_step, 0.0)
if __name__ == "__main__":
    # Entry point: configure the run, persist the arguments, then launch
    # REINFORCE training. InputDrive.train reads the module-level ``writer``
    # and ``model_save_dir`` defined below.
    args = parse_args()
    args.method = "ws_deepjs"
    args.tag = "run01"
    # Output layout: <save_path>/<method>/<tag>/{args.txt, models/, log/}
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    model_save_dir = os.path.join(save_dir, "models")
    os.makedirs(model_save_dir, exist_ok=True)
    # save args
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    # TensorBoard writer consumed by InputDrive.train via module globals.
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    inputdrive = InputDrive(args)
    inputdrive.train()
| 17,996 | 34.994 | 98 | py |
MERL-LB | MERL-LB-main/mp_test_nn.py | import os
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
class Actor(nn.Module):
    """MLP scoring head whose parameters can be loaded from one flat vector.

    ``dim_list`` gives the layer widths; ``param_num`` counts every weight
    and bias so a genome of that length maps one-to-one onto the network.
    """

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        layers = []
        self.param_num = 0
        for in_dim, out_dim in zip(dim_list[:-1], dim_list[1:]):
            layers.append(nn.Linear(in_dim, out_dim))
            self.param_num += in_dim * out_dim + out_dim
        self.fc = nn.ModuleList(layers)

    def forward(self, x):
        """ReLU between hidden layers; the trailing size-1 dim is squeezed."""
        for layer in self.fc[:-1]:
            x = F.relu(layer(x))
        return torch.squeeze(self.fc[-1](x), dim=-1)

    def update(self, weights):
        """Load a flat weight vector (genome) into the linear layers in order."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            cursor = 0
            for layer in self.fc:
                w_count = layer.in_features * layer.out_features
                layer.weight.data = flat[cursor : cursor + w_count].reshape(
                    layer.out_features, layer.in_features
                )
                cursor += w_count
                layer.bias.data = flat[cursor : cursor + layer.out_features]
                cursor += layer.out_features

    def predict(self, input, action_mask=None):
        """Score ``input`` (B*n*feat) and return the argmax machine index.

        Masked-out machines receive a -1e8 penalty before the argmax.
        """
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Debug helper: print every parameter tensor as a flat numpy array."""
        with torch.no_grad():
            for p in self.parameters():
                print(p.numpy().flatten())
class Agent(nn.Module):
    """Bundles the job Actor with observation preprocessing for evaluation."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Forward a flat weight vector (genome) to the job actor."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Turn one raw env observation into per-machine features, pick greedily."""
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Tensor conversion; a batch dimension of 1 is added where needed.
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # occupancy: B*n*t*r — batch, machines, horizon, resources.
        B, n, t, r = occupancy.shape
        # Cluster-wide summary: mean/std over machines, tiled per machine.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Replicate the job features once per machine.
        job_feat = job_state.reshape(B, 1, -1).repeat(1, n, 1)
        per_machine = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        return self.job_actor.predict(per_machine, mask)

    def show(self):
        """Print the actor's parameters (debug aid)."""
        self.job_actor.show()
def test_one_path(
    args,
    seq_index,
    data_save_path,
    fig_save_path,
    checkpoint_path="output/train/nsga/run02/elite/g3382_0/15_-349.95341_-19.68042.pth",
):
    """Evaluate one trained actor on job sequence ``seq_index``.

    Rolls the policy out greedily, saves occupancy / finish-time / job-count
    records (before and after draining the remaining jobs) and utilization
    plots under ``data_save_path`` / ``fig_save_path``.

    Generalised: ``checkpoint_path`` used to be hard-coded inside the body;
    it is now a keyword parameter defaulting to the old value, so existing
    callers are unaffected.

    Returns ``(std_fitness, runtime_fitness, env.job_num)``.
    """
    print("start test seq_index: ", seq_index)
    agent = Agent()
    state_dict = torch.load(checkpoint_path)
    agent.load_state_dict(state_dict)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # Greedy rollout until the job sequence is exhausted.
    obs = env.reset()
    for _ in count():
        # select and perform an action
        action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # ---- save records taken before draining the remaining jobs ----
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # Balance: mean std (across machines) of absolute resource usage;
    # duration: mean of recorded machine finish times.
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # ---- let the env run to completion and save the final records ----
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    machines_job_num_record = np.array(env.machines_job_num_record)
    np.save(
        os.path.join(data_save_path, f"end_job_num_{seq_index}.npy"),
        machines_job_num_record,
    )
    # One utilization plot per resource dimension.
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    return std_fitness, runtime_fitness, env.job_num
if __name__ == "__main__":
    args = parse_args()
    # NOTE(review): save_path points at a checkpoint file from a training
    # run; all test artefacts are created in directories beneath it.
    args.save_path = "/root/workspace/project/version3/output/train/ppo/run_0/model/e10000_s0_d408.3441_b16.2197_actor.pth"
    save_dir = args.save_path
    model_save_path = os.path.join(save_dir, "models")
    fig_save_path = os.path.join(save_dir, "fig")
    data_save_path = os.path.join(save_dir, "data")
    os.makedirs(data_save_path, exist_ok=True)
    os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
    os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
    os.makedirs(model_save_path, exist_ok=True)
    os.makedirs(fig_save_path, exist_ok=True)
    # Evaluate every job sequence in parallel worker processes.
    mutil_process = []
    pool = Pool(cpu_count())
    for i in range(args.job_seq_num):
        one_process = pool.apply_async(test_one_path, args=(args, i, data_save_path, fig_save_path))
        mutil_process.append(one_process)
    pool.close()
    pool.join()
    # Aggregate per-sequence metrics.
    fitness_record = []
    job_num_list = []
    for p in mutil_process:
        std_fitness, runtime_fitness, job_num = p.get()
        job_num_list.append(job_num)
        fitness_record.append((std_fitness, runtime_fitness))
    fitness_record = np.array(fitness_record)
    mean_fitness = np.mean(fitness_record, axis=0)
    std_fitness = np.std(fitness_record, axis=0)
    print(job_num_list)
    # Fixed: previously saved np.array(job_num) — only the last sequence's
    # job count — instead of the whole list.
    np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num_list))
    print(
        "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
            mean_fitness[0], mean_fitness[1]
        )
    )
    print(
        "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(std_fitness[0], std_fitness[1])
    )
    print("done")
| 9,255 | 34.328244 | 134 | py |
MERL-LB | MERL-LB-main/mp_train_nn_nsga2_neighbor.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP scoring network; parameters are set from a flat GA genome."""

    def __init__(self, dim_list=[126, 32, 1]):
        super().__init__()
        self.dim_list = dim_list
        modules = []
        self.param_num = 0
        for idx in range(len(dim_list) - 1):
            fan_in, fan_out = dim_list[idx], dim_list[idx + 1]
            modules.append(nn.Linear(fan_in, fan_out))
            self.param_num += fan_in * fan_out + fan_out
        self.fc = nn.ModuleList(modules)

    def forward(self, x):
        """ReLU between hidden layers; final layer is linear, last dim squeezed."""
        last = len(self.fc) - 1
        for idx, layer in enumerate(self.fc):
            x = layer(x) if idx == last else F.relu(layer(x))
        return torch.squeeze(x, dim=-1)

    def update(self, weights):
        """Copy a flat weight vector into the linear layers, layer by layer."""
        flat = torch.FloatTensor(weights)
        with torch.no_grad():
            offset = 0
            for layer in self.fc:
                n_weights = layer.in_features * layer.out_features
                layer.weight.data = flat[offset : offset + n_weights].reshape(
                    layer.out_features, layer.in_features
                )
                offset += n_weights
                layer.bias.data = flat[offset : offset + layer.out_features]
                offset += layer.out_features

    def predict(self, input, action_mask=None):
        """Score ``input`` (B*n*feat) and return the greedy machine index.

        Machines with a False mask entry are penalised by -1e8 first.
        """
        scores = self(input)
        if action_mask is not None:
            scores[action_mask == False] += -1e8
        return torch.argmax(scores, dim=1).cpu().item()

    def show(self):
        """Debug helper: print every parameter tensor as a flat numpy array."""
        with torch.no_grad():
            for p in self.parameters():
                print(p.numpy().flatten())
class Agent(nn.Module):
    """Couples the job Actor with observation preprocessing for GA training."""

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Forward a flat weight vector (genome) to the job actor."""
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Turn one raw env observation into per-machine features, pick greedily."""
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Tensor conversion; a batch dimension of 1 is added where needed.
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        occupancy = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # occupancy: B*n*t*r — batch, machines, horizon, resources.
        B, n, t, r = occupancy.shape
        # Cluster-wide summary: mean/std over machines, tiled per machine.
        occ_mean = torch.mean(occupancy, dim=1).reshape(B, 1, -1)
        occ_std = torch.std(occupancy, dim=1).reshape(B, 1, -1)
        summary = torch.cat((occ_mean, occ_std), dim=-1).repeat(1, n, 1)
        # Replicate the job features once per machine.
        job_feat = job_state.reshape(B, 1, -1).repeat(1, n, 1)
        per_machine = torch.cat(
            (
                job_feat,
                occupancy.reshape(B, n, -1),
                run_time.reshape(B, n, -1),
                summary,
            ),
            dim=-1,
        )  # B*n*feature_dim
        return self.job_actor.predict(per_machine, mask)

    def show(self):
        """Print the actor's parameters (debug aid)."""
        self.job_actor.show()
class Individual:
    """One GA genome together with the Agent network it parameterises."""

    def __init__(self, job_genes=None):
        self.agent = Agent()
        self.param_num = self.agent.job_actor.param_num
        self.job_genes = job_genes
        # Fitness bookkeeping, filled in by the evaluation workers.
        self.train_fitness = None
        self.eval_fitness = None
        self.std_fitness = np.inf
        self.steps = 0

    def init(self):
        """Draw a fresh uniform genome in [-1, 1)."""
        self.job_genes = np.random.uniform(low=-1.0, high=1.0, size=self.param_num)

    def update(self):
        """Push a copy of the genome into the agent's network weights."""
        genome_snapshot = self.job_genes.copy()
        self.agent.update(genome_snapshot)
def run_individual_in_env(id, args, genes, seq_index):
    """Roll out one genome in its own environment and score it.

    Runs the genome `genes` as an Agent on job sequence `seq_index` until the
    episode ends, then computes the fitness selected by
    ``args.ga_fitness_type``:

    - "std":     negative summed per-step mean std of machine occupancy
                 (load balance only).
    - "runtime": negative summed machine finish time, scaled by 1/60
                 (duration only).
    - "double":  two-objective vector [-mean duration, -mean balance std].

    Returns ``(id, fitness)`` so multiprocessing callers can match results
    back to population slots.

    Raises:
        ValueError: for an unknown ``ga_fitness_type`` (previously this fell
        through and raised an obscure NameError on ``fitness``).
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    individual = Individual(genes)
    individual.update()
    # NOTE(review): the original reset the env twice in a row; a single
    # reset is kept here — confirm reset() is idempotent for a fixed seq_index.
    obs = env.reset()
    done = False
    while not done:
        action = individual.agent.choose_action(obs)
        obs, _, done, _ = env.step(action)
    if args.ga_fitness_type == "std":
        # Load-balance objective: std of occupancy across machines.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        fitness = -np.sum(machines_occupancy_mean_std)
    elif args.ga_fitness_type == "runtime":
        # Duration objective; divide by 60 to keep the magnitude manageable.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        fitness = -np.sum(machines_finish_time_record / 60)
    elif args.ga_fitness_type == "double":
        # Two objectives: balance std (in absolute resource units) and duration.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
    else:
        raise ValueError(f"unknown ga_fitness_type: {args.ga_fitness_type!r}")
    return id, fitness
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
# # 归一化
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
# # 权重相加排序
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
# population_sorted_index = np.argsort(norm_fitness_list) # 升序取后面几位
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
def elitism_selection(self):
# 归一化值
fitness_list = []
for individual in self.population:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
)
# 快速非支配排序越小越好 所以转换为正数
fm_fitness_list = -np.array(fitness_list).T
# 快速非支配排序
front_list = self.fast_non_dominated_sort(fm_fitness_list)
# 拥挤度计算
crowded_distance_list = []
for front in front_list:
front_values = fm_fitness_list[:, front]
crowded_distance = self.crowded_distance(front_values)
crowded_distance_list.append(crowded_distance)
# 精英选择
elitism_index = []
save_best_front = False
for front, crowded_distance in zip(front_list, crowded_distance_list):
# 保存最前沿模型
if not save_best_front:
best_front_population = []
for index in front:
best_front_population.append(self.population[index])
self.best_front_population = best_front_population
save_best_front = True
# 根据拥挤度排序
front = np.array(front)
sorted_index = np.argsort(crowded_distance) # 升序排序
sorted_front = front[sorted_index[::-1]] # 降序排序取拥挤度大的
# 选择精英
# 选择的个数是不是可以定义?
for index in sorted_front:
if len(elitism_index) < self.p_size:
elitism_index.append(index)
else:
break
# [0.5, 05] 权重相加排序
norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
elitism_population = [self.population[index] for index in elitism_index]
# 检查精英变化数量
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.fitness_list = fitness_list
self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
return elite_change_num
# 轮盘赌选择子代
def roulette_wheel_selection(self, size) -> List[Individual]:
# 值越大被取到的概率就越大
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
# 随机选择
def random_select_parent(self, size):
# 随机选择两个父代
selection = random.sample(self.elitism_population, size)
return selection
# 产生子代
# 改进子代生成方式
def generate_children(self):
children_population = []
# 对精英排序
fitness_list = []
for item in self.elitism_population:
fitness_list.append(item.train_fitness)
fitness_list = np.array(fitness_list)
fitness0_index = np.argsort(fitness_list[:, 0])
fitness1_index = np.argsort(fitness_list[:, 1])
a = np.arange(0, len(fitness_list))
neighbor = []
for i in range(len(fitness_list)):
distance = abs(i - a)
nei = np.argsort(distance)
neighbor.append(nei[1 : 5 + 1])
self.exploration_rate = 1
if self.generation < 100:
self.exploration_rate = 0.5 + 0.5 * (1 - self.generation / 100)
elif self.generation < 2000:
self.exploration_rate = 0.1 + 0.4 * (1 - self.generation / 2000)
else:
self.exploration_rate = 0.1
# 1000 探索
# 5000 待收缩
#
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
if random.random() < self.exploration_rate:
p1, p2 = self.random_select_parent(2)
else:
rd_index1 = random.randint(0, len(fitness0_index) - 1)
rd_index2 = neighbor[rd_index1][random.randint(0, 5 - 1)]
if random.random() < 0.5:
p1 = self.elitism_population[fitness0_index[rd_index1]]
p2 = self.elitism_population[fitness0_index[rd_index2]]
else:
p1 = self.elitism_population[fitness1_index[rd_index1]]
p2 = self.elitism_population[fitness1_index[rd_index2]]
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
# 进化
def evolve(self):
# 普通循环测试
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
# 多进程
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(pool_num)
mutil_process = []
for id, individual in enumerate(self.population):
# 在坏境中运行个体获得个体适应度
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
# 收集进程结果
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
# 保存所有结果
self.save_population(self.population, "all")
# 精英选择
elite_change_num = self.elitism_selection()
# 保存精英
elite_fitness_list = self.save_population(self.elitism_population, "elite")
# 子代生成
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
# 值排序
def sort_by_values(self, values):
# 升序排序
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
# 拥挤度计算
def crowded_distance(self, values):
distances = []
sorted_index_list = self.sort_by_values(values) # 升序排序
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
# 快速非支配排序
def fast_non_dominated_sort(self, values):
# 值越小越好
values11 = values[0] # 函数1解集
S = [[] for _ in range(0, len(values11))] # 存放 每个个体支配解的集合
front = [[]] # 存放群体的级别集合,一个级别对应一个[]
n = [0 for _ in range(0, len(values11))] # 每个个体被支配解的个数 即针对每个解 存放有多少好于这个解的个数
rank = [np.inf for _ in range(0, len(values11))] # 存放每个个体的级别
# 遍历每一个个体得到各个个体的被支配解个数和支配解集合
# 目标函数值越小越好
for p in range(0, len(values11)):
S[p] = [] # 该个体支配解的集合 即存放差于该解的解
n[p] = 0 # 该个体被支配的解的个数初始化为0 即找到有多少好于该解
for q in range(0, len(values11)): # 遍历每一个个体
less = 0 # 的目标函数值小于p个体的目标函数值数目
equal = 0 # 的目标函数值等于p个体的目标函数值数目
greater = 0 # 的目标函数值大于p个体的目标函数值数目
for k in range(len(values)): # 遍历每一个目标函数
if values[k][p] > values[k][q]: # 目标函数k时 q个体值小于p个体
less = less + 1 # q比p 好
if values[k][p] == values[k][q]: # 目标函数k时 p个体值等于于q个体
equal = equal + 1
if values[k][p] < values[k][q]: # 目标函数k时 q个体值大于p个体
greater = greater + 1 # q比p差
if (less + equal == len(values)) and (equal != len(values)):
n[p] = n[p] + 1 # q比好 比p好的个体个数加1
elif (greater + equal == len(values)) and (equal != len(values)):
S[p].append(q) # q比p差 存放比p差的个体解序号
# 找出Pareto最优解 即n[p]=0的个体p序号
if n[p] == 0:
rank[p] = 0 # 序号为p的个体 等级为0即最优
if p not in front[0]:
# 如果p不在第0层中 将其追加到第0层中
front[0].append(p) # 存放Pareto最优解序号
# 划分各层解
i = 0
while front[i] != []: # 如果分层集合为不为空
Q = []
for p in front[i]: # 遍历当前分层集合的各个个体p
for q in S[p]: # 遍历p个体的每个支配解q
n[q] = n[q] - 1 # 则将支配解中所有给对应的个体np-1
if n[q] == 0:
rank[q] = i + 1
if q not in Q:
Q.append(q) # 存放front=i+1的个体序号
i = i + 1 # front等级+1
front.append(Q)
del front[len(front) - 1] # 删除循环退出时i+1产生的[]
return front # 返回各层的解序号集合 类似[[1],[9],[0, 8],[7, 6],[3, 5],[2, 4]]
# Entry point: drives the "nsga_nei" (NSGA with neighbour mating) training loop.
if __name__ == "__main__":
    args = parse_args()
    # Hard-coded run configuration overriding the CLI defaults.
    args.method = "nsga_nei"
    args.job_seq_num = 1
    args.tag = "run01"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # Persist the full argument set for reproducibility.
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    ga = GA(args)
    ga.setup_seed()
    # Either create random ancestors or resume from saved checkpoints.
    if args.ga_choice == "generate":
        ga.generate_ancestor()
    else:
        ga.inherit_ancestor()
    # NOTE(review): fitness_list and mean_best_fitness are never used below.
    fitness_list = []
    mean_best_fitness = [-np.inf] * args.ga_fitness_num
    # Evolve forever; there is no termination condition — stop manually.
    while True:
        print("=" * 100)
        print(f"evolve generation {ga.generation}")
        elite_change_num, elite_fitness_list = ga.evolve()
        # log to tensorboard
        writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
        elite_fitness_list = np.array(elite_fitness_list)
        # Last two columns are (-duration, -balance); negate back to positives.
        elite_fitness_list = -elite_fitness_list[:, -2:]
        y = elite_fitness_list[:, 0]  # duration objective
        x = elite_fitness_list[:, 1]  # balance objective
        # Scatter the current elite front against fixed baseline points.
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Train/Target distribution", figure, ga.generation)
        plt.close()
        # Track per-objective extrema of the elite front.
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
        writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
        writer.add_scalar("Train/Exploration rate", ga.exploration_rate, ga.generation)
| 23,003 | 34.665116 | 96 | py |
MERL-LB | MERL-LB-main/mp_test_server_num.py | import os
import random
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from itertools import count
from multiprocessing import Pool, cpu_count
from config.test import *
from envs.datacenter_env.env import DatacenterEnv
from utils import *
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = torch.device("cpu")
class RR:
    """Round-robin scheduling baseline."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num
        self.action_index = 0  # last machine that received a job

    def select_action(self, obs):
        """Return the next schedulable machine after the previous choice.

        Scans at most ``machine_num`` candidates; if none is schedulable,
        the starting index is returned and ``action_index`` is unchanged.
        """
        _, _, _, _, _, action_mask = obs
        action = self.action_index
        for _ in range(self.machine_num):  # '_': index itself is unused
            action = (action + 1) % self.machine_num
            if action_mask[action]:  # truthiness instead of '== True'
                self.action_index = action
                break
        return action
class RD:
    """Random scheduling baseline, restricted to schedulable machines."""

    def __init__(self, machine_num) -> None:
        self.machine_num = machine_num

    def select_action(self, obs):
        """Score every machine randomly and take the best schedulable one.

        Valid machines score in [0.5, 1.0) and invalid ones in [0.0, 0.5),
        so argmax always lands on a schedulable machine when one exists.
        """
        _, _, _, _, _, action_mask = obs
        scores = (np.random.random(self.machine_num) + action_mask) / 2
        return np.argmax(scores)
class LG:
    """Least-gap baseline: choose the machine whose remaining run time is
    closest to the incoming job's run time."""

    def select_action(self, obs):
        _, job_run_time, _, machines_run_time, _, action_mask = obs
        distance = np.abs(machines_run_time - job_run_time)
        # Push unschedulable machines out of contention.
        distance[~action_mask] = 1e9
        return np.argmin(distance)
class LC:
    """Least-connections baseline: pick the schedulable machine with the
    fewest queued jobs."""

    def select_action(self, obs):
        _, _, _, _, jobs_num, action_mask = obs
        # Bug fix: work on a copy — the original overwrote the caller's
        # observation array when masking out unschedulable machines.
        jobs_num = np.array(jobs_num, dtype=float)
        jobs_num[action_mask == False] = 1e9
        action = np.argmin(jobs_num)
        return action
class Actor(nn.Module):
    """MLP that scores each machine for the pending job.

    ``param_num`` is the total count of weights + biases; it doubles as the
    GA genome length consumed by :meth:`update`.
    """

    def __init__(self, absolute=True, dim_list=None):
        super().__init__()
        # Bug fix: avoid the mutable-default-argument pitfall of the original
        # signature (``dim_list=[126, 32, 1]``).
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.absolute = absolute
        self.dim_list = dim_list
        fc = []
        self.param_num = 0
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU-activated hidden layers, linear output, squeeze trailing dim."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Load a flat weight vector (GA genome) into the linear layers.

        Layout per layer: all weights (row-major), then all biases.
        """
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Score machines and return the chosen index as a Python int.

        With ``absolute=True`` this is a greedy argmax; otherwise the action
        is sampled from a softmax distribution and its log-prob is kept in
        ``self.action_logprobs``. Masked-out entries get -1e8 so they are
        never chosen.
        """
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        if not self.absolute:
            action_prob = torch.softmax(predict, dim=-1)
            action_dist = Categorical(action_prob)
            action = action_dist.sample()
            self.action_logprobs = action_dist.log_prob(action).detach()
            action = action.cpu().item()
        else:
            action = torch.argmax(predict, dim=1).cpu().item()
        return action

    def show(self):
        """Print every parameter tensor, flattened (debug helper)."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Wraps the Actor MLP and assembles its per-machine input features."""

    def __init__(self, absolute=True):
        super(Agent, self).__init__()
        self.job_actor = Actor(absolute=absolute)

    def update(self, job_weights):
        """Load a flat weight vector into the actor network."""
        self.job_actor.update(job_weights)

    def select_action(self, obs):
        """Choose a machine index for the pending job.

        ``obs`` is the 6-tuple from DatacenterEnv: job resource-request
        rates, job run time, per-machine occupancy rates over a time window,
        per-machine run times, a slot unused here, and a boolean mask of
        schedulable machines.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Convert the observation pieces to (batched) tensors.
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Cross-machine statistics of occupancy (mean/std over the machine dim).
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)  # same job features for every machine
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine feature: job || own occupancy || own run time || global stats.
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        action = self.job_actor.predict(job_input, action_mask)
        # action = self.job_actor.predict(job_input)
        return action

    def show(self):
        """Print the actor's parameters (debug helper)."""
        self.job_actor.show()
def get_agent(args):
    """Build the scheduling agent selected by ``args.method``.

    Heuristic baselines (rr/rd/lg/lc) need no weights; every learned method
    loads ``args.checkpoint_path`` into the actor and switches to eval mode.

    Raises:
        ValueError: if ``args.method`` is unknown (the original fell through
        and raised UnboundLocalError on ``agent``).
    """
    method = args.method
    if method == "rr":
        return RR(args.machine_num)
    if method == "rd":
        return RD(args.machine_num)
    if method == "lg":
        return LG()
    if method == "lc":
        return LC()
    if method in ("nsga", "wsga", "deepjs", "igd", "nei_nsga", "ppo"):
        # All learned methods share the same actor architecture and
        # checkpoint format, so the two duplicate branches were merged.
        agent = Agent()
        state_dict = torch.load(args.checkpoint_path)
        agent.job_actor.load_state_dict(state_dict)
        agent.job_actor.eval()
        return agent
    raise ValueError(f"unknown scheduling method: {method!r}")
def set_seed(seed=0):
    """Seed python, numpy and torch RNGs (CPU, current GPU, and all GPUs)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU RNG
    torch.cuda.manual_seed(seed)      # current CUDA device
    torch.cuda.manual_seed_all(seed)  # every CUDA device
def test_one_path(args, seq_index, data_save_path, fig_save_path):
    """Evaluate one scheduler on job sequence ``seq_index``.

    Rolls an episode to completion, saves occupancy/finish-time records as
    .npy files (once at episode end and once after ``env.run_to_end()``),
    plots them, and returns ``(std_fitness, runtime_fitness, env.job_num)``.
    """
    print("start test seq_index: ", seq_index)
    # init agent
    agent = get_agent(args)
    # init env
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    # start test
    obs = env.reset()
    for _ in count():
        # select and perform an action
        with torch.no_grad():
            action = agent.select_action(obs)
        # execute action
        next_obs, _, done, _ = env.step(action)
        # move to the next state
        obs = next_obs
        if done:
            break
    # Save the records captured up to the end of the episode.
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # Balance metric: std across machines (axis 1), then averaged twice.
    machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
    machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
    std_fitness = np.mean(machines_occupancy_mean_std)
    runtime_fitness = np.mean(machines_finish_time_record)
    print(f"std_fitness {std_fitness} runtime_fitness {runtime_fitness}")
    # Let the environment run to completion, then save the updated records.
    env.run_to_end()
    machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
    np.save(
        os.path.join(data_save_path, f"end_occupancy_rate_{seq_index}.npy"),
        machines_occupancy_rate,
    )
    machines_finish_time_record = np.array(env.machines_finish_time_record)
    np.save(
        os.path.join(data_save_path, f"end_finish_time_{seq_index}.npy"),
        machines_finish_time_record,
    )
    # One utilization figure per resource dimension.
    # NOTE(review): assumes 4 resource dims — confirm against the env config.
    for i in range(4):
        data = machines_occupancy_rate[:, :, i]
        save_name = os.path.join(fig_save_path, "use_rate", f"use_rate_e{seq_index}_{i}.png")
        plot_mutil_lines_chart(
            data,
            save_name=save_name,
            xlabel="time",
            ylabel="utilization",
            title="Container Resource Utilization",
        )
    save_name = os.path.join(fig_save_path, "finish_time", f"finish_time_e{seq_index}.png")
    plot_mutil_lines_chart(
        machines_finish_time_record,
        save_name=save_name,
        xlabel="time",
        ylabel="remaining time",
        title="Container Remaining Time",
    )
    del agent
    return std_fitness, runtime_fitness, env.job_num
# Entry point: sweep the number of servers (with proportional job load) and
# evaluate the configured scheduler on each setting, in parallel.
if __name__ == "__main__":
    args = parse_args()
    args.method = "igd"
    args.tag = "server_num_test02"
    args.actual = True
    # args.checkpoint_path = "output/train/nsga/run03/elite/g1_1/20_-501.30449_-25.49838.pth"
    # args.checkpoint_path = "output/train/nsga/run05/elite/g24214_0/10_-351.04309_-20.52227.pth"
    # args.checkpoint_path = "output/train/wsga/run05/elite/g13443_3/0_-335.70133_-14.49433.pth"
    # args.checkpoint_path = (
    #     "output/train/ns_deepjs/run02_no_mask/models/e10000_s0_d401.1772_b15.8262"
    # )
    args.checkpoint_path = (
        "output/train/ns_deepjs/run02_no_mask/models/e13919_s9_d380.7892_b22.2165"
    )
    # args.checkpoint_path = "output/train/nei_nsga/g30000_0/12_-218.78153_-174.13751.pth"
    # job_num_list = range(2, 10)
    server_num_list = [5, 10, 20, 30, 40, 50]
    # Job load scales with the server count (ratio 5 jobs per 10 servers).
    job_num_list = [int(5 * i / 10) for i in server_num_list]
    # user_sigam_list = [0]
    root_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    # NOTE(review): `result` is never appended to, so mean_std.csv below is
    # always empty — presumably the per-setting mean/std were meant to go here.
    result = []
    result2 = []
    for server_num, max_job_num in zip(server_num_list, job_num_list):
        args.machine_num = server_num
        # Expected utilization from mean job size/length vs total capacity.
        user_load_rate = (
            max_job_num
            / 2
            * args.max_res_req
            / 2
            * args.max_job_len
            / 2
            / args.res_capacity
            / args.machine_num
        )
        if user_load_rate > 1.1:
            break
        print(f"Test server_num {server_num} user_load_rate {user_load_rate:.3f}")
        save_dir = os.path.join(
            root_dir,
            f"server_num_{server_num}",
        )
        os.makedirs(save_dir, exist_ok=True)
        fig_save_path = os.path.join(save_dir, "fig")
        data_save_path = os.path.join(save_dir, "data")
        os.makedirs(data_save_path, exist_ok=True)
        os.makedirs(fig_save_path, exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "use_rate"), exist_ok=True)
        os.makedirs(os.path.join(fig_save_path, "finish_time"), exist_ok=True)
        # Persist the full argument set for this setting.
        args.max_job_num = max_job_num
        args_dict = args.__dict__
        args_path = os.path.join(save_dir, "args.txt")
        with open(args_path, "w") as f:
            for each_arg, value in args_dict.items():
                f.writelines(each_arg + " : " + str(value) + "\n")
        # Evaluate every job sequence in its own process.
        mutil_process = []
        # pool = Pool(10)
        pool = Pool(cpu_count())
        for i in range(args.job_seq_num):
            one_process = pool.apply_async(
                test_one_path, args=(args, i, data_save_path, fig_save_path)
            )
            mutil_process.append(one_process)
        pool.close()
        pool.join()
        # Calculate mean performance over the sequences.
        # NOTE(review): rebinding `job_num_list` here shadows the sweep list
        # above (harmless because zip() already captured it, but confusing).
        fitness_record = []
        job_num_list = []
        for p in mutil_process:
            std_fitness, runtime_fitness, job_num = p.get()
            job_num_list.append(job_num)
            fitness_record.append((std_fitness, runtime_fitness))
            result2.append((user_load_rate, std_fitness, runtime_fitness))
        fitness_record = np.array(fitness_record)
        mean_fitness = np.mean(fitness_record, axis=0)
        std_fitness = np.std(fitness_record, axis=0)
        print(job_num_list)
        # NOTE(review): this saves only the LAST sequence's job_num; it was
        # probably meant to save np.array(job_num_list).
        np.save(os.path.join(data_save_path, "job_num.npy"), np.array(job_num))
        print(
            "mean std fitness: {:.4f} mean runtime fitness: {:.4f}".format(
                mean_fitness[0], mean_fitness[1]
            )
        )
        print(
            "std std fitness: {:.4f} std runtime fitness: {:.4f}".format(
                std_fitness[0], std_fitness[1]
            )
        )
        print("done")
    df = pd.DataFrame(
        result,
        columns=[
            "user_load_rate",
            "balance_fitness_mean",
            "duration_fitness_mean",
            "balance_fitness_std",
            "duration_fitness_std",
        ],
    )
    df.to_csv(os.path.join(root_dir, f"mean_std.csv"))
    df2 = pd.DataFrame(
        result2,
        columns=[
            "user_load_rate",
            "balance_fitness",
            "duration_fitness",
        ],
    )
    df2.to_csv(os.path.join(root_dir, f"all_data.csv"))
| 13,588 | 31.823671 | 97 | py |
MERL-LB | MERL-LB-main/mp_train_nn_nsga2_no_mask.py | import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from multiprocessing import Pool, cpu_count
from config.ga import *
from typing import List
from envs.datacenter_env.env import DatacenterEnv
from torch.utils.tensorboard import SummaryWriter
class Actor(nn.Module):
    """MLP that scores each machine for the pending job; greedy argmax action.

    ``param_num`` is the total count of weights + biases; it doubles as the
    GA genome length consumed by :meth:`update`.
    """

    def __init__(self, dim_list=None):
        super().__init__()
        # Bug fix: avoid the mutable-default-argument pitfall of the original
        # signature (``dim_list=[126, 32, 1]``).
        if dim_list is None:
            dim_list = [126, 32, 1]
        self.dim_list = dim_list
        fc = []
        self.param_num = 0
        for i in range(len(dim_list) - 1):
            fc.append(nn.Linear(dim_list[i], dim_list[i + 1]))
            self.param_num += dim_list[i] * dim_list[i + 1] + dim_list[i + 1]
        self.fc = nn.ModuleList(fc)

    def forward(self, x):
        """ReLU-activated hidden layers, linear output, squeeze trailing dim."""
        for i in range(len(self.fc) - 1):
            x = F.relu(self.fc[i](x))
        x = self.fc[-1](x)
        x = torch.squeeze(x, dim=-1)
        return x

    def update(self, weights):
        """Load a flat weight vector (GA genome) into the linear layers.

        Layout per layer: all weights (row-major), then all biases.
        """
        weights = torch.FloatTensor(weights)
        with torch.no_grad():
            start = 0
            for fc in self.fc:
                end = start + fc.in_features * fc.out_features
                fc.weight.data = weights[start:end].reshape(fc.out_features, fc.in_features)
                start = end
                end = start + fc.out_features
                fc.bias.data = weights[start:end]
                start = end

    def predict(self, input, action_mask=None):
        """Greedy action: argmax of the scores; masked-out entries get -1e8."""
        predict = self(input)
        if action_mask is not None:
            predict[action_mask == False] += -1e8
        return torch.argmax(predict, dim=1).cpu().item()

    def show(self):
        """Print every parameter tensor, flattened (debug helper)."""
        with torch.no_grad():
            for parameters in self.parameters():
                print(parameters.numpy().flatten())
class Agent(nn.Module):
    """Wraps the Actor MLP and assembles its per-machine input features.

    "No mask" training variant: the action mask is still built, but the
    masked predict call is commented out, so the actor scores all machines.
    """

    def __init__(self):
        super(Agent, self).__init__()
        self.job_actor = Actor()

    def update(self, job_weights):
        """Load a flat weight vector into the actor network."""
        self.job_actor.update(job_weights)

    def choose_action(self, obs):
        """Pick a machine index for the pending job.

        ``obs`` is the 6-tuple from DatacenterEnv: job resource-request
        rates, job run time, per-machine occupancy rates over a time window,
        per-machine run times, a slot unused here, and a boolean mask of
        schedulable machines.
        """
        (
            job_res_req_rate,
            job_run_time,
            machines_all_occupancy_rate,
            machines_run_time,
            _,
            action_mask,
        ) = obs
        # Convert the observation pieces to (batched) tensors.
        job_state = torch.tensor(np.array([*job_res_req_rate, job_run_time]), dtype=torch.float)
        machines_all_occupancy_rate = torch.tensor(
            np.array([machines_all_occupancy_rate]), dtype=torch.float
        )
        machines_run_time = torch.tensor(np.array([machines_run_time]), dtype=torch.float)
        action_mask = torch.tensor(np.array([action_mask]), dtype=torch.bool)
        # job_state: B*t*r, machines_state: B*n*t*r, buffer_state: B*t
        B, n, t, r = machines_all_occupancy_rate.shape
        # Cross-machine statistics of occupancy (mean/std over the machine dim).
        machines_occupancy_rate_mean = torch.mean(machines_all_occupancy_rate, dim=1)  # B*t*r
        machines_occupancy_rate_std = torch.std(machines_all_occupancy_rate, dim=1)  # B*t*r
        job_state = job_state.reshape(B, 1, -1)
        job_state = job_state.repeat(1, n, 1)  # same job features for every machine
        machines_occupancy_rate_mean = machines_occupancy_rate_mean.reshape(B, 1, -1)
        machines_occupancy_rate_std = machines_occupancy_rate_std.reshape(B, 1, -1)
        machines_state_mean = torch.cat(
            (
                machines_occupancy_rate_mean,
                machines_occupancy_rate_std,
            ),
            dim=-1,
        )
        machines_occupancy_rate = machines_all_occupancy_rate.reshape(B, n, -1)
        machines_run_time = machines_run_time.reshape(B, n, -1)
        machines_state_mean_std_run_time = machines_state_mean.repeat(1, n, 1)
        # Per-machine feature: job || own occupancy || own run time || global stats.
        job_input = torch.cat(
            (
                job_state,
                machines_occupancy_rate,
                machines_run_time,
                machines_state_mean_std_run_time,
            ),
            dim=-1,
        )  # B*n*dim2
        # action = self.job_actor.predict(job_input, action_mask)
        action = self.job_actor.predict(job_input)
        return action

    def show(self):
        """Print the actor's parameters (debug helper)."""
        self.job_actor.show()
class Individual:
    """A GA genome plus the Agent phenotype it parameterizes."""

    def __init__(self, job_genes=None):
        # Phenotype network; its parameter count defines the genome length.
        self.agent = Agent()
        self.param_num = self.agent.job_actor.param_num
        # Flat weight vector (the genome) and fitness bookkeeping.
        self.job_genes = job_genes
        self.train_fitness = None
        self.eval_fitness = None
        self.std_fitness = np.inf
        self.steps = 0

    def init(self):
        """Draw a fresh random genome, uniform over [-1, 1)."""
        self.job_genes = np.random.uniform(low=-1, high=1, size=self.param_num)

    def update(self):
        """Push a copy of the genome into the agent's network weights."""
        self.agent.update(np.copy(self.job_genes))
def run_individual_in_env(id, args, genes, seq_index):
    """Roll out one genome in its own environment and score it.

    Runs the genome `genes` as an Agent on job sequence `seq_index` until the
    episode ends, then computes the fitness selected by
    ``args.ga_fitness_type``:

    - "std":     negative summed per-step mean std of machine occupancy
                 (load balance only).
    - "runtime": negative summed machine finish time, scaled by 1/60
                 (duration only).
    - "double":  two-objective vector [-mean duration, -mean balance std].

    Returns ``(id, fitness)`` so multiprocessing callers can match results
    back to population slots.

    Raises:
        ValueError: for an unknown ``ga_fitness_type`` (previously this fell
        through and raised an obscure NameError on ``fitness``).
    """
    env = DatacenterEnv(args)
    env.seq_index = seq_index
    individual = Individual(genes)
    individual.update()
    # NOTE(review): the original reset the env twice in a row; a single
    # reset is kept here — confirm reset() is idempotent for a fixed seq_index.
    obs = env.reset()
    done = False
    while not done:
        action = individual.agent.choose_action(obs)
        obs, _, done, _ = env.step(action)
    if args.ga_fitness_type == "std":
        # Load-balance objective: std of occupancy across machines.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        fitness = -np.sum(machines_occupancy_mean_std)
    elif args.ga_fitness_type == "runtime":
        # Duration objective; divide by 60 to keep the magnitude manageable.
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        fitness = -np.sum(machines_finish_time_record / 60)
    elif args.ga_fitness_type == "double":
        # Two objectives: balance std (in absolute resource units) and duration.
        machines_occupancy_rate = np.array(env.machines_occupancy_rate_record)
        machines_occupancy_std = np.std(machines_occupancy_rate * args.res_capacity, axis=1)
        machines_occupancy_mean_std = np.mean(machines_occupancy_std, axis=1)
        std_fitness = np.mean(machines_occupancy_mean_std)
        machines_finish_time_record = np.array(env.machines_finish_time_record)
        runtime_fitness = np.mean(machines_finish_time_record)
        fitness = np.array([-runtime_fitness, -std_fitness])
    else:
        raise ValueError(f"unknown ga_fitness_type: {args.ga_fitness_type!r}")
    return id, fitness
class GA:
def __init__(self, args):
self.args = args
self.p_size = args.ga_parent_size
self.c_size = args.ga_children_size
self.job_genes_len = 0
self.mutate_rate = args.ga_mutate_rate
self.mutate_scale = args.ga_mutate_scale
self.population: List[Individual] = []
self.elitism_population: List[Individual] = []
self.avg_fitness = 0
self.seq_index = 0
self.seq_num = args.job_seq_num
self.generation = 0
def setup_seed(self):
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def generate_ancestor(self):
for _ in range(self.p_size):
individual = Individual()
individual.init()
self.population.append(individual)
self.job_genes_len = individual.param_num
def inherit_ancestor(self):
"""Load genes(nn model parameters) from file."""
for i in range(self.p_size):
pth = os.path.join("model", "all_individual", str(i) + "_nn.pth")
nn = torch.load(pth)
genes = []
with torch.no_grad():
for parameters in nn.parameters():
genes.extend(parameters.numpy().flatten())
self.population.append(Individual(np.array(genes)))
def crossover(self, c1_genes, c2_genes):
"""Single point crossover."""
p1_genes = c1_genes.copy()
p2_genes = c2_genes.copy()
point = np.random.randint(0, (self.job_genes_len))
c1_genes[: point + 1] = p2_genes[: point + 1]
c2_genes[: point + 1] = p1_genes[: point + 1]
def mutate(self, c_genes):
"""Gaussian mutation with scale"""
mutation_array = np.random.random(c_genes.shape) < self.mutate_rate
mutation = np.random.normal(size=c_genes.shape)
mutation[mutation_array] *= self.mutate_scale
c_genes[mutation_array] += mutation[mutation_array]
# def elitism_selection(self):
# # 归一化
# fitness_list = []
# for individual in self.population:
# fitness_list.append(individual.train_fitness)
# fitness_list = np.array(fitness_list)
# norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
# np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
# )
# # 权重相加排序
# norm_fitness_list = np.sum(
# norm_fitness_list * self.args.ga_fitness_wight, axis=-1
# )
# population_sorted_index = np.argsort(norm_fitness_list) # 升序取后面几位
# population_sorted_index = population_sorted_index[-self.p_size :]
# self.elitism_population = [
# self.population[index] for index in population_sorted_index
# ]
# self.avg_fitness = np.mean(fitness_list[population_sorted_index], axis=0)
# self.elitism_norm_fitness_list = norm_fitness_list[population_sorted_index]
def elitism_selection(self):
# 归一化值
fitness_list = []
for individual in self.population:
fitness_list.append(individual.train_fitness)
fitness_list = np.array(fitness_list)
norm_fitness_list = (fitness_list - np.min(fitness_list, axis=0)) / (
np.max(fitness_list, axis=0) - np.min(fitness_list, axis=0)
)
# 快速非支配排序越小越好 所以转换为正数
fm_fitness_list = -np.array(fitness_list).T
# 快速非支配排序
front_list = self.fast_non_dominated_sort(fm_fitness_list)
# 拥挤度计算
crowded_distance_list = []
for front in front_list:
front_values = fm_fitness_list[:, front]
crowded_distance = self.crowded_distance(front_values)
crowded_distance_list.append(crowded_distance)
# 精英选择
elitism_index = []
save_best_front = False
for front, crowded_distance in zip(front_list, crowded_distance_list):
# 保存最前沿模型
if not save_best_front:
best_front_population = []
for index in front:
best_front_population.append(self.population[index])
self.best_front_population = best_front_population
save_best_front = True
# 根据拥挤度排序
front = np.array(front)
sorted_index = np.argsort(crowded_distance) # 升序排序
sorted_front = front[sorted_index[::-1]] # 降序排序取拥挤度大的
# 选择精英
# 选择的个数是不是可以定义?
for index in sorted_front:
if len(elitism_index) < self.p_size:
elitism_index.append(index)
else:
break
# [0.5, 05] 权重相加排序
norm_fitness_list = np.sum(norm_fitness_list * self.args.ga_fitness_wight, axis=-1)
elitism_population = [self.population[index] for index in elitism_index]
# 检查精英变化数量
elite_change_num = len(elitism_population)
for elite in elitism_population:
if elite in self.elitism_population:
elite_change_num -= 1
self.elitism_population = elitism_population
self.fitness_list = fitness_list
self.avg_fitness = np.mean(fitness_list[elitism_index], axis=0)
self.elitism_norm_fitness_list = norm_fitness_list[elitism_index]
return elite_change_num
# 轮盘赌选择子代
def roulette_wheel_selection(self, size) -> List[Individual]:
# 值越大被取到的概率就越大
selection = []
wheel = sum(self.elitism_norm_fitness_list)
for _ in range(size):
pick = np.random.uniform(0, wheel)
current = 0
for i, individual_fitness in enumerate(self.elitism_norm_fitness_list):
current += individual_fitness
if current > pick:
selection.append(self.elitism_population[i])
break
return selection
# 随机选择
def random_select_parent(self, size):
# 随机选择两个父代
selection = random.sample(self.elitism_population, size)
return selection
# 产生子代
def generate_children(self):
children_population = []
while len(children_population) < self.c_size:
# p1, p2 = self.roulette_wheel_selection(2)
p1, p2 = self.random_select_parent(2)
c1_genes, c2_genes = p1.job_genes.copy(), p2.job_genes.copy()
self.crossover(c1_genes, c2_genes)
self.mutate(c1_genes)
self.mutate(c2_genes)
c1 = Individual(c1_genes)
c2 = Individual(c2_genes)
children_population.extend([c1, c2])
self.children_population = children_population
def save_population(self, population: list[Individual], label=""):
save_dir = os.path.join(
self.args.save_path,
self.args.method,
self.args.tag,
label,
f"g{self.generation}_{self.seq_index}",
)
os.makedirs(save_dir, exist_ok=True)
mean_fitness_list = []
for id, individual in enumerate(population):
mean_fitness = np.array(individual.train_fitness)
mean_fitness_list.append([self.generation, id, *mean_fitness.tolist()])
model_save_path = os.path.join(
save_dir, "{}_{:.5f}_{:.5f}.pth".format(id, *mean_fitness.tolist())
)
individual.update()
torch.save(individual.agent.job_actor.state_dict(), model_save_path)
mean_fitness_list = np.array(mean_fitness_list)
np.save(os.path.join(save_dir, "mean_fitness_record.npy"), mean_fitness_list)
return mean_fitness_list
# 进化
def evolve(self):
# 普通循环测试
# population = []
# for individual in self.population:
# individual = run_individual_in_env(
# self.args,
# individual.job_genes,
# self.seq_index,
# )
# population.append(individual)
# 多进程
population_num = self.args.ga_parent_size + self.args.ga_children_size
pool_num = min(cpu_count(), population_num)
print(f"use {pool_num} cup core")
pool = Pool(10)
mutil_process = []
for id, individual in enumerate(self.population):
# 在坏境中运行个体获得个体适应度
one_process = pool.apply_async(
run_individual_in_env,
args=(
id,
self.args,
individual.job_genes,
self.seq_index,
),
)
mutil_process.append(one_process)
pool.close()
pool.join()
# 收集进程结果
for one_process in mutil_process:
id, fitness = one_process.get()
self.population[id].train_fitness = fitness
# 保存所有结果
self.save_population(self.population, "all")
# 精英选择
elite_change_num = self.elitism_selection()
# 保存精英
elite_fitness_list = self.save_population(self.elitism_population, "elite")
# 子代生成
self.generate_children()
new_population = []
new_population.extend(self.elitism_population)
new_population.extend(self.children_population)
self.population = new_population
self.seq_index = (self.seq_index + 1) % self.seq_num
self.generation += 1
return elite_change_num, elite_fitness_list
# 值排序
def sort_by_values(self, values):
# 升序排序
sorted_index_list = []
for value in values:
sorted_index = np.argsort(value)
sorted_index_list.append(sorted_index)
return sorted_index_list
# 拥挤度计算
def crowded_distance(self, values):
distances = []
sorted_index_list = self.sort_by_values(values) # 升序排序
for value, sorted_index in zip(values, sorted_index_list):
distance = np.ones(len(sorted_index)) * 1e5
for i in range(1, len(sorted_index) - 1):
pre_index = sorted_index[i - 1]
curr_index = sorted_index[i]
after_index = sorted_index[i + 1]
distance[curr_index] = (value[after_index] - value[pre_index]) / (
max(value) - min(value)
)
distances.append(distance)
distances = np.array(distances)
distance = np.sum(distances, axis=0)
return distance
# 快速非支配排序
def fast_non_dominated_sort(self, values):
# 值越小越好
values11 = values[0] # 函数1解集
S = [[] for _ in range(0, len(values11))] # 存放 每个个体支配解的集合
front = [[]] # 存放群体的级别集合,一个级别对应一个[]
n = [0 for _ in range(0, len(values11))] # 每个个体被支配解的个数 即针对每个解 存放有多少好于这个解的个数
rank = [np.inf for _ in range(0, len(values11))] # 存放每个个体的级别
# 遍历每一个个体得到各个个体的被支配解个数和支配解集合
# 目标函数值越小越好
for p in range(0, len(values11)):
S[p] = [] # 该个体支配解的集合 即存放差于该解的解
n[p] = 0 # 该个体被支配的解的个数初始化为0 即找到有多少好于该解
for q in range(0, len(values11)): # 遍历每一个个体
less = 0 # 的目标函数值小于p个体的目标函数值数目
equal = 0 # 的目标函数值等于p个体的目标函数值数目
greater = 0 # 的目标函数值大于p个体的目标函数值数目
for k in range(len(values)): # 遍历每一个目标函数
if values[k][p] > values[k][q]: # 目标函数k时 q个体值小于p个体
less = less + 1 # q比p 好
if values[k][p] == values[k][q]: # 目标函数k时 p个体值等于于q个体
equal = equal + 1
if values[k][p] < values[k][q]: # 目标函数k时 q个体值大于p个体
greater = greater + 1 # q比p差
if (less + equal == len(values)) and (equal != len(values)):
n[p] = n[p] + 1 # q比好 比p好的个体个数加1
elif (greater + equal == len(values)) and (equal != len(values)):
S[p].append(q) # q比p差 存放比p差的个体解序号
# 找出Pareto最优解 即n[p]=0的个体p序号
if n[p] == 0:
rank[p] = 0 # 序号为p的个体 等级为0即最优
if p not in front[0]:
# 如果p不在第0层中 将其追加到第0层中
front[0].append(p) # 存放Pareto最优解序号
# 划分各层解
i = 0
while front[i] != []: # 如果分层集合为不为空
Q = []
for p in front[i]: # 遍历当前分层集合的各个个体p
for q in S[p]: # 遍历p个体的每个支配解q
n[q] = n[q] - 1 # 则将支配解中所有给对应的个体np-1
if n[q] == 0:
rank[q] = i + 1
if q not in Q:
Q.append(q) # 存放front=i+1的个体序号
i = i + 1 # front等级+1
front.append(Q)
del front[len(front) - 1] # 删除循环退出时i+1产生的[]
return front # 返回各层的解序号集合 类似[[1],[9],[0, 8],[7, 6],[3, 5],[2, 4]]
if __name__ == "__main__":
    # Entry point: run the NSGA training loop forever, logging to TensorBoard.
    args = parse_args()
    # Run configuration: NSGA without action masking, a single job sequence.
    args.method = "nsga_no_mask"
    args.job_seq_num = 1
    args.tag = "run01"
    save_dir = os.path.join(
        args.save_path,
        args.method,
        args.tag,
    )
    os.makedirs(save_dir, exist_ok=True)
    # Persist the full argument namespace as plain text for reproducibility.
    args_dict = args.__dict__
    args_path = os.path.join(save_dir, "args.txt")
    with open(args_path, "w") as f:
        for each_arg, value in args_dict.items():
            f.writelines(each_arg + " : " + str(value) + "\n")
    writer = SummaryWriter(os.path.join(save_dir, "log"))
    ga = GA(args)
    ga.setup_seed()
    # Either create a fresh random population or reload previously saved models.
    if args.ga_choice == "generate":
        ga.generate_ancestor()
    else:
        ga.inherit_ancestor()
    fitness_list = []
    mean_best_fitness = [-np.inf] * args.ga_fitness_num
    # Evolve indefinitely; progress is monitored via TensorBoard scalars/figures.
    while True:
        print("=" * 100)
        print(f"evolve generation {ga.generation}")
        elite_change_num, elite_fitness_list = ga.evolve()
        # Log elite turnover to TensorBoard.
        writer.add_scalar("Train/Elite change num", elite_change_num, ga.generation)
        # elite_fitness_list rows are [generation, id, *fitness]; the stored
        # fitness values are negated objectives, so flip the sign of the last
        # two columns to recover positive (duration, balance) values.
        elite_fitness_list = np.array(elite_fitness_list)
        elite_fitness_list = -elite_fitness_list[:, -2:]
        y = elite_fitness_list[:, 0]  # duration objective
        x = elite_fitness_list[:, 1]  # balance objective
        # Scatter the elite objective values against fixed baseline points
        # (lc / lg / wsga — presumably measured offline; verify the numbers).
        figure = plt.figure(figsize=(8, 8), dpi=100)
        plt.scatter(x, y, label="train")
        plt.scatter(16.2658, 534.9209, label="lc")
        # plt.scatter(x, y, lable="rr")
        plt.scatter(66.8868, 349.5121, label="lg")
        plt.scatter(17.0905, 351.4006, label="wsga")
        plt.xlim((0, 250))
        plt.ylim((200, 600))
        plt.xlabel("balance")
        plt.ylabel("duration")
        plt.title("Target distribution")
        plt.legend()
        writer.add_figure("Train/Target distribution", figure, ga.generation)
        plt.close()
        # Track the extreme elite objective values over generations.
        max_elite_fitness = np.max(elite_fitness_list, axis=0)
        min_elite_fitness = np.min(elite_fitness_list, axis=0)
        writer.add_scalar("Train/Balance fitness max", max_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness max", max_elite_fitness[0], ga.generation)
        writer.add_scalar("Train/Balance fitness min", min_elite_fitness[1], ga.generation)
        writer.add_scalar("Train/Duration fitness min", min_elite_fitness[0], ga.generation)
| 21,474 | 34.378913 | 96 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.