repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
more-or-let | more-or-let-master/pydrobert/mol/model.py | '''Tensor ops and models'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import keras.backend as K
import numpy as np
from keras.callbacks import CSVLogger
from keras.callbacks import ModelCheckpoint
from keras.initializers import RandomUniform
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers.merge import Maximum
from keras.layers.wrappers import TimeDistributed
from keras.models import Model
from keras.models import load_model
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.regularizers import l2
from pydrobert.mol.callbacks import ExtendedEarlyStopping
from pydrobert.mol.callbacks import ExtendedHistory
from pydrobert.mol.callbacks import RandomStateCheckpoint
from pydrobert.mol.config import TrainConfig
from six.moves.cPickle import load
__author__ = "Sean Robertson"
__email__ = "sdrobert@cs.toronto.edu"
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2017 Sean Robertson"
class ConvCTC(object):
    '''Fully convolutional CTC

    Architecture is based off [1]_ entirely.

    Parameters
    ----------
    config : config.ModelConfig
        A configuration object containing model-building hyperparameters

    Attributes
    ----------
    config : config.ModelConfig
    model : Keras.model
        The underlying keras acoustic model

    .. [1] Zhang, Y et al. "Towards End-to-End Speech Recognition with Deep
       Convolutional Neural Networks" https://arxiv.org/abs/1701.02720
    '''

    def __init__(self, config):
        if K.image_dim_ordering() != 'tf':
            # not sure if I'm right, but I think the TimeDistributed
            # wrapper will always take axis 1, which could be the
            # channel axis in Theano
            raise ValueError('dimensions must be tensorflow-ordered')
        self.config = config
        # the keras model is built lazily by _ready_acoustic_model
        self.model = None
def fit_generator(
self, train_config, train_data, val_data=None):
'''Fit the acoustic model to data from generators
Parameters
----------
train_config : pydrobert.mol.config.TrainConfig
train_data : pydrobert.mol.corpus.TrainData
Should yield sample tuples of [feats, labels, feat_sizes,
label_sizes], [dummy_data]
val_data : pydrobert.mol.corpus.ValidationData, optional
Validation data. Should yield sample tuples of [feats,
labels, feat_sizes, label_sizes], [dummy_data]
'''
if train_data.batch_size != train_config.batch_size:
raise ValueError(
'Expected training data to have batch size {}; got {}'.format(
train_config.batch_size, train_data.batch_size))
elif val_data and val_data.batch_size != train_config.batch_size:
raise ValueError(
'Expected val data to have batch size {}; got {}'.format(
train_config.batch_size, val_data.batch_size))
elif (train_data.num_sub != 4) or (
train_data.axis_lengths != ((0, 0), (1, 0))):
raise ValueError(
'Expecting training data to yield sub-samples of '
'(feats, label, feat_len, label_len)')
elif val_data and ((val_data.num_sub != 4) or (
val_data.axis_lengths != ((0, 0), (1, 0)))):
raise ValueError(
'Expecting val data to yield sub-samples of '
'(feats, label, feat_len, label_len)')
elif any(axis != 0 for axis in train_data.batch_axis):
raise ValueError('All batch axes in training data must be 0!')
elif val_data and any(axis != 0 for axis in val_data.batch_axis):
raise ValueError('All batch axes in validation data must be 0!')
# create history callback
additional_metadata = vars(self.config)
additional_metadata.update(vars(train_config))
# training_stage and patience get added by callbacks
# the rest don't affect the model directly
del additional_metadata['training_stage']
del additional_metadata['csv_path']
del additional_metadata['csv_delimiter']
del additional_metadata['patience']
del additional_metadata['model_formatter']
del additional_metadata['train_formatter']
history = ExtendedHistory(
csv_path=train_config.csv_path,
strict=True,
delimiter=train_config.csv_delimiter,
model_formatter=train_config.model_formatter,
training_stage=train_config.training_stage,
**additional_metadata
)
initial_epoch = max(-1, history.get_last_epoch()) + 1
model_path = history.get_last('model_path')
prev_training_stage = history.get_last('training_stage')
if history.get_last('train_rng_path'):
with open(history.get_last('train_rng_path'), 'rb') as rng_file:
rng_state = load(rng_file)
train_data.rng.set_state(rng_state)
self._ready_acoustic_model(
model_path=model_path,
train_config=train_config,
prev_training_stage=prev_training_stage,
)
if prev_training_stage == train_config.training_stage:
# it's possible that we've already hit our early stopping
# criterion. Just return in that case
prev_wait = int(history.get_last('wait'))
if prev_wait >= train_config.patience:
return
if val_data:
val_monitored = 'val_loss'
else:
val_monitored = 'loss'
callbacks = [history]
if train_config.patience is not None:
callbacks.append(ExtendedEarlyStopping(
monitor=val_monitored,
min_delta=train_config.min_delta,
patience=train_config.patience,
mode='min',
))
if train_config.model_formatter:
callbacks.append(ModelCheckpoint(
monitor=val_monitored,
filepath=train_config.model_formatter,
save_best_only=False,
period=1,
))
if train_config.train_formatter:
callbacks.append(RandomStateCheckpoint(
train_config.train_formatter,
rng=train_data.rng,
log_entry='train_rng_path',
))
if train_config.csv_path:
callbacks.append(CSVLogger(
train_config.csv_path,
separator=train_config.csv_delimiter,
append=initial_epoch,
))
self.model.fit_generator(
train_data.batch_generator(repeat=True),
steps_per_epoch=len(train_data),
epochs=train_config.max_epochs,
callbacks=callbacks,
validation_data=(
val_data.batch_generator(repeat=True) if val_data else None),
validation_steps=len(val_data) if val_data else 0,
initial_epoch=initial_epoch,
shuffle=False,
)
    def decode(self, decode_config, feats):
        '''Decode features

        Parameters
        ----------
        decode_config : pydrobert.mol.config.DecodeConfig
        feats : 2D float32
            Of shape `(frame, feat)` representing a single recording

        Returns
        -------
        tuple
            Integer id sequence representing the labels assigned to the
            sequence
        '''
        self._ready_acoustic_model(model_path=decode_config.model_path)
        decoder = self._construct_decoder(decode_config.beam_width)
        # batch of one: length is (1, 1); feats get batch + channel axes;
        # the trailing 0 is the keras learning phase (test mode)
        length_batch = np.asarray([[feats.shape[0]]], dtype=np.int32)
        ret_labels = decoder(
            [feats[np.newaxis, :, :, np.newaxis], length_batch, 0]
        )[0][0]
        # -1 marks padding in the dense decoded output
        return tuple(int(idee) for idee in ret_labels if idee != -1)
    def decode_generator(self, decode_config, eval_data):
        '''Decode features from generator

        Parameters
        ----------
        decode_config : pydrobert.mol.config.DecodeConfig
        eval_data : pydrobert.mol.corpus.EvalData
            Evaluation/test data. Should yield samples which are one of:
            1. only features
            2. (key, feats)
            3. (feats, feat_len)
            4. (key, feats, feat_len)
            3. and 4. are preferred as they allow batches of greater
            than size 1 to be processed.

        Yields
        ------
        tuple
            batches which are one of:
            1. [seq]
            2. [(key, seq)]
            3. [seq] * eval_data.batch_size
            4. [(key, seq)] * eval_data.batch_size
            according to what eval_data yields. The outermost length is
            the length of the batch (1. and 2. correspond to batch
            sizes of 1, 3. and 4. of eval_data.batch_size). seq is an
            integer id tuple representing the labels assigned to the
            associated feature.
        '''
        # determine the eval_data setup (styles 2 and 4 carry keys;
        # styles 3 and 4 carry per-sample feature lengths)
        eval_style = None
        if eval_data.add_key:
            if eval_data.num_sub == 3 and eval_data.axis_lengths == ((0, 0),):
                eval_style = 4
            elif eval_data.num_sub == 2:
                eval_style = 2
        elif eval_data.num_sub == 2 and eval_data.axis_lengths == ((0, 0),):
            eval_style = 3
        elif eval_data.num_sub == 1:
            eval_style = 1
        if eval_style is None:
            raise ValueError(
                'Expected evaluation data to yield samples of one of: '
                'feats, (key, feats), (feats, feat_len), or (key, feats, '
                'feat_len)')
        self._ready_acoustic_model(model_path=decode_config.model_path)
        decoder = self._construct_decoder(decode_config.beam_width)
        if eval_style <= 2:
            # no feat_len sub-sample: decode one sample at a time
            for sample in eval_data.sample_generator():
                if eval_style == 1:
                    feat_len = sample.shape[0]
                    ret_labels = decoder([
                        sample[np.newaxis, :, :, :],  # add batch_axis
                        np.array([feat_len], dtype=np.int32),
                        0,  # learning phase: test
                    ])[0][0]
                    yield [tuple(
                        int(idee) for idee in ret_labels if idee != -1)]
                else:
                    key, feats = sample
                    feat_len = feats.shape[0]
                    ret_labels = decoder([
                        feats[np.newaxis, :, :, :],
                        np.array([feat_len], dtype=np.int32),
                        0,
                    ])[0][0]
                    yield [(key, tuple(
                        int(idee) for idee in ret_labels if idee != -1))]
        else:
            # feat_len available: decode whole batches at once
            for batch in eval_data.batch_generator():
                if eval_style == 3:
                    if not eval_data.batch_size:
                        # unbatched samples: add the batch axis manually
                        batch = (batch[0][np.newaxis, :, :, :], [batch[1]])
                    ret_labels = decoder(batch + (0,))[0]
                    ret_labels = [
                        tuple(int(idee) for idee in sample if idee != -1)
                        for sample in ret_labels
                    ]
                    if not eval_data.batch_size:
                        yield ret_labels[0]
                    else:
                        yield ret_labels
                else:
                    if not eval_data.batch_size:
                        # unbatched (key, feats, feat_len): batch-ify
                        batch = (
                            [batch[0]],
                            batch[1][np.newaxis, :, :, :],
                            np.asarray([batch[2]], dtype=np.int32),
                        )
                    # keys (batch[0]) are not fed to the decoder
                    ret_labels = decoder(batch[1:] + (0,))[0]
                    ret_labels = [
                        (
                            key,
                            tuple(int(idee) for idee in sample if idee != -1),
                        )
                        for key, sample in zip(batch[0], ret_labels)
                    ]
                    if not eval_data.batch_size:
                        yield ret_labels[0]
                    else:
                        yield ret_labels
    def evaluate_generator(self, decode_config, eval_data):
        '''Return loss over samples

        Parameters
        ----------
        decode_config : pydrobert.mol.config.DecodeConfig
        eval_data : pydrobert.mol.corpus.ValidationData
            A generator that yields [feats, labels, feat_sizes,
            label_sizes], [dummy_data]

        Returns
        -------
        float
            The loss
        '''
        # no train_config is passed, so the model is loaded verbatim
        self._ready_acoustic_model(model_path=decode_config.model_path)
        return self.model.evaluate_generator(
            iter(eval_data), steps=len(eval_data))
def _ready_acoustic_model(
self, model_path=None, train_config=None,
prev_training_stage=None):
# 3 options exist to get an acoustic model
# 1. load a model verbatim from file
# 2. construct a model from scratch and initialize
# 3. load a model's weights from file and use them in a new
# model
# option 1. is for resuming training in the same stage or we're
# decoding. Option 2 is for when we are just starting training
# or there's no model to load. Option 3 is when we're switching
# training stages.
if model_path and (
train_config is None or
train_config.training_stage == prev_training_stage):
self.model = load_model(
model_path, custom_objects={
'_ctc_loss': _ctc_loss,
'_y_pred_loss': _y_pred_loss,
},
)
return # assume already compiled
if train_config is None:
train_config = TrainConfig()
self._construct_acoustic_model(train_config)
if model_path:
self.model.load_weights(model_path, by_name=True)
if train_config.training_stage == 'adam':
optimizer = Adam(lr=train_config.adam_lr, clipvalue=1.0)
elif train_config.training_stage == 'sgd':
optimizer = SGD(lr=train_config.sgd_lr, clipvalue=1.0)
self.model.compile(
loss={'ctc_loss': _y_pred_loss},
optimizer=optimizer,
)
    def _construct_acoustic_model(self, train_config=TrainConfig()):
        # construct an acoustic model from scratch.
        # NOTE(review): the default ``TrainConfig()`` is evaluated once at
        # class-definition time and shared across calls -- assumed harmless
        # here since it is only read, never mutated
        self._cur_weight_seed = self.config.weight_seed

        def _layer_kwargs():
            # common kwargs for every weighted layer; bumps the seed so
            # each layer gets a distinct but reproducible initialization
            ret = {
                'activation': 'linear',
                'kernel_initializer': RandomUniform(
                    minval=-self.config.weight_init_mag,
                    maxval=self.config.weight_init_mag,
                    seed=self._cur_weight_seed,
                ),
            }
            self._cur_weight_seed = self._cur_weight_seed + 1
            if train_config.training_stage == 'sgd':
                # l2 weight decay is only applied in the sgd stage
                ret['kernel_regularizer'] = l2(train_config.sgd_reg)
            return ret

        # convolutional layer pattern
        def _conv_maxout_layer(last_layer, n_filts, name_prefix, dropout=True):
            # two parallel convolutions merged with an element-wise max
            # (maxout)
            conv_a = Conv2D(
                n_filts,
                (self.config.filt_time_width, self.config.filt_freq_width),
                strides=(
                    self.config.filt_time_stride,
                    self.config.filt_freq_stride,
                ),
                padding='same',
                name=name_prefix + '_a',
                **_layer_kwargs()
            )(last_layer)
            conv_b = Conv2D(
                n_filts,
                (self.config.filt_time_width, self.config.filt_freq_width),
                strides=(
                    self.config.filt_time_stride,
                    self.config.filt_freq_stride,
                ),
                padding='same',
                name=name_prefix + '_b',
                **_layer_kwargs()
            )(last_layer)
            last = Maximum(name=name_prefix + '_m')([conv_a, conv_b])
            # pre-weights (i.e. post max), as per
            # http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf
            if dropout:
                last = Dropout(
                    train_config.dropout, name=name_prefix + '_d',
                    seed=self._cur_weight_seed)(last)
                self._cur_weight_seed += 1
            return last
        # inputs
        feat_input = Input(
            shape=(
                None,  # variable number of frames
                self.config.num_feats * (1 + self.config.delta_order),
                1,  # single input channel
            ),
            name='feat_in',
        )
        feat_size_input = Input(
            shape=(1,), dtype='int32', name='feat_size_in')
        label_input = Input(
            shape=(None,), dtype='int32', name='label_in')
        label_size_input = Input(
            shape=(1,), dtype='int32', name='label_size_in')
        last_layer = feat_input
        # convolutional layers
        n_filts = self.config.init_num_filt_channels
        last_layer = _conv_maxout_layer(
            last_layer, n_filts, 'conv_1', dropout=False)
        # first conv block pools before its dropout
        last_layer = MaxPooling2D(
            pool_size=(
                self.config.pool_time_width,
                self.config.pool_freq_width),
            name='conv_1_p')(last_layer)
        last_layer = Dropout(
            train_config.dropout, name='conv_1_d',
            seed=self._cur_weight_seed)(last_layer)
        self._cur_weight_seed += 1
        for layer_no in range(2, 11):
            if layer_no == 5:
                # double the feature maps halfway up the stack
                n_filts *= 2
            last_layer = _conv_maxout_layer(
                last_layer, n_filts, 'conv_{}'.format(layer_no))
        # collapse the frequency axis by taking a per-channel max
        last_layer = Lambda(
            lambda layer: K.max(layer, axis=2),
            output_shape=(None, n_filts),
            name='max_freq_into_channel',
        )(last_layer)
        # dense layers (maxout pairs, time-distributed over frames)
        for layer_no in range(1, 4):
            name_prefix = 'dense_{}'.format(layer_no)
            dense_a = Dense(
                self.config.num_dense_hidden, name=name_prefix + '_a',
                **_layer_kwargs()
            )
            dense_b = Dense(
                self.config.num_dense_hidden, name=name_prefix + '_b',
                **_layer_kwargs()
            )
            td_a = TimeDistributed(
                dense_a, name=name_prefix + '_td_a')(last_layer)
            td_b = TimeDistributed(
                dense_b, name=name_prefix + '_td_b')(last_layer)
            last_layer = Maximum(name=name_prefix + '_m')([td_a, td_b])
            last_layer = Dropout(
                train_config.dropout, name=name_prefix + '_d',
                seed=self._cur_weight_seed,
            )(last_layer)
            self._cur_weight_seed += 1
        # per-frame label activations (linear -- softmax happens inside
        # the ctc ops where required)
        activation_dense = Dense(
            self.config.num_labels, name='dense_activation',
            **_layer_kwargs()
        )
        activation_layer = TimeDistributed(
            activation_dense, name='dense_activation_td')(last_layer)
        # we take a page from the image_ocr example and treat the ctc as a
        # lambda layer.
        loss_layer = Lambda(
            lambda args: _ctc_loss(*args),
            output_shape=(1,), name='ctc_loss'
        )([
            label_input,
            activation_layer,
            feat_size_input,
            label_size_input
        ])
        self.model = Model(
            inputs=[
                feat_input,
                label_input,
                feat_size_input,
                label_size_input,
            ],
            outputs=[loss_layer],
        )
    def _construct_decoder(self, beam_width):
        # build a backend function mapping (feats, feat sizes, learning
        # phase) to decoded label sequences, reusing the trained model's
        # activation layer
        label_out = _ctc_decode(
            self.model.get_layer(name='dense_activation_td').output,
            self.model.get_layer(name='feat_size_in').output,
            beam_width=beam_width
        )
        decoder = K.function(
            [
                self.model.get_layer(name='feat_in').output,
                self.model.get_layer(name='feat_size_in').output,
                K.learning_phase(),
            ],
            label_out,
        )
        return decoder
def _dft_ctc_loss(y_true, y_pred, input_length, label_length):
# keras impl assumes softmax then log hasn't been performed yet. In tf.nn,
# it has
assert False, "fixme"
sm_y_pred = K.softmax(y_pred)
cost = K.ctc_batch_cost(y_true, sm_y_pred, input_length, label_length)
return cost
def _dft_ctc_decode(y_pred, input_length, beam_width=100):
assert False, "fixme"
sm_y_pred = K.softmax(y_pred)
return K.ctc_decode(
sm_y_pred, K.flatten(input_length),
beam_width=beam_width, greedy=False, top_paths=1)[0][0]
def _tf_dft_ctc_decode(y_pred, input_length, beam_width=100):
    # decode with tf.nn directly; unlike the keras wrapper, these ops
    # expect raw logits (no prior softmax/log)
    import tensorflow as tf
    input_length = tf.reshape(input_length, [-1])
    # tf's ctc ops are time-major: (max_time, batch, num_classes)
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    if beam_width == 1:
        # a beam of one is just greedy (best-path) decoding
        (decoded,), _ = tf.nn.ctc_greedy_decoder(
            inputs=y_pred, sequence_length=input_length)
    else:
        (decoded,), _ = tf.nn.ctc_beam_search_decoder(
            inputs=y_pred,
            sequence_length=input_length,
            beam_width=beam_width,
            top_paths=1,
        )
    # -1 pads out the variable-length sequences in the dense result
    decoded_dense = tf.sparse.to_dense(decoded, default_value=-1)
    return (decoded_dense,)
def _tf_ctc_dense_to_sparse(y_true, label_lengths):
    # convert a padded dense label matrix into the SparseTensor that
    # tf.nn.ctc_loss requires
    import tensorflow as tf
    # y_true (batch_size, max_seq_length)
    # label_lengths (batch_size,)
    dense_shape = tf.shape(y_true)
    # mask off the padding past each sequence's true length
    dense_mask = tf.sequence_mask(label_lengths, dense_shape[1])
    sparse_values = tf.boolean_mask(y_true, dense_mask)
    sparse_indices = tf.where(dense_mask)
    return tf.SparseTensor(
        sparse_indices, sparse_values, tf.to_int64(dense_shape))
def _tf_dft_ctc_loss(y_true, y_pred, input_length, label_length):
    import tensorflow as tf
    # replicate the logic in ctc_batch_cost, sans log and pre softmax
    # (tf.nn.ctc_loss applies softmax internally)
    label_length = tf.reshape(label_length, [-1])
    input_length = tf.reshape(input_length, [-1])
    sparse_labels = _tf_ctc_dense_to_sparse(y_true, label_length)
    # time-major, as tf's ctc ops expect
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    loss = tf.nn.ctc_loss(
        inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
    # shape (batch, 1) to match the model's ctc_loss output layer
    return tf.expand_dims(loss, 1)
def _tf_warp_ctc_loss(y_true, y_pred, input_length, label_length):
    import tensorflow as tf
    import warpctc_tensorflow
    # replicate the logic in ctc_batch_cost, sans log and pre softmax
    label_length = tf.reshape(label_length, [-1])
    input_length = tf.reshape(input_length, [-1])
    sparse_labels = _tf_ctc_dense_to_sparse(y_true, label_length)
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    # route the CTCLoss op to warp-ctc's faster kernel via the kernel
    # label map (the op signature is unchanged)
    with K.get_session().graph._kernel_label_map({"CTCLoss": "WarpCTC"}):
        loss = tf.nn.ctc_loss(
            inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
    return tf.expand_dims(loss, 1)
def _y_pred_loss(y_true, y_pred):
'''Simply return y_pred as loss'''
return y_pred
# select backend-appropriate ctc loss/decode implementations at import time
if K.backend() == 'tensorflow':
    import tensorflow as tf
    # this'll help with tensorflow non-determinism if everything runs
    # swimmingly, but we really can't control it nicely
    tf.set_random_seed(74023893)
    try:
        # prefer Baidu's warp-ctc kernel when it's installed
        import warpctc_tensorflow
        _ctc_loss = _tf_warp_ctc_loss
    except ImportError:
        print('Warp-ctc not installled. Using built-in ctc', file=sys.stderr)
        _ctc_loss = _tf_dft_ctc_loss
    _ctc_decode = _tf_dft_ctc_decode
else:
    # non-tensorflow backends fall back to the keras implementations,
    # which are currently marked broken (see the ``assert False``s above)
    _ctc_loss = _dft_ctc_loss
    _ctc_decode = _dft_ctc_decode
| 23,829 | 37.874388 | 83 | py |
more-or-let | more-or-let-master/pydrobert/mol/ctc_2.py | '''Tensor ops and models related to Connectionist Temporal Classification
All CTC classes accept input via generators in one of the following forms:
1. Numpy arrays of shape `(time, bank_size)` representing individual audio
samples. This is used for decoding
2. Tuples of `(audio_sample, label_seq)`, where `label_seq` is an
array-like of shape `(num_labels,)` whose values are the label sequence.
This is used for training
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import listdir
from os import makedirs
from os.path import isdir
from os.path import join
from sys import stderr
import keras.backend as K
import numpy as np
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ProgbarLogger
from keras.callbacks import TensorBoard
from keras.layers import Conv2D
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers.merge import Maximum
from keras.layers.wrappers import TimeDistributed
from keras.models import Model
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.regularizers import l2
from pydrobert.signal.post import CMVN
from pydrobert.signal.post import Deltas
__author__ = "Sean Robertson"
__email__ = "sdrobert@cs.toronto.edu"
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2017 Sean Robertson"
class ConvCTC(object):
    '''Fully convolutional CTC

    Architecture is based off [1]_ entirely.

    Input are expected to be of the form described in module docstring

    Parameters
    ----------
    input_shape : tuple
        Tuple of `(max_time_steps, bank_size)`. `bank_size` needs to be
        set, but not `max_time_steps`.
    num_labels : int
        Number of unique labels, including the blank
    num_deltas : int, optional
        The number of deltas to calculate on the audio. Deltas will be
        appended on the second axis
    weight_dir : str, optional
        Path to where weights are/will be stored. Default is to store no
        weights
    fine_tuning : bool, optional
        Whether the model has entered the 'fine-tuning' stage of
        training
    tensorboard_dir : str, optional
        If set to a valid path and the Keras backend is Tensorflow,
        writes Tensorboard logs to that path
    cmvn_path : str, optional
        If set, CMVN stats will be drawn from this file and applied to
        every utterance
    concatenate : bool
        Whether deltas are concatenated along the frequency axis
        (`True`), or given their own channels (`False`)
    double_weights : bool
        If enabled, feature maps and hidden units are doubled to combat
        the ambiguity in the weight counts of [1]_

    Attributes
    ----------
    input_shape : tuple
    num_labels : int
    weight_dir : str or None
    fine_tuning : bool
    tensorboard_dir : str or None

    .. [1] Zhang, Y et al. "Towards End-to-End Speech Recognition with Deep
       Convolutional Neural Networks" https://arxiv.org/abs/1701.02720
    '''

    def __init__(
            self, input_shape, num_labels, num_deltas=0,
            weight_dir=None, fine_tuning=False, tensorboard_dir=None,
            cmvn_path=None, concatenate=True, double_weights=False):
        if not input_shape[1]:
            raise ValueError('bank size must be fixed')
        if K.image_dim_ordering() != 'tf':
            # not sure if I'm right, but I think the TimeDistributed
            # wrapper will always take axis 1, which could be the
            # channel axis in Theano
            raise ValueError('dimensions must be tensorflow-ordered')
        if weight_dir is not None and not isdir(weight_dir):
            makedirs(weight_dir)
        if tensorboard_dir is not None:
            if K.backend() != 'tensorflow':
                print(
                    'Ignoring tensorboard_dir setting. Backend is not '
                    'tensorflow',
                    file=stderr
                )
                tensorboard_dir = None
            elif not isdir(tensorboard_dir):
                makedirs(tensorboard_dir)
        self._tensorboard_dir = tensorboard_dir
        self._weight_dir = weight_dir
        self._num_labels = num_labels
        self._input_shape = input_shape
        self._fine_tuning = fine_tuning
        if num_deltas:
            self._deltas = Deltas(num_deltas, concatenate=concatenate)
        else:
            self._deltas = None
        # graph handles, populated by _construct_acoustic_model
        self._audio_input = None
        self._audio_size_input = None
        self._label_input = None
        self._label_size_input = None
        self._activation_layer = None
        self._acoustic_model = None
        self._double_weights = double_weights
        if cmvn_path:
            self._cmvn = CMVN(cmvn_path, dtype='bm')
        else:
            # NOTE(review): an argument-less CMVN is presumably inert/falsy
            # until stats are accumulated -- confirm against pydrobert.signal
            self._cmvn = CMVN()
        # constants or initial settings based on paper
        self._filt_size = (5, 3)  # time first, unlike paper
        self._pool_size = (1, 3)
        self._dropout_p = 0.3
        # I asked the first author about this. To keep the number of
        # parameters constant for maxout, she halved the values she
        # reported in the paper
        self._initial_filts = 128 // (1 if double_weights else 2)
        self._dense_size = 1024 // (1 if double_weights else 2)
        self._layer_kwargs = {
            'activation': 'linear',
            'kernel_initializer' : 'uniform',
        }
        if self._fine_tuning:
            # l2 weight decay only during fine-tuning
            self._layer_kwargs['kernel_regularizer'] = l2(l=1e-5)
        self._construct_acoustic_model()
        self._past_epochs = 0
        self._acoustic_model.summary()
        super(ConvCTC, self).__init__()
    @property
    def input_shape(self):
        '''tuple : the `(max_time_steps, bank_size)` input shape'''
        return self._input_shape

    @property
    def num_labels(self):
        '''int : number of unique labels, including the blank'''
        return self._num_labels

    @property
    def weight_dir(self):
        '''str or None : where weights are/will be stored'''
        return self._weight_dir

    @property
    def fine_tuning(self):
        '''bool : whether in the fine-tuning stage of training'''
        return self._fine_tuning

    @property
    def tensorboard_dir(self):
        '''str or None : where tensorboard logs are written'''
        return self._tensorboard_dir
def fit_generator(
self, train_generator, train_samples_per_epoch,
val_generator=None, val_samples_per_epoch=None,
batch_size=20, max_epochs=1000000, early_stopping=None):
'''Fit the acoustic model to data from generators
Parameters
----------
train_generator : generator
An infinitely cycling generator of audio input and labels to train
train_samples_per_epoch : int
How many unique samples are generated
val_generator : generator, optional
Validation set generator. If not set, training loss is used to
keep track of best/early stopping
val_samples_per_epoch : int, optional
Must be set if `val_generator` is set
batch_size : int, optional
Size of minibatches. Decrease if running out of memory
max_epochs : int, optional
The maximum number of epochs to run for. Can be fewer if early
stopping is enabled. The default is a million epochs (practically
infinite)
early_stopping : int or None
If `None`, early stopping is disabled. Otherwise this value is used
as the `patience` parameter for early stopping
'''
val_monitored = None
if val_generator or val_samples_per_epoch:
if None in (val_generator, val_samples_per_epoch):
raise ValueError(
'Either both val_generator and val_samples_per_epoch '
'must be set, or neither'
)
val_monitored = 'val_loss'
val_generator = _training_wrapper(
val_generator,
batch_size,
val_samples_per_epoch,
self._num_labels,
self._deltas,
self._cmvn,
)
else:
print(
'Monitoring training loss instead of validation loss. Not '
'recommended.', file=stderr)
val_monitored = 'loss'
train_generator = _training_wrapper(
train_generator,
batch_size,
train_samples_per_epoch,
self._num_labels,
self._deltas,
self._cmvn,
)
callbacks = []
optimizer = None
if self._fine_tuning:
optimizer = SGD(lr=1e-5, clipvalue=1.0)
else:
optimizer = Adam(lr=1e-4, clipvalue=1.0)
if self._weight_dir:
file_regex = join(
self._weight_dir,
'weights.{epoch:03d}.{' + val_monitored + ':07.2f}.hdf5',
)
callbacks.append(ModelCheckpoint(
filepath=file_regex, monitor=val_monitored,
save_weights_only=True, save_best_only=False,
period=1,
))
if self._tensorboard_dir:
callbacks.append(TensorBoard(
log_dir=self._tensorboard_dir, write_graph=False,
batch_size=batch_size))
if early_stopping is not None:
callbacks.append(EarlyStopping(
monitor=val_monitored,
patience=early_stopping,
mode='min',
min_delta=.1,
))
self._acoustic_model.compile(
loss={'ctc_loss' : lambda y_true, y_pred: y_pred},
optimizer=optimizer,
)
self._load_weights()
self._acoustic_model.fit_generator(
train_generator,
(train_samples_per_epoch + batch_size - 1) // batch_size,
max_epochs,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=(val_samples_per_epoch + batch_size - 1) // batch_size,
initial_epoch=self._past_epochs,
)
    def decode_once(self, audio, beam_width=100):
        '''Decode a sample of audio using beam search

        Parameters
        ----------
        audio : array-like
            2D array of shape `(time, freq)`
        beam_width : int

        Returns
        -------
        tuple
            A tuple of integer label ids
        '''
        self._load_weights(for_decoding=True)
        decoder = self._construct_decoder(beam_width)
        # apply the same preprocessing as training
        if self._cmvn:
            audio = self._cmvn.apply(audio, axis=1, in_place=True)
        if self._deltas:
            audio = self._deltas.apply(audio, axis=0, in_place=True)
        # batch of one; the trailing 0 is the keras learning phase (test)
        length_batch = np.asarray([[audio.shape[0]]], dtype=np.int32)
        ret_labels = decoder(
            [audio[np.newaxis, :, :, np.newaxis], length_batch, 0]
        )[0][0]
        # -1 marks padding in the dense decoded output
        return tuple(int(idee) for idee in ret_labels if idee != -1)
    def decode_generator(self, generator, num_samples, beam_width=100):
        '''Decode audio samples from a generator using beam search

        Parameters
        ----------
        generator : generator
            Generators can follow the same style as in `fit_generator` (the
            label tensors are discarded), or can contain the audio
            samples/batches by themselves
        num_samples : int, optional
            Stop after this number of samples. The generator is expected to
            produce infinitely, so this will be after num_samples
        beam_width : int

        Yields
        ------
        tuple or None
            Sequences of labels, one sample at a time
        '''
        self._load_weights(for_decoding=True)
        decoder = self._construct_decoder(beam_width)
        # wrapper yields singleton batches with preprocessing applied
        wrapper = _decoding_wrapper(generator, self._deltas, self._cmvn)
        for _ in range(num_samples):
            cur_batch = next(wrapper)
            # trailing 0 is the keras learning phase (test mode)
            ret_labels = decoder(cur_batch + [0])[0][0]
            yield tuple(int(idee) for idee in ret_labels if idee != -1)
    def _construct_acoustic_model(self):
        # construct acoustic model

        # convolutional layer pattern
        def _conv_maxout_layer(last_layer, n_filts, name_prefix, dropout=True):
            # two parallel convolutions merged with an element-wise max
            # (maxout)
            conv_a = Conv2D(
                n_filts, self._filt_size,
                padding='same',
                name=name_prefix + '_a', **self._layer_kwargs
            )(last_layer)
            conv_b = Conv2D(
                n_filts, self._filt_size,
                padding='same',
                name=name_prefix + '_b', **self._layer_kwargs
            )(last_layer)
            last = Maximum(name=name_prefix + '_m')([conv_a, conv_b])
            # pre-weights (i.e. post max), as per
            # http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf
            if dropout:
                last = Dropout(self._dropout_p, name=name_prefix + '_d')(last)
            return last
        n_filts = self._initial_filts
        # inputs
        audio_input_shape = [self._input_shape[0], self._input_shape[1], 1]
        if self._deltas:
            if self._deltas.concatenate:
                # deltas are appended along the frequency axis
                audio_input_shape[1] *= self._deltas.num_deltas + 1
            else:
                # deltas get their own input channels
                audio_input_shape[2] *= self._deltas.num_deltas + 1
        self._audio_input = Input(
            shape=audio_input_shape, name='audio_in')
        self._audio_size_input = Input(shape=(1,), name='audio_size_in')
        self._label_input = Input(shape=(None,), name='label_in')
        self._label_size_input = Input(shape=(1,), name='label_size_in')
        last_layer = self._audio_input
        # convolutional layers
        last_layer = _conv_maxout_layer(
            last_layer, n_filts, 'conv_1', dropout=False)
        # first conv block pools (over frequency) before its dropout
        last_layer = MaxPooling2D(
            pool_size=self._pool_size, name='conv_1_p')(last_layer)
        last_layer = Dropout(self._dropout_p, name='conv_1_d')(last_layer)
        for layer_no in range(2, 11):
            if layer_no == 5:
                # double the feature maps halfway up the stack
                n_filts *= 2
            last_layer = _conv_maxout_layer(
                last_layer, n_filts, 'conv_{}'.format(layer_no))
        # collapse the frequency axis by taking a per-channel max
        last_layer = Lambda(
            lambda layer: K.max(layer, axis=2),
            output_shape=(
                self._input_shape[0],
                n_filts,
            ),
            name='max_freq_into_channel',
        )(last_layer)
        # dense layers (maxout pairs, time-distributed over frames)
        for layer_no in range(1, 4):
            name_prefix = 'dense_{}'.format(layer_no)
            dense_a = Dense(
                self._dense_size, name=name_prefix + '_a',
                **self._layer_kwargs
            )
            dense_b = Dense(
                self._dense_size, name=name_prefix + '_b',
                **self._layer_kwargs
            )
            td_a = TimeDistributed(
                dense_a, name=name_prefix + '_td_a')(last_layer)
            td_b = TimeDistributed(
                dense_b, name=name_prefix + '_td_b')(last_layer)
            last_layer = Maximum(name=name_prefix + '_m')([td_a, td_b])
            last_layer = Dropout(
                self._dropout_p, name=name_prefix + '_d'
            )(last_layer)
        # per-frame label activations (linear; softmax happens in the ctc
        # ops where required)
        activation_dense = Dense(
            self._num_labels, name='dense_activation',
            **self._layer_kwargs
        )
        self._activation_layer = TimeDistributed(
            activation_dense, name='dense_activation_td')(last_layer)
        # we take a page from the image_ocr example and treat the ctc as a
        # lambda layer.
        self._loss_layer = Lambda(
            lambda args: _ctc_loss(*args),
            output_shape=(1,), name='ctc_loss'
        )([
            self._label_input,
            self._activation_layer,
            self._audio_size_input,
            self._label_size_input
        ])
        self._acoustic_model = Model(
            inputs=[
                self._audio_input,
                self._label_input,
                self._audio_size_input,
                self._label_size_input,
            ],
            outputs=[self._loss_layer],
        )
    def _construct_decoder(self, beam_width):
        # backend function: (audio batch, audio sizes, learning phase) ->
        # decoded label sequences, from the trained activation layer
        label_out = _ctc_decode(
            self._activation_layer, self._audio_size_input,
            beam_width=beam_width
        )
        decoder = K.function(
            [self._audio_input, self._audio_size_input, K.learning_phase()],
            label_out,
        )
        return decoder
    def _load_weights(self, for_decoding=False):
        # pick and load a stored weight file, and recover how many epochs
        # have already been trained (into self._past_epochs)
        to_load = None
        self._past_epochs = 0
        if self._weight_dir:
            # weight filename format is `weights.epoch.val_loss.hdf5`
            # NOTE(review): every file in weight_dir is assumed to follow
            # this format; a stray file raises ValueError on parsing
            if for_decoding:
                # load the weights with the lowest validation loss
                min_loss = float('inf')
                for name in listdir(self._weight_dir):
                    # the loss may itself contain a '.', hence the re-join
                    cur_loss = float(str.join('.', name.split('.')[2:-1]))
                    cur_epoch = int(name.split('.')[1]) + 1
                    if cur_loss < min_loss:
                        to_load = name
                        min_loss = cur_loss
                    self._past_epochs = max(cur_epoch, self._past_epochs)
            else:
                # load the last set of stored weights
                for name in listdir(self._weight_dir):
                    cur_epoch = int(name.split('.')[1]) + 1
                    if cur_epoch > self._past_epochs:
                        self._past_epochs = cur_epoch
                        to_load = name
        if to_load:
            self._acoustic_model.load_weights(join(self._weight_dir, to_load))
        else:
            print('No weights to load!', file=stderr)
def _decoding_wrapper(wrapped, deltas, cmvn):
# samples are tuples of (audio, audio_len). Return batches of size 1
while True:
elem = next(wrapped)
audio = None
if isinstance(elem, np.ndarray):
audio = elem
else:
audio = elem[0] # assume tuple w/ index 1 the label
if cmvn:
audio = cmvn.apply(audio, axis=1, in_place=True)
if deltas:
audio = deltas.apply(audio, axis=0, in_place=True)
length_batch = np.asarray([[audio.shape[0]]], dtype=np.int32)
inputs = [audio[np.newaxis, :, :, np.newaxis], length_batch]
yield inputs
del audio, length_batch
def _training_wrapper(
        wrapped, max_batch_size, epoch_size, blank_label, deltas, cmvn):
    '''Generate padded (inputs, outputs) batch dicts for keras training

    ``wrapped`` yields (audio, label_seq) pairs; this generator collates
    them into batches keyed by the model's named input layers. Batch sizes
    are chosen so whole batches consume exactly ``epoch_size`` samples per
    epoch (the final batch of an epoch may be smaller).
    '''
    # NOTE(review): each element of ``wrapped`` is unpacked below as a
    # 2-tuple (audio, label_seq), not a 4-tuple
    elems = [next(wrapped)]
    bank_size = elems[0][0].shape[1]
    remainder = epoch_size
    while True:
        batch_size = min(remainder, max_batch_size)
        # the CTC loss is computed in-graph, so keras targets are dummies
        dummy_y_true = np.zeros((batch_size,), dtype=K.floatx())
        while len(elems) < batch_size:
            elems.append(next(wrapped))
        max_time_steps = max(elem[0].shape[0] for elem in elems)
        max_label_length = max(len(elem[1]) for elem in elems)
        batch_shape = [batch_size, max_time_steps, bank_size, 1]
        if deltas:
            # deltas either widen the filter-bank axis or add channels
            if deltas.concatenate:
                batch_shape[2] *= deltas.num_deltas + 1
            else:
                batch_shape[3] *= deltas.num_deltas + 1
        audios = np.empty(batch_shape, dtype=K.floatx())
        # should be ints, but I think they're cast by keras to floats.
        # Might as well not double dip
        label_seqs = np.ones((batch_size, max_label_length), dtype=K.floatx())
        label_seqs *= blank_label
        audio_lengths = np.empty((batch_size, 1), dtype=K.floatx())
        label_lengths = np.empty((batch_size, 1), dtype=K.floatx())
        for samp_idx, (audio, label_seq) in enumerate(elems):
            # edge-pad each sample up to the longest utterance in the batch
            audio = np.pad(
                audio, ((0, max_time_steps - audio.shape[0]), (0, 0)), 'edge')
            if cmvn:
                audio = cmvn.apply(audio, axis=1, in_place=True)
            if deltas:
                audio = deltas.apply(audio, axis=0, in_place=True)
            audios[samp_idx, :, :, :] = audio.reshape(batch_shape[1:])
            # NOTE(review): recorded after padding, so this is always
            # max_time_steps rather than the true length -- confirm intended
            audio_lengths[samp_idx, 0] = audio.shape[0]
            label_seqs[samp_idx, :len(label_seq)] = label_seq
            label_lengths[samp_idx, 0] = len(label_seq)
        inputs = {
            'audio_in' : audios,
            'audio_size_in' : audio_lengths,
            'label_in' : label_seqs,
            'label_size_in' : label_lengths,
        }
        outputs = {
            'ctc_loss' : dummy_y_true,
        }
        elems = []
        remainder -= batch_size
        if not remainder:
            remainder = epoch_size
        yield inputs, outputs
        # drop references so the batch arrays can be freed between yields
        del audios, label_seqs, audio_lengths, label_lengths, dummy_y_true
def _dft_ctc_loss(y_true, y_pred, input_length, label_length):
    '''CTC loss via the keras backend (non-tensorflow backends)'''
    # keras impl assumes softmax then log hasn't been performed yet. In tf.nn,
    # it has
    # NOTE(review): deliberately disabled; the code below is unreachable
    # until the softmax/log convention mismatch is resolved
    assert False, "fixme"
    sm_y_pred = K.softmax(y_pred)
    cost = K.ctc_batch_cost(y_true, sm_y_pred, input_length, label_length)
    return cost
def _dft_ctc_decode(y_pred, input_length, beam_width=100):
    '''CTC beam-search decode via the keras backend (non-tf backends)'''
    # NOTE(review): deliberately disabled; unreachable until the softmax
    # convention mismatch is resolved (see _dft_ctc_loss)
    assert False, "fixme"
    sm_y_pred = K.softmax(y_pred)
    return K.ctc_decode(
        sm_y_pred, K.flatten(input_length),
        beam_width=beam_width, greedy=False, top_paths=1)[0][0]
def _tf_dft_ctc_decode(y_pred, input_length, beam_width=100):
    '''Beam-search decode activations with tf's built-in CTC decoder

    Returns a one-element tuple holding the best path per sample as a
    dense tensor padded with -1.
    '''
    import tensorflow as tf
    # NOTE(review): tf.to_int32 / tf.sparse_to_dense are TF1-era APIs;
    # this module targets the TF1 keras backend
    input_length = tf.to_int32(tf.reshape(input_length, [-1]))
    # the decoder expects time-major activations
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    (decoded,), _ = tf.nn.ctc_beam_search_decoder(
        inputs=y_pred,
        sequence_length=input_length,
        beam_width=beam_width,
        top_paths=1,
    )
    decoded_dense = tf.sparse_to_dense(
        decoded.indices, decoded.dense_shape, decoded.values, default_value=-1)
    return (decoded_dense,)
def _tf_ctc_dense_to_sparse(y_true, label_lengths):
    '''Convert padded dense label batches to the SparseTensor tf.nn wants'''
    import tensorflow as tf
    # y_true (batch_size, max_seq_length)
    # label_lengths (batch_size,)
    dense_shape = tf.shape(y_true)
    # mask out padding past each sequence's true length
    dense_mask = tf.sequence_mask(label_lengths, dense_shape[1])
    sparse_values = tf.boolean_mask(tf.to_int32(y_true), dense_mask)
    sparse_indices = tf.where(dense_mask)
    return tf.SparseTensor(
        sparse_indices, sparse_values, tf.to_int64(dense_shape))
def _tf_dft_ctc_loss(y_true, y_pred, input_length, label_length):
    '''CTC loss via tf.nn.ctc_loss (takes pre-softmax activations)'''
    import tensorflow as tf
    # replicate the logic in ctc_batch_cost, sans log and pre softmax
    label_length = tf.to_int32(tf.reshape(label_length, [-1]))
    input_length = tf.to_int32(tf.reshape(input_length, [-1]))
    sparse_labels = _tf_ctc_dense_to_sparse(y_true, label_length)
    # tf.nn.ctc_loss expects time-major activations
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    loss = tf.nn.ctc_loss(
        inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
    # one loss value per sample, shaped (batch_size, 1)
    return tf.expand_dims(loss, 1)
def _tf_warp_ctc_loss(y_true, y_pred, input_length, label_length):
    '''CTC loss routed to Baidu's warp-ctc kernel via a kernel-label remap'''
    import tensorflow as tf
    import warpctc_tensorflow
    # replicate the logic in ctc_batch_cost, sans log and pre softmax
    label_length = tf.to_int32(tf.reshape(label_length, [-1]))
    input_length = tf.to_int32(tf.reshape(input_length, [-1]))
    sparse_labels = _tf_ctc_dense_to_sparse(y_true, label_length)
    y_pred = tf.transpose(y_pred, perm=[1, 0, 2])
    # swap the CTCLoss op's kernel for the WarpCTC kernel registered by
    # warpctc_tensorflow (relies on the private _kernel_label_map API)
    with K.get_session().graph._kernel_label_map({"CTCLoss": "WarpCTC"}):
        loss = tf.nn.ctc_loss(
            inputs=y_pred, labels=sparse_labels, sequence_length=input_length)
    return tf.expand_dims(loss, 1)
# Select the CTC loss/decode implementations for the active keras backend.
# With tensorflow, prefer Baidu's warp-ctc bindings when installed.
# (fix: corrected "installled" typo in the fallback message)
if K.backend() == 'tensorflow':
    try:
        import warpctc_tensorflow
        _ctc_loss = _tf_warp_ctc_loss
    except ImportError:
        print('Warp-ctc not installed. Using built-in ctc', file=stderr)
        _ctc_loss = _tf_dft_ctc_loss
    _ctc_decode = _tf_dft_ctc_decode
else:
    _ctc_loss = _dft_ctc_loss
    _ctc_decode = _dft_ctc_decode
| 23,792 | 37.687805 | 84 | py |
flores | flores-main/shared_tasks/dynalab/handler.py | # Copyright (c) Facebook, Inc. and its affiliates.
import json
import logging
import time
import os
from pathlib import Path
import fairseq.checkpoint_utils
import sentencepiece
import torch
from typing import NamedTuple
from dynalab.handler.base_handler import BaseDynaHandler
from dynalab.tasks.flores_small1 import TaskIO
from fairseq.sequence_generator import SequenceGenerator
from fairseq.tasks.translation import TranslationConfig, TranslationTask
from fairseq.data import data_utils
# module-level logger shared by the handler process
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Tell Torchserve to let us do the deserialization
os.environ["TS_DECODE_INPUT_REQUEST"] = "false"
def mapping(languages: str) -> dict:
    """Parse a comma-separated ``src:tgt`` listing into a dict.

    Newlines and surrounding whitespace are tolerated so the listing can
    be written as a readable multi-line string.
    """
    flattened = languages.strip().replace("\n", "")
    return {key: value for key, value in
            (pair.split(":") for pair in flattened.split(","))}
ISO2M100 = mapping(
"""
afr:af,amh:am,ara:ar,asm:as,ast:ast,azj:az,bel:be,ben:bn,bos:bs,bul:bg,
cat:ca,ceb:ceb,ces:cs,ckb:ku,cym:cy,dan:da,deu:de,ell:el,eng:en,est:et,
fas:fa,fin:fi,fra:fr,ful:ff,gle:ga,glg:gl,guj:gu,hau:ha,heb:he,hin:hi,
hrv:hr,hun:hu,hye:hy,ibo:ig,ind:id,isl:is,ita:it,jav:jv,jpn:ja,kam:kam,
kan:kn,kat:ka,kaz:kk,kea:kea,khm:km,kir:ky,kor:ko,lao:lo,lav:lv,lin:ln,
lit:lt,ltz:lb,lug:lg,luo:luo,mal:ml,mar:mr,mkd:mk,mlt:mt,mon:mn,mri:mi,
msa:ms,mya:my,nld:nl,nob:no,npi:ne,nso:ns,nya:ny,oci:oc,orm:om,ory:or,
pan:pa,pol:pl,por:pt,pus:ps,ron:ro,rus:ru,slk:sk,slv:sl,sna:sn,snd:sd,
som:so,spa:es,srp:sr,swe:sv,swh:sw,tam:ta,tel:te,tgk:tg,tgl:tl,tha:th,
tur:tr,ukr:uk,umb:umb,urd:ur,uzb:uz,vie:vi,wol:wo,xho:xh,yor:yo,zho_simp:zh,
zho_trad:zh,zul:zu
"""
)
class FakeGenerator:
    """Sequence-generator stand-in that echoes the input tokens.

    Produces one single-hypothesis list per sample, dropping the final
    (EOS) token, so the BPE round trip can be tested without a model.
    """

    def generate(self, models, sample, prefix_tokens=None):
        echoed = []
        for tokens in sample["net_input"]["src_tokens"]:
            echoed.append([{"tokens": tokens[:-1]}])
        return echoed
class Handler(BaseDynaHandler):
    """Use Fairseq model for translation.

    To use this handler, download one of the Flores pretrained models:
    615M parameters:
        https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz
    175M parameters:
        https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz
    and extract the files next to this one.
    Notably there should be a "dict.txt" and a "sentencepiece.bpe.model".
    """

    def initialize(self, context):
        """
        Load the model, dictionary and sentencepiece model.
        """
        logger.info(
            f"Will initialize with system_properties: {context.system_properties}"
        )
        model_pt_path, model_file_dir, device = self._handler_initialize(context)
        config = json.loads(
            (Path(model_file_dir) / "model_generation.json").read_text()
        )
        self.device = device
        translation_cfg = TranslationConfig()
        self.vocab = TranslationTask.load_dictionary("dict.txt")
        self.spm = sentencepiece.SentencePieceProcessor()
        self.spm.Load("sentencepiece.bpe.model")
        logger.info("Loaded sentencepiece.bpe.model")

        if config.get("dummy", False):
            # BPE-only smoke-test mode: no checkpoint is loaded
            self.sequence_generator = FakeGenerator()
            logger.warning("Will use a FakeGenerator model, only testing BPE")
        else:
            task = TranslationTask(translation_cfg, self.vocab, self.vocab)
            [model], cfg = fairseq.checkpoint_utils.load_model_ensemble(
                [model_pt_path], task=task
            )
            model.eval().to(self.device)
            logger.info(f"Loaded model from {model_pt_path} to device {self.device}")
            logger.info(
                f"Will use the following config: {json.dumps(config, indent=4)}"
            )
            self.sequence_generator = SequenceGenerator(
                [model],
                tgt_dict=self.vocab,
                beam_size=config.get("beam_size", 1),
                max_len_a=config.get("max_len_a", 1.3),
                max_len_b=config.get("max_len_b", 5),
                min_len=config.get("min_len", 5),
            )

        self.taskIO = TaskIO()
        self.initialized = True

    def lang_token(self, lang: str) -> int:
        """Converts the ISO 639-3 language code to MM100 language codes."""
        simple_lang = ISO2M100[lang]
        token = self.vocab.index(f"__{simple_lang}__")
        assert token != self.vocab.unk(), f"Unknown language '{lang}' ({simple_lang})"
        return token

    def tokenize(self, line: str) -> list:
        """BPE-encode a line and map the pieces to dictionary indices."""
        words = self.spm.EncodeAsPieces(line.strip())
        tokens = [self.vocab.index(word) for word in words]
        return tokens

    def preprocess_one(self, sample) -> dict:
        """
        Preprocess one sample into the token/length fields the model needs.
        (fix: removed an unreachable ``return sample`` that followed the
        return statement below)
        """
        # TODO: this doesn't seem to produce good results. wrong EOS / BOS ?
        tokens = self.tokenize(sample["sourceText"])
        src_token = self.lang_token(sample["sourceLanguage"])
        tgt_token = self.lang_token(sample["targetLanguage"])
        return {
            "src_tokens": [src_token] + tokens + [self.vocab.eos()],
            "src_length": len(tokens) + 1,
            "tgt_token": tgt_token,
        }

    def preprocess(self, samples) -> dict:
        """Collate a list of samples into one padded fairseq batch."""
        samples = [self.preprocess_one(s) for s in samples]
        prefix_tokens = torch.tensor([[s["tgt_token"]] for s in samples])
        src_lengths = torch.tensor([s["src_length"] for s in samples])
        src_tokens = data_utils.collate_tokens(
            [torch.tensor(s["src_tokens"]) for s in samples],
            self.vocab.pad(),
            self.vocab.eos(),
        )
        return {
            "nsentences": len(samples),
            "ntokens": src_lengths.sum().item(),
            "net_input": {
                "src_tokens": src_tokens.to(self.device),
                "src_lengths": src_lengths.to(self.device),
            },
            "prefix_tokens": prefix_tokens.to(self.device),
        }

    def strip_pad(self, sentence):
        """Drop padding tokens from a 1-D token tensor."""
        assert sentence.ndim == 1
        return sentence[sentence.ne(self.vocab.pad())]

    @torch.no_grad()
    def inference(self, input_data: dict) -> list:
        """Generate the best hypothesis per sample."""
        generated = self.sequence_generator.generate(
            models=[],
            sample=input_data,
            prefix_tokens=input_data["prefix_tokens"],
        )
        # `generate` returns a list of samples
        # with several hypothesis per sample
        # and a dict per hypothesis.
        # We also need to strip the language token.
        return [hypos[0]["tokens"][1:] for hypos in generated]

    def postprocess(self, inference_output, samples: list) -> list:
        """
        post process inference output into a response.
        response should be a list of json
        the response format will need to pass the validation in
        ```
        dynalab.tasks.flores_small1.TaskIO().verify_response(response)
        ```
        """
        translations = [
            self.vocab.string(self.strip_pad(sentence), "sentencepiece")
            for sentence in inference_output
        ]
        return [
            # Signing required by dynabench, don't remove.
            self.taskIO.sign_response(
                {"id": sample["uid"], "translatedText": translation},
                sample,
            )
            for translation, sample in zip(translations, samples)
        ]
_service = Handler()
def deserialize(torchserve_data: list) -> list:
    """Decode torchserve request bodies into a flat list of JSON samples.

    Each body is either an already-decoded dict or raw bytes holding one
    JSON object per line; undecodable lines are logged and skipped.
    """
    samples = []
    for request in torchserve_data:
        body = request["body"]
        if isinstance(body, dict):
            # torchserve already decoded the payload for us
            samples.append(body)
        elif isinstance(body, (bytes, bytearray)):
            for i, line in enumerate(body.decode("utf-8").splitlines()):
                try:
                    samples.append(json.loads(line))
                except Exception as e:
                    logging.error(f"Couldn't deserialize line {i}: {line}")
                    logging.exception(e)
        else:
            logging.error(f"Unexpected payload: {body}")
    return samples
def handle_mini_batch(service, samples):
    """Run preprocess -> inference -> postprocess on one mini-batch,
    logging the throughput of each stage."""
    n = len(samples)
    t0 = time.time()
    input_data = service.preprocess(samples)
    logger.info(
        f"Preprocessed a batch of size {n} ({n/(time.time()-t0):.2f} samples / s)"
    )
    t0 = time.time()
    output = service.inference(input_data)
    logger.info(
        f"Infered a batch of size {n} ({n/(time.time()-t0):.2f} samples / s)"
    )
    t0 = time.time()
    json_results = service.postprocess(output, samples)
    logger.info(
        f"Postprocessed a batch of size {n} ({n/(time.time()-t0):.2f} samples / s)"
    )
    return json_results
def handle(torchserve_data, context):
    """Torchserve entry point: deserialize, translate mini-batches of at
    most ``batch_size`` samples, then serialize one response line each."""
    if not _service.initialized:
        _service.initialize(context)
    if torchserve_data is None:
        return None

    t0 = time.time()
    all_samples = deserialize(torchserve_data)
    n = len(all_samples)
    logger.info(
        f"Deserialized a batch of size {n} ({n/(time.time()-t0):.2f} samples / s)"
    )
    # Adapt this to your model. The GPU has 16Gb of RAM.
    batch_size = 128
    results = []
    for start in range(0, n, batch_size):
        chunk = all_samples[start:start + batch_size]
        results.extend(handle_mini_batch(_service, chunk))
    assert len(results)
    t0 = time.time()
    response = "\n".join(json.dumps(r, indent=None, ensure_ascii=False) for r in results)
    logger.info(
        f"Serialized a batch of size {n} ({n/(time.time()-t0):.2f} samples / s)"
    )
    return [response]
def local_test():
    """Smoke-test the handler end to end on the bundled sample data,
    checking that batched and one-at-a-time calls agree."""
    from dynalab.tasks import flores_small1

    class Context(NamedTuple):
        system_properties: dict
        manifest: dict

    ctx = Context(
        {"model_dir": ".", "gpu_id": None},
        {"model": {"serializedFile": "model.pt"}},
    )
    payload = b"\n".join(
        json.dumps(d).encode("utf-8") for d in flores_small1.data
    )
    batch_responses = handle([{"body": payload}], ctx)
    print(batch_responses)
    single_responses = [
        handle([{"body": json.dumps(d).encode("utf-8")}], ctx)[0]
        for d in flores_small1.data
    ]
    assert batch_responses == ["\n".join(single_responses)]
# allow running the handler standalone as a local smoke test
if __name__ == "__main__":
    local_test()
| 10,595 | 33.514658 | 94 | py |
flores | flores-main/previous_releases/floresv1/scripts/translate.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import torch
from subprocess import check_call, check_output
from glob import glob
from tempfile import NamedTemporaryFile as TempFile
import time
import subprocess
import multiprocessing as mp
from utils import check_last_line, count_line
import tqdm
def translate_files_slurm(args, cmds, expected_output_files):
    '''Submit each translation command as a slurm job and block until done

    Completion is detected by polling for the "finished" marker each
    command appends to its output file (see translate_files).
    '''
    # NOTE(review): hard-coded user-specific conda env; parameterize this
    # before running outside the original author's cluster
    conda_env = '/private/home/pipibjc/.conda/envs/fairseq-20190509'
    for cmd in cmds:
        with TempFile('w') as script:
            sh = f"""#!/bin/bash
            source activate {conda_env}
            {cmd}
            """
            print(sh)
            script.write(sh)
            script.flush()
            # one GPU per job, plus a couple of extra CPUs for data workers
            cmd = f"sbatch --gres=gpu:1 -c {args.cpu + 2} {args.sbatch_args} --time=15:0:0 {script.name}"
            import sys
            print(cmd, file=sys.stderr)
            check_call(cmd, shell=True)
    # wait for all outputs has finished
    num_finished = 0
    while num_finished < len(expected_output_files):
        num_finished = 0
        for output_file in expected_output_files:
            num_finished += 1 if check_finished(output_file) else 0
        if num_finished < len(expected_output_files):
            # poll every three minutes
            time.sleep(3 * 60)
            print("sleeping for 3m ...")
def check_finished(output_file):
    # A translation log is complete once its command appended "finished"
    # as the last line (see the command template in translate_files).
    return check_last_line(output_file, "finished")
def get_output_file(dest_dir, file):
    """Return the log-file path in *dest_dir* for the given input chunk."""
    base = os.path.basename(file)
    return f"{dest_dir}/{base}.log"
def translate(arg_list):
    '''Worker: claim a GPU id from the shared queue, run one command on it,
    then return the id to the queue for the next worker.'''
    (q, cmd) = arg_list
    i = q.get()
    # set the env var for this worker process and also bake it into the
    # command line for the spawned shell
    os.environ['CUDA_VISIBLE_DEVICES']=str(i)
    cmd = f"CUDA_VISIBLE_DEVICES={i} {cmd}"
    print(f"executing:\n{cmd}")
    check_call(cmd, shell=True)
    q.put(i)
def translate_files_local(args, cmds):
    '''Run the translation commands locally, one worker per visible GPU'''
    m = mp.Manager()
    # shared queue of free GPU ids; each worker checks one out per command
    gpu_queue = m.Queue()
    for i in args.cuda_visible_device_ids:
        gpu_queue.put(i)
    with mp.Pool(processes=len(args.cuda_visible_device_ids)) as pool:
        # imap_unordered + tqdm gives a progress bar over finished commands
        for _ in tqdm.tqdm(pool.imap_unordered(translate, [(gpu_queue, cmd) for cmd in cmds]), total=len(cmds)):
            pass
def translate_files(args, dest_dir, input_files):
    '''Build one fairseq-interactive command per input chunk and dispatch
    them to the configured backend (local GPUs or slurm).

    Each command appends "finished" to its log so completed chunks can be
    skipped on re-runs (see check_finished).
    '''
    cmd_template = f"""fairseq-interactive \
        {args.databin} \
        --source-lang {args.source_lang} --target-lang {args.target_lang} \
        --path {args.model} \
        --lenpen {args.lenpen} \
        --max-len-a {args.max_len_a} \
        --max-len-b {args.max_len_b} \
        --buffer-size {args.buffer_size} \
        --max-tokens {args.max_tokens} \
        --num-workers {args.cpu} > {{output_file}} && \
        echo "finished" >> {{output_file}}
    """
    cmds = []
    expected_output_files = []
    for input_file in input_files:
        output_file = get_output_file(dest_dir, input_file)
        cmds.append(f"cat {input_file} | " + cmd_template.format(output_file=output_file))
        expected_output_files.append(output_file)
    if args.backend == 'local':
        translate_files_local(args, cmds)
    elif args.backend == 'slurm':
        translate_files_slurm(args, cmds, expected_output_files)
def main():
    '''Split the monolingual corpus into chunks, translate each chunk
    (resuming finished ones), and aggregate source/hypothesis pairs.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', '-d', required=True, help='Path to file to translate')
    parser.add_argument('--model', '-m', required=True, help='Model checkpoint')
    parser.add_argument('--lenpen', default=1.2, type=float, help='Length penalty')
    parser.add_argument('--beam', default=5, type=int, help='Beam size')
    parser.add_argument('--max-len-a', type=float, default=0, help='max-len-a parameter when back-translating')
    parser.add_argument('--max-len-b', type=int, default=200, help='max-len-b parameter when back-translating')
    parser.add_argument('--cpu', type=int, default=4, help='Number of CPU for interactive.py')
    parser.add_argument('--cuda-visible-device-ids', '-gids', default=None, nargs='*', help='List of cuda visible devices ids, camma separated')
    parser.add_argument('--dest', help='Output path for the intermediate and translated file')
    parser.add_argument('--max-tokens', type=int, default=12000, help='max tokens')
    parser.add_argument('--buffer-size', type=int, default=10000, help='Buffer size')
    parser.add_argument('--chunks', type=int, default=100)
    parser.add_argument('--source-lang', type=str, default=None, help='Source langauge. Will inference from the model if not set')
    parser.add_argument('--target-lang', type=str, default=None, help='Target langauge. Will inference from the model if not set')
    parser.add_argument('--databin', type=str, default=None, help='Parallel databin. Will combine with the back-translated databin')
    parser.add_argument('--sbatch-args', default='', help='Extra SBATCH arguments')
    parser.add_argument('--backend', type=str, default='local', choices=['local', 'slurm'])
    args = parser.parse_args()
    args.cuda_visible_device_ids = args.cuda_visible_device_ids or list(range(torch.cuda.device_count()))
    # fall back to the language pair / databin stored in the checkpoint
    chkpnt = torch.load(args.model)
    model_args = chkpnt['args']
    if args.source_lang is None or args.target_lang is None:
        args.source_lang = args.source_lang or model_args.source_lang
        args.target_lang = args.target_lang or model_args.target_lang
    if args.databin is None:
        args.databin = args.databin or model_args.data
    root_dir = os.path.dirname(os.path.realpath(__file__))
    translation_dir = os.path.join(args.dest or root_dir, 'translations', f'{args.source_lang}-{args.target_lang}')
    tempdir = os.path.join(translation_dir, 'splits')
    os.makedirs(tempdir, exist_ok=True)
    # split the corpus round-robin into args.chunks pieces (resumable)
    split_files = glob(f'{tempdir}/mono_data*')
    if len(split_files) != args.chunks:
        if len(split_files) != 0:
            print("number of split files are not the same as chunks. removing files and re-split")
            [os.remove(os.path.join(tempdir, f)) for f in os.listdir(tempdir)]
        print("splitting files ...")
        check_call(f'split -n "r/{args.chunks}" -a3 -d {args.data} {tempdir}/mono_data', shell=True)
        split_files = glob(f'{tempdir}/mono_data*')
    else:
        print("has the same number of splitted file and the specified chunks, skip splitting file")
    translated_files = []
    files_to_translate = []
    for file in split_files:
        # skip the translation job if it's finished
        output_file = get_output_file(translation_dir, file)
        translated_files.append(output_file)
        if check_finished(output_file):
            print(f"{output_file} is translated")
            continue
        files_to_translate.append(file)
    print(f"{len(files_to_translate)} files to translate")
    translate_files(args, translation_dir, files_to_translate)
    # aggregate translated files
    generated_src = f'{args.dest}/generated.src'
    generated_tgt = f'{args.dest}/generated.hypo'
    if count_line(generated_src) != count_line(generated_tgt) or count_line(generated_src) <= 0:
        print(f"aggregating translated {len(translated_files)} files")
        with TempFile() as fout:
            files = " ".join(translated_files)
            check_call(f"cat {files}", shell=True, stdout=fout)
            # strip head and make pairs
            check_call(f'cat {fout.name} | grep "^S" | cut -f2 > {generated_src}', shell=True)
            check_call(f'cat {fout.name} | grep "^H" | cut -f3 > {generated_tgt}', shell=True)
            assert count_line(generated_src) == count_line(generated_tgt)
            print(f"output generated files to {generated_src}, {generated_tgt}")
# script entry point
if __name__ == '__main__':
    main()
| 7,695 | 41.054645 | 144 | py |
flores | flores-main/previous_releases/floresv1/scripts/train.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import torch
from subprocess import check_call, check_output
from glob import glob
from tempfile import TemporaryDirectory, NamedTemporaryFile as TempFile
import math
import json
from utils import check_last_line
def read_config(config_path):
    """Load and return the JSON pipeline configuration at *config_path*."""
    with open(config_path, 'r') as config_file:
        return json.load(config_file)
def call(cmd, shell=True):
    '''Echo *cmd*, then run it; raises CalledProcessError on failure'''
    print(cmd)
    check_call(cmd, shell=shell)
def train(src, tgt, train_config, savedir, databin):
    '''Run fairseq-train for one direction, skipping if already finished

    Finished runs are detected by a trailing "done" line in train.log plus
    the presence of checkpoint_best.pt.
    '''
    # expect to have 'hyperparameters', 'src', 'tgt', 'databin' in train_config
    os.makedirs(savedir, exist_ok=True)
    logpath = os.path.join(savedir, 'train.log')
    checkpoint = os.path.join(savedir, 'checkpoint_best.pt')
    if check_last_line(logpath, 'done') and os.path.exists(checkpoint):
        print(f"Training is finished. Best checkpoint: {checkpoint}")
        return
    # use the largest power-of-two GPU count available, and scale
    # update-freq so the effective batch matches the configured GPU count
    cuda_visible_devices = list(range(torch.cuda.device_count()))
    num_visible_gpu = len(cuda_visible_devices)
    num_gpu = min(train_config['gpu'], 2**int(math.log2(num_visible_gpu)))
    cuda_devices_clause = f"CUDA_VISIBLE_DEVICES={','.join([str(i) for i in cuda_visible_devices[:num_gpu]])}"
    update_freq = train_config['gpu'] / num_gpu
    call(f"""{cuda_devices_clause} fairseq-train {databin} \
        --source-lang {src} --target-lang {tgt} \
        --save-dir {savedir} \
        --update-freq {update_freq} \
        {" ".join(train_config['parameters'])} \
        | tee {logpath}
        """, shell=True)
def eval_bleu(src, tgt, subset, lenpen, databin, checkpoint, output, max_token=20000):
    '''Run fairseq-generate on *subset* and return the final (BLEU) line

    sacrebleu is only used when the target language is English.
    '''
    bleuarg = "--sacrebleu" if tgt == "en" else ""
    call(f"""fairseq-generate {databin} \
        --source-lang {src} --target-lang {tgt} \
        --path {checkpoint} \
        --max-tokens {max_token} \
        --beam 5 \
        --lenpen {lenpen} \
        --max-len-a 1.8 \
        --max-len-b 10 \
        --gen-subset {subset} \
        --remove-bpe=sentencepiece \
        {bleuarg} > {output}
        """)
    # the BLEU summary is the last line of the generation log
    return check_output(f"tail -n 1 {output}", shell=True).decode('utf-8').strip()
def translate(src, tgt, model, lenpen, dest, data, max_token=12000):
    '''Back-translate the monolingual *data* by invoking translate.py

    Runs the sibling script with the local backend and 100 chunks.
    '''
    script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'translate.py')
    check_call(f"""python {script_path} --data {data}\
        --source-lang {src} --target-lang {tgt} \
        --model {model} \
        --beam 5 --lenpen {lenpen} \
        --max-len-a 1.8 \
        --max-len-b 10 \
        --dest {dest} \
        --max-token {max_token} \
        --chunks 100 \
        --backend local
    """, shell=True)
# (src, tgt) is the direction of the databin
def build_bt_databin(src, tgt, train_prefix, para_databin, output_folder):
    '''Binarize back-translated pairs and merge them with the parallel
    databin (symlinked) so training sees both as train/train1.

    Returns the path of the combined databin; short-circuits if it exists.
    '''
    final_output = os.path.join(f'{output_folder}/data-bin')
    if os.path.exists(final_output):
        print(f"Databin path {final_output} exists")
        return final_output
    train_databin = os.path.join(output_folder, 'train-data-bin')
    os.makedirs(train_databin, exist_ok=True)
    # hypotheses become the source side, originals the target side
    call(f"ln -fs {train_prefix}.hypo {output_folder}/bt.{src}")
    call(f"ln -fs {train_prefix}.src {output_folder}/bt.{tgt}")
    call(f"""fairseq-preprocess \
        --source-lang {src} --target-lang {tgt} \
        --trainpref {output_folder}/bt \
        --destdir {train_databin} \
        --joined-dictionary \
        --srcdict {para_databin}/dict.{src}.txt \
        --workers 40
    """)
    os.makedirs(final_output, exist_ok=True)
    call(f"ln -fs {para_databin}/* {final_output}")
    # expose the back-translated shards as train1.* next to the parallel train.*
    for lang in [src, tgt]:
        for suffix in ['idx', 'bin']:
            file_suffix = f"{src}-{tgt}.{lang}.{suffix}"
            call(f"ln -fs {train_databin}/train.{file_suffix} {output_folder}/data-bin/train1.{file_suffix}")
    return final_output
def main():
    '''Iterated back-translation pipeline: train, evaluate, translate
    monolingual data, rebuild the databin, repeat per config entry.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', '-c', required=True, help='pipeline config')
    parser.add_argument('--databin', '-d', required=True, help='initial databin')
    args = parser.parse_args()
    # configs is a sequence of (name, config) pairs, one per iteration
    configs = read_config(args.config)
    workdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../experiments')
    #cuda_visible_devices=args.cuda_visible_devices or list(range(torch.cuda.device_count()))
    initial_databin = args.databin
    for i in range(len(configs)):
        (name, config) = configs[i]
        src = config['src']
        tgt = config['tgt']
        direction = f"{src}-{tgt}"
        print(f"Start {name} iteration, {direction}")
        iter_workdir = os.path.join(workdir, name, direction)
        # train
        model_dir = os.path.join(iter_workdir, 'model')
        train(src, tgt, config['train'], model_dir, initial_databin)
        checkpoint_path = os.path.join(model_dir, 'checkpoint_best.pt')
        # eval (reuse a previous score if eval.txt already ends with BLEU)
        lenpen = config['translate']['lenpen']
        eval_output = os.path.join(model_dir, 'eval.txt')
        if check_last_line(eval_output, "BLEU"):
            print(check_output(f"tail -n 1 {eval_output}", shell=True).decode('utf-8').strip())
        else:
            print(eval_bleu(
                config['src'], config['tgt'],
                'test', lenpen,
                args.databin, checkpoint_path,
                os.path.join(model_dir, 'eval.txt')
            ))
        # Early exit to skip back-translation for the last iteration
        if i == len(configs) - 1:
            break
        # translate
        translate_output = os.path.join(iter_workdir, 'synthetic')
        translate(config['src'], config['tgt'], checkpoint_path, lenpen, translate_output, config['translate']['mono'], config['translate']['max_token'])
        # generate databin for the next (reverse-direction) iteration
        databin_folder = os.path.join(translate_output, 'bt')
        initial_databin = build_bt_databin(
            config['tgt'], config['src'],
            os.path.join(translate_output, 'generated'), args.databin, databin_folder
        )
main()
| 6,101 | 36.435583 | 153 | py |
code-nerf | code-nerf-main/src/utils.py |
import imageio
import numpy as np
import torch
# import json
# from torchvision import transforms
import os
def get_rays(H, W, focal, c2w):
    """Compute per-pixel ray origins and unit view directions.

    Pixels use a pinhole model centred on the image; ``c2w`` supplies the
    camera-to-world rotation (``[..., :3, :3]``) and translation
    (``[..., :3, -1]``). Both returns are flattened to (H*W, 3).
    """
    cols, rows = torch.meshgrid(
        torch.linspace(0, W - 1, W), torch.linspace(0, H - 1, H))
    cols = cols.t()
    rows = rows.t()
    # camera-frame directions: +x right, +y up, looking down -z
    cam_dirs = torch.stack(
        [(cols - W * .5) / focal, -(rows - H * .5) / focal,
         -torch.ones_like(cols)], -1)
    world_dirs = torch.sum(
        cam_dirs[..., np.newaxis, :].type_as(c2w) * c2w[..., :3, :3], -1)
    viewdirs = world_dirs / torch.norm(world_dirs, dim=-1, keepdim=True)
    origins = c2w[..., :3, -1].expand(world_dirs.shape)
    return origins.reshape(-1, 3), viewdirs.reshape(-1, 3)
def sample_from_rays(ro, vd, near, far, N_samples, z_fixed = False):
    """Sample depths along each ray and lift them to 3-D points.

    With ``z_fixed`` the depths are an even linspace over [near, far];
    otherwise stratified sampling jitters evenly spaced bin centres.
    Returns (xyz, vd, z_vals) with xyz and vd shaped (rays, N_samples, 3).
    """
    if z_fixed:
        z_vals = torch.linspace(near, far, N_samples).type_as(ro)
    else:
        dist = (far - near) / (2*N_samples)
        z_vals = torch.linspace(near+dist, far-dist, N_samples).type_as(ro)
        # Bug fix: the jitter must live on the same device/dtype as ``ro``;
        # a bare torch.rand tensor is CPU float32 and fails for CUDA inputs.
        z_vals += torch.rand(N_samples).type_as(ro) * (far - near) / (2*N_samples)
    xyz = ro.unsqueeze(-2) + vd.unsqueeze(-2) * z_vals.unsqueeze(-1)
    vd = vd.unsqueeze(-2).repeat(1, N_samples, 1)
    return xyz, vd, z_vals
def volume_rendering(sigmas, rgbs, z_vals, white_bg = True):
    """Alpha-composite densities and colors along each ray.

    sigmas: (rays, samples, 1); rgbs: (rays, samples, 3); z_vals: (samples,).
    Returns the composited RGB per ray and the expected depth; with
    ``white_bg`` the leftover transmittance is filled with white.
    """
    # gaps between consecutive samples; the last is effectively infinite
    gaps = z_vals[1:] - z_vals[:-1]
    gaps = torch.cat([gaps, torch.ones_like(gaps[:1]) * 1e10])
    alphas = 1 - torch.exp(-sigmas.squeeze(-1) * gaps)
    # survival probability per sample (epsilon keeps cumprod stable)
    survive = 1 - alphas + 1e-10
    accum = torch.cumprod(
        torch.cat([torch.ones_like(survive[..., :1]), survive], -1), -1
    )[..., :-1]
    weights = alphas * accum
    rgb_final = (weights.unsqueeze(-1) * rgbs).sum(-2)
    depth_final = (weights * z_vals).sum(-1)
    if white_bg:
        rgb_final = rgb_final + (1 - weights.sum(1).unsqueeze(-1))
    return rgb_final, depth_final
def image_float_to_uint8(img):
    """
    Min-max normalize ``img`` and convert it to uint8 (0-255).

    Note: despite the name, this rescales by the image's own min/max
    rather than assuming values already lie in [0.0, 1.0].
    """
    #print(img.shape)
    vmin = np.min(img)
    vmax = np.max(img)
    if vmax - vmin < 1e-10:
        # guard against divide-by-zero on (near-)constant images
        vmax += 1e-10
    img = (img - vmin) / (vmax - vmin)
    img *= 255.0
    return img.astype(np.uint8)
def str2bool(v):
    """Parse a boolean-ish CLI value; intended for argparse ``type=``.

    Accepts real bools unchanged and the strings yes/true/no/false
    (case-insensitive); anything else raises ArgumentTypeError.
    """
    # Bug fix: this module never imported argparse, so the error branch
    # raised NameError instead of ArgumentTypeError.
    import argparse
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true'):
        return True
    elif v.lower() in ('no', 'false'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
code-nerf | code-nerf-main/src/model.py | import torch
import torch.nn as nn
def PE(x, degree):
    """Positional encoding: concat x with sin/cos of power-of-two scales.

    Output size along the last axis is ``d + 2 * degree * d`` for input
    size ``d``.
    """
    scaled = torch.cat([x * (2.0 ** k) for k in range(degree)], -1)
    return torch.cat([x, torch.sin(scaled), torch.cos(scaled)], -1)
class CodeNeRF(nn.Module):
    """Latent-conditioned NeRF MLP.

    A shape trunk maps encoded positions (plus an additive projection of
    the shape latent per block) to a density; a texture head conditions
    on the view direction and texture latent to produce RGB. Per-block
    layers are stored as ``shape_latent_layer_k`` / ``shape_layer_k`` etc.
    so checkpoints keep stable parameter names.
    """

    def __init__(self, shape_blocks = 2, texture_blocks = 1, W = 256,
                 num_xyz_freq = 10, num_dir_freq = 4, latent_dim=256):
        super().__init__()
        self.shape_blocks = shape_blocks
        self.texture_blocks = texture_blocks
        self.num_xyz_freq = num_xyz_freq
        self.num_dir_freq = num_dir_freq
        # positional-encoding widths: input plus sin/cos per frequency
        d_xyz = 3 + 6 * num_xyz_freq
        d_viewdir = 3 + 6 * num_dir_freq
        self.encoding_xyz = nn.Sequential(nn.Linear(d_xyz, W), nn.ReLU())
        for k in range(shape_blocks):
            setattr(self, f"shape_latent_layer_{k+1}",
                    nn.Sequential(nn.Linear(latent_dim, W), nn.ReLU()))
            setattr(self, f"shape_layer_{k+1}",
                    nn.Sequential(nn.Linear(W, W), nn.ReLU()))
        self.encoding_shape = nn.Linear(W, W)
        self.sigma = nn.Sequential(nn.Linear(W, 1), nn.Softplus())
        self.encoding_viewdir = nn.Sequential(
            nn.Linear(W + d_viewdir, W), nn.ReLU())
        for k in range(texture_blocks):
            setattr(self, f"texture_latent_layer_{k+1}",
                    nn.Sequential(nn.Linear(latent_dim, W), nn.ReLU()))
            setattr(self, f"texture_layer_{k+1}",
                    nn.Sequential(nn.Linear(W, W), nn.ReLU()))
        self.rgb = nn.Sequential(
            nn.Linear(W, W // 2), nn.ReLU(), nn.Linear(W // 2, 3))

    def forward(self, xyz, viewdir, shape_latent, texture_latent):
        xyz = PE(xyz, self.num_xyz_freq)
        viewdir = PE(viewdir, self.num_dir_freq)
        feat = self.encoding_xyz(xyz)
        for k in range(self.shape_blocks):
            # inject the shape latent additively before each trunk layer
            feat = feat + getattr(self, f"shape_latent_layer_{k+1}")(shape_latent)
            feat = getattr(self, f"shape_layer_{k+1}")(feat)
        feat = self.encoding_shape(feat)
        sigmas = self.sigma(feat)
        feat = self.encoding_viewdir(torch.cat([feat, viewdir], -1))
        for k in range(self.texture_blocks):
            feat = feat + getattr(self, f"texture_latent_layer_{k+1}")(texture_latent)
            feat = getattr(self, f"texture_layer_{k+1}")(feat)
        return sigmas, self.rgb(feat)
code-nerf | code-nerf-main/src/data.py |
import imageio
import numpy as np
import torch
# import json
# from torchvision import transforms
import os
def load_poses(pose_dir, idxs=None):
    """Load the 4x4 pose files selected by *idxs* from *pose_dir*.

    Files are sorted by path before indexing, each pose is multiplied by
    diag(1, -1, -1, 1) to flip into SRN camera conventions, and the stack
    is returned as a float tensor of shape (len(idxs), 4, 4).
    """
    # Fix: avoid the mutable default argument ([]); None selects nothing,
    # matching the old default behavior.
    if idxs is None:
        idxs = []
    txtfiles = np.sort([os.path.join(pose_dir, f.name) for f in os.scandir(pose_dir)])
    posefiles = np.array(txtfiles)[idxs]
    srn_coords_trans = np.diag(np.array([1, -1, -1, 1]))  # SRN dataset
    poses = []
    for posefile in posefiles:
        pose = np.loadtxt(posefile).reshape(4, 4)
        poses.append(pose @ srn_coords_trans)
    return torch.from_numpy(np.array(poses)).float()
def load_imgs(img_dir, idxs=None):
    """Load the RGB images selected by *idxs* from *img_dir*.

    Files are sorted by path before indexing; each image is read as RGB,
    scaled to [0, 1] float32, and the stack is returned as a tensor of
    shape (len(idxs), H, W, 3).
    """
    # Fix: avoid the mutable default argument ([]); None selects nothing,
    # matching the old default behavior.
    if idxs is None:
        idxs = []
    allimgfiles = np.sort([os.path.join(img_dir, f.name) for f in os.scandir(img_dir)])
    imgfiles = np.array(allimgfiles)[idxs]
    imgs = []
    for imgfile in imgfiles:
        img = imageio.imread(imgfile, pilmode='RGB')
        img = img.astype(np.float32)
        img /= 255.
        imgs.append(img)
    return torch.from_numpy(np.array(imgs))
def load_intrinsic(intrinsic_path):
    """Read (focal, H, W) from an SRN ``intrinsics.txt`` file.

    The focal length is the first token of the first line; the final line
    holds the image height and width.
    """
    with open(intrinsic_path, 'r') as fin:
        lines = fin.readlines()
    focal = float(lines[0].split()[0])
    height_str, width_str = lines[-1].split()
    return focal, int(height_str), int(width_str)
class SRN():
    """Dataset over ShapeNet-SRN object directories.

    Each object id is a directory holding ``pose/``, ``rgb/`` and
    ``intrinsics.txt``. Training mode samples random views per object;
    test/val mode returns all 250 views.
    """
    def __init__(self, cat='srn_cars', splits='cars_train',
                 data_dir = '../data/ShapeNet_SRN/',
                 num_instances_per_obj = 1, crop_img = True):
        """
        cat: srn_cars / srn_chairs
        split: cars_train(/test/val) or chairs_train(/test/val)
        First, we choose the id
        Then, we sample images (the number of instances matter)
        """
        self.data_dir = os.path.join(data_dir, cat, splits)
        # sorted object ids give a deterministic index -> object mapping
        self.ids = np.sort([f.name for f in os.scandir(self.data_dir)])
        self.lenids = len(self.ids)
        self.num_instances_per_obj = num_instances_per_obj
        # the split name's second component ("train"/"test"/"val") picks the mode
        self.train = True if splits.split('_')[1] == 'train' else False
        self.crop_img = crop_img
    def __len__(self):
        # one dataset item per object id
        return self.lenids
    def __getitem__(self, idx):
        obj_id = self.ids[idx]
        if self.train:
            focal, H, W, imgs, poses, instances = self.return_train_data(obj_id)
            return focal, H, W, imgs, poses, instances, idx
        else:
            focal, H, W, imgs, poses = self.return_test_val_data(obj_id)
            return focal, H, W, imgs, poses, idx
    def return_train_data(self, obj_id):
        pose_dir = os.path.join(self.data_dir, obj_id, 'pose')
        img_dir = os.path.join(self.data_dir, obj_id, 'rgb')
        intrinsic_path = os.path.join(self.data_dir, obj_id, 'intrinsics.txt')
        # sample view indices uniformly (with replacement) from the first 50
        # NOTE(review): uses global numpy RNG; seed externally for repeatability
        instances = np.random.choice(50, self.num_instances_per_obj)
        poses = load_poses(pose_dir, instances)
        imgs = load_imgs(img_dir, instances)
        focal, H, W = load_intrinsic(intrinsic_path)
        if self.crop_img:
            # centre-crop 32px borders; assumes 128x128 inputs so H,W halve
            # -- TODO confirm against the dataset's actual image size
            imgs = imgs[:,32:-32,32:-32,:]
            H, W = H // 2, W//2
        return focal, H, W, imgs.reshape(self.num_instances_per_obj, -1,3), poses, instances
    def return_test_val_data(self, obj_id):
        pose_dir = os.path.join(self.data_dir, obj_id, 'pose')
        img_dir = os.path.join(self.data_dir, obj_id, 'rgb')
        intrinsic_path = os.path.join(self.data_dir, obj_id, 'intrinsics.txt')
        # evaluation uses every one of the 250 views, uncropped
        instances = np.arange(250)
        poses = load_poses(pose_dir, instances)
        imgs = load_imgs(img_dir, instances)
        focal, H, W = load_intrinsic(intrinsic_path)
        return focal, H, W, imgs, poses
code-nerf | code-nerf-main/src/trainer.py |
import numpy as np
import torch
import torch.nn as nn
import json
from data import SRN
from utils import get_rays, sample_from_rays, volume_rendering, image_float_to_uint8
from model import CodeNeRF
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import math
import time
class Trainer():
    """Joint training of the shared CodeNeRF network and per-object codes.

    Every object in the dataset owns one shape code and one texture code
    (rows of two ``nn.Embedding`` tables); the NeRF MLP weights are shared.
    Hyperparameters are read from ``jsonfiles/<jsonfile>`` and checkpoints,
    tensorboard logs and the hyperparameter snapshot go under
    ``exps/<save_dir>``.
    """

    def __init__(self, save_dir, gpu, jsonfile = 'srncar.json', batch_size=2048,
                 check_iter = 10000):
        """
        :param save_dir: experiment directory name, created under 'exps/'
        :param gpu: CUDA device index to train on
        :param jsonfile: hyperparameter file name under 'jsonfiles/'
        :param batch_size: number of rays per network forward/backward pass
        :param check_iter: interval (iterations) at which images are logged
        """
        super().__init__()
        # Read hyperparameters from disk.
        hpampath = os.path.join('jsonfiles', jsonfile)
        with open(hpampath, 'r') as f:
            self.hpams = json.load(f)
        self.device = torch.device('cuda:' + str(gpu))
        self.make_model()
        # The code embeddings are sized from the dataset, so a dataloader
        # must exist before make_codes(); it is rebuilt every epoch anyway.
        self.make_dataloader(num_instances_per_obj = 1, crop_img = False)
        self.make_codes()
        self.B = batch_size
        self.make_savedir(save_dir)
        self.niter, self.nepoch = 0, 0
        self.check_iter = check_iter

    def training(self, iters_crop, iters_all, num_instances_per_obj=1):
        """Train for ``iters_all`` iterations, the first ``iters_crop`` of
        them on center-cropped images and the remainder on full images.

        :raises ValueError: if ``iters_crop`` exceeds ``iters_all``.
        """
        if iters_crop > iters_all:
            # BUGFIX: was `raise error`, which raised a NameError on an
            # undefined name instead of reporting the actual problem.
            raise ValueError(
                'iters_crop (%d) must not exceed iters_all (%d)'
                % (iters_crop, iters_all))
        while self.niter < iters_all:
            if self.niter < iters_crop:
                self.training_single_epoch(num_instances_per_obj = num_instances_per_obj,
                                           num_iters = iters_crop, crop_img = True)
            else:
                self.training_single_epoch(num_instances_per_obj=num_instances_per_obj,
                                           num_iters=iters_all, crop_img = False)
            self.save_models()
            self.nepoch += 1

    def training_single_epoch(self, num_instances_per_obj, num_iters, crop_img = True):
        """One pass over all objects, sampling ``num_instances_per_obj``
        views per object; stops early once ``num_iters`` total iterations
        have been reached."""
        self.make_dataloader(num_instances_per_obj, crop_img = crop_img)
        self.set_optimizers()
        # Per object (dataloader batch size is 1 object).
        for d in self.dataloader:
            if self.niter < num_iters:
                focal, H, W, imgs, poses, instances, obj_idx = d
                obj_idx = obj_idx.to(self.device)
                # Per sampled view of this object.
                self.opts.zero_grad()
                for k in range(num_instances_per_obj):
                    t1 = time.time()
                    self.opts.zero_grad()
                    rays_o, viewdir = get_rays(H.item(), W.item(), focal, poses[0,k])
                    xyz, viewdir, z_vals = sample_from_rays(rays_o, viewdir, self.hpams['near'], self.hpams['far'],
                                                            self.hpams['N_samples'])
                    loss_per_img, generated_img = [], []
                    # Render the image in chunks of self.B rays; gradients
                    # accumulate across chunks before the single opts.step().
                    for i in range(0, xyz.shape[0], self.B):
                        shape_code, texture_code = self.shape_codes(obj_idx), self.texture_codes(obj_idx)
                        sigmas, rgbs = self.model(xyz[i:i+self.B].to(self.device),
                                                  viewdir[i:i+self.B].to(self.device),
                                                  shape_code, texture_code)
                        rgb_rays, _ = volume_rendering(sigmas, rgbs, z_vals.to(self.device))
                        loss_l2 = torch.mean((rgb_rays - imgs[0, k, i:i+self.B].type_as(rgb_rays))**2)
                        if i == 0:
                            # Latent-norm regularizer, applied once per image.
                            reg_loss = torch.norm(shape_code, dim=-1) + torch.norm(texture_code, dim=-1)
                            loss_reg = self.hpams['loss_reg_coef'] * torch.mean(reg_loss)
                            loss = loss_l2 + loss_reg
                        else:
                            loss = loss_l2
                        loss.backward()
                        loss_per_img.append(loss_l2.item())
                        generated_img.append(rgb_rays)
                    self.opts.step()
                self.log_psnr_time(np.mean(loss_per_img), time.time() - t1, obj_idx)
                self.log_regloss(reg_loss, obj_idx)
                if self.niter % self.check_iter == 0:
                    generated_img = torch.cat(generated_img)
                    generated_img = generated_img.reshape(H,W,3)
                    gtimg = imgs[0,-1].reshape(H,W,3)
                    self.log_img(generated_img, gtimg, obj_idx)
                    print(-10*np.log(np.mean(loss_per_img))/np.log(10), self.niter)
                if self.niter % self.hpams['check_points'] == 0:
                    self.save_models(self.niter)
                self.niter += 1

    def log_psnr_time(self, loss_per_img, time_spent, obj_idx):
        """Log training PSNR (from mean per-chunk MSE) and wall time."""
        psnr = -10*np.log(loss_per_img) / np.log(10)
        self.writer.add_scalar('psnr/train', psnr, self.niter, obj_idx)
        self.writer.add_scalar('time/train', time_spent, self.niter, obj_idx)

    def log_regloss(self, loss_reg, obj_idx):
        """Log the (unscaled) latent-norm regularization term."""
        self.writer.add_scalar('reg/train', loss_reg, self.niter, obj_idx)

    def log_img(self, generated_img, gtimg, obj_idx):
        """Log a side-by-side [rendered | ground-truth] image to tensorboard."""
        H, W = generated_img.shape[:-1]
        ret = torch.zeros(H,2*W, 3)
        ret[:,:W,:] = generated_img
        ret[:,W:,:] = gtimg
        ret = image_float_to_uint8(ret.detach().cpu().numpy())
        self.writer.add_image('train_'+str(self.niter) + '_' + str(obj_idx.item()), torch.from_numpy(ret).permute(2,0,1))

    def set_optimizers(self):
        """(Re)build the AdamW optimizer with the current scheduled rates."""
        lr1, lr2 = self.get_learning_rate()
        self.opts = torch.optim.AdamW([
            {'params':self.model.parameters(), 'lr': lr1},
            {'params':self.shape_codes.parameters(), 'lr': lr2},
            {'params':self.texture_codes.parameters(), 'lr':lr2}
        ])

    def get_learning_rate(self):
        """Return (model_lr, latent_lr), each halved every 'interval' iters."""
        model_lr, latent_lr = self.hpams['lr_schedule'][0], self.hpams['lr_schedule'][1]
        num_model = self.niter // model_lr['interval']
        num_latent = self.niter // latent_lr['interval']
        lr1 = model_lr['lr'] * 2**(-num_model)
        lr2 = latent_lr['lr'] * 2**(-num_latent)
        return lr1, lr2

    def make_model(self):
        """Instantiate CodeNeRF from the json hyperparameters."""
        self.model = CodeNeRF(**self.hpams['net_hyperparams']).to(self.device)

    def make_codes(self):
        """Create per-object shape/texture embeddings, randomly initialized
        with std 1/sqrt(latent_dim/2)."""
        embdim = self.hpams['net_hyperparams']['latent_dim']
        d = len(self.dataloader)
        self.shape_codes = nn.Embedding(d, embdim)
        self.texture_codes = nn.Embedding(d, embdim)
        self.shape_codes.weight = nn.Parameter(torch.randn(d, embdim) / math.sqrt(embdim/2))
        self.texture_codes.weight = nn.Parameter(torch.randn(d, embdim) / math.sqrt(embdim/2))
        self.shape_codes = self.shape_codes.to(self.device)
        self.texture_codes = self.texture_codes.to(self.device)

    def make_dataloader(self, num_instances_per_obj, crop_img):
        """Build the SRN dataloader (one object per batch).

        cat / splits / data_dir come from the json config;
        num_instances_per_obj is how many views are sampled per object.
        """
        cat = self.hpams['data']['cat']
        data_dir = self.hpams['data']['data_dir']
        splits = self.hpams['data']['splits']
        srn = SRN(cat=cat, splits=splits, data_dir = data_dir,
                  num_instances_per_obj = num_instances_per_obj, crop_img = crop_img)
        self.dataloader = DataLoader(srn, batch_size=1, num_workers =4)

    def make_savedir(self, save_dir):
        """Create exps/<save_dir>, the tensorboard writer and a hyperparameter
        snapshot (hpam.json)."""
        self.save_dir = os.path.join('exps', save_dir)
        if not os.path.isdir(self.save_dir):
            os.makedirs(os.path.join(self.save_dir, 'runs'))
        self.writer = SummaryWriter(os.path.join(self.save_dir, 'runs'))
        hpampath = os.path.join(self.save_dir, 'hpam.json')
        with open(hpampath, 'w') as f:
            json.dump(self.hpams, f, indent=2)

    def save_models(self, iter = None):
        """Checkpoint model + codes + progress counters to models.pth, and
        additionally to <iter>.pth when an iteration number is given."""
        save_dict = {'model_params': self.model.state_dict(),
                     'shape_code_params': self.shape_codes.state_dict(),
                     'texture_code_params': self.texture_codes.state_dict(),
                     'niter': self.niter,
                     'nepoch' : self.nepoch
                     }
        if iter is not None:
            torch.save(save_dict, os.path.join(self.save_dir, str(iter) + '.pth'))
        torch.save(save_dict, os.path.join(self.save_dir, 'models.pth'))
| 8,307 | 46.204545 | 121 | py |
code-nerf | code-nerf-main/src/optimizer.py |
import numpy as np
import torch
import torch.nn as nn
import json
from data import SRN
from utils import get_rays, sample_from_rays, volume_rendering, image_float_to_uint8
from skimage.metrics import structural_similarity as compute_ssim
from model import CodeNeRF
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import imageio
import time
class Optimizer():
    """Test-time optimization of latent codes for a pre-trained CodeNeRF.

    Starting from the mean shape/texture codes of the training set, the codes
    are optimized against a small number of target views per object, then the
    remaining views are evaluated (PSNR / SSIM) with the frozen codes.
    """

    def __init__(self, saved_dir, gpu, instance_ids=[], splits='test',
                 jsonfile = 'srncar.json', batch_size=2048, num_opts = 200):
        """
        :param saved_dir: the directory of pre-trained model (under 'exps/')
        :param gpu: which GPU we would use
        :param instance_ids: indices of the views used for test-time
            optimization (e.g. [82] for 000082.png)
        :param splits: test or val
        :param jsonfile: where the hyper-parameters are saved
        :param batch_size: number of rays per network forward pass
        :param num_opts: number of test-time optimization steps
        """
        super().__init__()
        # Read hyperparameters from disk.
        hpampath = os.path.join('jsonfiles', jsonfile)
        with open(hpampath, 'r') as f:
            self.hpams = json.load(f)
        self.device = torch.device('cuda:' + str(gpu))
        self.make_model()
        self.load_model_codes(saved_dir)
        self.make_dataloader(splits, len(instance_ids))
        print('we are going to save at ', self.save_dir)
        self.B = batch_size
        self.num_opts = num_opts
        self.splits = splits
        self.nviews = str(len(instance_ids))
        # Per-object metric histories, keyed by object index.
        self.psnr_eval = {}
        self.psnr_opt = {}
        self.ssim_eval = {}

    def optimize_objs(self, instance_ids, lr=1e-2, lr_half_interval=50, save_img = True):
        """Optimize shape/texture codes per object on the views listed in
        ``instance_ids``, then evaluate on all remaining views.

        :param instance_ids: view indices used for optimization
        :param lr: initial learning rate for the codes
        :param lr_half_interval: halve the lr every this many steps
        :param save_img: if True, write side-by-side renderings to disk
        """
        logpath = os.path.join(self.save_dir, 'opt_hpams.json')
        # BUGFIX: this dict literal was corrupted (a stray multi-line string
        # where the 'splits' key belonged), which was a syntax error.
        hpam = {'instance_ids' : instance_ids, 'lr': lr, 'lr_half_interval': lr_half_interval,
                'splits': self.splits}
        with open(logpath, 'w') as f:
            json.dump(hpam, f, indent=2)

        self.lr, self.lr_half_interval, iters = lr, lr_half_interval, 0
        instance_ids = torch.tensor(instance_ids)
        self.optimized_shapecodes = torch.zeros(len(self.dataloader), self.mean_shape.shape[1])
        self.optimized_texturecodes = torch.zeros(len(self.dataloader), self.mean_texture.shape[1])
        # Per object
        for num_obj, d in enumerate(self.dataloader):
            focal, H, W, imgs, poses, obj_idx = d
            tgt_imgs, tgt_poses = imgs[0, instance_ids], poses[0, instance_ids]
            self.nopts, self.lr_half_interval = 0, lr_half_interval
            # Initialize the codes at the training-set means.
            shapecode = self.mean_shape.to(self.device).clone().detach().requires_grad_()
            texturecode = self.mean_texture.to(self.device).clone().detach().requires_grad_()

            # First, optimize the codes on the chosen views.
            self.set_optimizers(shapecode, texturecode)
            while self.nopts < self.num_opts:
                self.opts.zero_grad()
                t1 = time.time()
                generated_imgs, gt_imgs = [], []
                for num, instance_id in enumerate(instance_ids):
                    tgt_img, tgt_pose = tgt_imgs[num].reshape(-1,3), tgt_poses[num]
                    rays_o, viewdir = get_rays(H.item(), W.item(), focal, tgt_pose)
                    xyz, viewdir, z_vals = sample_from_rays(rays_o, viewdir, self.hpams['near'], self.hpams['far'],
                                                            self.hpams['N_samples'])
                    loss_per_img, generated_img = [], []
                    # Render in chunks of self.B rays, accumulating gradients.
                    for i in range(0, xyz.shape[0], self.B):
                        sigmas, rgbs = self.model(xyz[i:i+self.B].to(self.device),
                                                  viewdir[i:i+self.B].to(self.device),
                                                  shapecode, texturecode)
                        rgb_rays, _ = volume_rendering(sigmas, rgbs, z_vals.to(self.device))
                        loss_l2 = torch.mean((rgb_rays - tgt_img[i:i+self.B].type_as(rgb_rays))**2)
                        if i == 0:
                            # Latent-norm regularizer, applied once per image.
                            reg_loss = torch.norm(shapecode, dim=-1) + torch.norm(texturecode, dim=-1)
                            loss_reg = self.hpams['loss_reg_coef'] * torch.mean(reg_loss)
                            loss = loss_l2 + loss_reg
                        else:
                            loss = loss_l2
                        loss.backward()
                        loss_per_img.append(loss_l2.item())
                        generated_img.append(rgb_rays)
                    generated_imgs.append(torch.cat(generated_img).reshape(H,W,3))
                    gt_imgs.append(tgt_img.reshape(H,W,3))
                self.opts.step()
                self.log_opt_psnr_time(np.mean(loss_per_img), time.time() - t1, self.nopts + self.num_opts * num_obj,
                                       num_obj)
                self.log_regloss(reg_loss.item(), self.nopts, num_obj)
                # BUGFIX: was `if self.save_img:`, which tested the bound
                # method (always truthy) and ignored the save_img argument.
                if save_img:
                    self.save_img(generated_imgs, gt_imgs, self.ids[num_obj], self.nopts)
                self.nopts += 1
                if self.nopts % lr_half_interval == 0:
                    self.set_optimizers(shapecode, texturecode)

            # Then, evaluate the frozen codes on all held-out views.
            with torch.no_grad():
                for num in range(250):
                    if num not in instance_ids:
                        tgt_img, tgt_pose = imgs[0,num].reshape(-1,3), poses[0, num]
                        rays_o, viewdir = get_rays(H.item(), W.item(), focal, poses[0, num])
                        xyz, viewdir, z_vals = sample_from_rays(rays_o, viewdir, self.hpams['near'], self.hpams['far'],
                                                                self.hpams['N_samples'])
                        loss_per_img, generated_img = [], []
                        for i in range(0, xyz.shape[0], self.B):
                            sigmas, rgbs = self.model(xyz[i:i+self.B].to(self.device),
                                                      viewdir[i:i + self.B].to(self.device),
                                                      shapecode, texturecode)
                            rgb_rays, _ = volume_rendering(sigmas, rgbs, z_vals.to(self.device))
                            loss_l2 = torch.mean((rgb_rays - tgt_img[i:i+self.B].type_as(rgb_rays)) ** 2)
                            loss_per_img.append(loss_l2.item())
                            generated_img.append(rgb_rays)
                        self.log_eval_psnr(np.mean(loss_per_img), num, num_obj)
                        self.log_compute_ssim(torch.cat(generated_img).reshape(H, W, 3), tgt_img.reshape(H, W, 3),
                                              num, num_obj)
                        if save_img:
                            self.save_img([torch.cat(generated_img).reshape(H,W,3)], [tgt_img.reshape(H,W,3)], self.ids[num_obj], num,
                                          opt=False)
            # Save the optimized codes
            self.optimized_shapecodes[num_obj] = shapecode.detach().cpu()
            self.optimized_texturecodes[num_obj] = texturecode.detach().cpu()
            self.save_opts(num_obj)

    def save_opts(self, num_obj):
        """Persist optimized codes and evaluation metrics to codes.pth."""
        saved_dict = {
            'ids': self.ids,
            'num_obj' : num_obj,
            'optimized_shapecodes' : self.optimized_shapecodes,
            'optimized_texturecodes': self.optimized_texturecodes,
            'psnr_eval': self.psnr_eval,
            'ssim_eval': self.ssim_eval
        }
        torch.save(saved_dict, os.path.join(self.save_dir, 'codes.pth'))
        print('We finished the optimization of ' + str(num_obj))

    def save_img(self, generated_imgs, gt_imgs, obj_id, instance_num, opt=True):
        """Write a stacked [rendered | ground-truth] PNG for one object."""
        H, W = gt_imgs[0].shape[:2]
        nviews = int(self.nviews)
        if not opt:
            nviews = 1
        generated_imgs = torch.cat(generated_imgs).reshape(nviews, H, W, 3)
        gt_imgs = torch.cat(gt_imgs).reshape(nviews, H, W, 3)
        ret = torch.zeros(nviews *H, 2 * W, 3)
        ret[:,:W,:] = generated_imgs.reshape(-1, W, 3)
        ret[:,W:,:] = gt_imgs.reshape(-1, W, 3)
        ret = image_float_to_uint8(ret.detach().cpu().numpy())
        save_img_dir = os.path.join(self.save_dir, obj_id)
        if not os.path.isdir(save_img_dir):
            os.makedirs(save_img_dir)
        if opt:
            imageio.imwrite(os.path.join(save_img_dir, 'opt' + self.nviews + '_' + str(instance_num) + '.png'), ret)
        else:
            imageio.imwrite(os.path.join(save_img_dir, str(instance_num) + '_' + self.nviews + '.png'), ret)

    def log_compute_ssim(self, generated_img, gt_img, niters, obj_idx):
        """Append the SSIM of one evaluation view to self.ssim_eval[obj_idx]."""
        generated_img_np = generated_img.detach().cpu().numpy()
        gt_img_np = gt_img.detach().cpu().numpy()
        ssim = compute_ssim(generated_img_np, gt_img_np, multichannel=True)
        if niters == 0:
            self.ssim_eval[obj_idx] = [ssim]
        else:
            self.ssim_eval[obj_idx].append(ssim)

    def log_eval_psnr(self, loss_per_img, niters, obj_idx):
        """Append the PSNR of one evaluation view to self.psnr_eval[obj_idx]."""
        psnr = -10 * np.log(loss_per_img) / np.log(10)
        if niters == 0:
            self.psnr_eval[obj_idx] = [psnr]
        else:
            self.psnr_eval[obj_idx].append(psnr)

    def log_opt_psnr_time(self, loss_per_img, time_spent, niters, obj_idx):
        """Log optimization-phase PSNR and wall time to tensorboard."""
        psnr = -10*np.log(loss_per_img) / np.log(10)
        self.writer.add_scalar('psnr_opt/' + self.nviews + '/' + self.splits, psnr, niters, obj_idx)
        self.writer.add_scalar('time_opt/' + self.nviews + '/' + self.splits, time_spent, niters, obj_idx)

    def log_regloss(self, loss_reg, niters, obj_idx):
        """Log the latent-norm regularization term to tensorboard."""
        self.writer.add_scalar('reg/' + self.nviews + '/' + self.splits, loss_reg, niters, obj_idx)

    def set_optimizers(self, shapecode, texturecode):
        """(Re)build AdamW over the two code tensors at the scheduled lr."""
        lr = self.get_learning_rate()
        self.opts = torch.optim.AdamW([
            {'params': shapecode, 'lr': lr},
            {'params': texturecode, 'lr':lr}
        ])

    def get_learning_rate(self):
        """Return the code lr, halved every self.lr_half_interval steps."""
        opt_values = self.nopts // self.lr_half_interval
        lr = self.lr * 2**(-opt_values)
        return lr

    def make_model(self):
        """Instantiate CodeNeRF from the json hyperparameters."""
        self.model = CodeNeRF(**self.hpams['net_hyperparams']).to(self.device)

    def load_model_codes(self, saved_dir):
        """Load pre-trained weights and compute the mean shape/texture codes
        used to initialize test-time optimization."""
        saved_path = os.path.join('exps', saved_dir, 'models.pth')
        saved_data = torch.load(saved_path, map_location = torch.device('cpu'))
        self.make_save_img_dir(os.path.join('exps', saved_dir, 'test'))
        self.make_writer(saved_dir)
        self.model.load_state_dict(saved_data['model_params'])
        self.model = self.model.to(self.device)
        self.mean_shape = torch.mean(saved_data['shape_code_params']['weight'], dim=0).reshape(1,-1)
        self.mean_texture = torch.mean(saved_data['texture_code_params']['weight'], dim=0).reshape(1,-1)

    def make_writer(self, saved_dir):
        """Create the tensorboard writer for this evaluation run."""
        self.writer = SummaryWriter(os.path.join('exps', saved_dir, 'test', 'runs'))

    def make_save_img_dir(self, save_dir):
        """Create a fresh output directory, suffixing _2, _3, ... if needed."""
        save_dir_tmp = save_dir
        num = 2
        while os.path.isdir(save_dir_tmp):
            save_dir_tmp = save_dir + '_' + str(num)
            num += 1
        os.makedirs(save_dir_tmp)
        self.save_dir = save_dir_tmp

    def make_dataloader(self, splits, num_instances_per_obj, crop_img=False):
        """Build the SRN eval dataloader (deterministic order, 1 obj/batch)."""
        cat = self.hpams['data']['cat']
        data_dir = self.hpams['data']['data_dir']
        obj = cat.split('_')[1]
        splits = obj + '_' + splits
        srn = SRN(cat=cat, splits=splits, data_dir = data_dir,
                  num_instances_per_obj = num_instances_per_obj, crop_img = crop_img)
        self.ids = srn.ids
        self.dataloader = DataLoader(srn, batch_size=1, num_workers =4, shuffle = False)
| 11,915 | 47.636735 | 134 | py |
pynbody | pynbody-master/pynbody/plot/stars.py | """
stars
=====
"""
import logging
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from .. import array, filt, units, units as _units
from ..analysis import angmom, profile
from ..sph import Kernel2D, render_spherical_image
from .sph import image
logger = logging.getLogger('pynbody.plot.stars')
def bytscl(arr, mini=0, maxi=10000):
    """Linearly rescale *arr* onto [0, 1], clipping values outside
    [mini, maxi] to the ends of the range."""
    scaled = (arr - mini) / (maxi - mini)
    return np.clip(scaled, 0, 1)
def nw_scale_rgb(r, g, b, scales=(4, 3.2, 3.4)):
    """Scale the three channels by the per-channel factors in *scales*
    (red, green, blue order) and return the scaled triple.

    The default was a mutable list; replaced with an equivalent immutable
    tuple (callers may still pass a list).
    """
    return r * scales[0], g * scales[1], b * scales[2]
def nw_arcsinh_fit(r, g, b, nonlinearity=3):
    """Apply an arcsinh intensity stretch to the three channels.

    The stretch factor arcsinh(I * nonlinearity) / (nonlinearity * I) is
    computed from the summed intensity I = r + g + b and applied to each
    channel, compressing bright pixels while leaving faint ones ~linear.
    """
    total = r + g + b
    stretch = np.arcsinh(total * nonlinearity) / nonlinearity / total
    return r * stretch, g * stretch, b * stretch
def combine(r, g, b, magnitude_range, brightest_mag=None, mollview=False):
    """Combine three per-band surface-brightness maps (in magnitudes, so
    smaller = brighter) into an (N, N, 3) RGB array scaled to [0, 1].

    If *brightest_mag* is None it is estimated per channel as a
    near-maximum brightness (ignoring the very brightest ~1/5000 of
    pixels) and the brightest of the three is used. With *mollview*
    only negative (valid) pixels of the masked map are considered.
    Returns ``(rgbim, brightest_mag)`` with the estimated/used brightest
    magnitude in the original (positive-magnitude) sign convention.
    """
    # Magnitudes decrease with brightness; negate so bright pixels are large.
    r, g, b = -r, -g, -b

    if brightest_mag is None:
        # Estimate a robust per-channel maximum and take the largest.
        candidates = []
        for channel in (r, g, b):
            if mollview:
                flat = channel.flatten()
                ordered = np.sort(flat[flat < 0].data)
            else:
                ordered = np.sort(channel.flatten())
            candidates.append(ordered[-len(ordered) // 5000])
        brightest_mag = max(candidates)
    else:
        brightest_mag = -brightest_mag

    faintest = brightest_mag - magnitude_range
    rgbim = np.zeros((r.shape[0], r.shape[1], 3))
    for idx, channel in enumerate((r, g, b)):
        rgbim[:, :, idx] = bytscl(channel, faintest, brightest_mag)
    return rgbim, -brightest_mag
def convert_to_mag_arcsec2(image, mollview=False):
    """Convert a luminosity surface density (per pc^2) to mag arcsec^-2.

    For ordinary images the input must carry pynbody units of pc^-2; the
    mollview (healpix) path skips that check since those maps are plain
    arrays.
    """
    if not mollview:
        assert image.units == "pc^-2"
    sq_arcsec_per_pc2 = 2.3504430539466191e-09
    return -2.5 * np.log10(image * sq_arcsec_per_pc2)
def render(sim, filename=None,
           r_band='i', g_band='v', b_band='u',
           r_scale=0.5, g_scale=1.0, b_scale=1.0,
           dynamic_range=2.0,
           mag_range=None,
           width=50,
           resolution=500,
           starsize=None,
           plot=True, axes=None, ret_im=False, clear=True,
           ret_range=False, with_dust=False, z_range=50.0):
    '''
    Make a 3-color image of stars.

    The channel luminosities come from the Marigo stellar population
    magnitudes provided by pynbody. If with_dust is True, a simple dust
    screening based on Calzetti's law is applied using the gas content.

    Returns: If ret_im=True, an NxNx3 array representing an RGB image;
    if ret_range=True, the (brightest, faintest) surface brightness in
    mag arcsec^-2. (If both are True, only the image is returned.)

    **Optional keyword arguments:**

    *filename*: string (default: None)
         Filename to be written to (if a filename is specified)

    *r_band*: string (default: 'i')
         Determines which Johnston filter will go into the image red channel

    *g_band*: string (default: 'v')
         Determines which Johnston filter will go into the image green channel

    *b_band*: string (default: 'u')
         Determines which Johnston filter will go into the image blue channel

    *r_scale*: float (default: 0.5)
         The scaling of the red channel before channels are combined

    *g_scale*: float (default: 1.0)
         The scaling of the green channel before channels are combined

    *b_scale*: float (default: 1.0)
         The scaling of the blue channel before channels are combined

    *width*: float in kpc (default: 50)
         Sets the size of the image field in kpc

    *resolution*: integer (default: 500)
         Sets the number of pixels on the side of the image

    *starsize*: float in kpc (default: None)
         If not None, sets the maximum size of stars in the image

    *ret_im*: bool (default: False)
         if True, the NxNx3 image array is returned

    *ret_range*: bool (default: False)
         if True, the range of the image in mag arcsec^-2 is returned.

    *plot*: bool (default: True)
         if True, the image is plotted

    *axes*: matplotlib axes object (default: None)
         if not None, the axes object to plot to

    *dynamic_range*: float (default: 2.0)
         The number of dex in luminosity over which the image brightness ranges

    *mag_range*: float, float (default: None)
         If provided, the brightest and faintest surface brightnesses in the
         range, in mag arcsec^-2. Takes precedence over dynamic_range.

    *with_dust*: bool, (default: False)
         If True, the image is rendered with a simple dust screening model
         based on Calzetti's law. Note: the check is `with_dust is True`,
         so only the literal True enables it, not arbitrary truthy values.

    *z_range*: float, (default: 50.0)
         If with_dust is True this parameter specifies the z range
         over which the column density will be calculated.
         The default value is 50 kpc.
    '''
    # Accept widths given as unit strings / pynbody units and convert to the
    # simulation's position units.
    if isinstance(width, str) or issubclass(width.__class__, _units.UnitBase):
        if isinstance(width, str):
            width = _units.Unit(width)
        width = width.in_units(sim['pos'].units, **sim.conversion_context())

    if starsize is not None:
        # Cap the smoothing length of large star particles at `starsize`.
        smf = filt.HighPass('smooth', str(starsize) + ' kpc')
        sim.s[smf]['smooth'] = array.SimArray(starsize, 'kpc', sim=sim)

    # Render per-band luminosity surface densities (pc^-2) for each channel.
    r = image(sim.s, qty=r_band + '_lum_den', width=width, log=False,
              units="pc^-2", clear=False, noplot=True, resolution=resolution) * r_scale
    g = image(sim.s, qty=g_band + '_lum_den', width=width, log=False,
              units="pc^-2", clear=False, noplot=True, resolution=resolution) * g_scale
    b = image(sim.s, qty=b_band + '_lum_den', width=width, log=False,
              units="pc^-2", clear=False, noplot=True, resolution=resolution) * b_scale

    # convert all channels to mag arcsec^-2
    r=convert_to_mag_arcsec2(r)
    g=convert_to_mag_arcsec2(g)
    b=convert_to_mag_arcsec2(b)

    if with_dust is True:
        # render image with a simple dust absorption correction based on Calzetti's law using the gas content.
        try:
            import extinction
        except ImportError:
            warnings.warn(
                "Could not load extinction package. If you want to use this feature, "
                "plaese install the extinction package from here: http://extinction.readthedocs.io/en/latest/"
                "or <via pip install extinction> or <conda install -c conda-forge extinction>", RuntimeWarning)
            return

        # Split gas into warm/cool phases at 3e4 K; only gas within
        # +/- z_range of the midplane contributes to the screen.
        warm = filt.HighPass('temp',3e4)
        cool = filt.LowPass('temp',3e4)
        positive = filt.BandPass('z',-z_range,z_range) #LowPass('z',0)

        column_den_warm = image(sim.g[positive][warm], qty='rho', width=width, log=False,
                    units="kg cm^-2", clear=False, noplot=True,z_camera=z_range)
        column_den_cool = image(sim.g[positive][cool], qty='rho', width=width, log=False,
                    units="kg cm^-2", clear=False, noplot=True,z_camera=z_range)
        mh = 1.67e-27 # units kg
        cool_fac = 0.25 # fudge factor to make dust absorption not too strong
        # get the column density of gas (number of H atoms per cm^2)
        col_den = np.divide(column_den_warm,mh)+np.divide(column_den_cool*cool_fac,mh)
        # get absorption coefficient
        a_v = 0.5*col_den*2e-21
        #get the central wavelength for the given band
        wavelength_avail = {'u':3571,'b':4378,'v':5466,'r':6695,'i':8565,'j':12101,
            'h':16300,'k':21900,'U':3571,'B':4378,'V':5466,'R':6695,'I':8565,'J':12101,
            'H':16300,'K':21900} #in Angstrom
        # effective wavelength taken from http://svo2.cab.inta-csic.es/svo/theory/fps3/index.php?mode=browse&gname=Generic&gname2=Johnson
        # and from https://en.wikipedia.org/wiki/Photometric_system for h, k
        lr,lg,lb = wavelength_avail[r_band],wavelength_avail[g_band],wavelength_avail[b_band] #in Angstrom
        wave = np.array([lb, lg, lr])

        # Per-pixel extinction in magnitudes for each channel.
        # NOTE(review): this is a Python double loop over every pixel and is
        # slow for large resolutions — candidate for vectorization.
        ext_r = np.empty_like(r)
        ext_g = np.empty_like(g)
        ext_b = np.empty_like(b)
        for i in range(len(a_v)):
            for j in range(len(a_v[0])):
                ext = extinction.calzetti00(wave.astype(np.float_), a_v[i][j].astype(np.float_), 3.1, unit='aa', out=None)
                ext_r[i][j] = ext[2]
                ext_g[i][j] = ext[1]
                ext_b[i][j] = ext[0]

        # Extinction adds to the magnitudes (dims the image).
        r = r+ext_r
        g = g+ext_g
        b = b+ext_b

    #r,g,b = nw_scale_rgb(r,g,b)
    #r,g,b = nw_arcsinh_fit(r,g,b)

    if mag_range is None:
        # Auto-scale: let combine() pick the brightest level.
        rgbim, mag_max = combine(r, g, b, dynamic_range*2.5)
        mag_min = mag_max + 2.5*dynamic_range
    else:
        mag_max, mag_min = mag_range
        rgbim, mag_max = combine(r, g, b, mag_min - mag_max, mag_max)

    if plot:
        if clear:
            plt.clf()
        if axes is None:
            axes = plt.gca()

        if axes:
            axes.imshow(
                rgbim[::-1, :], extent=(-width / 2, width / 2, -width / 2, width / 2))
            axes.set_xlabel('x [' + str(sim.s['x'].units) + ']')
            axes.set_ylabel('y [' + str(sim.s['y'].units) + ']')
            plt.draw()

    if filename:
        plt.savefig(filename)

    if ret_im:
        return rgbim

    if ret_range:
        return mag_max, mag_min
def mollview(map=None,fig=None,plot=False,filenme=None,
             rot=None,coord=None,unit='',
             xsize=800,title='Mollweide view',nest=False,
             min=None,max=None,flip='astro',
             remove_dip=False,remove_mono=False,
             gal_cut=0,
             format='%g',format2='%g',
             cbar=True,cmap=None, notext=False,
             norm=None,hold=False,margins=None,sub=None,
             return_projected_map=False):
    """Plot an healpix map (given as an array) in Mollweide projection.

    Requires the healpy package.
    This function is taken from the Healpy package and slightly modified.

    Parameters
    ----------
    map : float, array-like or None
      An array containing the map, supports masked maps, see the `ma` function.
      If None, will display a blank map, useful for overplotting.
    fig : figure object or None, optional
      The figure to use. Default: create a new figure
    plot : bool (default: False)
      if True, matplotlib's draw (and interactive mode, if it was on) is
      triggered after rendering
    filenme : string (default: None)
      NOTE(review): appears to be a misspelling of ``filename``; this
      parameter is never used anywhere in the body, so nothing is written
      to disk - confirm intent before relying on it.
    rot : scalar or sequence, optional
      Describe the rotation to apply.
      In the form (lon, lat, psi) (unit: degrees) : the point at
      longitude *lon* and latitude *lat* will be at the center. An additional
      rotation of angle *psi* around this direction is applied.
    coord : sequence of character, optional
      Either one of 'G', 'E' or 'C' to describe the coordinate
      system of the map, or a sequence of 2 of these to rotate
      the map from the first to the second coordinate system.
    unit : str, optional
      A text describing the unit of the data. Default: ''
    xsize : int, optional
      The size of the image. Default: 800
    title : str, optional
      The title of the plot. Default: 'Mollweide view'
    nest : bool, optional
      If True, ordering scheme is NESTED. Default: False (RING)
    min : float, optional
      The minimum range value
    max : float, optional
      The maximum range value
    flip : {'astro', 'geo'}, optional
      Defines the convention of projection : 'astro' (default, east towards
      left, west towards right) or 'geo' (east towards right, west towards
      left)
    remove_dip : bool, optional
      If :const:`True`, remove the dipole+monopole
    remove_mono : bool, optional
      If :const:`True`, remove the monopole
    gal_cut : float, scalar, optional
      Symmetric galactic cut for the dipole/monopole fit.
      Removes points in latitude range [-gal_cut, +gal_cut]
    format : str, optional
      The format of the scale label. Default: '%g'
    format2 : str, optional
      Format of the pixel value under mouse. Default: '%g'
    cbar : bool, optional
      Display the colorbar. Default: True
    notext : bool, optional
      If True, no text is printed around the map
    norm : {'hist', 'log', None}
      Color normalization, hist= histogram equalized color mapping,
      log= logarithmic color mapping, default: None (linear color mapping)
    hold : bool, optional
      If True, replace the current Axes by a MollweideAxes.
      use this if you want to have multiple maps on the same
      figure. Default: False
    sub : int, scalar or sequence, optional
      Use only a zone of the current figure (same syntax as subplot).
      Default: None
    margins : None or sequence, optional
      Either None, or a sequence (left,bottom,right,top)
      giving the margins on left,bottom,right and top
      of the axes. Values are relative to figure (0-1).
      Default: None
    return_projected_map : bool
      if True returns the projected map in a 2d numpy array

    See Also
    --------
    gnomview, cartview, orthview, azeqview
    """
    # healpy is an optional dependency: warn and bail out if missing.
    try:
        from healpy import pixelfunc, projaxes as PA
    except ImportError:
        warnings.warn(
            "Could not load healpy package. If you want to use this feature, "
            "plaese install the healpy package from here: http://healpy.readthedocs.io/en/latest/"
            "or via pip or conda.", RuntimeWarning)
        return

    # Create the figure
    if not (hold or sub):
        if fig == None:
            f=plt.figure(figsize=(8.5,5.4))
            extent = (0.02,0.05,0.96,0.9)
        else:
            f=fig
            extent = (0.02,0.05,0.96,0.9)
    elif hold:
        # Reuse the current axes' position, then remove them.
        f=plt.gcf()
        left,bottom,right,top = np.array(f.gca().get_position()).ravel()
        extent = (left,bottom,right-left,top-bottom)
        f.delaxes(f.gca())
    else: # using subplot syntax
        f=plt.gcf()
        if hasattr(sub,'__len__'):
            nrows, ncols, idx = sub
        else:
            # Decode a 3-digit subplot spec such as 223 -> (2, 2, 3).
            nrows, ncols, idx = sub//100, (sub%100)//10, (sub%10)
        if idx < 1 or idx > ncols*nrows:
            raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
                                                                 ncols,
                                                                 idx))
        c,r = (idx-1)%ncols,(idx-1)//ncols
        if not margins:
            margins = (0.01,0.0,0.0,0.02)
        extent = (c*1./ncols+margins[0],
                  1.-(r+1)*1./nrows+margins[1],
                  1./ncols-margins[2]-margins[0],
                  1./nrows-margins[3]-margins[1])
        extent = (extent[0]+margins[0],
                  extent[1]+margins[1],
                  extent[2]-margins[2]-margins[0],
                  extent[3]-margins[3]-margins[1])

    # Starting to draw : turn interactive off
    wasinteractive = plt.isinteractive()
    plt.ioff()
    try:
        if map is None:
            # Blank map placeholder; suppress the colorbar for it.
            map = np.zeros(12)+np.inf
            cbar=False
        map = pixelfunc.ma_to_array(map)
        ax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,
                               format=format2,flipconv=flip)
        f.add_axes(ax)
        if remove_dip:
            map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,
                                        nest=nest,copy=True,
                                        verbose=True)
        elif remove_mono:
            map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,
                                          copy=True,verbose=True)
        img = ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,
                         cmap=cmap,norm=norm)
        if cbar:
            # Build colorbar boundaries/values from the image's norm so the
            # bar matches the chosen color normalization.
            im = ax.get_images()[0]
            b = im.norm.inverse(np.linspace(0,1,im.cmap.N+1))
            v = np.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)
            if matplotlib.__version__ >= '0.91.0':
                cb=f.colorbar(im,ax=ax,
                              orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.05,fraction=0.1,boundaries=b,values=v,
                              format=format)
            else:
                # for older matplotlib versions, no ax kwarg
                cb=f.colorbar(im,orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.05,fraction=0.1,boundaries=b,values=v,
                              format=format)
            cb.solids.set_rasterized(True)
        ax.set_title(title)
        if not notext:
            ax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,
                    fontweight='bold',transform=ax.transAxes)
        if cbar:
            cb.ax.text(0.5,-1.0,unit,fontsize=14,
                       transform=cb.ax.transAxes,ha='center',va='center')
        f.sca(ax)
    finally:
        # Restore interactive state / trigger a draw only when requested.
        if plot:
            plt.draw()
            if wasinteractive:
                plt.ion()
                #plt.show()
    if return_projected_map:
        return img
def render_mollweide(sim, filename=None,
                     r_band='i', g_band='v', b_band='u',
                     r_scale=0.5, g_scale=1.0, b_scale=1.0,
                     dynamic_range=2.0,
                     mag_range=None,
                     width=25,
                     nside=128,
                     starsize=None,
                     plot=True, axes=None, ret_im=False, clear=True,
                     ret_range=False):
    '''
    Make a 3-color all-sky image of stars in a mollweide projection.
    Adapted from the function pynbody.plot.stars.render
    The colors are based on magnitudes found using stellar Marigo
    stellar population code. However there is no radiative transfer
    to account for dust.
    Returns: If ret_im=True, an NxNx3 array representing an RGB image
    **Optional keyword arguments:**
    *filename*: string (default: None)
        Filename to be written to (if a filename is specified)
    *r_band*: string (default: 'i')
        Determines which Johnston filter will go into the image red channel
    *g_band*: string (default: 'v')
        Determines which Johnston filter will go into the image green channel
    *b_band*: string (default: 'b')
        Determines which Johnston filter will go into the image blue channel
    *r_scale*: float (default: 0.5)
        The scaling of the red channel before channels are combined
    *g_scale*: float (default: 1.0)
        The scaling of the green channel before channels are combined
    *b_scale*: float (default: 1.0)
        The scaling of the blue channel before channels are combined
    *width*: float in kpc (default:50)
        Sets the size of the image field in kpc
    *starsize*: float in kpc (default: None)
        If not None, sets the maximum size of stars in the image
    *ret_im*: bool (default: False)
        if True, the NxNx3 image array is returned
    *ret_range*: bool (default: False)
        if True, the range of the image in mag arcsec^-2 is returned.
    *plot*: bool (default: True)
        if True, the image is plotted
    *axes*: matplotlib axes object (deault: None)
        if not None, the axes object to plot to
    *dynamic_range*: float (default: 2.0)
        The number of dex in luminosity over which the image brightness ranges
    *mag_range*: float, float (default: None)
        If provided, the brightest and faintest surface brightnesses in the range,
        in mag arcsec^-2. Takes precedence over dynamic_range.
    '''
    # ``width`` may be a unit string or Unit object; convert it to the
    # simulation's position units before it is used as a distance below.
    if isinstance(width, str) or issubclass(width.__class__, _units.UnitBase):
        if isinstance(width, str):
            width = _units.Unit(width)
        width = width.in_units(sim['pos'].units, **sim.conversion_context())
    if starsize is not None:
        # Clamp the smoothing length of stars above ``starsize`` so that no
        # single source dominates the image.
        smf = filt.HighPass('smooth', str(starsize) + ' kpc')
        sim.s[smf]['smooth'] = array.SimArray(starsize, 'kpc', sim=sim)
    # Render each channel as a spherical (HEALPix) luminosity-density map,
    # project it onto mollweide pixels, then apply the per-channel scale.
    r = render_spherical_image(sim.s, qty=r_band + '_lum_den', nside=nside, distance=width, kernel=Kernel2D(),kstep=0.5, denoise=None, out_units="pc^-2", threaded=False)# * r_scale
    r = mollview(r,return_projected_map=True) * r_scale
    f=plt.gcf()
    g = render_spherical_image(sim.s, qty=g_band + '_lum_den', nside=nside, distance=width, kernel=Kernel2D(),kstep=0.5, denoise=None, out_units="pc^-2", threaded=False)# * g_scale
    g = mollview(g,return_projected_map=True,fig=f) * g_scale
    f=plt.gcf()
    b = render_spherical_image(sim.s, qty=b_band + '_lum_den', nside=nside, distance=width, kernel=Kernel2D(),kstep=0.5, denoise=None, out_units="pc^-2", threaded=False)# * b_scale
    b = mollview(b,return_projected_map=True,fig=f) * b_scale
    # convert all channels to mag arcsec^-2
    r=convert_to_mag_arcsec2(r, mollview=True)
    g=convert_to_mag_arcsec2(g, mollview=True)
    b=convert_to_mag_arcsec2(b, mollview=True)
    if mag_range is None:
        # NOTE(review): ``combine`` receives dynamic_range*2.5 here while
        # mag_min is then set mag_max + 2.5*dynamic_range -- confirm the
        # intended factor against pynbody.plot.stars.render.
        rgbim, mag_max = combine(r, g, b, dynamic_range*2.5, mollview=True)
        mag_min = mag_max + 2.5*dynamic_range
    else:
        mag_max, mag_min = mag_range
        rgbim, mag_max = combine(r, g, b, mag_min - mag_max, mag_max, mollview=True)
    if plot:
        if clear:
            plt.clf()
        if axes is None:
            axes = plt.gca()
        if axes:
            # Flip vertically so north is up in the displayed image.
            axes.imshow(
                rgbim[::-1, :])#, extent=(-width / 2, width / 2, -width / 2, width / 2)
            axes.axis('off')
        plt.draw()
    if filename:
        plt.savefig(filename)
    if ret_im:
        return rgbim
    if ret_range:
        return mag_max, mag_min
def sfh(sim, filename=None, massform=True, clear=False, legend=False,
        subplot=False, trange=False, bins=100, **kwargs):
    '''
    star formation history
    **Optional keyword arguments:**
    *trange*: list, array, or tuple
        size(t_range) must be 2. Specifies the time range.
    *bins*: int
        number of bins to use for the SFH
    *massform*: bool
        decides whether to use original star mass (massform) or final star mass
    *subplot*: subplot object
        where to plot SFH
    *legend*: boolean
        whether to draw a legend or not
    *clear*: boolean
        if False (default), plot on the current axes. Otherwise, clear the figure first.
    By default, sfh will use the formation mass of the star. In tipsy, this will be
    taken from the starlog file. Set massform=False if you want the final (observed)
    star formation history
    **Usage:**
    >>> import pynbody.plot as pp
    >>> pp.sfh(s,linestyle='dashed',color='k')
    '''
    import matplotlib.pyplot as pyplot
    # Plot either on the supplied subplot or on the pyplot state machine.
    if subplot:
        plt = subplot
    else:
        plt = pyplot
    # Accept the legacy ``nbins`` keyword as an alias for ``bins``.  It must
    # be popped so it is not forwarded to plt.hist() below.  (The original
    # code performed this check twice; the first copy was dead code.)
    if 'nbins' in kwargs:
        bins = kwargs.pop('nbins')
    # A snapshot containing gas or dark matter is a full simulation, so
    # restrict to the star family; otherwise assume it is already stars only.
    if ((len(sim.g)>0) | (len(sim.d)>0)): simstars = sim.star
    else: simstars = sim
    if trange:
        assert len(trange) == 2
    else:
        trange = [simstars['tform'].in_units(
            "Gyr").min(), simstars['tform'].in_units("Gyr").max()]
    # Convert per-bin mass to Msol/yr: bin width is (t1 - t0)/bins Gyr.
    binnorm = 1e-9 * bins / (trange[1] - trange[0])
    trangefilt = filt.And(filt.HighPass('tform', str(trange[0]) + ' Gyr'),
                          filt.LowPass('tform', str(trange[1]) + ' Gyr'))
    tforms = simstars[trangefilt]['tform'].in_units('Gyr')
    if massform:
        try:
            weight = simstars[trangefilt][
                'massform'].in_units('Msol') * binnorm
        except (KeyError, units.UnitsException):
            warnings.warn(
                "Could not load massform array -- falling back to current stellar masses", RuntimeWarning)
            weight = simstars[trangefilt]['mass'].in_units('Msol') * binnorm
    else:
        weight = simstars[trangefilt]['mass'].in_units('Msol') * binnorm
    if clear:
        plt.clf()
    sfhist, thebins, patches = plt.hist(tforms, weights=weight, bins=bins,
                                        histtype='step', **kwargs)
    if not subplot:
        # don't set the limits
        #plt.ylim(0.0, 1.2 * np.max(sfhist))
        plt.xlabel('Time [Gyr]', fontsize='large')
        plt.ylabel(r'SFR [M$_\odot$ yr$^{-1}$]', fontsize='large')
    else:
        plt.set_ylim(0.0, 1.2 * np.max(sfhist))
    # Make both axes have the same start and end point.
    if subplot:
        x0, x1 = plt.get_xlim()
    else:
        x0, x1 = plt.gca().get_xlim()
    # add a z axis on top if it has not been already done by an earlier plot:
    from pynbody.analysis import pkdgrav_cosmo as cosmo
    c = cosmo.Cosmology(sim=sim)
    old_axis = pyplot.gca()
    pz = plt.twiny()
    labelzs = np.arange(5, int(sim.properties['z']) - 1, -1)
    times = [13.7 * c.Exp2Time(1.0 / (1 + z)) / c.Exp2Time(1) for z in labelzs]
    pz.set_xticks(times)
    pz.set_xticklabels([str(x) for x in labelzs])
    pz.set_xlim(x0, x1)
    pz.set_xlabel('$z$')
    pyplot.sca(old_axis)
    if legend:
        plt.legend(loc=1)
    if filename:
        logger.info("Saving %s", filename)
        plt.savefig(filename)
    return array.SimArray(sfhist, "Msol yr**-1"), array.SimArray(thebins, "Gyr")
def schmidtlaw(sim, center=True, filename=None, pretime='50 Myr',
               diskheight='3 kpc', rmax='20 kpc', compare=True,
               radial=True, clear=True, legend=True, bins=10, **kwargs):
    '''Schmidt Law
    Plots star formation surface density vs. gas surface density including
    the observed relationships. Currently, only plots densities found in
    radial annuli.
    **Usage:**
    >>> import pynbody.plot as pp
    >>> pp.schmidtlaw(h[1])
    **Optional keyword arguments:**
    *center*: bool
        center and align the input simulation faceon.
    *filename*: string
        Name of output file
    *pretime* (default='50 Myr'): age of stars to consider for SFR
    *diskheight* (default='3 kpc'): height of gas and stars above
        and below disk considered for SF and gas densities.
    *rmax* (default='20 kpc'): radius of disk considered
    *compare* (default=True): whether to include Kennicutt (1998) and
        Bigiel+ (2008) for comparison
    *radial* (default=True): should bins be annuli or a rectangular grid?
    *bins* (default=10): How many radial bins should there be?
    *legend*: boolean
        whether to draw a legend or not
    '''
    if not radial:
        raise NotImplementedError("Sorry, only radial Schmidt law currently supported")
    if center:
        angmom.faceon(sim)
    if isinstance(pretime, str):
        pretime = units.Unit(pretime)
    # select stuff
    diskgas = sim.gas[filt.Disc(rmax, diskheight)]
    diskstars = sim.star[filt.Disc(rmax, diskheight)]
    # Stars younger than ``pretime`` trace the recent star formation rate.
    youngstars = np.where(diskstars['tform'].in_units("Myr") >
                          sim.properties['time'].in_units(
                              "Myr", **sim.conversion_context())
                          - pretime.in_units('Myr'))[0]
    # Calculate surface densities in radial annuli.  (The original carried an
    # unreachable rectangular-grid branch referencing an undefined ``binsize``;
    # it has been removed -- radial=False already raises above.)
    ps = profile.Profile(diskstars[youngstars], nbins=bins)
    pg = profile.Profile(diskgas, nbins=bins)
    if clear:
        plt.clf()
    # SFR surface density: young-star mass per area divided by the formation
    # window ``pretime`` (converted from Myr to yr via the 1e6 factor).
    plt.loglog(pg['density'].in_units('Msol pc^-2'),
               ps['density'].in_units('Msol kpc^-2') / pretime / 1e6, "+",
               **kwargs)
    if compare:
        # Overlay the observed relations across the plotted gas-density range.
        xsigma = np.logspace(np.log10(pg['density'].in_units('Msol pc^-2')).min(),
                             np.log10(
                                 pg['density'].in_units('Msol pc^-2')).max(),
                             100)
        ysigma = 2.5e-4 * xsigma ** 1.5  # Kennicutt (1998)
        xbigiel = np.logspace(1, 2, 10)
        ybigiel = 10. ** (-2.1) * xbigiel ** 1.0  # Bigiel et al (2007)
        plt.loglog(xsigma, ysigma, label='Kennicutt (1998)')
        plt.loglog(
            xbigiel, ybigiel, linestyle="dashed", label='Bigiel et al (2007)')
    plt.xlabel(r'$\Sigma_{gas}$ [M$_\odot$ pc$^{-2}$]')
    plt.ylabel(r'$\Sigma_{SFR}$ [M$_\odot$ yr$^{-1}$ kpc$^{-2}$]')
    if legend:
        plt.legend(loc=2)
    if (filename):
        logger.info("Saving %s", filename)
        plt.savefig(filename)
def oneschmidtlawpoint(sim, center=True, pretime='50 Myr',
                       diskheight='3 kpc', rmax='20 kpc', **kwargs):
    '''One Schmidt Law Point
    Determines values for star formation surface density and gas surface
    density for the entire galaxy based on the half mass cold gas radius.
    **Usage:**
    import pynbody.plot as pp
    pp.oneschmidtlawpoint(h[1])
    *pretime* (default='50 Myr'): age of stars to consider for SFR
    *diskheight* (default='3 kpc'): height of gas and stars above
        and below disk considered for SF and gas densities.
    *rmax* (default='20 kpc'): radius of disk considered
    Returns (xsigma, ysigma): gas surface density [Msol/pc^2] and SFR
    surface density [Msol/yr/kpc^2] as a single point.
    '''
    if center:
        angmom.faceon(sim)
    # Cold gas (T < 1e5 K) inside a 30 kpc x 3 kpc disc.
    # (Original referenced an undefined global ``h``; the function's own
    # ``sim`` argument is clearly intended.)
    cg = sim.gas[filt.LowPass('temp', 1e5)]
    cgd = cg[filt.Disc('30 kpc', '3 kpc')]
    cgs = np.sort(cgd['rxy'].in_units('kpc'))
    # Half-mass (median) radius of the cold gas; integer division is required
    # for indexing under Python 3 (the original used ``len(cgs) / 2``).
    rhgas = cgs[len(cgs) // 2]
    instars = sim.star[filt.Disc(str(rhgas) + ' kpc', '3 kpc')]
    # Mass of recently-formed stars (< 100 Myr) inside that radius.
    minstars = np.sum(
        instars[filt.LowPass('age', '100 Myr')]['mass'].in_units('Msol'))
    ingasm = np.sum(
        cg[filt.Disc(str(rhgas) + ' kpc', '3 kpc')]['mass'].in_units('Msol'))
    rpc = rhgas * 1000.0
    rkpc = rhgas
    xsigma = ingasm / (np.pi * rpc * rpc)
    # 1e8 = 100 Myr in yr, matching the age cut above.
    ysigma = minstars / (np.pi * rkpc * rkpc * 1e8)
    return xsigma, ysigma
def satlf(sim, band='v', filename=None, MWcompare=True, Trentham=True,
          clear=True, legend=True,
          label='Simulation', **kwargs):
    '''
    satellite luminosity function
    **Options:**
    *band* ('v'): which Johnson band to use. available filters: U, B,
    V, R, I, J, H, K
    *filename* (None): name of file to which to save output
    *MWcompare* (True): whether to plot comparison lines to MW
    *Trentham* (True): whether to plot comparison lines to Trentham +
    Tully (2009) combined with Koposov et al (2007)
    By default, satlf will use the formation mass of the star. In
    tipsy, this will be taken from the starlog file.
    **Usage:**
    >>> import pynbody.plot as pp
    >>> h = s.halos()
    >>> pp.satlf(h[1],linestyle='dashed',color='k')
    '''
    import os
    from ..analysis import luminosity as lum
    halomags = []
    # Collect the magnitude of every child (satellite) halo that has stars.
    # try :
    for haloid in sim.properties['children']:
        if (sim._halo_catalogue.contains(haloid)):
            halo = sim._halo_catalogue[haloid]
            try:
                halo.properties[band + '_mag'] = lum.halo_mag(halo, band=band)
                if np.isfinite(halo.properties[band + '_mag']):
                    halomags.append(halo.properties[band + '_mag'])
            except IndexError:
                pass  # no stars in satellite
    # except KeyError:
    #raise KeyError, str(sim)+' properties have no children key as a halo type would'
    if clear:
        plt.clf()
    plt.semilogy(sorted(halomags), np.arange(len(halomags)) + 1, label=label,
                 **kwargs)
    plt.xlabel('M$_{' + band + '}$')
    plt.ylabel('Cumulative LF')
    if MWcompare:
        # compare with observations of MW
        tolfile = os.path.join(os.path.dirname(__file__), "tollerud2008mw")
        if os.path.exists(tolfile):
            # The original used the Python 2-only ``file()`` builtin; use
            # ``open`` with a context manager so the handle is closed.
            with open(tolfile) as tolfh:
                tolmags = [float(q) for q in tolfh.readlines()]
        else:
            raise OSError(tolfile + " not found")
        plt.semilogy(sorted(tolmags), np.arange(len(tolmags)),
                     label='Milky Way')
    if Trentham:
        halomags = np.array(halomags)
        halomags = halomags[np.asarray(np.where(np.isfinite(halomags)))]
        xmag = np.linspace(halomags.min(), halomags.max(), 100)
        # Trentham + Tully (2009) equation 6
        # number of dwarfs between -11>M_R>-17 is well correlated with mass
        logNd = 0.91 * np.log10(sim.properties['mass']) - 10.2
        # set Nd from each equal to combine Trentham + Tully with Koposov
        coeff = 10.0 ** logNd / (10 ** -0.6 - 10 ** -1.2)
        # print 'Koposov coefficient:'+str(coeff)
        # Analytic expression for MW from Koposov
        #import pdb; pdb.set_trace()
        yn = coeff * 10 ** ((xmag + 5.0) / 10.0)  # Koposov et al (2007)
        plt.semilogy(xmag, yn, linestyle="dashed",
                     label='Trentham & Tully (2009)')
    if legend:
        plt.legend(loc=2)
    if (filename):
        logger.info("Saving %s", filename)
        plt.savefig(filename)
def sbprofile(sim, band='v', diskheight='3 kpc', rmax='20 kpc', binning='equaln',
              center=True, clear=True, filename=None, axes=False, fit_exp=False,
              print_ylabel=True, fit_sersic=False, **kwargs):
    '''
    surface brightness profile
    **Usage:**
    >>> import pynbody.plot as pp
    >>> h = s.halos()
    >>> pp.sbprofile(h[1],exp_fit=3,linestyle='dashed',color='k')
    **Options:**
    *band* ('v'): which Johnson band to use. available filters: U, B,
    V, R, I, J, H, K
    *fit_exp*(False): Fits straight exponential line outside radius specified.
    *fit_sersic*(False): Fits Sersic profile outside radius specified.
    *diskheight('3 kpc')*
    *rmax('20 kpc')*: Size of disk to be profiled
    *binning('equaln')*: How show bin sizes be determined? based on
        pynbody.analysis.profile
    *center(True)*: Automatically align face on and center?
    *axes(False)*: In which axes (subplot) should it be plotted?
    *filename* (None): name of file to which to save output
    **needs a description of all keywords**
    By default, sbprof will use the formation mass of the star.
    In tipsy, this will be taken from the starlog file.
    '''
    if center:
        logger.info("Centering...")
        angmom.faceon(sim)
    logger.info("Selecting disk stars")
    diskstars = sim.star[filt.Disc(rmax, diskheight)]
    logger.info("Creating profile")
    ps = profile.Profile(diskstars, type=binning)
    logger.info("Plotting")
    r = ps['rbins'].in_units('kpc')
    # Plot on the supplied axes object if given, otherwise on pyplot.
    if axes:
        plt = axes
    else:
        import matplotlib.pyplot as plt
        if clear:
            plt.clf()
    plt.plot(r, ps['sb,' + band], linewidth=2, **kwargs)
    # Surface brightness axis is inverted (brighter = smaller magnitude).
    if axes:
        plt.set_ylim(max(ps['sb,' + band]), min(ps['sb,' + band]))
    else:
        plt.ylim(max(ps['sb,' + band]), min(ps['sb,' + band]))
    if fit_exp:
        # Linear fit of magnitude vs radius outside ``fit_exp`` kpc gives the
        # exponential disk scale length and central surface brightness.
        exp_inds = np.where(r.in_units('kpc') > fit_exp)
        expfit = np.polyfit(np.array(r[exp_inds]),
                            np.array(ps['sb,' + band][exp_inds]), 1)
        # 1.0857 is how many magnitudes a 1/e decrease is
        print(("h: ", 1.0857 / expfit[0], " u_0:", expfit[1]))
        fit = np.poly1d(expfit)
        # Drop label/linestyle so the fit line does not inherit them.
        if 'label' in kwargs:
            del kwargs['label']
        if 'linestyle' in kwargs:
            del kwargs['linestyle']
        plt.plot(r, fit(r), linestyle='dashed', **kwargs)
    if fit_sersic:
        # Linear fit of magnitude vs log10(radius) inside ``fit_sersic`` kpc.
        sersic_inds = np.where(r.in_units('kpc') < fit_sersic)
        sersicfit = np.polyfit(np.log10(np.array(r[sersic_inds])),
                               np.array(ps['sb,' + band][sersic_inds]), 1)
        fit = np.poly1d(sersicfit)
        print(("n: ", sersicfit[0], " other: ", sersicfit[1]))
        if 'label' in kwargs:
            del kwargs['label']
        if 'linestyle' in kwargs:
            del kwargs['linestyle']
        plt.plot(r, fit(r), linestyle='dashed', **kwargs)
    #import pdb; pdb.set_trace()
    if axes:
        if print_ylabel:
            plt.set_ylabel(band + '-band Surface brightness [mag as$^{-2}$]')
    else:
        plt.xlabel('R [kpc]')
        plt.ylabel(band + '-band Surface brightness [mag as$^{-2}$]')
    if filename:
        logger.info("Saving %s", filename)
        plt.savefig(filename)
def f(x, alpha, delta, g):
    """Behroozi+ (2013) fitting function evaluated at x = log10(Mh/M1).

    Combines a power-law term with a subpower cutoff term; accepts scalars
    or numpy arrays for ``x``.
    """
    power_term = -np.log10(10.0 ** (x * alpha) + 1.0)
    cutoff_term = delta * (np.log10(1 + np.exp(x))) ** g / (1 + np.exp(10 ** -x))
    return power_term + cutoff_term
def behroozi(xmasses, z, alpha=-1.412, Kravtsov=False):
    '''Based on Behroozi+ (2013) return what stellar mass corresponds to the
    halo mass passed in.

    Returns ``(10**logMstar, 10**scatter)`` where scatter is the log-normal
    dex scatter.  ``Kravtsov=True`` substitutes the Kravtsov re-fit values
    for several parameters.
    **Usage**
    >>> from pynbody.plot.stars import moster
    >>> xmasses = np.logspace(np.log10(min(totmasshalos)),1+np.log10(max(totmasshalos)),20)
    >>> ystarmasses, errors = moster(xmasses,halo_catalog._halos[1].properties['z'])
    >>> plt.fill_between(xmasses,np.array(ystarmasses)/np.array(errors),
                         y2=np.array(ystarmasses)*np.array(errors),
                         facecolor='#BBBBBB',color='#BBBBBB')
    '''
    loghm = np.log10(xmasses)
    # from Behroozi et al (2013)
    if Kravtsov: EPS = -1.642
    else: EPS = -1.777
    EPSpe = 0.133
    EPSme = 0.146
    EPSanu = -0.006
    EPSanupe = 0.113
    EPSanume = 0.361
    EPSznu = 0
    EPSznupe = 0.003
    EPSznume = 0.104
    EPSa = 0.119
    EPSape = 0.061
    EPSame = -0.012
    M1 = 11.514
    M1pe = 0.053
    M1me = 0.009
    M1a = -1.793
    M1ape = 0.315
    M1ame = 0.330
    M1z = -0.251
    M1zpe = 0.012
    M1zme = 0.125
    if Kravtsov: alpha=-1.779
    AL = alpha
    ALpe = 0.02
    ALme = 0.105
    ALa = 0.731
    ALape = 0.344
    ALame = 0.296
    if Kravtsov: DEL=4.394
    else: DEL = 3.508
    DELpe = 0.087
    DELme = 0.369
    DELa = 2.608
    DELape = 2.446
    DELame = 1.261
    DELz = -0.043
    DELzpe = 0.958
    DELzme = 0.071
    if Kravtsov: G=0.547
    else: G = 0.316
    Gpe = 0.076
    Gme = 0.012
    Ga = 1.319
    Gape = 0.584
    Game = 0.505
    Gz = 0.279
    Gzpe = 0.256
    Gzme = 0.081
    # Scale factor and the redshift-dependence damping term nu(a).
    a = 1.0 / (z + 1.0)
    nu = np.exp(-4 * a ** 2)
    # Evolve each parameter to the requested redshift (Behroozi+ 2013 eq. 4).
    logm1 = M1 + nu * (M1a * (a - 1.0) + M1z * z)
    logeps = EPS + nu * (EPSanu * (a - 1.0) + EPSznu * z) - EPSa * (a - 1.0)
    analpha = AL + nu * ALa * (a - 1.0)
    delta = DEL + nu * DELa * (a - 1.0)
    g = G + nu * (Ga * (a - 1.0) + z * Gz)
    x = loghm - logm1
    f0 = -np.log10(2.0) + delta * np.log10(2.0) ** g / (1.0 + np.exp(1))
    smp = logm1 + logeps + f(x, analpha, delta, g) - f0
    # Log-normal scatter in dex (Behroozi+ 2013); a scalar regardless of the
    # input shape.  (The original dead ``np.zeros(len(smp))`` assignment,
    # immediately overwritten, has been removed.)
    scatter = 0.218 - 0.023 * (a - 1.0)
    return 10 ** smp, 10 ** scatter
def moster(xmasses, z):
    '''Stellar mass and log-normal uncertainty for given halo masses,
    following the Moster et al (2013) abundance-matching relation.

    Returns ``(10**logMstar, 10**sigma)`` so callers can divide/multiply
    to get the lower/upper envelope.
    **Usage**
    >>> from pynbody.plot.stars import moster
    >>> xmasses = np.logspace(np.log10(min(totmasshalos)),1+np.log10(max(totmasshalos)),20)
    >>> ystarmasses, errors = moster(xmasses,halo_catalog._halos[1].properties['z'])
    >>> plt.fill_between(xmasses,np.array(ystarmasses)/np.array(errors),
                         y2=np.array(ystarmasses)*np.array(errors),
                         facecolor='#BBBBBB',color='#BBBBBB')
    '''
    log_halo = np.log10(xmasses)
    # Best-fit parameters and their 1-sigma uncertainties from
    # Moster et al (2013), Table 1.
    M10a, M11a = 11.590470, 1.194913
    R10a, R11a = 0.035113, -0.024729
    B10a, B11a = 1.376177, -0.825820
    G10a, G11a = 0.608170, 0.329275
    M10e, M11e = 0.236067, 0.353477
    R10e, R11e = 0.00577173, 0.00693815
    B10e, B11e = 0.153, 0.225
    G10e, G11e = 0.059, 0.173
    # Evolve each parameter with the scale factor a = 1/(1+z).
    a = 1.0 / (z + 1.0)
    growth = 1.0 - a
    m1 = M10a + M11a * growth
    r = R10a + R11a * growth
    b = B10a + B11a * growth
    g = G10a + G11a * growth
    # Double power law in log space (Moster+ 2013 eq. 2).
    smp = (hmp_term := log_halo) + np.log10(2.0 * r) - np.log10(
        (10.0 ** (hmp_term - m1)) ** (-b) + (10.0 ** (hmp_term - m1)) ** (g))
    # Propagate the parameter uncertainties through the relation.
    eta = np.exp(np.log(10.) * (log_halo - m1))
    alpha = eta ** (-b) + eta ** g
    log10_e = np.log10(np.exp(1.0))
    dmdm10 = (g * eta ** g + b * eta ** (-b)) / alpha
    dmdm11 = dmdm10 * growth
    dmdr10 = log10_e / r
    dmdr11 = dmdr10 * growth
    dmdb10 = log10_e / alpha * np.log(eta) * eta ** (-b)
    dmdb11 = dmdb10 * growth
    dmdg10 = -log10_e / alpha * np.log(eta) * eta ** g
    dmdg11 = dmdg10 * growth
    sigma = np.sqrt(
        dmdm10 * dmdm10 * M10e * M10e + dmdm11 * dmdm11 * M11e * M11e
        + dmdr10 * dmdr10 * R10e * R10e + dmdr11 * dmdr11 * R11e * R11e
        + dmdb10 * dmdb10 * B10e * B10e + dmdb11 * dmdb11 * B11e * B11e
        + dmdg10 * dmdg10 * G10e * G10e + dmdg11 * dmdg11 * G11e * G11e)
    return 10 ** smp, 10 ** sigma
def hudson(xmasses, z):
    ''' Based on Hudson+ (2014), returns what stellar mass corresponds to the
    halo mass passed in. This is the only SMHMR function that is not based
    on abundance matching, but instead uses date from CFHTLenS galaxy lensing data.
    >>> from pynbody.plot.stars import hudson
    >>> xmasses = np.logspace(np.log10(min(totmasshalos)),1+np.log10(max(totmasshalos)),20)
    >>> ystarmasses, errors = hudson(xmasses,halo_catalog._halos[1].properties['z'])
    >>> plt.fill_between(xmasses,np.array(ystarmasses)/np.array(errors),
                         y2=np.array(ystarmasses)*np.array(errors),
                         facecolor='#BBBBBB',color='#BBBBBB')
    '''
    # Best-fit parameters (value, uncertainty) of the Hudson+ (2014)
    # stellar-to-halo mass relation at z=0.5, plus linear redshift evolution.
    f05 = 0.0414
    f05_err = 0.0024
    fz = 0.029
    fz_err = 0.009
    log10M05 = 12.07
    log10M05_err = 0.07
    Mz = 0.09
    Mz_err = 0.24
    beta = 0.69
    beta_err = 0.09
    gamma = 0.8
    # Evolve the normalisation and pivot mass to the requested redshift.
    f1 = f05 + (z-0.5)*fz
    f1_err = np.sqrt(f05_err**2+(z-0.5)**2*fz_err**2)
    M1_exp = (log10M05+(z-0.5)*Mz)
    M1_exp_err = np.sqrt(log10M05_err**2+(z-0.5)**2*Mz_err**2)
    M1 = np.power(10, M1_exp)
    M1_err = np.abs(M1*np.log(10)*M1_exp_err)
    # Double power law f*(M) = 2 f1 / [(M/M1)^-beta + (M/M1)^gamma].
    beta_term = np.power(xmasses/M1, -beta)
    # NOTE(review): the error-propagation expressions below look inconsistent
    # (e.g. they mix terms and their errors) -- verify against Hudson+ (2014).
    beta_term_err = np.sqrt((beta/M1*M1_err/M1**2)**2 + (np.log(M1)*beta_err)**2)
    gamma_term = np.power(xmasses/M1, gamma)
    gamma_term_err = np.abs(gamma_term*gamma*M1_err/M1**3)
    fstar_denom = beta_term + gamma_term
    # NOTE(review): ``beta_term*2`` is very likely a typo for ``beta_term**2``
    # -- confirm the intended quadrature sum before relying on the errors.
    fstar_denom_err = np.sqrt(beta_term*2+ gamma_term**2)
    fstar = 2.0*f1/fstar_denom
    fstar_err = np.sqrt((f1_err/f1)**2 + (fstar_denom_err/fstar_denom)**2)
    # NOTE(review): the second return value is 2.0/relative-error, unlike the
    # multiplicative 10**sigma convention of moster()/behroozi() -- confirm.
    return fstar*xmasses, 2.0/fstar_err
def subfindguo(halo_catalog, clear=False, compare=True, baryfrac=False,
               filename=False, **kwargs):
    '''Stellar Mass vs. Halo Mass
    Takes a halo catalogue and plots the member stellar masses as a
    function of halo mass.
    Usage:
    >>> import pynbody.plot as pp
    >>> h = s.halos()
    >>> pp.guo(h,marker='+',markerfacecolor='k')
    **Options:**
    *compare* (True): Should comparison line be plotted?
        If compare = 'guo', Guo+ (2010) plotted instead of Behroozi+ (2013)
    *baryfrac* (False): Should line be drawn for cosmic baryon fraction?
    *filename* (None): name of file to which to save output
    '''
    # if 'marker' not in kwargs :
    # kwargs['marker']='o'
    starmasshalos = []
    totmasshalos = []
    # Cosmic baryon fraction from the catalogue's cosmology.
    f_b = halo_catalog[0].properties['omegaB0']/halo_catalog[0].properties['omegaM0']
    # Collect (total mass, stellar mass) for every SUBFIND subhalo with stars.
    # MassType index 4 is the stellar component.
    for halo in halo_catalog:
        for subhalo in halo.sub:
            subhalo.properties['MassType'].convert_units('Msol')
            halostarmass = subhalo.properties['MassType'][4]
            if halostarmass:
                starmasshalos.append(halostarmass)
                totmasshalos.append(np.sum(subhalo.properties['MassType']))
    if clear:
        plt.clf()
    plt.loglog(totmasshalos, starmasshalos, 'o', **kwargs)
    plt.xlabel('Total Halo Mass')
    plt.ylabel('Halo Stellar Mass')
    if compare:
        xmasses = np.logspace(
            np.log10(min(totmasshalos)), 1 + np.log10(max(totmasshalos)), 20)
        if compare == 'guo':
            # from Sawala et al (2011) + Guo et al (2009)
            ystarmasses = xmasses*0.129*((xmasses/2.5e11)**-0.926 + (xmasses/2.5e11)**0.261)**-2.44
        else :
            ystarmasses, errors = behroozi(xmasses,halo_catalog._halos[1].properties['z'])
            plt.fill_between(xmasses,np.array(ystarmasses)/np.array(errors),
                             y2=np.array(ystarmasses)*np.array(errors),
                             facecolor='#BBBBBB',color='#BBBBBB')
        # NOTE(review): this label is applied even when compare == 'guo',
        # in which case the curve is Guo+/Sawala+, not Behroozi -- confirm.
        plt.loglog(xmasses,ystarmasses,label='Behroozi et al (2013)')
    if baryfrac :
        # Reference lines at the full and 10% cosmic baryon fraction.
        xmasses = np.logspace(np.log10(min(totmasshalos)),1+np.log10(max(totmasshalos)),20)
        ystarmasses = xmasses*f_b
        plt.loglog(xmasses,ystarmasses,linestyle='dotted',label='f_b = '+'%.2f' % f_b)
        ystarmasses = xmasses*0.1*f_b
        plt.loglog(xmasses,ystarmasses,linestyle='dashed',label='0.1 f_b = '+'%.2f' % (0.1*f_b))
    plt.axis([0.8*min(totmasshalos),1.2*max(totmasshalos),
              0.8*min(starmasshalos),1.2*max(starmasshalos)])
    if (filename):
        logger.info("Saving %s", filename)
        plt.savefig(filename)
def guo(halo_catalog, clear=False, compare=True, baryfrac=False,
        filename=False, **kwargs):
    '''Stellar Mass vs. Halo Mass
    Takes a halo catalogue and plots the member stellar masses as a
    function of halo mass.
    Usage:
    >>> import pynbody.plot as pp
    >>> h = s.halos()
    >>> pp.guo(h,marker='+',markerfacecolor='k')
    **Options:**
    *compare* (True): Should comparison line be plotted?
        If compare = 'guo', Guo+ (2010) plotted instead of Behroozi+ (2013)
    *baryfrac* (False): Should line be drawn for cosmic baryon fraction?
    *filename* (None): name of file to which to save output
    '''
    # if 'marker' not in kwargs :
    # kwargs['marker']='o'
    starmasshalos = []
    totmasshalos = []
    # Collect (total mass, stellar mass) per halo; halo ids are 1-based.
    halo_catalog._halos[1]['mass'].convert_units('Msol')
    for i in np.arange(len(halo_catalog._halos)) + 1:
        halo = halo_catalog[i]
        halostarmass = np.sum(halo.star['mass'])
        if halostarmass:
            starmasshalos.append(halostarmass)
            totmasshalos.append(np.sum(halo['mass']))
    if clear:
        plt.clf()
    plt.loglog(totmasshalos, starmasshalos, 'o', **kwargs)
    plt.xlabel('Total Halo Mass')
    plt.ylabel('Halo Stellar Mass')
    if compare:
        xmasses = np.logspace(
            np.log10(min(totmasshalos)), 1 + np.log10(max(totmasshalos)), 20)
        if compare == 'guo':
            # from Sawala et al (2011) + Guo et al (2009)
            ystarmasses = xmasses*0.129*((xmasses/2.5e11)**-0.926 + (xmasses/2.5e11)**0.261)**-2.44
        else :
            ystarmasses, errors = behroozi(xmasses,halo_catalog._halos[1].properties['z'])
            plt.fill_between(xmasses,np.array(ystarmasses)/np.array(errors),
                             y2=np.array(ystarmasses)*np.array(errors),
                             facecolor='#BBBBBB',color='#BBBBBB')
        # NOTE(review): this label is applied even when compare == 'guo' -- confirm.
        plt.loglog(xmasses,ystarmasses,label='Behroozi et al (2013)')
    if baryfrac :
        # Reference line at a fixed baryon fraction 0.04/0.24.
        xmasses = np.logspace(np.log10(min(totmasshalos)),1+np.log10(max(totmasshalos)),20)
        ystarmasses = xmasses*0.04/0.24
        plt.loglog(xmasses,ystarmasses,linestyle='dotted',label='f_b = 0.16')
    plt.axis([0.8*min(totmasshalos),1.2*max(totmasshalos),
              0.8*min(starmasshalos),1.2*max(starmasshalos)])
    if (filename):
        logger.info("Saving %s", filename)
        plt.savefig(filename)
| 41,698 | 29.661029 | 199 | py |
sememes_codriven_text_matching | sememes_codriven_text_matching-main/util_for_bert.py | import jieba
import torch
import pandas as pd
from torch.utils.data import DataLoader,Dataset
from gensim.models import word2vec
import json
import re
from how_net import is_sememe
import args
from tqdm import tqdm
from args import *
def load_word_vocab():
    """Load the word vocabulary for the bq_corpus dataset.

    Reads one word per line from the fixed vocabulary file and returns a
    pair of dicts ``(word2idx, idx2word)`` mapping words to 0-based line
    indices and back.
    """
    path = 'data/chinese/bq_corpus/word_vocab.txt'
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection to close it).
    with open(path, encoding='utf-8') as vocab_file:
        vocab = [line.strip() for line in vocab_file.readlines()]
    word2idx = {word: index for index, word in enumerate(vocab)}
    idx2word = {index: word for index, word in enumerate(vocab)}
    return word2idx, idx2word
def word2index(sentence1,sentence2):
    # Segment each sentence pair with jieba and, for every pair, build a
    # max_len x max_len 0/1 matrix marking word pairs that share a sememe
    # (``max_len`` comes from ``from args import *``).
    # NOTE(review): word2idx/idx2word are loaded but never used here --
    # possibly a leftover; confirm before removing.
    word2idx,idx2word = load_word_vocab()
    s1_ = []
    s2_ = []
    mat = []
    for s1,s2 in tqdm(zip(sentence1,sentence2)):
        seg1 = jieba.lcut(s1)
        seg2 = jieba.lcut(s2)
        s1_.append(seg1)
        s2_.append(seg2)
        # Cells beyond the actual segment lengths stay 0 (padding region).
        res =[[0] * max_len for _ in range(max_len)]
        for i in range(max_len):
            for j in range(max_len):
                if i<len(seg1) and j<len(seg2) and is_sememe(seg1[i], seg2[j]) :
                    res[i][j] = 1
        mat.append(res)
    # Returns (segmented sentence1 list, segmented sentence2 list, matrices).
    return s1_,s2_,mat
def get_txt(dir):
    """Parse a tab-separated sentence-pair file.

    Each line is ``sentence1<TAB>sentence2<TAB>label``.  Returns three
    parallel lists: cleaned sentence1 strings, cleaned sentence2 strings,
    and integer labels.
    """
    # Strip every character that is not a CJK ideograph, ASCII letter or
    # digit.  NOTE(review): the literal '^' characters inside the class mean
    # '^' itself is also kept -- likely unintended, preserved for parity.
    cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")
    sent1, sent2, label = [], [], []
    # Context manager closes the handle (the original leaked it).
    with open(dir, 'r', encoding='utf8') as ff:
        for line in ff:
            i = line.rstrip().split('\t')
            sent1.append(cop.sub('', i[0]))
            sent2.append(cop.sub('', i[1]))
            label.append(int(cop.sub('', i[2])))
    return sent1, sent2, label
class LoadData(Dataset):
    # Dataset of sentence pairs: segmented sentences, an int label, and a
    # max_len x max_len sememe co-occurrence matrix per pair.
    def __init__(self,dir,max_len=args.max_len):
        s1,s2,label = get_txt(dir)
        # NOTE(review): only the first 10 examples are kept -- this looks
        # like a debugging truncation; confirm before full training runs.
        self.sent1 = s1[:10]
        self.sent2 = s2[:10]
        self.label = label[:10]
        self.max_len = max_len
        # Pre-segment all pairs and precompute their sememe matrices.
        self.s1,self.s2,self.mat = word2index(self.sent1,self.sent2)
    def __getitem__(self, i):
        # Returns (segmented s1, segmented s2, label, sememe matrix).
        return self.s1[i],self.s2[i] ,self.label[i],self.mat[i]
    def __len__(self):
        return len(self.label)
def collate(batch):
    """collate_fn for LoadData: keep segmented sentences as Python lists and
    stack labels / sememe matrices into LongTensors."""
    sentences1, sentences2, labels, matrices = [], [], [], []
    for first, second, lab, matrix in batch:
        sentences1.append(first)
        sentences2.append(second)
        labels.append(lab)
        matrices.append(matrix)
    return sentences1, sentences2, torch.LongTensor(labels), torch.LongTensor(matrices)
# if __name__ == '__main__':
# import pickle
# # sent1, sent2, label = get_txt('data/chinese/AFQMC/train.txt')
# # s1_index, s1_mask, s2_index, s2_mask, mat = word2index(sent1, sent2)
# # data = [s1_index, s1_mask, s2_index, s2_mask, mat, label]
# # f = open('AFQMC_train.pickle','wb')
# # pickle.dump(data,f)
#
# sent1, sent2, label = get_txt('data/chinese/AFQMC/dev.txt')
# s1_index, s1_mask, s2_index, s2_mask, mat = word2index(sent1, sent2)
# data = [s1_index, s1_mask, s2_index, s2_mask, mat, label]
# f = open('AFQMC_test.pickle','wb')
# pickle.dump(data,f) | 2,812 | 31.333333 | 80 | py |
sememes_codriven_text_matching | sememes_codriven_text_matching-main/hownet_bert.py | import torch
import math
import torch.nn as nn
import torch.optim as optim
import args
from numpy import *
from util_for_bert import *
from tqdm import tqdm_notebook, tqdm
from torch.nn import functional as F
from sklearn import metrics
from torch.optim.lr_scheduler import ExponentialLR, MultiStepLR
import numpy as np
import torch
import args
from transformers import BertTokenizer, BertConfig, BertForMaskedLM, BertForNextSentencePrediction
from transformers import BertModel
from torch.nn import functional as F
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.nn import functional as F
import args
from transformers import BertTokenizer, BertConfig, BertForMaskedLM, BertForNextSentencePrediction
from transformers import BertModel
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def length_to_mask(lengths):
    """Return a boolean tensor that is True exactly where ``lengths`` is 0.

    Compares an int64 zero tensor of the same shape against ``lengths``.
    (Despite the name, this does not expand lengths into per-position
    padding masks -- it only flags zero entries.)
    """
    zero_template = torch.zeros(lengths.shape, dtype=torch.int64)
    return zero_template.eq(lengths)
class PositionalEncoding(nn.Module):
    # Standard sinusoidal positional encoding ("Attention Is All You Need").
    # The table is registered as a (non-trainable) buffer of shape
    # (max_len, 1, d_model).
    def __init__(self, d_model, dropout=0.1, max_len=args.max_len):
        super(PositionalEncoding, self).__init__()
        # NOTE(review): ``dropout`` is accepted but never applied -- confirm.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        # Even dimensions get sin, odd dimensions get cos.
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)
    def forward(self, x):
        # Input is batch-first; the output stays sequence-first
        # (seq_len, batch, d_model), matching nn.TransformerEncoder's
        # default expectation in the caller.
        x = torch.transpose(x, 0, 1)
        x = x + self.pe[:x.size(0), :]
        return x
class BERT(nn.Module):
    # Frozen chinese-bert-wwm-ext wrapper used as a character embedder:
    # each character is tokenized and embedded independently (no gradients).
    def __init__(self):
        super(BERT, self).__init__()
        self.model_name = "hfl/chinese-bert-wwm-ext"
        self.MODEL_PATH = './chinese-bert-wwm-ext/'
        self.tokenizer = BertTokenizer.from_pretrained(self.model_name)
        self.model_config = BertConfig.from_pretrained(self.model_name)
        self.model_config.output_hidden_states = True
        self.model_config.output_attentions = True
        # Weights are loaded from the local path, config from the hub name.
        self.bert_model = BertModel.from_pretrained(self.MODEL_PATH, config=self.model_config)
    def tokenization(self,word):
        # Convert a single token to its vocabulary id plus an attention mask,
        # zero-padded to args.max_len.
        # sent_code = self.tokenizer.encode_plus([sentence])
        # sent_code = self.tokenizer.tokenize(sentence)
        ids = [self.tokenizer.convert_tokens_to_ids(word)]
        # input_ids = sent_code['input_ids']
        attention_mask = [1] * len(ids)
        padding = [0] * (args.max_len - len(ids))
        if len(ids) > args.max_len:
            # NOTE(review): the truncated lists are assigned to local names
            # (input_ids / attention_mask) but ``ids`` is returned below
            # untruncated -- this branch looks like a bug; confirm.
            input_ids = ids[:args.max_len]
            attention_mask = attention_mask[:args.max_len]
        else:
            ids += padding
            attention_mask += padding
        return ids,attention_mask
    def bert_word_embed(self,ids,mask):
        # Run BERT in eval mode with no gradients.  For a single id the
        # per-token last_hidden_state is returned; otherwise the pooled
        # [CLS] representation (pooler_output) is returned.
        if len(ids) == 1:
            tokens_tensor = torch.tensor([ids])
            mask_tensors = torch.tensor([mask])
            self.bert_model.eval()
            with torch.no_grad():
                outputs = self.bert_model(tokens_tensor, attention_mask=mask_tensors)
                last_hidden_state = outputs['last_hidden_state']
                return last_hidden_state.squeeze()
        else:
            tokens_tensor = torch.tensor([ids])
            mask_tensors = torch.tensor([mask])
            self.bert_model.eval()
            with torch.no_grad():
                outputs = self.bert_model(tokens_tensor, attention_mask=mask_tensors)
                pooler_output = outputs['pooler_output']
                return pooler_output.squeeze()
    def word_embed(self, sentences):
        # Embed every sentence character-by-character, truncate/zero-pad to
        # max_len, and also return a boolean validity mask per position.
        # NOTE(review): bare ``max_len`` is used once below while
        # ``args.max_len`` is used elsewhere -- confirm they are the same.
        sentences_embed = []
        mask_list = []
        for sentence in sentences:
            sentence_embed = []
            for word in list(sentence):
                ids,masks = self.tokenization(word)
                word_embed = self.bert_word_embed(ids,masks)
                sentence_embed.append(word_embed)
            last_hidden_stat = torch.stack(sentence_embed, 0).squeeze()
            if last_hidden_stat.shape[0]>=args.max_len:
                last_hidden_stat = last_hidden_stat[:args.max_len,:]
                mask_list.append([True] * args.max_len)
            else:
                mask_list.append([True] * last_hidden_stat.shape[0] + [False] * (max_len - last_hidden_stat.shape[0]))
                pad = torch.nn.ZeroPad2d(padding=(0,0,0,args.max_len-last_hidden_stat.shape[0]))
                sentences_embed.append(pad(last_hidden_stat).unsqueeze(0))
        return torch.cat(sentences_embed,0),torch.LongTensor(mask_list)
class M1(nn.Module):
    """Sentence-pair matching model (BERT-embedding variant).

    Pipeline: per-word BERT embeddings -> positional encoding -> shared
    TransformerEncoder -> soft attention biased by a sememe co-occurrence
    matrix `mat` (scaled by the learnable `alpha`) -> BiLSTM -> avg/max
    pooling -> MLP classifier.
    """

    def __init__(self, hidden_dim, num_class):
        super(M1, self).__init__()
        self.embedding_dim = 768  # BERT-base hidden size
        self.hidden_dim = hidden_dim
        self.num_class = num_class
        self.bert_embeds = BERT()  # wrapper producing per-word BERT embeddings
        self.bn_embeds = nn.BatchNorm1d(self.embedding_dim)
        self.position_embedding = PositionalEncoding(self.embedding_dim)
        encoder_layer1 = nn.TransformerEncoderLayer(self.embedding_dim, 4, dim_feedforward=512, dropout=0.1,
                                                    activation='relu')
        self.transformer = nn.TransformerEncoder(encoder_layer1, 8)
        # Learnable scale for the sememe matrix added to attention scores.
        self.alpha = nn.Parameter(torch.tensor([1], dtype=torch.float), requires_grad=True)
        # NOTE(review): bare `max_len` — presumably star-imported from args; confirm.
        self.att_fc = nn.Linear(4 * max_len, max_len)
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim, batch_first=True, bidirectional=True)
        self.lr = nn.Linear(200, 100)
        self.fc = nn.Sequential(
            nn.Linear(8292, 1000),
            nn.Linear(1000, self.num_class)
        )

    def apply_multiple(self, x):
        # input: batch_size * seq_len * (2 * hidden_size)
        p1 = F.avg_pool1d(x.transpose(1, 2), x.size(1)).squeeze(-1)
        p2 = F.max_pool1d(x.transpose(1, 2), x.size(1)).squeeze(-1)
        # output: batch_size * (4 * hidden_size)
        return torch.cat([p1, p2], 1)

    def soft_attention_align(self, x1, x2, mat):
        """Cross-attend x1 and x2; scores are biased by ``alpha * mat``."""
        a1 = torch.matmul(x1, x2.transpose(1, 2))
        b1 = self.alpha * mat
        attention = a1 + b1
        weight1 = F.softmax(attention, dim=-1)
        x1_align = torch.matmul(weight1, x2)
        weight2 = F.softmax(attention.transpose(1, 2), dim=-1)
        x2_align = torch.matmul(weight2, x1)
        return x1_align, x2_align

    def forward(self, sent1, sent2, label, mat, is_train=True):
        # embeds: batch_size * seq_len => batch_size * seq_len * dim
        # x1 = self.bn_embeds(self.embeds(sent1).transpose(1, 2).contiguous()).transpose(1, 2)
        # x2 = self.bn_embeds(self.embeds(sent2).transpose(1, 2).contiguous()).transpose(1, 2)
        x1, mask1 = self.bert_embeds.word_embed(sent1)
        x2, mask2 = self.bert_embeds.word_embed(sent2)
        # NOTE(review): src_key_padding_mask treats nonzero/True as "ignore",
        # but word_embed marks REAL tokens with True/1 — confirm the mask
        # polarity is intended.
        x1_ = self.position_embedding(x1)
        tf_1 = self.transformer(x1_, src_key_padding_mask=mask1)
        x1_tf = tf_1.transpose(0, 1)
        x2_ = self.position_embedding(x2)
        tf_2 = self.transformer(x2_, src_key_padding_mask=mask2)
        x2_tf = tf_2.transpose(0, 1)
        x_1, x_2 = self.soft_attention_align(x1_tf, x2_tf, mat)
        # Row/column sums of the sememe matrix, used as extra features below.
        row = torch.sum(mat, dim=1)
        line = torch.sum(mat, dim=2)
        o1, _ = self.lstm(x_1)
        o2, _ = self.lstm(x_2)
        # NOTE(review): row_/line_ are computed but never used afterwards.
        row_ = row.unsqueeze(-1)
        line_ = line.unsqueeze(-1)
        output1 = torch.cat([x1_tf, o1], dim=2)
        output2 = torch.cat([x2_tf, o2], dim=2)
        q1_rep = self.apply_multiple(output1)
        q2_rep = self.apply_multiple(output2)
        m = self.lr(torch.cat([line, row], dim=-1).float())
        x = torch.cat([q1_rep, q2_rep, q1_rep - q2_rep, q1_rep * q2_rep, m], -1)
        # batch_size * seq_len * dim => batch_size * seq_len * hidden_size
        logits = self.fc(x)
        out = torch.softmax(logits, 1)
        if is_train:
            # NOTE(review): CrossEntropyLoss applies log-softmax internally, so
            # feeding softmax probabilities is a double softmax — confirm
            # whether raw logits were intended.
            loss1 = nn.CrossEntropyLoss()
            loss_1 = loss1(out, label)
            # loss2 = cosent(lam=20)
            # out = out[:,1]
            # loss_2 = loss2(out,label)
            out = torch.argmax(out, 1)
            return loss_1, out, self.alpha
        else:
            out = torch.argmax(out, 1)
            return out
if __name__ == '__main__':
    # Train the BERT-based sentence-pair matcher on BQ and evaluate each epoch.
    Model = M1(hidden_dim=args.hidden_dim, num_class=args.class_size)
    train_dataset = LoadData('data/chinese/bq_corpus/train.txt')
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate, shuffle=True,
                              drop_last=False)
    print('train data has been loaded')
    test_dataset = LoadData('data/chinese/bq_corpus/test.txt')
    test_loader = DataLoader(test_dataset, batch_size=50, collate_fn=collate, shuffle=True, drop_last=True)
    print('test data has been loaded')
    optimizer = optim.Adam([{"params": Model.parameters()}],
                           lr=1e-2)
    scheduler = MultiStepLR(optimizer, milestones=[20, 50, 80, 100, 150], gamma=0.8)
    total_params = sum(p.numel() for p in Model.parameters())
    LOSS = nn.CrossEntropyLoss()
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in Model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')
    print('start training ....')
    best_acc = 0
    best_epoch = 0
    for epoch in range(args.epoch):
        process_bar = tqdm(train_loader, leave=False)
        loss = 0
        train_acc = 0
        train_res = []
        a = 0
        for sent1, sent2, label, mat in process_bar:
            loss, output, alpha = Model(sent1, sent2, label, mat)
            optimizer.zero_grad()
            a = alpha.item()
            label = label
            output = output.long()
            correct_prediction = torch.eq(output, label)
            train_accuracy = correct_prediction.float()
            train_acc = torch.mean(train_accuracy, dim=0).item()
            train_res.append(train_acc)
            loss.backward()
            optimizer.step()
        # MultiStepLR milestones are epochs, so step the scheduler per epoch.
        scheduler.step()
        print("alpha=%.6f" % a)
        print('epoch={},loss={},train_acc = {}'.format(epoch, loss.item(), mean(train_res)))
        res = []
        f1s = []
        # BUG FIX: the evaluation loop previously unpacked six items
        # (sent1, s1_mask, sent2, s2_mask, mat, label) from the same collate
        # function whose train batches are 4-tuples, and then passed all six
        # to Model, which does not match M1.forward(sent1, sent2, label, mat).
        for sent1, sent2, label, mat in test_loader:
            output = Model(sent1, sent2, label, mat, is_train=False)
            label = label
            output = output.long()
            correct_prediction = torch.eq(output, label)
            test_accuracy = correct_prediction.float()
            test_acc = torch.mean(test_accuracy, dim=0).item()
            res.append(test_acc)
            f1 = metrics.f1_score(label.cpu(), output.cpu())
            f1s.append(f1)
        if mean(res) > best_acc and epoch > 20:
            best_acc = mean(res)
            best_epoch = epoch + 1
            # torch.save(Model.state_dict(), './data/models/bq/2-{}-{}.pth'.format(best_epoch, best_acc))
        print('epoch =', epoch + 1, 'test_acc=', mean(res), 'f1=', mean(f1s), ' best acc epoch:', best_epoch,
              ' best acc:', best_acc)
        if epoch % 20 == 0:
            torch.save(Model.state_dict(), './data/models/bq/8-{}-{}.pth'.format(epoch, mean(res)))
| 11,154 | 38.556738 | 118 | py |
sememes_codriven_text_matching | sememes_codriven_text_matching-main/Pre-processing.py | import jieba
import torch
import pandas as pd
from torch.utils.data import DataLoader,Dataset
from gensim.models import word2vec
import json
import re
from how_net import is_sememe
import args
from tqdm import tqdm
from args import *
def load_word_vocab():
    """Load the AFQMC vocabulary file.

    Returns:
        tuple: (word2idx, idx2word) lookup dicts built from the line order
        of ``data/chinese/AFQMC/word_vocab.txt``.
    """
    path = 'data/chinese/AFQMC/word_vocab.txt'
    vocab = [line.strip() for line in open(path, encoding='utf-8').readlines()]
    word2idx = {}
    idx2word = {}
    for position, token in enumerate(vocab):
        word2idx[token] = position
        idx2word[position] = token
    return word2idx, idx2word
def word2index(sentence1, sentence2):
    """Index, pad and mask each jieba-tokenized sentence pair, and build the
    per-pair sememe co-occurrence matrix.

    Unknown words map to index 1; padding uses index 0. The matrix entry
    res[i][j] is 1 when the i-th word of s1 and the j-th word of s2 share a
    sememe (per ``is_sememe``) and neither position is padding.

    Returns:
        tuple: (s1_index_all, s1_mask_all, s2_index_all, s2_mask_all, mat)
    """
    word2idx, idx2word = load_word_vocab()
    s1_index_all, s2_index_all = [], []
    s1_mask_all, s2_mask_all = [], []
    mat = []
    for s1, s2 in tqdm(zip(sentence1, sentence2)):
        res = [[0] * args.max_len for _ in range(args.max_len)]
        s1_ = jieba.lcut(s1)
        s2_ = jieba.lcut(s2)
        s1_index, s2_index = [], []
        for s1_word in s1_:
            if len(s1_word) > 0 and s1_word in word2idx.keys():
                s1_index.append(word2idx[s1_word])
            else:
                s1_index.append(1)  # 1 == OOV index
        for s2_word in s2_:
            if len(s2_word) > 0 and s2_word in word2idx.keys():
                s2_index.append(word2idx[s2_word])
            else:
                s2_index.append(1)
        if len(s1_index) >= args.max_len:
            s1_index = s1_index[:args.max_len]
            s1_mask = [True] * args.max_len
        else:
            s1_mask = [True] * len(s1_index) + [False] * (args.max_len - len(s1_index))
            s1_index = s1_index + [0] * (args.max_len - len(s1_index))
        if len(s2_index) >= args.max_len:
            s2_index = s2_index[:args.max_len]
            s2_mask = [True] * args.max_len
        else:
            s2_mask = [True] * len(s2_index) + [False] * (args.max_len - len(s2_index))
            s2_index = s2_index + [0] * (args.max_len - len(s2_index))
        for i in range(args.max_len):
            for j in range(args.max_len):
                # BUG FIX: the original guard compared the whole index LISTS
                # to 0 (`s1_index != 0 and s2_index != 0`, duplicated twice),
                # which is always True. Check the individual indices so padding
                # positions (index 0) never produce a sememe match, and test
                # them first to avoid needless is_sememe calls.
                if s1_index[i] != 0 and s2_index[j] != 0 and \
                        is_sememe(idx2word[s1_index[i]], idx2word[s2_index[j]]):
                    res[i][j] = 1
        s1_index_all.append(s1_index)
        s1_mask_all.append(s1_mask)
        s2_index_all.append(s2_index)
        s2_mask_all.append(s2_mask)
        mat.append(res)
    return s1_index_all, s1_mask_all, s2_index_all, s2_mask_all, mat
def get_txt(dir):
    """Read a tab-separated file of '<sent1>\\t<sent2>\\t<label>' rows.

    Non-CJK/alphanumeric characters are stripped from every field.

    Returns:
        tuple: (sent1, sent2, label) — two lists of cleaned strings and a
        list of int labels.
    """
    cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")
    sent1, sent2, label = [], [], []
    # FIX: use a context manager so the file handle is closed (it leaked before).
    with open(dir, 'r', encoding='utf8') as ff:
        for line in ff.readlines():
            # i = json.load(line)
            i = line.rstrip().split('\t')
            sent1.append(cop.sub('', i[0]))
            sent2.append(cop.sub('', i[1]))
            label.append(int(cop.sub('', i[2])))
    return sent1, sent2, label
# class LoadTestData(Dataset):
# def __init__(self,dir,max_len=args.max_len):
# s1,s2,label = get_txt(dir)
# self.sent1 = s1[:]
# self.sent2 = s2[:]
# self.tag = label[:]
# self.max_len = max_len
# self.s1_index,self.s2_index = word2index(self.sent1,self.sent2)
#
#
# def __getitem__(self, i):
# return self.s1_index[i], self.s2_index[i], self.tag[i]
# def __len__(self):
# return len(self.tag)
#
# def collate_t(batch):
# s1 = torch.LongTensor([item[0] for item in batch])
# s2 = torch.LongTensor([item[1] for item in batch])
# label = torch.LongTensor([item[2] for item in batch])
#
# return s1,s2,label
if __name__ == '__main__':
    import pickle
    # Pre-compute indexed inputs, padding masks and sememe co-occurrence
    # matrices for the BQ training split, then cache everything in one pickle
    # so training scripts can skip the expensive word2index pass.
    # sent1, sent2, label = get_txt('data/chinese/AFQMC/train.txt')
    # s1_index, s1_mask, s2_index, s2_mask, mat = word2index(sent1, sent2)
    # data = [s1_index, s1_mask, s2_index, s2_mask, mat, label]
    # f = open('AFQMC_train.pickle','wb')
    # pickle.dump(data,f)
    sent1, sent2, label = get_txt('data/chinese/BQ/train.txt')
    s1_index, s1_mask, s2_index, s2_mask, mat = word2index(sent1, sent2)
    data = [s1_index, s1_mask, s2_index, s2_mask, mat, label]
    f = open('BQ_train.pickle','wb')
pickle.dump(data,f) | 4,059 | 33.40678 | 147 | py |
sememes_codriven_text_matching | sememes_codriven_text_matching-main/util_for_BQ.py | import jieba
import torch
import pandas as pd
from torch.utils.data import DataLoader,Dataset
from gensim.models import word2vec
import json
import re
from how_net import is_sememe
import args
from tqdm import tqdm
from args import *
import pickle
def load_word_vocab():
    """Load the BQ-corpus vocabulary file.

    Returns:
        tuple: (word2idx, idx2word) dicts keyed by the file's line order.
    """
    path = 'data/chinese/bq_corpus/word_vocab.txt'
    vocab = [entry.strip() for entry in open(path, encoding='utf-8').readlines()]
    indices = range(len(vocab))
    word2idx = dict(zip(vocab, indices))
    idx2word = dict(zip(indices, vocab))
    return word2idx, idx2word
def word2index(sentence1, sentence2):
    """Index, pad and mask jieba-tokenized BQ sentence pairs, and build the
    per-pair sememe co-occurrence matrix.

    Unknown words map to index 1; padding uses index 0. res[i][j] is 1 when
    the i-th word of s1 and the j-th word of s2 share a sememe and neither
    position is padding.

    Returns:
        tuple: (s1_index_all, s1_mask_all, s2_index_all, s2_mask_all, mat)
    """
    word2idx, idx2word = load_word_vocab()
    s1_index_all, s2_index_all = [], []
    s1_mask_all, s2_mask_all = [], []
    mat = []
    for s1, s2 in tqdm(zip(sentence1, sentence2)):
        res = [[0] * args.max_len for _ in range(args.max_len)]
        s1_ = jieba.lcut(s1)
        s2_ = jieba.lcut(s2)
        s1_index, s2_index = [], []
        for s1_word in s1_:
            if len(s1_word) > 0 and s1_word in word2idx.keys():
                s1_index.append(word2idx[s1_word])
            else:
                s1_index.append(1)  # 1 == OOV index
        for s2_word in s2_:
            if len(s2_word) > 0 and s2_word in word2idx.keys():
                s2_index.append(word2idx[s2_word])
            else:
                s2_index.append(1)
        if len(s1_index) >= args.max_len:
            s1_index = s1_index[:args.max_len]
            s1_mask = [True] * args.max_len
        else:
            s1_mask = [True] * len(s1_index) + [False] * (args.max_len - len(s1_index))
            s1_index = s1_index + [0] * (args.max_len - len(s1_index))
        if len(s2_index) >= args.max_len:
            s2_index = s2_index[:args.max_len]
            s2_mask = [True] * args.max_len
        else:
            s2_mask = [True] * len(s2_index) + [False] * (args.max_len - len(s2_index))
            s2_index = s2_index + [0] * (args.max_len - len(s2_index))
        for i in range(args.max_len):
            for j in range(args.max_len):
                # BUG FIX: the original guard compared the whole index LISTS to
                # 0 (`s1_index != 0 and s2_index != 0`), which is always True.
                # Check the individual indices so padding positions (index 0)
                # never produce a sememe match; test them first so is_sememe
                # is skipped for padding.
                if s1_index[i] != 0 and s2_index[j] != 0 and \
                        is_sememe(idx2word[s1_index[i]], idx2word[s2_index[j]]):
                    res[i][j] = 1
        s1_index_all.append(s1_index)
        s1_mask_all.append(s1_mask)
        s2_index_all.append(s2_index)
        s2_mask_all.append(s2_mask)
        mat.append(res)
    return s1_index_all, s1_mask_all, s2_index_all, s2_mask_all, mat
def get_txt(dir):
    """Read a tab-separated file of '<sent1>\\t<sent2>\\t<label>' rows,
    stripping characters outside the CJK/alphanumeric ranges.

    Returns:
        tuple: (sent1, sent2, label) — cleaned strings plus int labels.
    """
    cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")
    sent1, sent2, label = [], [], []
    # FIX: close the file via a context manager (the handle leaked before).
    with open(dir, 'r', encoding='utf8') as ff:
        for line in ff.readlines():
            # i = json.load(line)
            i = line.rstrip().split('\t')
            sent1.append(cop.sub('', i[0]))
            sent2.append(cop.sub('', i[1]))
            label.append(int(cop.sub('', i[2])))
    return sent1, sent2, label
class LoadData(Dataset):
    """Dataset backed by a pre-pickled list
    ``[s1_index, s1_mask, s2_index, s2_mask, mat, tag]``
    (produced by this module's ``__main__`` block).
    """

    # NOTE(review): max_len is accepted but unused — kept for interface
    # compatibility with existing callers.
    def __init__(self, dir, max_len=args.max_len):
        # FIX: close the pickle file after loading (it was left open before),
        # and drop the stale commented-out get_txt/word2index code.
        with open(dir, 'rb') as f:
            (self.s1_index, self.s1_mask, self.s2_index,
             self.s2_mask, self.mat, self.tag) = pickle.load(f)

    def __getitem__(self, i):
        return (self.s1_index[i], self.s1_mask[i], self.s2_index[i],
                self.s2_mask[i], self.mat[i], self.tag[i])

    def __len__(self):
        return len(self.tag)
def collate(batch):
    """Collate a list of LoadData samples into batched tensors.

    Returns:
        tuple: (s1, s1_mask, s2, s2_mask, mat, label) with Long tensors for
        indices/matrix/labels and Bool tensors for the padding masks.
    """
    columns = list(zip(*batch))
    s1 = torch.LongTensor(columns[0])
    s1_mask = torch.BoolTensor(columns[1])
    s2 = torch.LongTensor(columns[2])
    s2_mask = torch.BoolTensor(columns[3])
    mat = torch.LongTensor(columns[4])
    label = torch.LongTensor(columns[5])
    return s1, s1_mask, s2, s2_mask, mat, label
# class LoadTestData(Dataset):
# def __init__(self,dir,max_len=args.max_len):
# s1,s2,label = get_txt(dir)
# self.sent1 = s1[:]
# self.sent2 = s2[:]
# self.tag = label[:]
# self.max_len = max_len
# self.s1_index,self.s2_index = word2index(self.sent1,self.sent2)
#
#
# def __getitem__(self, i):
# return self.s1_index[i], self.s2_index[i], self.tag[i]
# def __len__(self):
# return len(self.tag)
#
# def collate_t(batch):
# s1 = torch.LongTensor([item[0] for item in batch])
# s2 = torch.LongTensor([item[1] for item in batch])
# label = torch.LongTensor([item[2] for item in batch])
#
# return s1,s2,label
if __name__ == '__main__':
    import pickle
    # Build and cache the indexed/padded BQ training data plus the sememe
    # co-occurrence matrices as a single pickle for the training scripts.
    sent1, sent2, label = get_txt('data/chinese/bq_corpus/train.txt')
    s1_index, s1_mask, s2_index, s2_mask, mat = word2index(sent1, sent2)
    data = [s1_index, s1_mask, s2_index, s2_mask, mat, label]
    f = open('BQ_train.pickle','wb')
pickle.dump(data,f) | 4,742 | 33.369565 | 112 | py |
sememes_codriven_text_matching | sememes_codriven_text_matching-main/hownet.py | import torch
import math
import torch.nn as nn
import torch.optim as optim
import args
from numpy import *
from util_for_BQ import *
from tqdm import tqdm_notebook,tqdm
from torch.nn import functional as F
from sklearn import metrics
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def length_to_mask(lengths):
    """Return a bool tensor that is True wherever ``lengths`` equals zero.

    NOTE(review): despite the name, this compares element-wise against zero
    rather than building a sequence-length padding mask — confirm against
    call sites before relying on the name.
    """
    zeros = torch.zeros(lengths.shape, dtype=torch.int64)
    return zeros == lengths
class PositionalEncoding(nn.Module):
    """Fixed sine/cosine positional encoding (Vaswani et al., 2017).

    Adds position information to word embeddings and transposes the input
    from batch-first to sequence-first, as expected by nn.TransformerEncoder.
    NOTE(review): the ``dropout`` argument is accepted but never used.
    """

    def __init__(self, d_model, dropout=0.1, max_len = args.max_len):
        super(PositionalEncoding, self).__init__()
        # Precompute the (max_len, d_model) table of encodings once.
        pe=torch.zeros(max_len,d_model)
        position = torch.arange(0,max_len,dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0,d_model,2).float() * (-math.log(10000.0)/d_model))
        pe[:,0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:,1::2] = torch.cos(position * div_term)  # odd dims: cosine
        pe = pe.unsqueeze(0).transpose(0,1)  # -> (max_len, 1, d_model)
        # register_buffer: moves with the module/device but is not trainable.
        self.register_buffer('pe',pe)

    def forward(self,x):
        # x: (batch, seq, d_model) -> (seq, batch, d_model), then add encodings
        # broadcast over the batch dimension.
        x = torch.transpose(x,0,1)
        x = x + self.pe[:x.size(0),:]
        return x
class M1(nn.Module):
    """Sentence-pair matching model (learned-embedding / HowNet variant).

    Pipeline: trainable word embeddings + BatchNorm -> positional encoding ->
    shared TransformerEncoder -> soft attention biased by the sememe matrix
    (scaled by ``alpha``) -> BiLSTM -> avg/max pooling -> MLP classifier.
    """

    def __init__(self,vocab_size, hidden_dim,num_class):
        super(M1, self).__init__()
        self.embedding_dim = args.embedding_dim
        self.hidden_dim = hidden_dim
        self.num_class = num_class
        self.embeds = nn.Embedding(vocab_size,self.embedding_dim)
        self.bn_embeds = nn.BatchNorm1d(self.embedding_dim)
        # self.embeddings = nn.Embedding(vocab_size,embedding_dim)
        self.position_embedding = PositionalEncoding(self.embedding_dim)
        encoder_layer1 = nn.TransformerEncoderLayer(self.embedding_dim, 4,dim_feedforward=512, dropout=0.1,activation='relu')
        self.transformer = nn.TransformerEncoder(encoder_layer1,8).to(device)
        # NOTE(review): vc/vd/vm and att_fc are defined but never used in forward.
        self.vc = nn.Linear(args.embedding_dim, 1, bias=False).to(device)
        self.vd = nn.Linear(args.embedding_dim, 1, bias=False).to(device)
        self.vm = nn.Linear(args.embedding_dim, 1, bias=False).to(device)
        # NOTE(review): on CUDA, nn.Parameter(...).to(device) returns a plain
        # (non-leaf) tensor that is NOT registered as a module parameter, so
        # alpha may not be optimized — confirm on a GPU run.
        self.alpha = nn.Parameter(torch.FloatTensor([0.1])).to(device)
        self.att_fc= nn.Linear(4*max_len,max_len)
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim, batch_first=True, bidirectional=True).to(device)
        self.lr = nn.Linear(200,100)
        self.fc = nn.Sequential(
            nn.Linear(4548,1000),
            nn.Linear(1000,self.num_class)
        )

    def apply_multiple(self, x):
        # input: batch_size * seq_len * (2 * hidden_size)
        p1 = F.avg_pool1d(x.transpose(1, 2), x.size(1)).squeeze(-1)
        p2 = F.max_pool1d(x.transpose(1, 2), x.size(1)).squeeze(-1)
        # output: batch_size * (4 * hidden_size)
        return torch.cat([p1, p2], 1)

    def soft_attention_align(self, x1, x2,mat):
        """Cross-attend x1 and x2; scores are biased by ``alpha * mat``."""
        attention = torch.matmul(x1, x2.transpose(1, 2)).to(device) + self.alpha * mat.to(device)
        weight1 = F.softmax(attention, dim=-1)
        x1_align = torch.matmul(weight1, x2).to(device)
        weight2 = F.softmax(attention.transpose(1, 2), dim=-1)
        x2_align = torch.matmul(weight2, x1).to(device)
        return x1_align, x2_align

    def forward(self,sent1,s1_mask,sent2,s2_mask,mat,label,is_train = True):
        # embeds: batch_size * seq_len => batch_size * seq_len * dim
        # x1 = self.bn_embeds(self.embeds(sent1).transpose(1, 2).contiguous()).transpose(1, 2)
        # x2 = self.bn_embeds(self.embeds(sent2).transpose(1, 2).contiguous()).transpose(1, 2)
        x1 = self.bn_embeds(self.embeds(sent1.to(device)).transpose(1, 2).contiguous()).transpose(1, 2).to(device)
        x2 = self.bn_embeds(self.embeds(sent2.to(device)).transpose(1, 2).contiguous()).transpose(1, 2).to(device)
        # NOTE(review): src_key_padding_mask treats True as "ignore", but the
        # collate masks mark REAL tokens with True — confirm mask polarity.
        x1_ = self.position_embedding(x1)
        tf_1 = self.transformer(x1_, src_key_padding_mask=s1_mask.to(device))
        x1_tf = tf_1.transpose(0, 1)
        x2_ = self.position_embedding(x2)
        tf_2 = self.transformer(x2_, src_key_padding_mask=s2_mask.to(device))
        x2_tf = tf_2.transpose(0, 1)
        x_1,x_2 = self.soft_attention_align(x1_tf,x2_tf,mat)
        # Row/column sums of the sememe matrix, used as extra features below.
        row = torch.sum(mat,dim=1).to(device)
        line = torch.sum(mat,dim=2).to(device)
        o1, _ = self.lstm(x_1)
        o2, _ = self.lstm(x_2)
        # NOTE(review): row_/line_ are computed but never used afterwards.
        row_ = row.unsqueeze(-1).to(device)
        line_ = line.unsqueeze(-1).to(device)
        output1 = torch.cat([x1_tf, o1], dim=2)
        output2 = torch.cat([x2_tf, o2], dim=2)
        q1_rep = self.apply_multiple(output1)
        q2_rep = self.apply_multiple(output2)
        m = self.lr(torch.cat([line,row],dim=-1).float())
        x = torch.cat([q1_rep, q2_rep, q1_rep - q2_rep,q1_rep*q2_rep,m], -1)
        # batch_size * seq_len * dim => batch_size * seq_len * hidden_size
        logits = self.fc(x)
        out = torch.softmax(logits,1).to(device)
        if is_train:
            # NOTE(review): CrossEntropyLoss applies log-softmax internally,
            # so feeding softmax output is a double softmax — confirm intent.
            loss1= nn.CrossEntropyLoss()
            loss_1 = loss1(out,label.to(device))
            # loss2 = cosent(lam=20)
            # out = out[:,1]
            # loss_2 = loss2(out,label)
            out = torch.argmax(out, 1)
            return loss_1,out
        else:
            out = torch.argmax(out,1)
            return out
# --- Training script: PAWSX with the HowNet-embedding M1 model ------------
Model = M1(vocab_size=args.vocab_size,hidden_dim=args.hidden_dim,num_class=args.class_size).to(device)
train_dataset = LoadData('PAWSX_train.pickle')
train_loader = DataLoader(train_dataset,batch_size=args.batch_size,collate_fn=collate,shuffle=True,drop_last=False)
print('train data has been loaded')
test_dataset = LoadData('PAWSX_dev.pickle')
test_loader = DataLoader(test_dataset,batch_size=50,collate_fn=collate,shuffle=True,drop_last=True)
print('test data has been loaded')
optimizer = optim.Adam(Model.parameters(),lr=1e-5)
total_params = sum(p.numel() for p in Model.parameters())
# NOTE(review): LOSS is created but never used; the model computes its own loss.
LOSS = nn.CrossEntropyLoss()
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
    p.numel() for p in Model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
print('start training ....')
best_acc = 0
best_epoch = 0
for epoch in range(args.epoch):
    process_bar = tqdm(train_loader,leave = False)
    loss = 0
    train_acc = 0
    train_res = []
    # Training pass: the model returns (loss, predicted class indices).
    for sent1,s1_mask,sent2,s2_mask,mat,label in process_bar:
        loss,output = Model(sent1,s1_mask,sent2,s2_mask,mat,label)
        optimizer.zero_grad()
        label = label.to(device)
        output = output.long().to(device)
        correct_prediction = torch.eq(output, label).to(device)
        train_accuracy = correct_prediction.float().to(device)
        train_acc = torch.mean(train_accuracy, dim=0).item()
        train_res.append(train_acc)
        loss.backward()
        optimizer.step()
    print('epoch={},loss={},train_acc = {}'.format(epoch,loss.item(),mean(train_res)))
    # Evaluation pass: accuracy and F1 per batch, averaged below.
    res = []
    f1s = []
    for sent1,s1_mask,sent2,s2_mask,mat,label in test_loader:
        output = Model(sent1,s1_mask,sent2,s2_mask,mat,label,is_train = False)
        label = label.to(device)
        output = output.long().to(device)
        correct_prediction = torch.eq(output, label).to(device)
        test_accuracy = correct_prediction.float().to(device)
        test_acc = torch.mean(test_accuracy, dim=0).item()
        res.append(test_acc)
        f1 = metrics.f1_score(label.cpu(),output.cpu())
        f1s.append(f1)
    # Track the best epoch (only after a 20-epoch warm-up).
    if mean(res) > best_acc and epoch>20:
        best_acc = mean(res)
        best_epoch = epoch + 1
        # torch.save(Model.state_dict(), './data/models/bq/2-{}-{}.pth'.format(best_epoch, best_acc))
    print('epoch =', epoch + 1, 'test_acc=', mean(res),'f1=',mean(f1s), ' best acc epoch:', best_epoch, ' best acc:', best_acc)
    # if epoch % 100 == 0:
    #     torch.save(Model.state_dict(), './data/models/bq/2-{}-{}.pth'.format(epoch, mean(res)))
| 7,787 | 38.135678 | 127 | py |
EOS | EOS-main/cifar_FE.py | import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from imblearn.metrics import geometric_mean_score
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
import pandas as pd
import resnet_cifar_FE as models
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from losses import LDAMLoss, FocalLoss, ASLSingleLabel
##########################################################################
# --- CLI configuration and reproducibility setup --------------------------
t00 = time.time()
t0 = time.time()
torch.set_printoptions(precision=4, threshold=20000, sci_mode=False)
np.set_printoptions(precision=4, suppress=True)
# Discover available architectures exported by the resnet_cifar_FE module.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
print('model names ', model_names)
parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH',
                    default='resnet32',
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (default: resnet32)')
parser.add_argument('--loss_type',
                    default="CE",
                    type=str, help='loss type')
parser.add_argument('--imb_type', default="exp",
                    type=str, help='imbalance type')
parser.add_argument('--imb_factor', default=0.01,
                    type=float, help='imbalance factor')
parser.add_argument('--train_rule',
                    default='None',
                    type=str,
                    help='data sampling strategy for train loader')
parser.add_argument('--rand_number', default=0, type=int,
                    help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str,
                    help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default=1,
                    type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size',
                    default=128,
                    type=int,
                    metavar='N',
                    help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed',
                    default=0,
                    type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu',
                    default=0,
                    type=int,
                    help='GPU id to use.')
parser.add_argument('--model_path',
                    default=".../CE_cif10_7_best.pth",
                    type=str,
                    help='model path.')
parser.add_argument('--data_path',
                    default='.../data/',
                    type=str,
                    help='data path.')
parser.add_argument('--save_file',
                    default=".../saved.csv",
                    type=str,
                    help='saved file path.')
parser.add_argument('--root_log', type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
# Deterministic cuDNN + fixed seeds so feature extraction is reproducible.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
torch.cuda.manual_seed(0)
###################################################################
# --- Model, data loaders and loss construction ---------------------------
best_acc1 = 0
best_acc = 0  # best test accuracy
args = parser.parse_args()
for arg in vars(args):
    print(arg, getattr(args, arg))
print()
args.store_name = '_'.join([args.dataset, args.arch, args.loss_type,
                            args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
num_classes = 100 if args.dataset == 'cifar100' else 10
use_norm = True if args.loss_type == 'LDAM' else False
model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)
if args.gpu is not None:
    torch.cuda.set_device(args.gpu)
    model = model.cuda(args.gpu)
# Load the pre-trained checkpoint to extract features from.
model.load_state_dict(torch.load(args.model_path))
epoch = args.epochs
# NOTE(review): transform_train is defined but the train_dataset below uses
# transform_val — presumably intentional for feature extraction; confirm.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = IMBALANCECIFAR10(root=args.data_path,
                                 imb_type=args.imb_type, imb_factor=args.imb_factor,
                                 rand_number=args.rand_number, train=True, download=True,
                                 transform=transform_val)
val_dataset = datasets.CIFAR10(root=args.data_path,
                               train=False,
                               download=True, transform=transform_val)
cls_num_list = train_dataset.get_cls_num_list()
train_sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size,
    shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=100, shuffle=False,
    num_workers=args.workers, pin_memory=True)
# NOTE(review): per_cls_weights is only defined for train_rule in
# {'None', 'DRW'}; other values would leave it undefined for the CE/LDAM/
# Focal branches below — confirm the supported train_rule values.
if args.train_rule == 'None':
    train_sampler = None
    per_cls_weights = None
elif args.train_rule == 'DRW':
    train_sampler = None
    idx = epoch // 160
    # Class-balanced "effective number" reweighting (Cui et al., 2019).
    betas = [0, 0.9999]
    effective_num = 1.0 - np.power(betas[idx], cls_num_list)
    per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
    per_cls_weights = per_cls_weights / \
        np.sum(per_cls_weights) * len(cls_num_list)
    per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
if args.loss_type == 'CE':
    criterion = nn.CrossEntropyLoss(weight=per_cls_weights).cuda(args.gpu)
elif args.loss_type == 'LDAM':
    criterion = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30,
                         weight=per_cls_weights).cuda(args.gpu)
elif args.loss_type == 'Focal':
    criterion = FocalLoss(weight=per_cls_weights, gamma=2).cuda(args.gpu)
elif args.loss_type == 'ASL':
    criterion=ASLSingleLabel()
def validate(val_loader, model, criterion, epoch, args, f):
    """Run the model over `val_loader` in eval mode, print per-class and
    aggregate metrics, and dump per-sample rows to the CSV path `f`.

    Each CSV row holds (actual label, prediction, softmax confidence,
    penultimate-layer feature vector) — the feature-extraction output this
    script exists to produce.
    """
    losses = AverageMeter('Loss', ':.4e')
    global best_acc
    train_loss = 0
    correct = 0
    total = 0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    count = 0
    train_on_gpu = torch.cuda.is_available()
    classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
    # switch to evaluate mode
    model.eval()
    all_preds = []
    all_targets = []
    all_values = []
    all_feats = []
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # The FE model returns (logits, penultimate features).
            output, out1 = model(input)
            out1 = out1.detach().cpu().numpy()
            loss = criterion(output, target)
            m = nn.Softmax(dim=1)
            soft = m(output)
            # Softmax confidence of the predicted class, per sample.
            values, pred = torch.max(soft, 1)
            losses.update(loss.item(), input.size(0))
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
            all_values.extend(values.detach().cpu().numpy())
            all_feats.extend(out1)
            tar_np = target.detach().cpu().numpy()
            tar_len = len(tar_np)
            total += target.size(0)
            pred_np = pred.detach().cpu().numpy()
            # Accumulate all targets/preds for the sklearn/imblearn reports.
            if count == 0:
                y_true = np.copy(tar_np)
                y_pred = np.copy(pred_np)
            else:
                y_true = np.concatenate((y_true, tar_np), axis=None)
                y_pred = np.concatenate((y_pred, pred_np), axis=None)
            count += 1
            correct_tensor = pred.eq(target.data.view_as(pred))
            correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(
                correct_tensor.cpu().numpy())
            # Per-class tallies. NOTE(review): this inner `i` shadows the
            # enumerate loop variable — harmless here, but fragile.
            for i in range(tar_len):
                label = target.data[i]
                class_correct[label] += correct[i].item()
                class_total[label] += 1
    # Per-class accuracy report (epoch % 1 == 0 is always true; kept as-is).
    if epoch % 1 == 0:
        for i in range(10):
            if class_total[i] > 0:
                print('Validation Accuracy of %5s: %2d%% (%2d/%2d)' % (
                    classes[i], 100 * class_correct[i] / class_total[i],
                    np.sum(class_correct[i]), np.sum(class_total[i])))
            else:
                print(
                    'Validation Accuracy of %5s: N/A (no training examples)' % (classes[i]))
        print('\nValidation Accuracy (Overall): %2d%% (%2d/%2d)' % (
            100. * np.sum(class_correct) / np.sum(class_total),
            np.sum(class_correct), np.sum(class_total)))
    target_names = ['class 0', 'class 1', 'class 2', 'class 3', 'class 4',
                    'class 5', 'class 6', 'class 7', 'class 8', 'class 9']
    print(classification_report_imbalanced(y_true, y_pred,
                                           target_names=target_names))
    gm = geometric_mean_score(y_true, y_pred, average='macro')
    pm = precision_score(y_true, y_pred, average='macro')
    fm = f1_score(y_true, y_pred, average='macro', zero_division=1)
    # NOTE(review): labeled ACSA, but accuracy_score computes plain overall
    # accuracy; balanced_accuracy_score below is the class-averaged metric.
    acsa = accuracy_score(y_true, y_pred)  # acsa
    bacc = balanced_accuracy_score(y_true, y_pred)
    print('ACSA ', acsa)
    print('bacc ', bacc)
    print('GM ', gm)
    print('PM ', pm)
    print('FM ', fm)
    # Assemble the per-sample CSV: actual, pred, confidence, feature columns.
    allp = pd.DataFrame(data=all_preds, columns=['pred'])
    print('allp ', allp.shape)
    allt = pd.DataFrame(data=all_targets, columns=['actual'])
    print('allt ', allt.shape)
    allv = pd.DataFrame(data=all_values, columns=['certainty'])
    print('allv ', allv.shape)
    allf = pd.DataFrame(all_feats)
    print('allf ', allf.shape)
    allcomb = pd.concat([allt, allp, allv, allf], axis=1)
    print('comb ', allcomb.shape)
    print(allcomb.head())
    allcomb.to_csv(f, index=False)  # changed 4.25.22


######################################################
# CE
# Extract features for the (imbalanced) training split; the commented call
# below does the same for the validation split.
#validate(val_loader, model, criterion, 1, args,args.save_file)
validate(train_loader, model, criterion, 1, args,args.save_file)
| 12,215 | 32.105691 | 96 | py |
EOS | EOS-main/losses.py | """
code adapted from: https://github.com/kaidic/LDAM-DRW
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def focal_loss(input_values, gamma):
    """Apply the focal modulation (1 - p)**gamma to per-sample CE values
    and return the batch mean."""
    prob = torch.exp(-input_values)
    modulated = torch.pow(1 - prob, gamma) * input_values
    return modulated.mean()


class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) built on top of cross-entropy.

    gamma=0 reduces to plain (optionally class-weighted) cross-entropy.
    """

    def __init__(self, weight=None, gamma=0.):
        super(FocalLoss, self).__init__()
        assert gamma >= 0
        self.gamma = gamma
        self.weight = weight

    def forward(self, input, target):
        per_sample_ce = F.cross_entropy(input, target, reduction='none',
                                        weight=self.weight)
        return focal_loss(per_sample_ce, self.gamma)
class LDAMLoss(nn.Module):
    """Label-Distribution-Aware Margin loss (Cao et al., 2019).

    Subtracts a per-class margin (larger for rarer classes, scaled so the
    largest margin equals ``max_m``) from the true-class logit before a
    scaled cross-entropy. NOTE(review): uses torch.cuda.FloatTensor, so this
    implementation requires a CUDA device.
    """

    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30):
        super(LDAMLoss, self).__init__()
        # Margin per class: proportional to n_c^(-1/4), normalized to max_m.
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / np.max(m_list))
        m_list = torch.cuda.FloatTensor(m_list)
        self.m_list = m_list
        assert s > 0
        self.s = s  # logit scale applied before cross-entropy
        self.weight = weight

    def forward(self, x, target):
        # One-hot mask of the target class per sample.
        index = torch.zeros_like(x, dtype=torch.uint8)
        index.scatter_(1, target.data.view(-1, 1), 1)
        index_float = index.type(torch.cuda.FloatTensor)
        # Per-sample margin = m_list[target].
        batch_m = torch.matmul(self.m_list[None, :], index_float.transpose(0,1))
        batch_m = batch_m.view((-1, 1))
        x_m = x - batch_m
        # Subtract the margin only from the true-class logit.
        output = torch.where(index, x_m, x)
        return F.cross_entropy(self.s*output, target, weight=self.weight)
class ASLSingleLabel(nn.Module):
    '''
    code adpated from: https://github.com/Alibaba-MIIL/ASL
    This loss is intended for single-label classification problems

    Asymmetric loss: log-probabilities are down-weighted by a focusing factor
    with separate exponents for the positive (gamma_pos) and negative
    (gamma_neg) parts, plus optional label smoothing (eps).
    '''
    def __init__(self, gamma_pos=0, gamma_neg=4, eps: float = 0.1, reduction='mean'):
        super(ASLSingleLabel, self).__init__()

        self.eps = eps
        self.logsoftmax = nn.LogSoftmax(dim=-1)
        self.targets_classes = []  # one-hot (then smoothed) targets, set per forward
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
        self.reduction = reduction

    def forward(self, inputs, target):
        '''
        "input" dimensions: - (batch_size,number_classes)
        "target" dimensions: - (batch_size)
        '''
        num_classes = inputs.size()[-1]
        log_preds = self.logsoftmax(inputs)
        self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)

        # ASL weights
        targets = self.targets_classes
        anti_targets = 1 - targets
        xs_pos = torch.exp(log_preds)
        xs_neg = 1 - xs_pos
        xs_pos = xs_pos * targets
        xs_neg = xs_neg * anti_targets
        # Focusing factor: gamma_pos on the true class, gamma_neg elsewhere.
        asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
                                 self.gamma_pos * targets + self.gamma_neg * anti_targets)
        log_preds = log_preds * asymmetric_w

        # NOTE: smoothing is applied AFTER the focusing weights are computed
        # (from the hard one-hot targets) but BEFORE the loss below — order
        # matters and is intentional in the upstream implementation.
        if self.eps > 0:  # label smoothing
            self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)

        # loss calculation
        loss = - self.targets_classes.mul(log_preds)

        loss = loss.sum(dim=-1)
        if self.reduction == 'mean':
            loss = loss.mean()

        return loss
| 3,252 | 30.582524 | 101 | py |
EOS | EOS-main/resnet_cifar_FE.py | '''
Properly implemented ResNet for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn import Parameter
__all__ = ['ResNet_s', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class NormedLinear(nn.Module):
    """Linear layer over L2-normalized inputs and weight columns.

    The output is the cosine similarity between the input vector and each
    class weight vector (used as the classifier head for LDAM).
    """

    def __init__(self, in_features, out_features):
        super(NormedLinear, self).__init__()
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        # Reference init: uniform in [-1, 1], columns renormalized.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)

    def forward(self, x):
        x_unit = F.normalize(x, dim=1)
        w_unit = F.normalize(self.weight, dim=0)
        return x_unit.mm(w_unit)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module (used for the
    parameter-free option-'A' shortcut in BasicBlock)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the wrapped callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BN plus an identity shortcut.

    When the shape changes, option 'A' (CIFAR paper) uses a parameter-free
    shortcut (stride-2 subsampling + zero channel padding); option 'B' uses
    a 1x1 conv projection.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        needs_projection = stride != 1 or in_planes != planes
        if needs_projection and option == 'A':
            # Subsample spatially by striding; pad the new channels with zeros.
            self.shortcut = LambdaLayer(lambda x:
                F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
        elif needs_projection and option == 'B':
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )
        else:
            # Identity shortcut (also the fallback for unknown options,
            # matching the original's default Sequential()).
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return F.relu(out)
class ResNet_s(nn.Module):
    """CIFAR-style ResNet backbone (16-32-64 channel stages).

    ``forward`` returns a tuple ``(logits, features)`` where *features* is
    the 64-d globally pooled activation vector fed to the classifier head
    (this "FE" variant exposes the features for downstream oversampling).
    """

    def __init__(self, block, num_blocks, num_classes=10, use_norm=False):
        super(ResNet_s, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        # Cosine classifier for LDAM training, plain linear otherwise.
        self.linear = NormedLinear(64, num_classes) if use_norm else nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, h.size()[3])
        feats = h.view(h.size(0), -1)
        return self.linear(feats), feats
def resnet20(num_classes=10, use_norm=False):
    """Depth-20 CIFAR ResNet.

    Fix: the original ignored its ``num_classes``/``use_norm`` arguments and
    always built a 10-class plain-linear model; they are now forwarded.
    """
    return ResNet_s(BasicBlock, [3, 3, 3], num_classes=num_classes, use_norm=use_norm)


def resnet32(num_classes=10, use_norm=False):
    """Depth-32 CIFAR ResNet (the architecture this project trains)."""
    return ResNet_s(BasicBlock, [5, 5, 5], num_classes=num_classes, use_norm=use_norm)


def resnet44(num_classes=10, use_norm=False):
    """Depth-44 CIFAR ResNet. Optional args added for consistency with
    resnet20/32; defaults reproduce the previous behavior."""
    return ResNet_s(BasicBlock, [7, 7, 7], num_classes=num_classes, use_norm=use_norm)


def resnet56(num_classes=10, use_norm=False):
    """Depth-56 CIFAR ResNet."""
    return ResNet_s(BasicBlock, [9, 9, 9], num_classes=num_classes, use_norm=use_norm)


def resnet110(num_classes=10, use_norm=False):
    """Depth-110 CIFAR ResNet."""
    return ResNet_s(BasicBlock, [18, 18, 18], num_classes=num_classes, use_norm=use_norm)


def resnet1202(num_classes=10, use_norm=False):
    """Depth-1202 CIFAR ResNet."""
    return ResNet_s(BasicBlock, [200, 200, 200], num_classes=num_classes, use_norm=use_norm)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Smoke-test every resnet* constructor exported by this module.
    for candidate in __all__:
        if not candidate.startswith('resnet'):
            continue
        print(candidate)
        test(globals()[candidate]())
        print()
EOS | EOS-main/linear_sm.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn import Parameter
class Lin(nn.Module):
    """Single linear classification head mapping 64-d features to 10 logits.

    Trained on top of frozen feature-space (EOS-augmented) data.
    """

    def __init__(self):
        super(Lin, self).__init__()
        self.linear = nn.Linear(64, 10)

    def forward(self, x):
        return self.linear(x)
| 375 | 14.04 | 39 | py |
EOS | EOS-main/cifar_train_os.py | """
code adapted from: https://github.com/kaidic/LDAM-DRW
"""
import argparse
import os
import random
import time
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from sklearn.metrics import accuracy_score
import pandas as pd
from sklearn.metrics import balanced_accuracy_score
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from imblearn.metrics import geometric_mean_score
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from linear_sm import Lin
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from losses import LDAMLoss, FocalLoss, ASLSingleLabel
# Wall-clock timers: t00 = total runtime, t0 = per-epoch-block timing.
t00 = time.time()
t0=time.time()
# ---------------------------------------------------------------------------
# Command-line interface for the feature-space (EOS) training script.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH',
                    default='resnet32')
parser.add_argument('--loss_type',
                    default="CE",
                    type=str, help='loss type')
parser.add_argument('--imb_type', default="exp",
                    type=str, help='imbalance type')
parser.add_argument('--imb_factor', default=0.01, type=float,
                    help='imbalance factor')
parser.add_argument('--train_rule',
                    default='None',
                    type=str,
                    help='data sampling strategy for train loader')
parser.add_argument('--rand_number', default=0, type=int,
                    help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str,
                    help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default = 10,
                    type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size',
                    default=128,
                    type=int,
                    metavar='N',
                    help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed',
                    default=0,
                    type=int,
                    help='seed for initializing training. ')
# Paths below ('...' placeholders) must be filled in by the user.
parser.add_argument('--data_path',
                    default='.../data/',
                    type=str,
                    help='data path.')
parser.add_argument('--input_data',
                    default='.../CE_cif_trn_EOS1.csv',
                    type=str,
                    help='EOS augmented data')
parser.add_argument('--saved_model_path',
                    default='.../CE_cif_trn_EOS1_',
                    type=str,
                    help='path to save trained models.')
parser.add_argument('--val_data',
                    default='CE_cif_imb_val.csv',
                    type=str,
                    help='path to validation data in FE space.')
parser.add_argument('--gpu',
                    default = 0,
                    type=int,
                    help='GPU id to use.')
parser.add_argument('--root_log',type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
# Global determinism settings: seed 0 everywhere, cuDNN deterministic mode.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.set_printoptions(precision=5, threshold=20000, sci_mode=False)
def main1(f_in,f_out,f_val):
    """Top-level driver for one training job.

    f_in: CSV of EOS-augmented feature-space training data.
    f_out: prefix under which best model weights are saved.
    f_val: CSV of feature-space validation data.

    Parses args, prepares log folders, seeds RNGs, and hands off to
    main_worker. Prints total wall-clock time at the end.
    """
    best_acc = 0 # best test accuracy
    global t00
    args = parser.parse_args()
    # Echo the full configuration for the run log.
    for arg in vars(args):
        print (arg, getattr(args, arg))
    print()
    args.store_name = '_'.join([args.dataset, args.arch, args.loss_type,
                                args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
    prepare_folders(args)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    ngpus_per_node = torch.cuda.device_count()
    # NOTE(review): f_val is not forwarded here — main_worker reads it as a
    # module-level global set by the driver loop below.
    main_worker(args.gpu, ngpus_per_node, args,f_in,f_out, best_acc)
    print('total time (min): %.3f\n' % ((time.time()-t00)/60))
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def seed_worker(worker_id):
    """DataLoader ``worker_init_fn``: pin numpy/python RNGs to seed 0.

    Fix: the original called ``torch.initial_seed(0)``, but
    ``torch.initial_seed()`` takes no arguments, so any run with
    ``num_workers > 0`` crashed with a TypeError. The result was unused
    anyway, so the call is dropped; the fixed seeds below reproduce the
    intended deterministic worker behavior.
    """
    np.random.seed(0)
    random.seed(0)
def main_worker(gpu, ngpus_per_node, args,f_in,f_out,best_acc):
    """Build the linear head, load feature-space CSV data, and run the
    train/validate loop for ``args.epochs`` epochs.

    NOTE(review): the validation CSV path ``f_val`` is read from a
    module-level global (set by the driver loop at the bottom of this
    file), not from a parameter — confirm before reusing this function.
    """
    global t0
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    # create model
    print("=> creating model '{}'".format(args.arch))
    num_classes = 100 if args.dataset == 'cifar100' else 10
    use_norm = True if args.loss_type == 'LDAM' else False
    # Only a 64->10 linear head is trained here (features are precomputed).
    model = Lin()
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    count = count_parameters(model)
    print('num params ',count)
    print()
    torch.cuda.manual_seed(0)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:0')
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Data loading code
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    trans_os = transforms.Compose([
        transforms.ToTensor()])
    if args.dataset == 'cifar10':
        # The dataset object is created only to get per-class counts and to
        # act as a container; its data/targets are replaced with the
        # feature-space CSV content below.
        train_dataset = IMBALANCECIFAR10(root=args.data_path,
            imb_type=args.imb_type, imb_factor=args.imb_factor,
            rand_number=args.rand_number, train=True, download=True,
            transform=trans_os)
        # CSV layout (train): column 0 = label, columns 1.. = 64-d features.
        dec_x = pd.read_csv(f_in)
        dec_x = dec_x.to_numpy()
        dec_y = dec_x[:,0]
        dec_x = dec_x[:,1:]
        dec_y = dec_y.astype(np.uint8)
        print('dec x ',dec_x.shape)
        dec_x = dec_x.reshape(-1,64)
        dec_y = np.squeeze(dec_y)
        dec_y.shape
        print('dec y ', dec_y.shape)
        train_dataset.data = dec_x
        train_dataset.targets = dec_y
        val_dataset = datasets.CIFAR10(root=args.data_path,
            train=False,
            download=True, transform=trans_os)
        # CSV layout (val): column 0 = label; features start at column 3
        # (columns 1-2 are skipped — presumably metadata; TODO confirm).
        dec_x = pd.read_csv(f_val)
        dec_x = dec_x.to_numpy()
        dec_y = dec_x[:,0]
        dec_x = dec_x[:,3:]
        dec_y = dec_y.astype(np.uint8)
        dec_x = dec_x.reshape(-1,64)
        dec_y = np.squeeze(dec_y)
        dec_y.shape
        val_dataset.data = dec_x
        val_dataset.targets = dec_y
    elif args.dataset == 'cifar100':
        train_dataset = IMBALANCECIFAR100(root='./data', imb_type=args.imb_type,
            imb_factor=args.imb_factor, rand_number=args.rand_number,
            train=True, download=True, transform=transform_train)
        val_dataset = datasets.CIFAR100(root='./data', train=False,
            download=True, transform=transform_val)
    else:
        warnings.warn('Dataset is not listed')
        return
    print('val dataset ',len(val_dataset),type(val_dataset))
    cls_num_list = train_dataset.get_cls_num_list()
    print()
    print('train cls num list:')
    print(cls_num_list)
    args.cls_num_list = cls_num_list
    cls_num_listv = len(val_dataset)
    print()
    print('val cls num list:')
    print(cls_num_listv)
    print()
    train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True,
        sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=100, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    print('train loader len ',len(train_loader))
    print('val loader len ',len(val_loader))
    print()
    # init log for training
    log_training = open(os.path.join(args.root_log, args.store_name,
                                     'log_train.csv'), 'w')
    log_testing = open(os.path.join(args.root_log, args.store_name,
                                    'log_test.csv'), 'w')
    with open(os.path.join(args.root_log, args.store_name, 'args.txt'),
              'w') as f:
        f.write(str(args))
    tf_writer = SummaryWriter(log_dir=os.path.join(args.root_log,
                                                   args.store_name))
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)
        print('epoch ',epoch)
        # Select the per-epoch sampling / reweighting strategy.
        if args.train_rule == 'None':
            train_sampler = None
            per_cls_weights = None
        elif args.train_rule == 'Resample':
            train_sampler = ImbalancedDatasetSampler(train_dataset)
            per_cls_weights = None
        elif args.train_rule == 'Reweight':
            # Class-balanced ("effective number") weights, Cui et al. 2019.
            print('rewt')
            train_sampler = None
            beta = 0.9999
            effective_num = 1.0 - np.power(beta, cls_num_list)
            per_cls_weights = (1.0 - beta) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        elif args.train_rule == 'DRW':
            # Deferred re-weighting: uniform until epoch 160, balanced after.
            train_sampler = None
            idx = epoch // 160
            betas = [0, 0.9999]
            effective_num = 1.0 - np.power(betas[idx], cls_num_list)
            per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        else:
            warnings.warn('Sample rule is not listed')
        if args.loss_type == 'CE':
            criterion = nn.CrossEntropyLoss(weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'LDAM':
            criterion = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30,
                                 weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'Focal':
            criterion = FocalLoss(weight=per_cls_weights, gamma=2).cuda(args.gpu)
        elif args.loss_type == 'ASL':
            criterion=ASLSingleLabel()
        else:
            warnings.warn('Loss type is not listed')
            return
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args,
              log_training,
              tf_writer)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, epoch, args,
                        f_out, best_acc,
                        log_testing, tf_writer)
        is_best = acc1 > best_acc
        best_acc = max(acc1, best_acc)
        tf_writer.add_scalar('acc/test_top1_best', best_acc, epoch)
        output_best = 'Best Prec@1: %.3f\n' % (best_acc)
        print(output_best)
        log_testing.write(output_best + '\n')
        log_testing.flush()
        save_checkpoint(args, {
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
        if epoch % 10 == 0:
            t1 = (time.time() - t0)/60
            print('%2d epoch time (min): %.3f\n' % (epoch,t1))
            t0 = time.time()
def train(train_loader, model, criterion, optimizer, epoch, args, log,
          tf_writer):
    """Run one training epoch over ``train_loader``.

    Performs the SGD update per batch, logs running meters every 25 batches
    to stdout and ``log``, and at epoch end prints per-class accuracy plus
    imbalance metrics (ACSA/bACC/GM/PM/FM) and writes scalars to
    ``tf_writer``.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    train_loss = 0
    correct = 0
    total = 0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    count = 0
    train_on_gpu = torch.cuda.is_available()
    classes = ('0', '1', '2', '3', '4','5', '6', '7', '8', '9')
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        target = target.long()
        # Inputs are 64-d precomputed feature vectors, not images.
        input = input.reshape(-1,64)
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        tar_np = target.detach().cpu().numpy()
        tar_len = len(tar_np)
        total += target.size(0)
        _, pred = torch.max(output, 1)
        pred_np = pred.detach().cpu().numpy()
        # Accumulate epoch-level labels/predictions for the sklearn metrics.
        if count == 0:
            y_true = np.copy(tar_np)
            y_pred = np.copy(pred_np)
        else:
            y_true = np.concatenate((y_true,tar_np),axis=None)
            y_pred = np.concatenate((y_pred,pred_np),axis=None)
        count+=1
        correct_tensor = pred.eq(target.data.view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        # np.squeeze turns a 1-element batch into a 0-d array, hence the split.
        if tar_len > 1:
            for n in range(tar_len):
                label = target.data[n]
                class_correct[label] += correct[n].item()
                class_total[label] += 1
        else:
            for n in range(tar_len):
                label = target.data[n]
                class_correct[label] += correct.item()#[n]
                class_total[label] += 1
        if i % 25 == 0:
            # NOTE(review): printed lr is scaled by 0.1 — display only;
            # this mirrors the upstream LDAM-DRW code (see its TODO).
            output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5,
                lr=optimizer.param_groups[-1]['lr'] * 0.1))  # TODO
            print(output)
            log.write(output + '\n')
            log.flush()
    tf_writer.add_scalar('loss/train', losses.avg, epoch)
    tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)
    tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)
    tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)
    if epoch % 1 == 0:
        for i in range(10):
            if class_total[i] > 0:
                print('Train Accuracy of %5s: %2d%% (%2d/%2d)' % (
                    classes[i], 100 * class_correct[i] / class_total[i],
                    np.sum(class_correct[i]), np.sum(class_total[i])))
            else:
                print('Train Accuracy of %5s: N/A (no training examples)' % (classes[i]))
        print('\nTrain Accuracy (Overall): %2d%% (%2d/%2d)' % (
            100. * np.sum(class_correct) / np.sum(class_total),
            np.sum(class_correct), np.sum(class_total)))
        target_names = ['class 0', 'class 1', 'class 2','class 3', 'class 4',
                        'class 5', 'class 6', 'class 7', 'class 8','class 9']
        print(classification_report_imbalanced(y_true, y_pred,
                                               target_names=target_names))
        gm = geometric_mean_score(y_true, y_pred, average='macro')
        pm = precision_score(y_true, y_pred, average='macro')
        fm = f1_score(y_true, y_pred, average='macro',zero_division=1)
        acsa = accuracy_score(y_true, y_pred) #acsa
        bacc = balanced_accuracy_score(y_true, y_pred)
        print('ACSA ',acsa)
        print('bacc ',bacc)
        print('GM ',gm)
        print('PM ',pm)
        print('FM ',fm)
def validate(val_loader, model, criterion, epoch, args, f_out, best_acc,
             log=None, tf_writer=None,
             flag='val'):
    """Evaluate *model* on *val_loader* for one epoch.

    Prints batch progress and per-class accuracy, computes imbalance
    metrics (ACSA/bACC/GM/PM/FM), and — when the macro-F1 improves on
    ``best_acc`` — saves the model weights to ``f_out + '<epoch>_best.pth'``.
    Returns the (possibly updated) best macro-F1.

    Bug fix vs. original: the inner per-sample loop reused the batch index
    variable ``i``, so ``i % args.print_freq`` was evaluated against the
    last *sample* index rather than the batch index; the inner loop now
    uses ``j``. A duplicate ``torch.max(output, 1)`` call was also removed
    (identical result).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    print('validate')
    print()
    train_loss = 0
    correct = 0
    total = 0
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    count = 0
    train_on_gpu = torch.cuda.is_available()
    classes = ('0', '1', '2', '3', '4','5', '6', '7', '8', '9')
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            input = torch.squeeze(input)
            output = model(input)
            target = target.long()
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            _, pred = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
            tar_np = target.detach().cpu().numpy()
            tar_len = len(tar_np)
            total += target.size(0)
            pred_np = pred.detach().cpu().numpy()
            # Accumulate epoch-level labels/predictions for sklearn metrics.
            if count == 0:
                y_true = np.copy(tar_np)
                y_pred = np.copy(pred_np)
            else:
                y_true = np.concatenate((y_true,tar_np),axis=None)
                y_pred = np.concatenate((y_pred,pred_np),axis=None)
            count+=1
            correct_tensor = pred.eq(target.data.view_as(pred))
            correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
            # Per-class bookkeeping; use j so the batch index i is preserved.
            for j in range(tar_len):
                label = target.data[j]
                class_correct[label] += correct[j].item()
                class_total[label] += 1
            if i % args.print_freq == 0:
                output = ('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    top1=top1, top5=top5))
                print(output)
        # Per-class accuracy from the confusion matrix over the whole epoch.
        cf = confusion_matrix(all_targets, all_preds).astype(float)
        cls_cnt = cf.sum(axis=1)
        cls_hit = np.diag(cf)
        cls_acc = cls_hit / cls_cnt
        output = ('{flag} Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
                  .format(flag=flag, top1=top1, top5=top5, loss=losses))
        out_cls_acc = '%s Class Accuracy: %s'%(flag,(np.array2string(cls_acc, separator=',', formatter={'float_kind':lambda x: "%.3f" % x})))
        print(output)
        print(out_cls_acc)
        if log is not None:
            log.write(output + '\n')
            log.write(out_cls_acc + '\n')
            log.flush()
            tf_writer.add_scalar('loss/test_'+ flag, losses.avg, epoch)
            tf_writer.add_scalar('acc/test_' + flag + '_top1', top1.avg, epoch)
            tf_writer.add_scalar('acc/test_' + flag + '_top5', top5.avg, epoch)
            tf_writer.add_scalars('acc/test_' + flag + '_cls_acc', {str(i):x for i, x in enumerate(cls_acc)}, epoch)
    if epoch % 1 == 0:
        for i in range(10):
            if class_total[i] > 0:
                print('Validation Accuracy of %5s: %2d%% (%2d/%2d)' % (
                    classes[i], 100 * class_correct[i] / class_total[i],
                    np.sum(class_correct[i]), np.sum(class_total[i])))
            else:
                print('Validation Accuracy of %5s: N/A (no training examples)' % (classes[i]))
        print('\nValidation Accuracy (Overall): %2d%% (%2d/%2d)' % (
            100. * np.sum(class_correct) / np.sum(class_total),
            np.sum(class_correct), np.sum(class_total)))
        target_names = ['class 0', 'class 1', 'class 2','class 3', 'class 4',
                        'class 5', 'class 6', 'class 7', 'class 8','class 9']
        gm = geometric_mean_score(y_true, y_pred, average='macro')
        pm = precision_score(y_true, y_pred, average='macro')
        fm = f1_score(y_true, y_pred, average='macro',zero_division=1)
        acsa = accuracy_score(y_true, y_pred) #acsa
        bacc = balanced_accuracy_score(y_true, y_pred)
        print('ACSA ',acsa)
        print('bacc ',bacc)
        print('GM ',gm)
        print('PM ',pm)
        print('FM ',fm)
    # Checkpoint on macro-F1 improvement (best_acc actually tracks F1).
    if fm > best_acc:
        print('Saving..')
        sfile = f_out + str(epoch) + '_' + 'best.pth'
        torch.save(model.state_dict(), sfile)
        best_acc = fm
    return best_acc
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    # Actual schedule: linear warm-up for the first 5 epochs, then constant
    # args.lr, stepped down x1000 after epoch 160 and x10000 after 180.
    step = epoch + 1
    if step <= 5:
        lr = args.lr * step / 5
    elif step > 180:
        lr = args.lr * 0.0001
    elif step > 160:
        lr = args.lr * 0.001 #.01
    else:
        lr = args.lr
    for group in optimizer.param_groups:
        group['lr'] = lr
# ---------------------------------------------------------------------------
# Script entry: run one training job per (input, save-path, val) triple.
# The lists currently hold a single element each; the loop is kept so
# several EOS-augmented training files can be batched in one invocation.
# ---------------------------------------------------------------------------
args = parser.parse_args()
f1_input = [args.input_data]
savef = [args.saved_model_path]
valf = [args.val_data]
for s in range(len(f1_input)):
    print(s)
    f_in = f1_input[s]
    f_out = savef[s]
    f_val = valf[s]  # NOTE: main_worker reads f_val as a module-level global
    print(f_in)
    print(f_out)
    print(f_val)
    main1(f_in,f_out,f_val)
    print()
    print()
    print()
| 26,091 | 33.65073 | 141 | py |
EOS | EOS-main/utils.py | """
code adapted from: https://github.com/kaidic/LDAM-DRW
"""
import torch
import shutil
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
    """Sampler that oversamples minority classes with class-balanced weights.

    Weights follow the "effective number of samples" scheme (Cui et al.,
    2019): w_c = (1 - beta) / (1 - beta**n_c) with beta = 0.9999, and
    samples are drawn with replacement via torch.multinomial.
    """

    def __init__(self, dataset, indices=None, num_samples=None):
        # Default: consider every element of the dataset.
        self.indices = list(range(len(dataset))) if indices is None else indices
        # Default: draw as many samples per epoch as there are indices.
        self.num_samples = len(self.indices) if num_samples is None else num_samples
        # Per-class counts over the selected indices.
        counts = [0] * len(np.unique(dataset.targets))
        for idx in self.indices:
            counts[self._get_label(dataset, idx)] += 1
        beta = 0.9999
        effective_num = 1.0 - np.power(beta, counts)
        per_cls_weights = (1.0 - beta) / np.array(effective_num)
        # One weight per sample, taken from its class weight.
        self.weights = torch.DoubleTensor(
            [per_cls_weights[self._get_label(dataset, idx)]
             for idx in self.indices])

    def _get_label(self, dataset, idx):
        return dataset.targets[idx]

    def __iter__(self):
        draws = torch.multinomial(self.weights, self.num_samples, replacement=True)
        return iter(draws.tolist())

    def __len__(self):
        return self.num_samples
def calc_confusion_mat(val_loader, model, args):
    """Evaluate *model* on *val_loader*, print per-class accuracy, and save
    a confusion-matrix figure to the run's log directory."""
    model.eval()
    predictions = []
    targets_seen = []
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            _, pred = torch.max(output, 1)
            predictions.extend(pred.cpu().numpy())
            targets_seen.extend(target.cpu().numpy())
    cf = confusion_matrix(targets_seen, predictions).astype(float)
    # Per-class accuracy = diagonal hits / row totals.
    per_cls_acc = np.diag(cf) / cf.sum(axis=1)
    print('Class Accuracy : ')
    print(per_cls_acc)
    class_labels = [str(x) for x in args.cls_num_list]
    plot_confusion_matrix(targets_seen, predictions, class_labels)
    plt.savefig(os.path.join(args.root_log, args.store_name, 'confusion_matrix.png'))
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Render a confusion matrix for (y_true, y_pred) as a matplotlib Axes.

    NOTE(review): ``normalize`` only changes the title and cell number
    format — the matrix values themselves are never normalized here.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light ones.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def prepare_folders(args):
    """Create the log/checkpoint directories for this run if missing.

    Fix: uses ``os.makedirs(..., exist_ok=True)`` so parent directories are
    created as needed and the check-then-``os.mkdir`` race of the original
    is avoided (a concurrent run could crash with FileExistsError).
    """
    folders_util = [args.root_log, args.root_model,
                    os.path.join(args.root_log, args.store_name),
                    os.path.join(args.root_model, args.store_name)]
    for folder in folders_util:
        if not os.path.exists(folder):
            print('creating folder ' + folder)
        os.makedirs(folder, exist_ok=True)
def save_checkpoint(args, state, is_best):
    """Serialize *state* to the run's checkpoint file; when *is_best* is
    true, also copy it alongside as the ``*best.pth.tar`` snapshot."""
    filename = '%s/%s/ckpt.pth.tar' % (args.root_model, args.store_name)
    torch.save(state, filename)
    if is_best:
        best_name = filename.replace('pth.tar', 'best.pth.tar')
        shutil.copyfile(filename, best_name)
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
def accuracy(output, target, topk=(1,)):
    """Return the precision@k of *output* logits vs *target*, in percent,
    as a list of 1-element tensors — one per k in *topk*."""
    with torch.no_grad():
        maxk = max(topk)
        batch = target.size(0)
        # Top-maxk predicted class ids, transposed to (maxk, batch).
        pred = output.topk(maxk, 1, True, True)[1].t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))
        scores = []
        for k in topk:
            # A sample counts for top-k if the target is in its first k rows.
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            scores.append(n_correct.mul_(100.0 / batch))
        return scores
EOS | EOS-main/cifar_train.py | """
code adapted from: https://github.com/kaidic/LDAM-DRW
"""
import argparse
import os
import random
import time
import warnings
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from tensorboardX import SummaryWriter
from sklearn.metrics import confusion_matrix
from imblearn.metrics import geometric_mean_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from collections import Counter
import resnet_cifar as models
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from losses import LDAMLoss, FocalLoss, ASLSingleLabel
############################################################
# Wall-clock timers: t00 = whole-run timer, t0 = rolling 10-epoch timer.
t00 = time.time()
t0=time.time()
# Every lowercase callable exported by resnet_cifar is a selectable arch.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
print('model names ',model_names)
# Command-line interface; the commented-out defaults are alternative
# experiment settings the authors toggled by hand.
parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset',
                    default='cifar10',
                    help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH',
                    default='resnet32',
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (default: resnet32)')
parser.add_argument('--loss_type',
                    default="CE",
                    #default = 'LDAM',
                    #default = 'Focal',
                    #default = 'ASL',
                    type=str, help='loss type')
parser.add_argument('--imb_type',
                    default="exp",
                    #default = 'step',
                    type=str, help='imbalance type')
parser.add_argument('--imb_factor',
                    default=0.01,
                    type=float, help='imbalance factor')
parser.add_argument('--train_rule',
                    default='None',
                    #default='Reweight',
                    #default='Resample',
                    #default='DRW',
                    type=str,
                    help='data sampling strategy for train loader')
parser.add_argument('--rand_number',
                    default=0, #1
                    #default=10, #2
                    #default =100, #3
                    type=int,
                    help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str,
                    help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=0, type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default = 200,
                    type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size',
                    default=128,
                    type=int,
                    metavar='N',
                    help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1,
                    type=float,
                    metavar='LR', help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum', default=0.9, type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4,
                    type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--seed',
                    default=0,
                    type=int,
                    help='seed for initializing training. ')
# NOTE(review): '.../data/' and '.../models/' are placeholder paths that must
# be overridden on the command line before running.
parser.add_argument('--data_path',
                    default='.../data/',
                    type=str,
                    help='data path.')
parser.add_argument('--save_file_path',
                    default='.../models/',
                    type=str,
                    help='data path.')
parser.add_argument('--gpu',
                    default = 0,
                    type=int,
                    help='GPU id to use.')
parser.add_argument('--root_log',type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
# Global state and reproducibility settings, applied at import time.
best_acc1 = 0
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.set_printoptions(precision=4, sci_mode=False, threshold=50000)
np.set_printoptions(precision=4, suppress=True,threshold=50000)
best_acc = 0 # best test accuracy (updated by validate(), keyed on macro F1)
def main():
    """Parse CLI arguments, prepare output folders and launch training.

    Builds ``args.store_name`` from the experiment settings, seeds the RNGs
    when ``--seed`` is given, then delegates all real work to
    ``main_worker`` and reports the total wall-clock time.
    """
    global t00
    args = parser.parse_args()
    # Echo the full configuration for the run log.
    for arg in vars(args):
        print (arg, getattr(args, arg))
    print()
    # Unique per-experiment directory name used for logs and checkpoints.
    args.store_name = '_'.join([args.dataset, args.arch, args.loss_type,
        args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
    prepare_folders(args)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    ngpus_per_node = torch.cuda.device_count()
    print('n gpus ',ngpus_per_node)
    # Single-process training on the selected GPU (no distributed launch).
    main_worker(args.gpu, ngpus_per_node, args)
    print()
    print('total time (min): %.3f\n' % ((time.time()-t00)/60))
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def seed_worker(worker_id):
    """DataLoader ``worker_init_fn``: seed numpy/random per worker.

    BUG FIX: ``torch.initial_seed`` takes no arguments, so the previous
    ``torch.initial_seed(0)`` raised ``TypeError`` in every loader worker.
    It already returns the per-worker base seed derived by the DataLoader;
    folding it into the 32-bit range and seeding ``numpy`` and ``random``
    with it (the recipe from PyTorch's randomness notes) makes all three
    RNG streams reproducible yet distinct across workers, instead of the
    old hard-coded ``seed(0)`` that gave every worker an identical stream.

    ``worker_id`` is supplied by the DataLoader; it is unused because the
    worker identity is already baked into ``torch.initial_seed()``.
    """
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)
def main_worker(gpu, ngpus_per_node, args):
    """Build the model, data pipeline and loss, then run the training loop.

    Runs single-GPU training: constructs the requested resnet_cifar
    architecture, the (optionally imbalanced) CIFAR-10/100 datasets, an SGD
    optimizer, and per-epoch class re-weighting according to
    ``args.train_rule``; trains for ``args.epochs`` epochs, validating and
    checkpointing after each one.
    """
    global best_acc1
    global t0
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    # create model
    print("=> creating model '{}'".format(args.arch))
    num_classes = 100 if args.dataset == 'cifar100' else 10
    # LDAM needs cosine logits, hence the normalized final layer.
    use_norm = True if args.loss_type == 'LDAM' else False
    model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    count = count_parameters(model)
    print('num params ',count)
    print()
    torch.cuda.manual_seed(0)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:0')
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Data loading code: standard CIFAR augmentation + normalization for
    # training, normalization only for evaluation.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    transform_val = transforms.Compose([
        #transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])
    # Imbalanced (long-tailed) training split; the validation set stays balanced.
    if args.dataset == 'cifar10':
        train_dataset = IMBALANCECIFAR10(root=args.data_path,
                imb_type=args.imb_type, imb_factor=args.imb_factor,
                rand_number=args.rand_number, train=True, download=True,
                transform=transform_train)
        print(train_dataset.data.shape, type(train_dataset.data[0]),
              type(train_dataset.data), train_dataset.data.dtype)
        print(train_dataset.targets[0].dtype,
              type(train_dataset.targets))
        print(Counter(train_dataset.targets), len(train_dataset.targets))
        val_dataset = datasets.CIFAR10(root=args.data_path,
                                       train=False,
                                       download=True, transform=transform_val)
    elif args.dataset == 'cifar100':
        train_dataset = IMBALANCECIFAR100(root=args.data_path,
                                          imb_type=args.imb_type,
                                          imb_factor=args.imb_factor,
                                          rand_number=args.rand_number,
                                          train=True, download=True,
                                          transform=transform_train)
        val_dataset = datasets.CIFAR100(root=args.data_path,
                                        train=False,
                                        download=True,
                                        transform=transform_val)
    else:
        warnings.warn('Dataset is not listed')
        return
    cls_num_list = train_dataset.get_cls_num_list()
    args.cls_num_list = cls_num_list
    train_sampler = None
    # NOTE(review): the loaders are built once, before the epoch loop, so the
    # sampler chosen per-epoch below (e.g. for 'Resample') is never actually
    # attached to train_loader — confirm whether 'Resample' is expected to work.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True,
        sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=100, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    # init log for training
    log_training = open(os.path.join(args.root_log, args.store_name, 'log_train.csv'), 'w')
    log_testing = open(os.path.join(args.root_log, args.store_name, 'log_test.csv'), 'w')
    with open(os.path.join(args.root_log, args.store_name, 'args.txt'), 'w') as f:
        f.write(str(args))
    tf_writer = SummaryWriter(log_dir=os.path.join(args.root_log, args.store_name))
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)
        # Select the per-class weighting / sampling strategy for this epoch.
        if args.train_rule == 'None':
            train_sampler = None
            per_cls_weights = None
        elif args.train_rule == 'Resample':
            train_sampler = ImbalancedDatasetSampler(train_dataset)
            per_cls_weights = None
        elif args.train_rule == 'Reweight':
            # Class-balanced weights from the "effective number of samples"
            # (1 - beta^n) / (1 - beta), normalized to sum to num_classes.
            print('rewt')
            train_sampler = None
            beta = 0.9999
            effective_num = 1.0 - np.power(beta, cls_num_list)
            if epoch == 0:
                print('eff num ',effective_num)
            per_cls_weights = (1.0 - beta) / np.array(effective_num)
            if epoch == 0:
                print('per cls wts ',per_cls_weights)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            if epoch == 0:
                print('per cls wts ',per_cls_weights)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        elif args.train_rule == 'DRW':
            # Deferred re-weighting: uniform for the first 160 epochs, then
            # class-balanced weights for the remainder.
            train_sampler = None
            idx = epoch // 160
            betas = [0, 0.9999]
            effective_num = 1.0 - np.power(betas[idx], cls_num_list)
            per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
        else:
            # NOTE(review): this branch only warns; per_cls_weights is then
            # unbound and the criterion construction below raises NameError
            # on the first epoch — confirm unknown rules should abort instead.
            warnings.warn('Sample rule is not listed')
        if args.loss_type == 'CE':
            criterion = nn.CrossEntropyLoss(weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'LDAM':
            criterion = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30,
                                 weight=per_cls_weights).cuda(args.gpu)
        elif args.loss_type == 'Focal':
            criterion = FocalLoss(weight=per_cls_weights, gamma=2).cuda(args.gpu)
        #no clip in single
        elif args.loss_type == 'ASL':
            criterion=ASLSingleLabel()
        else:
            warnings.warn('Loss type is not listed')
            return
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, log_training,
              tf_writer)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, epoch, args, log_testing, tf_writer)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        tf_writer.add_scalar('acc/test_top1_best', best_acc1, epoch)
        output_best = 'Best Prec@1: %.3f\n' % (best_acc1)
        print(output_best)
        log_testing.write(output_best + '\n')
        log_testing.flush()
        save_checkpoint(args, {
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
        # Report elapsed wall-clock time every 10 epochs.
        if epoch % 10 == 0:
            t1 = (time.time() - t0)/60
            print('%2d epoch time (min): %.3f\n' % (epoch,t1))
            t0 = time.time()
def train(train_loader, model, criterion, optimizer, epoch, args, log, tf_writer):
    """Train ``model`` for one epoch over ``train_loader``.

    Prints running loss/top-1/top-5 every 25 batches, logs epoch averages to
    TensorBoard, and every 10 epochs additionally prints per-class training
    accuracies plus imbalance metrics (ACSA, balanced accuracy, geometric
    mean, macro precision, macro F1) over the whole epoch's predictions.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    #global best_acc
    train_loss = 0
    correct = 0
    total = 0
    #count = 0
    # Per-class tallies; sized for 10 classes (CIFAR-10 style reporting).
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    count = 0
    train_on_gpu = torch.cuda.is_available()
    classes = ('0', '1', '2', '3', '4','5', '6', '7', '8', '9')
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        tar_np = target.detach().cpu().numpy()
        tar_len = len(tar_np)
        total += target.size(0)
        _, pred = torch.max(output, 1)
        pred_np = pred.detach().cpu().numpy()
        # Accumulate epoch-level label/prediction vectors for sklearn metrics.
        if count == 0:
            y_true = np.copy(tar_np)
            y_pred = np.copy(pred_np)
        else:
            y_true = np.concatenate((y_true,tar_np),axis=None)
            y_pred = np.concatenate((y_pred,pred_np),axis=None)
        count+=1
        correct_tensor = pred.eq(target.data.view_as(pred))
        correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
        # np.squeeze collapses a 1-element batch to a 0-d array, so a
        # single-sample final batch needs the index-free .item() branch.
        if tar_len > 1:
            for n in range(tar_len):
                label = target.data[n]
                class_correct[label] += correct[n].item()
                class_total[label] += 1
        else:
            for n in range(tar_len):
                label = target.data[n]
                class_correct[label] += correct.item()#[n]
                class_total[label] += 1
        if i % 25 == 0:
            output = ('Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5,
                lr=optimizer.param_groups[-1]['lr'] * 0.1))  # TODO
                # NOTE(review): the printed lr is scaled by 0.1 relative to
                # the optimizer's actual lr (the original TODO) — the
                # TensorBoard scalar below logs the unscaled value.
            print(output)
            log.write(output + '\n')
            log.flush()
    tf_writer.add_scalar('loss/train', losses.avg, epoch)
    tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)
    tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)
    tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)
    # Detailed per-class and imbalance metrics every 10 epochs.
    if epoch % 10 == 0:
        for i in range(10):
            if class_total[i] > 0:
                print('Train Accuracy of %5s: %2d%% (%2d/%2d)' % (
                    classes[i], 100 * class_correct[i] / class_total[i],
                    np.sum(class_correct[i]), np.sum(class_total[i])))
            else:
                print('Train Accuracy of %5s: N/A (no training examples)' % (classes[i]))
        print('\nTrain Accuracy (Overall): %2d%% (%2d/%2d)' % (
            100. * np.sum(class_correct) / np.sum(class_total),
            np.sum(class_correct), np.sum(class_total)))
        target_names = ['class 0', 'class 1', 'class 2','class 3', 'class 4',
                        'class 5', 'class 6', 'class 7', 'class 8','class 9']
        gm = geometric_mean_score(y_true, y_pred, average='macro')
        pm = precision_score(y_true, y_pred, average='macro')
        fm = f1_score(y_true, y_pred, average='macro',zero_division=1)
        acsa = accuracy_score(y_true, y_pred) #acsa
        bacc = balanced_accuracy_score(y_true, y_pred)
        print('ACSA ',acsa)
        print('bacc ',bacc)
        print('GM ',gm)
        print('PM ',pm)
        print('FM ',fm)
def validate(val_loader, model, criterion, epoch, args, log=None, tf_writer=None,
             flag='val'):
    """Evaluate ``model`` on ``val_loader`` for one epoch.

    Prints running top-1/top-5 accuracy every ``args.print_freq`` batches,
    a confusion-matrix-derived per-class accuracy vector, and imbalance
    metrics (ACSA, balanced accuracy, geometric mean, macro precision,
    macro F1). Saves the model weights whenever the macro F1 improves on
    the global ``best_acc``. Returns the average top-1 accuracy.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    global best_acc
    train_loss = 0
    correct = 0
    total = 0
    # Per-class tallies; sized for 10 classes (CIFAR-10 style reporting).
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    count = 0
    train_on_gpu = torch.cuda.is_available()
    classes = ('0', '1', '2', '3', '4','5', '6', '7', '8', '9')
    # switch to evaluate mode
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0], input.size(0))
            top5.update(acc5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            _, pred = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
            tar_np = target.detach().cpu().numpy()
            tar_len = len(tar_np)
            total += target.size(0)
            _, pred = torch.max(output, 1)
            pred_np = pred.detach().cpu().numpy()
            # Accumulate epoch-level label/prediction vectors for sklearn metrics.
            if count == 0:
                y_true = np.copy(tar_np)
                y_pred = np.copy(pred_np)
            else:
                y_true = np.concatenate((y_true,tar_np),axis=None)
                y_pred = np.concatenate((y_pred,pred_np),axis=None)
            count+=1
            correct_tensor = pred.eq(target.data.view_as(pred))
            correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
            # BUG FIX: this per-sample loop previously reused ``i`` as its
            # index, clobbering the enumerate() batch counter so the periodic
            # progress print below almost never fired; use ``n`` as train() does.
            for n in range(tar_len):
                label = target.data[n]
                class_correct[label] += correct[n].item()
                class_total[label] += 1
            if i % args.print_freq == 0:
                output = ('Test: [{0}/{1}]\t'
                          'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                          'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                          'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                          'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    top1=top1, top5=top5))
                print(output)
        # Per-class accuracy from the confusion matrix over the full epoch.
        cf = confusion_matrix(all_targets, all_preds).astype(float)
        cls_cnt = cf.sum(axis=1)
        cls_hit = np.diag(cf)
        cls_acc = cls_hit / cls_cnt
        output = ('{flag} Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
                  .format(flag=flag, top1=top1, top5=top5, loss=losses))
        out_cls_acc = '%s Class Accuracy: %s'%(flag,(np.array2string(cls_acc, separator=',', formatter={'float_kind':lambda x: "%.3f" % x})))
        print(output)
        print(out_cls_acc)
        if log is not None:
            log.write(output + '\n')
            log.write(out_cls_acc + '\n')
            log.flush()
            tf_writer.add_scalar('loss/test_'+ flag, losses.avg, epoch)
            tf_writer.add_scalar('acc/test_' + flag + '_top1', top1.avg, epoch)
            tf_writer.add_scalar('acc/test_' + flag + '_top5', top5.avg, epoch)
            tf_writer.add_scalars('acc/test_' + flag + '_cls_acc', {str(i):x for i, x in enumerate(cls_acc)}, epoch)
    if epoch % 1 == 0:
        for i in range(10):
            if class_total[i] > 0:
                print('Validation Accuracy of %5s: %2d%% (%2d/%2d)' % (
                    classes[i], 100 * class_correct[i] / class_total[i],
                    np.sum(class_correct[i]), np.sum(class_total[i])))
            else:
                print('Validation Accuracy of %5s: N/A (no training examples)' % (classes[i]))
        print('\nValidation Accuracy (Overall): %2d%% (%2d/%2d)' % (
            100. * np.sum(class_correct) / np.sum(class_total),
            np.sum(class_correct), np.sum(class_total)))
    target_names = ['class 0', 'class 1', 'class 2','class 3', 'class 4',
                    'class 5', 'class 6', 'class 7', 'class 8','class 9']
    gm = geometric_mean_score(y_true, y_pred, average='macro')
    pm = precision_score(y_true, y_pred, average='macro')
    fm = f1_score(y_true, y_pred, average='macro',zero_division=1)
    acsa = accuracy_score(y_true, y_pred) #acsa
    bacc = balanced_accuracy_score(y_true, y_pred)
    print('ACSA ',acsa)
    print('bacc ',bacc)
    print('GM ',gm)
    print('PM ',pm)
    print('FM ',fm)
    if fm > best_acc:
        print('Saving..')
        # BUG FIX: the argument parser defines --save_file_path (stored as
        # ``args.save_file_path``); ``args.save_file`` does not exist, so
        # this line previously raised AttributeError on the first improvement.
        sfile = args.save_file_path + args.loss_type + \
            '_C10_' + str(epoch) + '_' + args.train_rule + '_best.pth'
        torch.save(model.state_dict(), sfile)
        best_acc = fm
    return top1.avg
def adjust_learning_rate(optimizer, epoch, args):
    """Set the LR for this epoch: 5-epoch linear warmup, then step decays.

    Using 1-based epoch numbers: epochs 1-5 warm up linearly to ``args.lr``,
    epochs 161-180 run at ``args.lr * 0.01``, epochs above 180 at
    ``args.lr * 0.0001``; everything in between uses the base rate.
    """
    step = epoch + 1
    if step <= 5:
        new_lr = args.lr * step / 5
    elif step > 180:
        new_lr = args.lr * 0.0001
    elif step > 160:
        new_lr = args.lr * 0.01
    else:
        new_lr = args.lr
    # Apply the schedule to every parameter group.
    for group in optimizer.param_groups:
        group['lr'] = new_lr
# Standard script entry point: run training only when executed directly.
# BUG FIX: the previous last line had dataset-extraction metadata
# ("| 25,808 | ...") fused onto it, which is not valid Python.
if __name__ == '__main__':
    main()
EOS | EOS-main/imbalance_cifar.py | """
code adapted from: https://github.com/kaidic/LDAM-DRW
"""
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
torch.cuda.manual_seed(0)
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10):
    """CIFAR-10 with a long-tailed (imbalanced) training split.

    Subsamples the standard CIFAR-10 training data so per-class counts
    follow either an exponential ('exp') or a step ('step') profile, with
    ``imb_factor`` = (size of rarest class) / (size of largest class).
    Subsampling is reproducible via ``rand_number``.
    """
    cls_num = 10
    # NOTE(review): executed once at class-definition time; presumably meant
    # to make CUDA RNG state reproducible, though the subsampling itself is
    # seeded per-instance via np.random.seed(rand_number) in __init__.
    torch.cuda.manual_seed(0)

    def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super(IMBALANCECIFAR10, self).__init__(root, train, transform, target_transform, download)
        # Seed numpy so the per-class shuffling/subsampling is reproducible.
        np.random.seed(rand_number)
        img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
        self.gen_imbalanced_data(img_num_list)
        self.train = train

    def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
        """Return the target number of images for each class index.

        'exp' decays counts geometrically from ``img_max`` down to
        ``img_max * imb_factor``; 'step' keeps the first half of the classes
        full-size and cuts the second half; any other value leaves the
        dataset balanced.
        """
        img_max = len(self.data) / cls_num
        img_num_per_cls = []
        if imb_type == 'exp':
            for cls_idx in range(cls_num):
                num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
                img_num_per_cls.append(int(num))
        elif imb_type == 'step':
            for cls_idx in range(cls_num // 2):
                img_num_per_cls.append(int(img_max))
            for cls_idx in range(cls_num // 2):
                img_num_per_cls.append(int(img_max * imb_factor))
        else:
            img_num_per_cls.extend([int(img_max)] * cls_num)
        return img_num_per_cls

    def gen_imbalanced_data(self, img_num_per_cls):
        """Subsample ``self.data``/``self.targets`` in place to the given counts.

        Also records the realized per-class counts in ``self.num_per_cls_dict``.
        (Removed the previous ``selected`` accumulator: it collected the chosen
        indices but was never read — dead code.)
        """
        new_data = []
        new_targets = []
        targets_np = np.array(self.targets, dtype=np.int64)
        classes = np.unique(targets_np)
        self.num_per_cls_dict = dict()
        for the_class, the_img_num in zip(classes, img_num_per_cls):
            self.num_per_cls_dict[the_class] = the_img_num
            idx = np.where(targets_np == the_class)[0]
            # Random (seeded) choice of which samples survive for this class.
            np.random.shuffle(idx)
            selec_idx = idx[:the_img_num]
            new_data.append(self.data[selec_idx, ...])
            new_targets.extend([the_class, ] * the_img_num)
        new_data = np.vstack(new_data)
        self.data = new_data
        self.targets = new_targets

    def get_cls_num_list(self):
        """Return the per-class sample counts as a plain list indexed by class."""
        cls_num_list = []
        for i in range(self.cls_num):
            cls_num_list.append(self.num_per_cls_dict[i])
        return cls_num_list
class IMBALANCECIFAR100(IMBALANCECIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.

    Only the download/checksum metadata and the class count are overridden;
    all imbalanced-subsampling logic is inherited from IMBALANCECIFAR10.
    """
    # Archive location and integrity data, mirroring torchvision's CIFAR100.
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
    # 100 fine-grained classes instead of 10.
    cls_num = 100
if __name__ == '__main__':
    # Smoke test: build an imbalanced CIFAR-100 split and fetch one sample.
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    trainset = IMBALANCECIFAR100(root='./data', train=True,
                    download=True, transform=transform)
    trainloader = iter(trainset)
    data, label = next(trainloader)
    # BUG FIX: replaced the leftover ``import pdb; pdb.set_trace()`` — a
    # debugging breakpoint that hangs any non-interactive run of this module
    # — with a plain report of what was loaded.
    print('sample shape:', tuple(data.shape), 'label:', label)
    print('class counts:', trainset.get_cls_num_list())
EOS | EOS-main/resnet_cifar.py | '''
code adapted from: https://github.com/kaidic/LDAM-DRW
Properly implemented ResNet for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn import Parameter
__all__ = ['ResNet_s', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class NormedLinear(nn.Module):
    """Linear layer over L2-normalised inputs and weight columns.

    Both the input rows and the weight columns are unit-normalised before
    the matrix product, so every output entry is a cosine similarity in
    [-1, 1] (the LDAM loss applies its own scale factor on top).
    """

    def __init__(self, in_features, out_features):
        super(NormedLinear, self).__init__()
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        # Uniform init, then renorm/mul so each column starts near unit norm.
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)

    def forward(self, x):
        x_unit = F.normalize(x, dim=1)
        w_unit = F.normalize(self.weight, dim=0)
        return x_unit.mm(w_unit)
class LambdaLayer(nn.Module):
    """Adapter that wraps a plain callable as an ``nn.Module``.

    Used for the parameter-free option-A shortcut in BasicBlock.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self._fn = lambd

    def forward(self, x):
        # Delegate straight to the stored callable.
        return self._fn(x)
class BasicBlock(nn.Module):
    """Two-convolution residual block for CIFAR ResNets."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        # Main path: two 3x3 convolutions, each followed by batch norm.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Option A (parameter-free, as in the CIFAR ResNet paper):
                # spatially subsample by 2 and zero-pad the extra channels.
                self.shortcut = LambdaLayer(lambda x:
                                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                # Option B: projection shortcut via a strided 1x1 convolution.
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class ResNet_s(nn.Module):
    """CIFAR-style ResNet: a 3x3 stem plus three stages of residual blocks.

    Stages use 16/32/64 channels; stages 2 and 3 downsample with stride 2.
    With ``use_norm`` the classifier is a NormedLinear (cosine logits, for
    the LDAM loss); otherwise a plain ``nn.Linear``.
    """
    def __init__(self, block, num_blocks, num_classes=10, use_norm=False):
        super(ResNet_s, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        if use_norm:
            self.linear = NormedLinear(64, num_classes)
        else:
            self.linear = nn.Linear(64, num_classes)
        # Kaiming-init every Conv2d/Linear in the network.
        self.apply(_weights_init)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pooling over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def resnet20():
    """20-layer CIFAR ResNet (~0.27M parameters)."""
    return ResNet_s(BasicBlock, [3, 3, 3])
def resnet32(num_classes=10, use_norm=False):
    """32-layer CIFAR ResNet (~0.46M parameters); the architecture used for training."""
    return ResNet_s(BasicBlock, [5, 5, 5], num_classes=num_classes, use_norm=use_norm)
def resnet44():
    """44-layer CIFAR ResNet (~0.66M parameters)."""
    return ResNet_s(BasicBlock, [7, 7, 7])
def resnet56():
    """56-layer CIFAR ResNet (~0.85M parameters)."""
    return ResNet_s(BasicBlock, [9, 9, 9])
def resnet110():
    """110-layer CIFAR ResNet (~1.7M parameters)."""
    return ResNet_s(BasicBlock, [18, 18, 18])
def resnet1202():
    """1202-layer CIFAR ResNet (~19.4M parameters)."""
    return ResNet_s(BasicBlock, [200, 200, 200])
def test(net):
    """Print the trainable parameter count and weight-tensor count of *net*."""
    import numpy as np
    total_params = 0
    for x in filter(lambda p: p.requires_grad, net.parameters()):
        total_params += np.prod(x.data.numpy().shape)
    print("Total number of params", total_params)
    # "Layers" counts trainable tensors with more than one dimension,
    # i.e. weight matrices/kernels but not biases or BN vectors.
    print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* factory and report its size.
    # BUG FIX: the previous final line had dataset-extraction metadata
    # ("| 5,614 | ...") fused onto it, which is not valid Python.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
EOS | EOS-main/reassemble.py | import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from sklearn.metrics import accuracy_score
from losses import LDAMLoss, FocalLoss, ASLSingleLabel
import resnet_cifar_XAI as models
import pandas as pd
from sklearn.metrics import balanced_accuracy_score
from utils import *
from imbalance_cifar import IMBALANCECIFAR10, IMBALANCECIFAR100
from imblearn.metrics import geometric_mean_score
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
t00 = time.time()
t0 = time.time()
torch.set_printoptions(precision=4, threshold=20000, sci_mode=False)
np.set_printoptions(precision=4, suppress=True)
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
print('model names ', model_names)
parser = argparse.ArgumentParser(description='PyTorch Cifar Training')
parser.add_argument('--dataset', default='cifar10', help='dataset setting')
parser.add_argument('-a', '--arch', metavar='ARCH',
default='resnet32',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet32)')
parser.add_argument('--loss_type',
default="CE",
type=str, help='loss type')
parser.add_argument('--imb_type', default="exp",
type=str, help='imbalance type')
parser.add_argument('--imb_factor', default=0.01,
type=float, help='imbalance factor')
parser.add_argument('--train_rule',
default='None',
type=str,
help='data sampling strategy for train loader')
parser.add_argument('--rand_number', default=0, type=int,
help='fix random number for data sampling')
parser.add_argument('--exp_str', default='0', type=str,
help='number to indicate which experiment it is')
parser.add_argument('-j', '--workers', default=0, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
default=1,
type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size',
default=128,
type=int,
metavar='N',
help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=2e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--seed',
default=0,
type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu',
default=0,
type=int,
help='GPU id to use.')
parser.add_argument('--extractor_model_path',
default=".../CEbal_133_None_res32_best.pth",
type=str,
help='path to extractor CNN network model')
parser.add_argument('--classifier_model_path',
default=".../CE/cif10/1/CE_cif_trn_EOS1_6_best.pth",
type=str,
help='path to classifier CNN network model')
parser.add_argument('--data_root',
default=".../data/",
type=str,
help='path to data')
parser.add_argument('--save_data_path',
default=".../CE_cif_test.csv",
type=str,
help='path to saved data')
parser.add_argument('--save_model_path',
default=".../CE_cif_combined.path",
type=str,
help='path to saved data')
parser.add_argument('--root_log', type=str, default='log')
parser.add_argument('--root_model', type=str, default='checkpoint')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
torch.cuda.manual_seed(0)
best_acc1 = 0
best_acc = 0 # best test accuracy
args = parser.parse_args()
for arg in vars(args):
print(arg, getattr(args, arg))
print()
args.store_name = '_'.join([args.dataset, args.arch, args.loss_type,
args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
num_classes = 100 if args.dataset == 'cifar100' else 10
use_norm = True if args.loss_type == 'LDAM' else False
model = models.__dict__[args.arch](num_classes=num_classes, use_norm=use_norm)
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
# CIF10
model.load_state_dict(torch.load(args.extractor_model_path))
from linear_sm import Lin
cmodel = Lin()
cmodel = cmodel.cuda(args.gpu)
cmodel.load_state_dict(torch.load(args.classifier_model_path))
model.linear.weight = cmodel.linear.weight
model.linear.bias = cmodel.linear.bias
torch.save(model.state_dict(), args.save_model_path)
epoch = args.epochs
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_val = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = IMBALANCECIFAR10(root=args.data_root,
imb_type=args.imb_type, imb_factor=args.imb_factor,
rand_number=args.rand_number, train=True,
download=True,
transform=transform_val)
val_dataset = datasets.CIFAR10(root=args.data_root,
train=False,
download=True, transform=transform_val)
print('val dataset ', len(val_dataset), type(val_dataset))
cls_num_list = train_dataset.get_cls_num_list()
print()
print('train cls num list:')
print(cls_num_list)
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=100, shuffle=False,
num_workers=args.workers, pin_memory=True)
print('train loader len ', len(train_loader))
print('val loader len ', len(val_loader))
print()
if args.train_rule == 'None':
train_sampler = None
per_cls_weights = None
elif args.train_rule == 'DRW':
train_sampler = None
idx = epoch // 160
betas = [0, 0.9999]
effective_num = 1.0 - np.power(betas[idx], cls_num_list)
per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
per_cls_weights = per_cls_weights / \
np.sum(per_cls_weights) * len(cls_num_list)
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda(args.gpu)
if args.loss_type == 'CE':
criterion = nn.CrossEntropyLoss(weight=per_cls_weights).cuda(args.gpu)
elif args.loss_type == 'LDAM':
criterion = LDAMLoss(cls_num_list=cls_num_list, max_m=0.5, s=30,
weight=per_cls_weights).cuda(args.gpu)
elif args.loss_type == 'Focal':
criterion = FocalLoss(weight=per_cls_weights, gamma=2).cuda(args.gpu)
elif args.loss_type == 'ASL':
criterion=ASLSingleLabel()#,
def validate(val_loader, model, criterion, epoch, args, f):
losses = AverageMeter('Loss', ':.4e')
print('validate')
print()
global best_acc
train_loss = 0
correct = 0
total = 0
#count = 0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
count = 0
train_on_gpu = torch.cuda.is_available()
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
# switch to evaluate mode
model.eval()
all_preds = []
all_targets = []
all_values = []
all_feats = []
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
output, out1 = model(input)
out1 = out1.detach().cpu().numpy()
loss = criterion(output, target)
m = nn.Softmax(dim=1)
soft = m(output)
values, pred = torch.max(soft, 1)
losses.update(loss.item(), input.size(0))
all_preds.extend(pred.cpu().numpy())
all_targets.extend(target.cpu().numpy())
all_values.extend(values.detach().cpu().numpy())
all_feats.extend(out1)
tar_np = target.detach().cpu().numpy()
tar_len = len(tar_np)
total += target.size(0)
pred_np = pred.detach().cpu().numpy()
if count == 0:
y_true = np.copy(tar_np)
y_pred = np.copy(pred_np)
else:
y_true = np.concatenate((y_true, tar_np), axis=None)
y_pred = np.concatenate((y_pred, pred_np), axis=None)
count += 1
correct_tensor = pred.eq(target.data.view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(
correct_tensor.cpu().numpy())
for i in range(tar_len):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
if epoch % 1 == 0:
for i in range(10):
if class_total[i] > 0:
print('Validation Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print(
'Validation Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nValidation Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
target_names = ['class 0', 'class 1', 'class 2', 'class 3', 'class 4',
'class 5', 'class 6', 'class 7', 'class 8', 'class 9']
print(classification_report_imbalanced(y_true, y_pred,
target_names=target_names))
gm = geometric_mean_score(y_true, y_pred, average='macro')
pm = precision_score(y_true, y_pred, average='macro')
fm = f1_score(y_true, y_pred, average='macro', zero_division=1)
acsa = accuracy_score(y_true, y_pred) # acsa
bacc = balanced_accuracy_score(y_true, y_pred)
print('ACSA ', acsa)
print('bacc ', bacc)
print('GM ', gm)
print('PM ', pm)
print('FM ', fm)
allp = pd.DataFrame(data=all_preds, columns=['pred'])
print('allp ', allp.shape)
allt = pd.DataFrame(data=all_targets, columns=['actual'])
print('allt ', allt.shape)
allv = pd.DataFrame(data=all_values, columns=['certainty'])
print('allv ', allv.shape)
allf = pd.DataFrame(all_feats)
print('allf ', allf.shape)
allcomb = pd.concat([allt, allp, allv, allf], axis=1)
print('comb ', allcomb.shape)
print(allcomb.head())
allcomb.to_csv(f, index=False)
#############################################################
# CE
validate(val_loader, model, criterion, 1, args,args.save_data_path)
| 13,195 | 29.759907 | 96 | py |
knodle-develop | knodle-develop/examples/trainer/preprocessing.py | from typing import List, Union, Tuple
from joblib import dump
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
from torch.utils.data import TensorDataset
def get_tfidf_features(
train_data: List, test_data: List = None, dev_data: List = None, path_to_cache: str = None,
max_features: int = None
) -> Union[Tuple[np.ndarray, np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, None]]:
"""
Convert input data to a matrix of TF-IDF features.
:param train_data: training samples that are to be encoded with TF-IDF features. Can be given as Series or
as DataFrames with specified column number where the sample are stored.
:param test_data: if DataFrame/Series with test data is provided
:param dev_data: if DataFrame/Series with development data is provided, it will be encoded as well
:param path_to_cache: a path to the folder where calculated cached TF-IDF values should be saved
:param max_features: If not None, build a vocabulary that only consider the top max_features ordered by term
frequency across the corpus.
:return: TensorDataset with encoded data
"""
dev_transformed_data, test_transformed_data = None, None
vectorizer = TfidfVectorizer(max_features=max_features)
train_transformed_data = vectorizer.fit_transform(train_data)
if test_data is not None:
test_transformed_data = vectorizer.transform(test_data)
if dev_data is not None:
dev_transformed_data = vectorizer.transform(dev_data)
if path_to_cache:
dump(train_transformed_data, path_to_cache)
dump(dev_transformed_data, path_to_cache)
dump(test_transformed_data, path_to_cache)
return train_transformed_data, test_transformed_data, dev_transformed_data
def convert_text_to_transformer_input(tokenizer, texts: List[str]) -> TensorDataset:
"""
Convert input data to BERT encoded features (more details could be found at
https://huggingface.co/transformers/model_doc)
:param texts: training/dev/test samples that are to be encoded with BERT features. Can be given as Series or
as DataFrames with specified column number where the sample are stored.
:param tokenizer: DistilBertTokenizer tokenizer for english from HuggingFace
:return: TensorDataset with encoded data
"""
encoding = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
input_ids = encoding.get('input_ids')
attention_mask = encoding.get('attention_mask')
input_values_x = TensorDataset(input_ids, attention_mask)
return input_values_x
| 2,606 | 43.186441 | 112 | py |
knodle-develop | knodle-develop/examples/trainer/cleanlab/cleanlab_training_tutorial.py | import argparse
import os
import statistics
import sys
import json
from itertools import product
from torch import Tensor, LongTensor
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from torch.utils.data import TensorDataset
from examples.trainer.preprocessing import get_tfidf_features
from examples.utils import read_train_dev_test
from knodle.model.logistic_regression_model import LogisticRegressionModel
from knodle.trainer.cleanlab.cleanlab import CleanLabTrainer
from knodle.trainer.cleanlab.config import CleanLabConfig
def train_cleanlab(path_to_data: str) -> None:
""" This is an example of launching cleanlab trainer """
num_experiments = 10
parameters = dict(
# seed=None,
lr=[0.1],
cv_n_folds=[3, 5, 8],
prune_method=['prune_by_class', 'prune_by_noise_rate', 'both'],
epochs=[200],
batch_size=[128],
psx_calculation_method=['signatures', 'rules', 'random'], # how the splitting into folds will be performed
)
parameter_values = [v for v in parameters.values()]
df_train, df_dev, df_test, train_rule_matches_z, _, mapping_rules_labels_t = read_train_dev_test(path_to_data)
train_input_x, test_input_x, _ = get_tfidf_features(df_train["sample"], test_data=df_test["sample"])
train_features_dataset = TensorDataset(Tensor(train_input_x.toarray()))
test_features_dataset = TensorDataset(Tensor(test_input_x.toarray()))
test_labels = df_test["label"].tolist()
test_labels_dataset = TensorDataset(LongTensor(test_labels))
num_classes = max(test_labels) + 1
results = []
for run_id, (lr, cv_n_folds, prune_method, epochs, batch_size, psx_calculation_method) in \
enumerate(product(*parameter_values)):
print("======================================")
params = f'seed = None lr = {lr} cv_n_folds = {cv_n_folds} prune_method = {prune_method} epochs = {epochs} ' \
f'batch_size = {batch_size} psx_calculation_method = {psx_calculation_method}'
print(f"Parameters: {params}")
print("======================================")
exp_results = []
for exp in range(0, num_experiments):
model = LogisticRegressionModel(train_input_x.shape[1], num_classes)
custom_cleanlab_config = CleanLabConfig(
# seed=seed,
cv_n_folds=cv_n_folds,
output_classes=num_classes,
optimizer=Adam,
criterion=CrossEntropyLoss,
lr=lr,
epochs=epochs,
batch_size=batch_size,
psx_calculation_method=psx_calculation_method,
prune_method=prune_method,
use_probabilistic_labels=False
)
trainer = CleanLabTrainer(
model=model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=train_features_dataset,
rule_matches_z=train_rule_matches_z,
trainer_config=custom_cleanlab_config
)
trainer.train()
clf_report, _ = trainer.test(test_features_dataset, test_labels_dataset)
print(f"Accuracy is: {clf_report['accuracy']}")
print(clf_report)
exp_results.append(clf_report['accuracy'])
results.append({
# "seed": seed,
"lr": lr, "cv_n_folds": cv_n_folds, "prune_method": prune_method, "epochs": epochs,
"batch_size": batch_size, "psx_calculation_method": psx_calculation_method, "accuracy": exp_results,
"mean_accuracy": statistics.mean(exp_results),
"std_accuracy": statistics.stdev(exp_results)
})
with open(os.path.join(path_to_data, 'cl_results.json'), 'w') as file:
json.dump(results, file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]))
parser.add_argument("--path_to_data", help="")
args = parser.parse_args()
train_cleanlab(args.path_to_data) | 4,080 | 36.787037 | 120 | py |
knodle-develop | knodle-develop/examples/trainer/simple_auto_trainer/auto_trainer_tutorial.py | import os
from typing import List
from tqdm.auto import tqdm
import joblib
from minio import Minio
import pandas as pd
import numpy as np
import scipy.sparse as sp
import torch
from torch.utils.data import TensorDataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, AdamW
from examples.trainer.preprocessing import convert_text_to_transformer_input
from knodle.trainer import AutoTrainer, AutoConfig
# This python script contains rarely any explanation. For more description, we refer to the corresponding
# jupyter notebook. There the steps are explained in more detail
# Define some functions
def np_array_to_tensor_dataset(x: np.ndarray) -> TensorDataset:
"""
:rtype: object
"""
if isinstance(x, sp.csr_matrix):
x = x.toarray()
x = torch.from_numpy(x)
x = TensorDataset(x)
return x
# Define constants
imdb_data_dir = os.path.join(os.getcwd(), "datasets", "spouse")
processed_data_dir = os.path.join(imdb_data_dir, "processed")
os.makedirs(processed_data_dir, exist_ok=True)
# Download data
client = Minio("knodle.cc", secure=False)
files = [
"df_train.csv", "df_dev.csv", "df_test.csv",
"train_rule_matches_z.lib", "dev_rule_matches_z.lib", "test_rule_matches_z.lib",
"mapping_rules_labels_t.lib"
]
for file in tqdm(files):
client.fget_object(
bucket_name="knodle",
object_name=os.path.join("datasets/spouse/processed/", file),
file_path=os.path.join(processed_data_dir, file),
)
# Load data into memory
df_train = pd.read_csv(os.path.join(processed_data_dir, "df_train.csv"))
df_dev = pd.read_csv(os.path.join(processed_data_dir, "df_dev.csv"))
df_test = pd.read_csv(os.path.join(processed_data_dir, "df_test.csv"))
mapping_rules_labels_t = joblib.load(os.path.join(processed_data_dir, "mapping_rules_labels_t.lib"))
train_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "train_rule_matches_z.lib"))
dev_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "dev_rule_matches_z.lib"))
test_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "test_rule_matches_z.lib"))
print(f"Train Z dimension: {train_rule_matches_z.shape}")
print(f"Train avg. matches per sample: {train_rule_matches_z.sum() / train_rule_matches_z.shape[0]}")
model_name = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
X_train = convert_text_to_transformer_input(tokenizer, df_train["sample"].tolist())
X_dev = convert_text_to_transformer_input(tokenizer, df_dev["sample"].tolist())
X_test = convert_text_to_transformer_input(tokenizer, df_test["sample"].tolist())
y_dev = np_array_to_tensor_dataset(df_dev['label'].values)
y_test = np_array_to_tensor_dataset(df_test['label'].values)
# Load AutoTrainer
model = AutoModelForSequenceClassification.from_pretrained(model_name)
trainer_type = "majority"
custom_model_config = AutoConfig.create_config(
name=trainer_type,
optimizer=AdamW,
lr=1e-4,
batch_size=16,
epochs=2,
filter_non_labelled=True
)
print(custom_model_config)
trainer = AutoTrainer(
name="majority",
model=model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=X_train,
rule_matches_z=train_rule_matches_z,
dev_model_input_x=X_dev,
dev_gold_labels_y=y_dev,
trainer_config=custom_model_config,
)
# Run training
trainer.train()
# Run evaluation
eval_dict, _ = trainer.test(X_test, y_test)
print(f"Accuracy: {eval_dict.get('accuracy')}")
| 3,496 | 29.146552 | 105 | py |
knodle-develop | knodle-develop/examples/trainer/simple_auto_trainer/multi_trainer_tutorial.py | import os
from torch import Tensor
from tqdm.auto import tqdm
import joblib
from minio import Minio
import pandas as pd
import numpy as np
import scipy.sparse as sp
import torch
from torch.utils.data import TensorDataset
from transformers import AdamW
from examples.trainer.preprocessing import get_tfidf_features
from knodle.trainer import MajorityConfig, KNNConfig, SnorkelConfig, SnorkelKNNConfig
from knodle.model.logistic_regression_model import LogisticRegressionModel
# This python script contains rarely any explanation. For more description, we refer to the corresponding
# jupyter notebook. There the steps are explained in more detail
# Define some functions
from knodle.trainer.multi_trainer import MultiTrainer
from knodle.trainer.wscrossweigh.config import WSCrossWeighConfig
# Define constants
imdb_data_dir = os.path.join(os.getcwd(), "datasets", "spam")
processed_data_dir = os.path.join(imdb_data_dir, "processed")
os.makedirs(processed_data_dir, exist_ok=True)
# Download data
client = Minio("knodle.cc", secure=False)
files = [
"df_train.csv", "df_test.csv",
"train_rule_matches_z.lib", "test_rule_matches_z.lib",
"mapping_rules_labels_t.lib"
]
for file in tqdm(files):
client.fget_object(
bucket_name="knodle",
object_name=os.path.join("datasets/spam/processed/", file),
file_path=os.path.join(processed_data_dir, file),
)
# Load data into memory
df_train = pd.read_csv(os.path.join(processed_data_dir, "df_train.csv"))
df_test = pd.read_csv(os.path.join(processed_data_dir, "df_test.csv"))
def np_array_to_tensor_dataset(x: np.ndarray) -> TensorDataset:
if isinstance(x, sp.csr_matrix):
x = x.toarray()
x = torch.from_numpy(x)
x = TensorDataset(x)
return x
mapping_rules_labels_t = joblib.load(os.path.join(processed_data_dir, "mapping_rules_labels_t.lib"))
train_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "train_rule_matches_z.lib"))
test_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "test_rule_matches_z.lib"))
print(f"Train Z dimension: {train_rule_matches_z.shape}")
print(f"Train avg. matches per sample: {train_rule_matches_z.sum() / train_rule_matches_z.shape[0]}")
# tfidf
X_train_tfidf, X_test_tfidf, _ = get_tfidf_features(
train_data=df_train["sample"].tolist(),
test_data=df_test["sample"].tolist()
)
# convert input features to datasets
X_train_tfidf_dataset = TensorDataset(Tensor(X_train_tfidf.toarray()))
X_test_tfidf_dataset = TensorDataset(Tensor(X_test_tfidf.toarray()))
# get test labels
y_test = np_array_to_tensor_dataset(df_test['label'].values)
# initilize model
logreg_model = LogisticRegressionModel(X_train_tfidf.shape[1], 2)
configs = [
MajorityConfig(optimizer=AdamW, lr=1e-4, batch_size=16, epochs=3),
KNNConfig(optimizer=AdamW, k=2, lr=1e-4, batch_size=32, epochs=2),
SnorkelConfig(optimizer=AdamW),
SnorkelKNNConfig(optimizer=AdamW, radius=0.8),
WSCrossWeighConfig(optimizer=AdamW)
]
trainer = MultiTrainer(
name=["majority", "knn", "snorkel", "snorkel_knn", "wscrossweigh"],
model=logreg_model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=X_train_tfidf_dataset,
rule_matches_z=train_rule_matches_z,
trainer_config=configs,
)
# Run training
trainer.train()
# Run evaluation
metrics = trainer.test(X_test_tfidf_dataset, y_test)
for trainer, metric in metrics.items():
print(f"Trainer: {trainer}, accuracy: {metric[0].get('accuracy')}") | 3,491 | 31.333333 | 105 | py |
knodle-develop | knodle-develop/examples/trainer/[WIP]_baseline/baseline_training_example.py | import logging
import os
from torch import Tensor
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.data.download import MinioConnector
from knodle.model.logistic_regression_model import (
LogisticRegressionModel,
)
from examples.ImdbDataset.utils import init_logger
from examples.utils import read_train_dev_test
from examples.trainer.preprocessing import get_tfidf_features
from knodle.trainer import TrainerConfig
from knodle.trainer.trainer import BaseTrainer
logger = logging.getLogger(__name__)
OUTPUT_CLASSES = 2
RANDOM_STATE = 123
TARGET_PATH = 'data/imdb'
MAX_FEATURES = 40000
def train_simple_ds_model():
init_logger()
if not not os.path.exists('data/imdb/mapping_rules_labels_t.lib'):
minio_connect = MinioConnector()
minio_connect.download_dir("datasets/imdb/processed/", TARGET_PATH)
train_df, dev_df, test_df, train_rule_matches_z, dev_rule_matches_z, test_rule_matches_z, imdb_dataset, \
mapping_rules_labels_t = \
read_train_dev_test(
TARGET_PATH)
logger.info("Train knn tfidf similarity model")
X_train = train_df.reviews_preprocessed
X_dev = dev_df.reviews_preprocessed
X_test = test_df.reviews_preprocessed
tfidf_values = get_tfidf_features(
imdb_dataset.reviews_preprocessed.values, path_to_cache="tutorials/ImdbDataset/tfidf.lib",
max_features=MAX_FEATURES
)
train_dataset = TensorDataset(Tensor(tfidf_values[X_train.index].toarray()))
dev_dataset = TensorDataset(Tensor(tfidf_values[X_dev.index].toarray()))
model = LogisticRegressionModel(tfidf_values.shape[1], 2)
custom_model_config = TrainerConfig(
model=model, epochs=35, optimizer_=SGD(model.parameters(), lr=0.1)
)
trainer = BaseTrainer(
model,
mapping_rules_labels_t=mapping_rules_labels_t,
model_input_x=train_dataset,
rule_matches_z=train_rule_matches_z,
trainer_config=custom_model_config,
)
trainer.train()
tfidf_values_sparse = Tensor(tfidf_values[X_test.index].toarray())
tfidf_values_sparse = tfidf_values_sparse.to(custom_model_config.device)
test_tfidf = TensorDataset(tfidf_values_sparse)
y_test = Tensor(imdb_dataset.loc[X_test.index, "label_id"].values)
y_test = y_test.to(custom_model_config.device)
y_test = TensorDataset(y_test)
clf_report, _ = trainer.test(test_tfidf, y_test)
print(clf_report)
if __name__ == "__main__":
train_simple_ds_model()
| 2,507 | 29.962963 | 109 | py |
knodle-develop | knodle-develop/examples/trainer/wscrossweigh/wscrossweigh_training_tutorial.py | import argparse
import os
import sys
import joblib
import pandas as pd
from minio import Minio
from torch import Tensor, LongTensor
from torch.optim import Adam
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification, AdamW
from examples.trainer.preprocessing import convert_text_to_transformer_input, get_tfidf_features
from knodle.model.logistic_regression_model import LogisticRegressionModel
from knodle.trainer.wscrossweigh.config import WSCrossWeighConfig
from knodle.trainer.wscrossweigh.wscrossweigh import WSCrossWeighTrainer
def train_wscrossweigh(path_to_data: str, num_classes: int) -> None:
"""
We are going to train a BERT classification model using weakly annotated data with additional WSCrossWeigh
denoising. The sample weights in WSCrossWeigh will be trained with logistic regression in order to, firstly,
reduce the computational effort, and, secondly, demonstrate the ability of the algorithm to use different models
for data denoising and classifier training.
:param path_to_data: path to the folder where all the input data is stored
:param num_classes: number of output classes
"""
num_classes = int(num_classes)
# Define constants
imdb_data_dir = os.path.join(os.getcwd(), "datasets", "imdb")
processed_data_dir = os.path.join(imdb_data_dir, "processed")
os.makedirs(processed_data_dir, exist_ok=True)
# Download data
client = Minio("knodle.cc", secure=False)
files = [
"df_train.csv", "df_dev.csv", "df_test.csv",
"train_rule_matches_z.lib", "dev_rule_matches_z.lib", "test_rule_matches_z.lib",
"mapping_rules_labels_t.lib"
]
for file in tqdm(files):
client.fget_object(
bucket_name="knodle",
object_name=os.path.join("datasets/imdb/processed/", file),
file_path=os.path.join(processed_data_dir, file),
)
# Load data into memory
df_train = pd.read_csv(os.path.join(processed_data_dir, "df_train.csv"))
df_dev = pd.read_csv(os.path.join(processed_data_dir, "df_dev.csv"))
df_test = pd.read_csv(os.path.join(processed_data_dir, "df_test.csv"))
mapping_rules_labels_t = joblib.load(os.path.join(processed_data_dir, "mapping_rules_labels_t.lib"))
train_rule_matches_z = joblib.load(os.path.join(processed_data_dir, "train_rule_matches_z.lib"))
# sample weights are calculated with logistic regression model (with TF-IDF features); the BERT model is used for
# the final classifier training.
train_tfidf_sparse, dev_tfidf_sparse, _ = get_tfidf_features(df_train["sample"].tolist(), df_dev["sample"].tolist())
train_tfidf = Tensor(train_tfidf_sparse.toarray())
train_dataset_tfidf = TensorDataset(train_tfidf)
# For the BERT training we convert train, dev, test data to BERT encoded features (input indices & attention mask)
model_name = 'distilbert-base-uncased'
tokenizer = DistilBertTokenizer.from_pretrained(model_name)
X_train = convert_text_to_transformer_input(df_train["sample"].tolist(), tokenizer)
X_dev = convert_text_to_transformer_input(df_dev["sample"].tolist(), tokenizer)
X_test = convert_text_to_transformer_input(df_test["sample"].tolist(), tokenizer)
y_dev = TensorDataset(LongTensor(df_dev["label"].tolist()))
y_test = TensorDataset(LongTensor(df_test["label"].tolist()))
# define the all needed parameters in a dictionary for convenience (can also be directly passed to Trainer/Config)
parameters = {
"lr": 1e-4, "cw_lr": 0.8, "epochs": 5, "cw_partitions": 2, "cw_folds": 5, "cw_epochs": 2, "weight_rr": 0.7,
"samples_start_weights": 4.0
}
# to have sample weights saved with some specific index in the file name, you can use "caching_suffix" variable
caching_suffix = f"dscw_{parameters.get('cw_partitions')}part_{parameters.get('cw_folds')}folds_" \
f"{parameters.get('weight_rr')}wrr"
# define LogReg and BERT models for training sample weights and final classifier correspondingly
cw_model = LogisticRegressionModel(train_tfidf.shape[1], num_classes)
model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=num_classes)
# define a custom WSCrossWeigh config. If no custom config is defined, the WSCrossWeighTrainer will use the default
# WSCrossWeighConfig which is stored in the fold with the WSCrossWeigh trainer
custom_wscrossweigh_config = WSCrossWeighConfig(
# general trainer parameters
output_classes=num_classes,
filter_non_labelled=False,
other_class_id=3,
seed=12345,
epochs=parameters.get("epochs"),
batch_size=16,
optimizer=AdamW,
lr=parameters.get("lr"),
grad_clipping=5,
caching_suffix=caching_suffix,
saved_models_dir=os.path.join(path_to_data, "trained_models"), # trained classifier model will be saved after each epoch
# WSCrossWeigh specific parameters
partitions=parameters.get("cw_partitions"), # number of WSCrossWeigh iterations (= splitting into folds)
folds=parameters.get("cw_folds"), # number of folds train data will be splitted into
weight_reducing_rate=parameters.get("weight_rr"), # sample weights reducing coefficient
samples_start_weights=parameters.get("samples_start_weights"), # the start weight of sample weights
cw_epochs=parameters.get("cw_epochs"), # number of epochs each WSCrossWeigh model is to be trained
cw_optimizer=Adam, # WSCrossWeigh model optimiser
cw_lr=parameters.get("cw_lr") # WSCrossWeigh model lr
)
trainer = WSCrossWeighTrainer(
# general Trainer inputs (a more detailed explanation of Knodle inputs is in README)
model=model, # classification model
mapping_rules_labels_t=mapping_rules_labels_t, # t matrix
model_input_x=X_train, # x matrix for training the classifier
rule_matches_z=train_rule_matches_z, # z matrix
trainer_config=custom_wscrossweigh_config,
# additional dev set used for classification model evaluation during training
dev_model_input_x=X_dev,
dev_gold_labels_y=y_dev,
# WSCrossWeigh specific parameters. If they are not defined, the corresponding main classification parameters
# will be used instead (model instead of cw_model etc)
cw_model=cw_model, # model that will be used for WSCrossWeigh weights calculation
cw_model_input_x=train_dataset_tfidf, # x matrix for training the WSCrossWeigh models
)
# the WSCrossWeighTrainer is trained
trainer.train()
# the trained model is tested on the test set
clf_report, _ = trainer.test(X_test, y_test)
print(clf_report)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]))
parser.add_argument("--path_to_data", help="Path to the folder where all input files are stored.")
parser.add_argument("--num_classes", help="Number of classes")
args = parser.parse_args()
train_wscrossweigh(args.path_to_data, args.num_classes) | 7,220 | 48.122449 | 129 | py |
knodle-develop | knodle-develop/examples/trainer/wscrossweigh/wscrossweigh_training_with_BiLSTM_tutorial.py | import argparse
import os
import sys
from typing import Dict
import numpy as np
import pandas as pd
import torch
from torch import Tensor, LongTensor
from torch.optim import Adam
from torch.utils.data import TensorDataset
from knodle.evaluation.other_class_metrics import score
from knodle.model.bidirectional_lstm_model import BidirectionalLSTM
from knodle.trainer.wscrossweigh.config import WSCrossWeighConfig
from knodle.trainer.wscrossweigh.wscrossweigh import WSCrossWeighTrainer
from examples.utils import read_train_dev_test, get_samples_list
NUM_CLASSES = 42
MAXLEN = 50
SPECIAL_TOKENS = ["<PAD>", "<UNK>"]
CLASS_WEIGHTS = torch.FloatTensor([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0])
def train_wscrossweigh(
path_to_data: str,
path_labels: str,
path_emb: str,
path_sample_weights: str = None,
) -> None:
""" Training the BiLSTM model with WSCrossWeigh denoising algorithm """
labels2ids = read_labels_from_file(path_labels, "no_relation")
word2id, word_embedding_matrix = vocab_and_vectors(path_emb)
df_train, df_dev, df_test, z_train_rule_matches, z_test_rule_matches, t_mapping_rules_labels = \
read_train_dev_test(path_to_data, if_dev_data=True)
train_input_x = get_samples_features(df_train, word2id, samples_column_num=1)
dev_dataset = get_samples_features(df_dev, word2id, samples_column_num=1)
dev_labels_dataset = TensorDataset(LongTensor(list(df_dev.iloc[:, 4])))
test_dataset = get_samples_features(df_test, word2id, samples_column_num=1)
test_labels_dataset = TensorDataset(LongTensor(list(df_test.iloc[:, 4])))
os.makedirs(path_sample_weights, exist_ok=True)
parameters = {
"lr": 1e-4,
"cw_lr": 0.8,
"epochs": 5,
"cw_partitions": 2,
"cw_folds": 5,
"cw_epochs": 2,
"weight_rr": 0.7,
"samples_start_weights": 4.0
}
model = BidirectionalLSTM(
word_embedding_matrix.shape[0], word_embedding_matrix.shape[1], word_embedding_matrix, NUM_CLASSES
)
custom_wscrossweigh_config = WSCrossWeighConfig(
output_classes=NUM_CLASSES,
class_weights=CLASS_WEIGHTS,
filter_non_labelled=True,
if_set_seed=True,
epochs=parameters.get("epochs"),
batch_size=16,
optimizer=Adam,
lr=parameters.get("lr"),
grad_clipping=5,
partitions=parameters.get("cw_partitions"),
folds=parameters.get("cw_folds"),
weight_reducing_rate=parameters.get("weight_rr"),
samples_start_weights=parameters.get("samples_start_weights")
)
trainer = WSCrossWeighTrainer(
model=model,
mapping_rules_labels_t=t_mapping_rules_labels,
model_input_x=train_input_x,
dev_model_input_x=dev_dataset,
dev_gold_labels_y=dev_labels_dataset,
rule_matches_z=z_train_rule_matches,
trainer_config=custom_wscrossweigh_config,
evaluation_method="tacred",
dev_labels_ids=labels2ids,
use_weights=True,
run_classifier=True
)
trainer.train()
clf_report, _ = trainer.test(test_dataset, test_labels_dataset)
print(clf_report)
def get_samples_features(input_data: pd.DataFrame, word2id: dict, samples_column_num: int = None) -> TensorDataset:
""" Encodes input samples with glove vectors and returns as a Dataset """
enc_input_samples = encode_samples(get_samples_list(input_data, samples_column_num), word2id, MAXLEN)
inputs_x_tensor = torch.LongTensor(enc_input_samples)
inputs_x_dataset = torch.utils.data.TensorDataset(inputs_x_tensor)
return inputs_x_dataset
def read_labels_from_file(path_labels: str, negative_label: str) -> dict:
""" Reads the labels from the file and encode them with ids """
relation2ids = {}
with open(path_labels, encoding="UTF-8") as file:
for line in file.readlines():
relation, relation_enc = line.replace("\n", "").split(",")
relation2ids[relation] = int(relation_enc)
# add no_match label
if negative_label:
relation2ids[negative_label] = max(list(relation2ids.values())) + 1
return relation2ids
def encode_samples(raw_samples: list, word2id: dict, maxlen: int) -> list:
""" This function turns raw text samples into encoded ones using the given word2id dict """
enc_input_samples = []
for sample in raw_samples:
enc_tokens = [word2id.get(token, 1) for token in sample.lstrip().split(" ")]
enc_input_samples.append(np.asarray(add_padding(enc_tokens, maxlen), dtype="float32"))
return enc_input_samples
def add_padding(tokens: list, maxlen: int) -> list:
    """Pad (or truncate) the token id list to exactly ``maxlen`` entries, using 0 as padding."""
    width = max(maxlen, 0)          # non-positive maxlen yields an empty list
    kept = list(tokens[:width])
    return kept + [0] * (width - len(kept))
def vocab_and_vectors(filename: str) -> (dict, np.ndarray):
    """
    Read pretrained word embeddings and build (1) a matrix (words x embedding dim)
    and (2) a word -> id dictionary.

    The first file line holds "<vocab size> <dim>". SPECIAL_TOKENS occupy the
    first rows of the matrix and are left as zero vectors; only the first
    occurrence of a duplicated word is kept.
    :param filename: path to file with pretrained word embeddings
    :return: word2id, embedding matrix
    """
    word2id = {token: token_id for token_id, token in enumerate(SPECIAL_TOKENS)}
    with open(filename, encoding="UTF-8") as emb_file:
        header = emb_file.readline().strip().split(" ")
        word_embedding_matrix = np.zeros(
            (int(header[0]) + len(SPECIAL_TOKENS), int(header[1]))
        )
        next_id = len(SPECIAL_TOKENS)
        for row in emb_file:
            fields = row.strip().split(" ")
            token = fields[0]
            if token in word2id:
                continue
            word_embedding_matrix[next_id] = [float(v) for v in fields[1:]]
            word2id[token] = next_id
            next_id += 1
    return word2id, word_embedding_matrix
# deprecated
def test(model, trainer, test_features: TensorDataset, test_labels: Tensor, labels2ids: Dict) -> Dict:
    """Run `model` over the encoded test set and return the TACRED-style score dict.

    Deprecated: kept for reference only; `trainer.test(...)` is used instead.

    NOTE(review): predictions/labels are accumulated with torch.cat onto a
    default (float) tensor while `predicted` is an integer index tensor --
    confirm this dtype mix is accepted by the installed torch version.
    NOTE(review): inference runs without a torch.no_grad() guard, so the
    forward passes keep autograd history; wasteful but not incorrect.
    """
    feature_labels_dataset = TensorDataset(test_features.tensors[0], test_labels)
    feature_labels_dataloader = trainer._make_dataloader(feature_labels_dataset)
    model.eval()
    all_predictions, all_labels = torch.Tensor(), torch.Tensor()
    for features, labels in feature_labels_dataloader:
        outputs = model(features)
        _, predicted = torch.max(outputs, 1)
        all_predictions = torch.cat([all_predictions, predicted])
        all_labels = torch.cat([all_labels, labels])
    # cast the accumulated tensors to plain int lists for the scorer
    predictions_idx, test_labels_idx = (all_predictions.detach().type(torch.IntTensor).tolist(),
                                        all_labels.detach().type(torch.IntTensor).tolist())
    # invert labels2ids so ids can be mapped back to label strings
    idx2labels = dict([(value, key) for key, value in labels2ids.items()])
    predictions = [idx2labels[p] for p in predictions_idx]
    test_labels = [idx2labels[p] for p in test_labels_idx]
    return score(test_labels, predictions, verbose=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]))
parser.add_argument("--path_to_data", help="Path to the folder where all input files are stored.")
parser.add_argument("--path_to_label_ids", help="Path to the file information about labels and their ids is stored")
parser.add_argument("--path_to_word_embeddings", help="Path to the file with pretrained Glove embeddings used for "
"samples encoding")
parser.add_argument("--sample_weights", help="Path to the folder that either sample weights will be saved to"
"or will be loaded from")
parser.add_argument("--num_classes", help="Number of classes")
args = parser.parse_args()
train_wscrossweigh(
args.path_to_data, args.path_labels, args.path_emb, args.sample_weights
) | 8,183 | 40.125628 | 120 | py |
knodle-develop | knodle-develop/examples/data_preprocessing/MIMIC_CXR_dataset/prepare_mimic_cxr.py | # -*- coding: utf-8
"""
Preprocessing of MIMIC-CXR dataset
This file illustrates how weak supervision can be applied on medical images
and the corresponding reports. Since there are two sources of data (images and
reports) we establish a double layer weak supervision.
In this example the MIMIC-CXR dataset is used. There are to versions of this
dataset:
[MIMIC-CXR](https://physionet.org/content/mimic-cxr/2.0.0/) Database (Johnson,
Pollard et al. (2019) is a large publicly available dataset of chest X-rays
including radiology reports. It contains 377110 images and 227835 radiographic
studies. A radiographic study consists of one report and one or multiple images.
[MIMIC-CXR-JPG](https://physionet.org/content/mimic-cxr-jpg/2.0.0/) Database
(Johnson, Lungren et al. (2019) bases on MIMIC-CXR. It additionally includes
weak labels which are derived from the radiology reports using CheXpert labler
(Irvin, Rajpurkar et al. 2019) and the images are in JPG format instead of
DICOM format.
Neither versions of the MIMIC-CXR dataset have gold labels. Since both the
CheXpert data and the MIMIC-CXR data contain chest X-Rays, the CheXpert labler
was used in the MIMIC-CXR-JPG Database to obtain weak labels. We will use a
small subset of the MIMIC images and their weak labels in the data
preprocessing to finetune our image encoder CNN. Apart from that we do not
touch any labels until evaluation.
To evaluate our results in the end, we apply the trained model (Knodle output)
to the validation data of the CheXpert dataset, since they have gold labels.
In the data preprocessing we build the three input matrices knodle requires:
* The rules are generated from the CheXpert Labler phrases. The phrases
contain mentions (synonyms or related words) for each class, which we use to
build our T matrix, so the "rule to class" matrix.
* The Z matrix, so the "rule matches" matrix is generated from the reports
and the rules.
* The images are encoded with a CNN. We try two different approaches:
1) CNN with pretrained weight without finetuning and
2) CNN with pretrained weights and finetuning. Therefore, we need the weak
labels.
"""
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
import random
import copy
import csv
import itertools
import torch
import torch.nn as nn
import torchvision.models as models
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from typing import Dict
from joblib import dump
from PIL import Image
# set directory
# os.chdir("")
# set n between 1 and 50375
n = 10000
download = False
# PhysioNet
if download:
USERNAME = "your_username_her"
PASSWORD = "your_pw_here"
# Files that will be created:
Z = "rule_matches_z.lib"
T = "mapping_rules_labels_t.lib"
X = "train_X.lib"
FINETUNED_MODEL = 'finetuned_model.lib'
X_FINETUNED = "train_X_finetuned.lib"
X_TEST = "X_test.lib"
X_TEST_FINETUNED = "X_test_finetuned.lib"
Y_TEST = "gold_labels_test.lib"
if download:
# downloads from mimic-cxr
url = ["wget -N -c -np --user=", USERNAME, " --password=", PASSWORD,
" https://physionet.org/files/mimic-cxr/2.0.0/"]
command = "".join(url+["cxr-record-list.csv.gz"]) # paths to images
os.system(command)
command = "".join(url+["cxr-study-list.csv.gz"]) # paths to reports
os.system(command)
command = "".join(url+["mimic-cxr-reports.zip"]) # folder of all reports
os.system(command)
# downloads from mimic-cxr-jpg
url = ["wget -N -c -np --user=", USERNAME, " --password=", PASSWORD,
" https://physionet.org/files/mimic-cxr-jpg/2.0.0/"]
command = "".join(url+["mimic-cxr-2.0.0-chexpert.csv.gz"]) # chexpert output
# for mimic dataset
os.system(command)
# NOW UNZIP ALL DOWNLOADED FILES AND THE REPORT FOLDER WITH 7zip
##############################################################################
# MIMIC-CXR-JPG images
##############################################################################
record_list_all = pd.read_csv("cxr-record-list.csv")
study_list = pd.read_csv("cxr-study-list.csv").to_numpy()
# restrict records
# only want to include studies where there are two images
# only want to include one per person
two_records_per_study = record_list_all.groupby("study_id").count() == 2
two_records_per_study = two_records_per_study.rename(columns={"subject_id":"two_rec"})
record_list = pd.merge(record_list_all, two_records_per_study["two_rec"],
how = "left", on= ["study_id"])
record_list_reduced = record_list[record_list["two_rec"]]
record_list_reduced = record_list_reduced.groupby("subject_id").head(2)
record_list_pd = record_list_reduced.drop(columns = ["two_rec"])
record_list = record_list_pd.to_numpy()
# draw a random subset
random.seed(10)
study_indices = random.sample(range(int(len(record_list)/2)), n)
record_indices = [element * 2 for element in study_indices]+[element * 2+1 for element in study_indices]
record_indices.sort()
if download:
for i in tqdm(record_indices):
path = record_list[i,3]
url = ["wget -N -c -np --user=", USERNAME, " --password=", PASSWORD,
" https://physionet.org/files/mimic-cxr-jpg/2.0.0/",
path, " -P ", path.replace("/"+record_list[i,2]+".dcm", "")]
command = "".join(url).replace(".dcm", ".jpg")
os.system(command)
# load reports and save all in one csv
with open("mimic_cxr_text.csv", "w", newline="", encoding="utf-8") as f:
for i in tqdm(range(len(study_list))):
with open("".join(["mimic-cxr-reports/", study_list[i,2]])) as f_path:
text = "".join(f_path.readlines())
text = text.replace("\n", "")
text = text.replace(",", "")
start = text.find("FINDINGS:")
end = text.find("IMPRESSION:")
findings = text[start:end]
impressions = text[end:len(text)]
row = [study_list[i,0],study_list[i,1], findings, impressions]
csvwriter = csv.writer(f)
csvwriter.writerow(row)
# open report csv
reports = pd.read_csv("mimic_cxr_text.csv",
names = ["subject_id","study_id", "findings", "impressions"],
na_values=".")
print("average length findings section:",
np.mean(reports["findings"].str.len()))
print("average length impression section:",
np.mean(reports["impressions"].str.len()))
print("number of NAs in findings and impressions:\n",
pd.isna(reports[["findings", "impressions"]]).sum())
# if impression is missing insert finding
reports.impressions.fillna(reports.findings, inplace=True)
#if neither are there, we do not analyse this study -> drop
del reports["findings"]
reports_processed = reports.dropna()
# merge reports to record_list
record_report_list = pd.merge(record_list_pd, reports_processed,
how = "left", on= ["study_id","subject_id"])
# only first n rows, drop nas
input_list_pd = record_report_list.iloc[record_indices,:].dropna()
input_list = input_list_pd.to_numpy()
# save new n
n = int(len(input_list)/2)
##############################################################################
# make rules from reports and Chexpert-labler
##############################################################################
labels_chexpert = pd.read_csv("mimic-cxr-2.0.0-chexpert.csv")
labels = {id: cat for (cat, id) in enumerate(labels_chexpert.columns[2:16])}
# lower case & replace whitespace with _
classes = [string.lower().replace(" ", "_") for string in labels]
num_classes = len(classes)
labels2ids = {classes[i]:i for i in range(num_classes)}
if download:
# create folder
os.makedirs("".join([os.getcwd(),"/chexpert_rules"]))
# store files in folder
for i in range(len(classes)):
os.system("".join(["curl https://raw.githubusercontent.com/stanfordmlgroup/chexpert-labeler/master/phrases/mention/",
classes[i], ".txt ", "-o chexpert_rules/", classes[i], ".txt"]))
# make T matrix
# read txt in
lines = {}
for i in range(len(classes)):
with open("".join(["chexpert_rules/", classes[i], ".txt"])) as f:
lines[classes[i]] = [each_string.replace("\n", "") for each_string in f.readlines()]
mentions = pd.DataFrame({"label": label, "rule": rule} for (label, rule) in lines.items())
mentions.head()
rules = pd.DataFrame([i for i in itertools.chain.from_iterable(mentions["rule"])], columns = ["rule"])
rules["rule_id"] = range(len(rules))
rules["label"] = np.concatenate([
np.repeat(mentions["label"][i], len(mentions["rule"][i])) for i in range(num_classes)])
rules["label_id"] = [labels2ids[rules["label"][i]] for i in range(len(rules))]
rules.head()
rule2rule_id = dict(zip(rules["rule"], rules["rule_id"]))
rule2label = dict(zip(rules["rule_id"], rules["label_id"]))
def get_mapping_rules_labels_t(rule2label: Dict, num_classes: int) -> np.ndarray:
    """Build the t matrix (rules x labels): t[r, c] = 1 iff rule r maps to class c."""
    t_matrix = np.zeros([len(rule2label), num_classes])
    if rule2label:
        rule_ids = list(rule2label)
        class_ids = [rule2label[rule_id] for rule_id in rule_ids]
        t_matrix[rule_ids, class_ids] = 1
    return t_matrix
mapping_rules_labels_t = get_mapping_rules_labels_t(rule2label, len(labels2ids))
mapping_rules_labels_t[0:5,:]
mapping_rules_labels_t.shape
dump(mapping_rules_labels_t, T)
len(np.unique(rules["rule"])) == len(rules["rule"])
rules_size = rules.groupby("rule").size()
rules_size[np.where(rules_size > 1)[0]]
# rule defib appears for two different classes
# make Z matrix
def get_rule_matches_z(data: np.ndarray, num_rules: int, rule2id: Dict = None) -> np.ndarray:
    """
    Calculate the z matrix (samples x rules).

    :param data: array of raw text samples (reports)
    :param num_rules: number of rules, i.e. number of columns of z
    :param rule2id: mapping from rule string to rule id; defaults to the
        module-level ``rule2rule_id`` built above (kept for backward
        compatibility with the original, global-based implementation)
    :return: dense 0/1 matrix with z[i, j] = 1 iff rule j's string occurs as a
        substring of sample i
    """
    if rule2id is None:
        rule2id = rule2rule_id
    rule_matches_z = np.zeros([len(data), num_rules])
    for sample_id, sample in enumerate(data):
        for rule, rule_id in rule2id.items():
            if rule in sample:
                rule_matches_z[sample_id, rule_id] = 1
    return rule_matches_z
# insert every second row of input_list (one for each study)
rule_matches_z = get_rule_matches_z(input_list[range(0,n*2,2),4], (len(rule2rule_id)+1))
# how many studies without matches
print("proportion of studies without any matches:\n",
sum(np.sum(rule_matches_z, axis = 1)==0)/len(rule_matches_z))
dump(rule_matches_z, Z)
######################################################################
# image - encoding:
# without finetuning
######################################################################
class mimicDataset(Dataset):
    """Dataset over MIMIC-CXR image records.

    ``path`` is a record array whose column 3 holds the image path (a .dcm
    path swapped to .jpg on load) and, when ``load_labels`` is set, column 5
    holds the weak label.
    """

    def __init__(self, path, load_labels=False, transform_fn=None):
        "initialization"
        self.path = path
        self.load_labels = load_labels
        # Fix: __getitem__ used to read `self.transform`, which was never
        # assigned anywhere and raised AttributeError at runtime. An explicit
        # pipeline can now be injected; None falls back to the module-level
        # `transform` at access time.
        self.transform = transform_fn

    def __len__(self):
        "total number of samples"
        return len(self.path)

    def __getitem__(self, index):
        "one sample of data"
        # Select sample
        image = Image.open(self.path[index, 3].replace(".dcm", ".jpg")).convert("RGB")
        transform_fn = self.transform if self.transform is not None else transform
        X = transform_fn(image)
        if self.load_labels:  # for the second approach with finetuning
            label = self.path[index, 5]
            return X, torch.tensor(label)
        else:
            return X  # for the first approach without labels
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
model = models.resnet50(pretrained=True)
modules = list(model.children())[:-1]
model=torch.nn.Sequential(*modules)
for p in model.parameters():
p.requires_grad = False
model.eval()
# apply modified resnet50 to data
img_embedding = np.zeros([n*2,2048])
dataloaders = DataLoader(mimicDataset(input_list), batch_size=1,num_workers=0)
for i, data in enumerate(tqdm(dataloaders)):
features_var = model(data) # same model as used with training data
features = features_var.data
img_embedding[i,:] = features.reshape(1,2048).numpy()
# concatenate both image embeddings of a study to one embedding
train_X = np.zeros([n, 2048*2])
for i in range(n):
train_X[i,:] = np.concatenate((img_embedding[i,:], img_embedding[i+1,:]))
# save feature matrix
dump(train_X, X)
##############################################################################
# Finetuning a pretrained CNN and extracting the second last layer as features
##############################################################################
# For finetuning the CNN, we use the weak labels from Chexpert
labels = {id: cat for (cat, id) in enumerate(labels_chexpert.columns[2:16])}
# initialise labels with 0
labels_chexpert["label"] = 0
labels_list = labels_chexpert.columns.to_numpy()
# iterate through labels:
# three cases: only one, non, or multiple diagnoses
for i in tqdm(range(len(labels_chexpert))):
# which labels are 1?
label_is1 = labels_chexpert.iloc[i,:] == 1.0
if (sum(label_is1)==1):
labels_chexpert.iloc[i,16] = labels_list[label_is1]
elif sum(label_is1) > 1:
labels_chexpert.iloc[i,16] = random.choice(labels_list[label_is1])
else:
labels_chexpert.iloc[i,16] = "No Finding"
# merge labels with records and reports
input_list_labels_pd = pd.merge(input_list_pd,
labels_chexpert.iloc[:,[0,1,16]],
how = "left",
on = ["study_id","subject_id"])
print("classes proportions:",
input_list_labels_pd.groupby("label").size()/len(input_list_labels_pd))
# keep in mind that the dataset is unbalanced
# Changing names to indices
for i in tqdm(range(len(input_list_labels_pd))):
input_list_labels_pd.iloc[i,5] = labels.get(input_list_labels_pd.iloc[i,5])
# convert to numpy
input_list_labels = input_list_labels_pd.to_numpy()
# finetuning
# m ... number of images used for finetuning
m = min(750,n*2)
# 80% training and 20% validation
n_train = round(m*0.8)
indices_train = random.sample(range(m),n_train)
input_train = input_list_labels[:m,:][indices_train,:]
input_validate = np.delete((input_list_labels[:m,:]),indices_train, axis = 0)
# Since the dataset is unbalanced, we use a weighted sampler
class_counts = np.zeros(num_classes)
for i in range(num_classes):
class_counts[i] = sum(input_train[:,5]==i)
weight = 1/class_counts # no class count should be 0
sample_weights = np.array([weight[t] for t in input_train[:,5]])
sample_weights = torch.from_numpy(sample_weights)
sample_weights = sample_weights.double()
sampler = torch.utils.data.WeightedRandomSampler(weights=sample_weights,
num_samples=len(sample_weights))
dataset = {"train" : mimicDataset(input_train, load_labels = True),
"val": mimicDataset(input_validate, load_labels = True)}
dataloaders = {"train": DataLoader(dataset["train"] , batch_size=4, num_workers=0, sampler = sampler),
"val": DataLoader(dataset["val"] , batch_size=4, num_workers=0 )}
dataset_sizes = {x: len(dataset[x]) for x in ["train", "val"]}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Standard fine-tuning loop with one train and one val phase per epoch.

    Adapted from the PyTorch transfer-learning tutorial (link above). Returns
    the model reloaded with the weights of the best validation-accuracy epoch.

    NOTE(review): reads the module-level globals ``dataloaders``,
    ``dataset_sizes`` and ``device`` instead of taking them as parameters.
    """
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs - 1))
        print("-" * 10)
        # Each epoch has a training and validation phase
        for phase in ["train", "val"]:
            if phase == "train":
                model.train() # Set model to training mode
            else:
                model.eval() # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == "train"):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == "train":
                        optimizer.zero_grad()
                        loss.backward()
                        optimizer.step()
                # statistics (loss.item() is the batch mean, so re-weight by batch size)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == "train":
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print("{} Loss: {:.4f} Acc: {:.4f}".format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever the validation accuracy improves
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    print("Best val Acc: {:4f}".format(best_acc), )
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
model = models.resnet50(pretrained=True)
model.train()
num_ftrs = model.fc.in_features
# set output size to 14 (number of classes)
model.fc = nn.Linear(num_ftrs, num_classes)
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
step_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
model = train_model(model, criterion, optimizer, step_lr_scheduler, num_epochs=3)
dump(model, FINETUNED_MODEL)
# delete last layer of the network
modules = list(model.children())[:-1]
model=torch.nn.Sequential(*modules)
model.eval()
for p in model.parameters():
p.requires_grad = False
# apply modified resnet50 to data
img_embedding_finetuned = np.zeros([n*2,2048])
dataloaders = DataLoader(mimicDataset(input_list_labels, load_labels = True), batch_size=1,num_workers=0)
for i, data in enumerate(tqdm(dataloaders)):
data_train, weak_labels = data
features_var = model(data_train) # same model as used with training data
features = features_var.data
img_embedding_finetuned[i,:] = features.reshape(1,2048).numpy()
# concatenate both image embeddings of a study to one embedding
train_X_finetuned = np.zeros([n, 2048*2])
for i in range(n):
train_X_finetuned[i,:] = np.concatenate((img_embedding_finetuned[i,:], img_embedding_finetuned[i+1,:]))
# save features matrix
dump(train_X_finetuned, X_FINETUNED)
##############################################################################
# Test data preprocessing
# - requires a model defined for image encoding
##############################################################################
# download Chexpert data and unzip it to the same directory
test_set_pd = pd.read_csv("CheXpert-v1.0-small/CheXpert-v1.0-small/valid.csv")
# add column with study_id
test_set_pd = test_set_pd.assign(study_id = lambda x: x['Path'].map(lambda string: "".join(string.split("/")[2:4])))
test_set = test_set_pd.to_numpy()
labels_test_list = test_set_pd.columns[5:19].to_numpy()
class chexpertDataset(Dataset):
    """Dataset over the CheXpert validation images.

    ``path`` is an array whose column 0 holds the image path relative to the
    CheXpert download directory.
    """

    def __init__(self, path, transform_fn=None):
        "initialization"
        self.path = path
        # Fix: __getitem__ used to read `self.transform`, which was never
        # assigned and raised AttributeError at runtime. An explicit pipeline
        # can now be injected; None falls back to the module-level `transform`.
        self.transform = transform_fn

    def __len__(self):
        "total number of samples"
        return len(self.path)

    def __getitem__(self, index):
        "one sample of data"
        # Select sample ("".join over a single string was a no-op and is dropped)
        image = Image.open("CheXpert-v1.0-small/" + self.path[index, 0]).convert("RGB")
        transform_fn = self.transform if self.transform is not None else transform
        X = transform_fn(image)
        return X
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# apply model from before
model.eval()
n_test = len(test_set)
img_embedding_test = np.zeros([n_test,2048])
dataloaders = DataLoader(chexpertDataset(test_set), batch_size=1,num_workers=0)
for i, data in enumerate(dataloaders):
features_var = model(data) # same model as used with training data
features = features_var.data
img_embedding_test[i,:] = features.reshape(1,2048).numpy()
# concatenate image embeddings of a study to one embedding
test_X = np.zeros([n_test, 2048*2])
j = []
for i in range(n_test-1):
if test_set[i,19] == test_set[i+1,19]: # two images of same study
test_X[i,:] = np.concatenate((img_embedding_test[i,:], img_embedding_test[i+1,:]))
j.append(i+1) # i+1 embedding is are already incuded in i-th embedding, keep indices to remove later
elif i == n_test-2: # last two entries
test_X[i,:] = np.concatenate((img_embedding_test[i,:], np.zeros(2048)))
test_X[i+1,:] = np.concatenate((img_embedding_test[i+1,:], np.zeros(2048)))
else:
test_X[i,:] = np.concatenate((img_embedding_test[i,:], np.zeros(2048)))
test_X = np.delete(test_X,j,0) # remove j rows
# extracting gold labels
ind = np.delete(range(n_test),j,0)
gold_labels_test = []
for i in ind:
label_is1 = test_set[i,5:19] == 1.0
if (sum(label_is1) != 0):
gold_labels_test.append([labels[x] for x in labels_test_list[np.where(label_is1)[0]]])
else:
gold_labels_test.append([8]) #no finding
# save test data
dump(test_X, X_TEST_FINETUNED)
dump(gold_labels_test, Y_TEST)
| 21,914 | 37.179443 | 126 | py |
knodle-develop | knodle-develop/tests/trainer/test_multi_label.py | from torch.nn import BCEWithLogitsLoss
from knodle.trainer import MajorityVoteTrainer, MajorityConfig
from tests.trainer.generic import std_trainer_input_1
def test_auto_train(std_trainer_input_1):
    """Smoke test: a multi-label MajorityVoteTrainer trains and evaluates without raising."""
    model, model_input_x, rule_matches_z, mapping_rules_labels_t, _ = std_trainer_input_1

    # 64 multi-hot label rows, matching the fixture's 64 samples
    y_labels = [[1, 0]] * 25 + [[1]] * 25 + [[0]] * 14

    trainer = MajorityVoteTrainer(
        model=model,
        mapping_rules_labels_t=mapping_rules_labels_t,
        model_input_x=model_input_x,
        rule_matches_z=rule_matches_z,
        trainer_config=MajorityConfig(
            multi_label=True, multi_label_threshold=0.7, criterion=BCEWithLogitsLoss
        ),
    )
    trainer.train()
    trainer.test(model_input_x, y_labels)

    # Reaching this point without an exception is the whole check
    assert True
| 849 | 27.333333 | 106 | py |
knodle-develop | knodle-develop/tests/trainer/generic.py | import pytest
import numpy as np
import torch
from torch.utils.data import TensorDataset
from knodle.model.logistic_regression_model import LogisticRegressionModel
@pytest.fixture
def std_trainer_input_1():
    """Synthetic majority-vote setup: 64 samples, 16 features, 6 rules, 2 classes."""
    n_samples, n_features, n_rules, n_classes = 64, 16, 6, 2

    features = torch.from_numpy(np.ones((n_samples, n_features), dtype=np.float32))
    model_input_x = TensorDataset(features)

    # Rule 0 matches only the first sample; rule 1 matches all remaining ones.
    rule_matches_z = np.zeros((n_samples, n_rules))
    rule_matches_z[0, 0] = 1
    rule_matches_z[1:, 1] = 1

    # Every rule votes for class 1.
    mapping_rules_labels_t = np.zeros((n_rules, n_classes))
    mapping_rules_labels_t[:, 1] = 1

    y_labels = TensorDataset(torch.from_numpy(np.ones((n_samples,))))

    model = LogisticRegressionModel(n_features, n_classes)

    return (
        model,
        model_input_x, rule_matches_z, mapping_rules_labels_t,
        y_labels
    )
@pytest.fixture
def std_trainer_input_2():
    """Tiny end-to-end setup: 5 train samples, 3 rules, 2 classes, plus a 2-sample test split."""
    model = LogisticRegressionModel(5, 2)
    inputs_x = TensorDataset(torch.Tensor(np.array([[1, 1, 1, 1, 1],
                                                    [2, 2, 2, 2, 2],
                                                    [3, 3, 3, 3, 3],
                                                    [6, 6, 6, 6, 6],
                                                    [7, 7, 7, 7, 7]])))
    # rules 0 and 1 vote for class 0, rule 2 for class 1
    mapping_rules_labels_t = np.array([[1, 0], [1, 0], [0, 1]])
    train_rule_matches_z = np.array([[1, 0, 0], [1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 0, 1]])
    test_dataset = TensorDataset(torch.Tensor(np.array([[4, 4, 4, 4, 4], [5, 5, 5, 5, 5]])))
    test_labels = TensorDataset(torch.Tensor(np.array([0, 1])))
    return (
        model,
        inputs_x, mapping_rules_labels_t, train_rule_matches_z,
        test_dataset, test_labels
    )
| 1,813 | 28.737705 | 92 | py |
knodle-develop | knodle-develop/tests/trainer/cleanlab/test_cl.py | from torch.nn import CrossEntropyLoss
from tests.trainer.generic import std_trainer_input_2
from knodle.trainer.cleanlab.cleanlab import CleanLabTrainer
from knodle.trainer.cleanlab.config import CleanLabConfig
def test_cleanlab_base_test(std_trainer_input_2):
    """Smoke test: CleanLabTrainer trains and evaluates without raising."""
    model, inputs_x, t_matrix, z_matrix, test_dataset, test_labels = std_trainer_input_2

    trainer = CleanLabTrainer(
        model=model,
        mapping_rules_labels_t=t_matrix,
        model_input_x=inputs_x,
        rule_matches_z=z_matrix,
        trainer_config=CleanLabConfig(
            cv_n_folds=2, criterion=CrossEntropyLoss, use_probabilistic_labels=False
        ),
    )
    trainer.train()
    clf_report, _ = trainer.test(test_dataset, test_labels)

    # Reaching this point without an exception is the whole check
    assert True
| 875 | 27.258065 | 101 | py |
knodle-develop | knodle-develop/tests/trainer/wscrossweigh/test_wscw_data_preparation.py | import torch
from torch.utils.data import TensorDataset
import pytest
import numpy as np
from knodle.trainer.wscrossweigh.data_splitting_by_rules import get_rules_sample_ids, get_samples_labels_idx_by_rule_id
@pytest.fixture(scope='session')
def get_test_data():
    """Shared fixture: 8 samples, 7 rules, 3 classes, plus the expected rule -> sample-id mapping."""
    # rules 0-1 -> class 0, rules 2-3 -> class 1, rules 4-6 -> class 2
    rule_assignments_t = np.array([[1, 0, 0],
                                   [1, 0, 0],
                                   [0, 1, 0],
                                   [0, 1, 0],
                                   [0, 0, 1],
                                   [0, 0, 1],
                                   [0, 0, 1]])
    inputs_x = TensorDataset(torch.Tensor(np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                                    [1, 1, 1, 1, 1, 1, 1, 1],
                                                    [2, 2, 2, 2, 2, 2, 2, 2],
                                                    [3, 3, 3, 3, 3, 3, 3, 3],
                                                    [4, 4, 4, 4, 4, 4, 4, 4],
                                                    [5, 5, 5, 5, 5, 5, 5, 5],
                                                    [6, 6, 6, 6, 6, 6, 6, 6],
                                                    [7, 7, 7, 7, 7, 7, 7, 7]])))
    rule_matches_z = np.array([[0, 1, 1, 0, 0, 0, 0],
                               [1, 0, 1, 1, 0, 0, 0],
                               [0, 0, 0, 0, 1, 0, 0],
                               [0, 0, 0, 0, 0, 0, 1],
                               [0, 0, 0, 0, 0, 1, 0],
                               [1, 0, 1, 0, 0, 0, 0],
                               [1, 1, 1, 1, 0, 0, 0],
                               [0, 0, 0, 1, 0, 0, 0]])
    # expected output of get_rules_sample_ids for the z matrix above
    rule2sample_id = {0: {1, 5, 6},
                      1: {0, 6},
                      2: {0, 1, 5, 6},
                      3: {1, 6, 7},
                      4: {2},  # no rel sample
                      5: {4},  # no rel sample
                      6: {3}}  # no rel sample
    return inputs_x, rule_matches_z, rule_assignments_t, rule2sample_id
@pytest.fixture(scope='session')
def get_cw_data_test(get_test_data):
    """Input + expected output for the *test* split: rules 1 and 2, no filtering, no no-match samples."""
    # no filtering, no no_match samples
    test_rules_idx = [1, 2]
    # probabilistic labels for all 8 samples
    labels = np.array([[0.5, 0.5, 0], [0.3, 0.7, 0], [0, 0, 1], [0, 0, 1],
                       [0, 0, 1], [0.5, 0.5, 0], [0.5, 0.5, 0], [0, 1, 0]])
    # [inputs, labels, rule ids, rule2sample_id, expected samples, expected labels, expected ids]
    return [[get_test_data[0],
             labels,
             test_rules_idx,
             get_test_data[3],
             torch.Tensor(np.array([[0, 0, 0, 0, 0, 0, 0, 0],
                                    [1, 1, 1, 1, 1, 1, 1, 1],
                                    [5, 5, 5, 5, 5, 5, 5, 5],
                                    [6, 6, 6, 6, 6, 6, 6, 6]])),
             np.array([[0.5, 0.5, 0], [0.3, 0.7, 0], [0.5, 0.5, 0], [0.5, 0.5, 0]]),
             np.array([0, 1, 5, 6])
             ]]
@pytest.fixture(scope='session')
def get_cw_data_train(get_test_data):
    """Input + expected output for the *train* split: rules 1, 2 filtered out, no-match samples added."""
    # rules to be filtered: 1, 2, no_match samples should be added
    test_samples_idx = [0, 6, 1, 5]
    train_rules_idx = [0, 3, 4, 5, 6]  # 1, 5, 6, 7 --> delete inters --> 7 --> + no rel --> 2, 3, 4, 7
    labels = np.array([[0.5, 0.5, 0], [0.3, 0.7, 0], [0, 0, 1], [0, 0, 1],
                       [0, 0, 1], [0.5, 0.5, 0], [0.5, 0.5, 0], [0, 1, 0]])
    # [inputs, labels, rule ids, rule2sample_id, held-out sample ids,
    #  expected samples, expected labels, expected ids]
    return [[get_test_data[0],
             labels,
             train_rules_idx,
             get_test_data[3],
             test_samples_idx,
             torch.Tensor(np.array([[2, 2, 2, 2, 2, 2, 2, 2],
                                    [3, 3, 3, 3, 3, 3, 3, 3],
                                    [4, 4, 4, 4, 4, 4, 4, 4],
                                    [7, 7, 7, 7, 7, 7, 7, 7]])),
             np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0]]),
             np.array([2, 3, 4, 7])
             ]]
def test_sample_ids_matched_rules_correspondence(get_test_data):
    """Sample ids gathered per rule must match the precomputed mapping."""
    _, rule_matches_z, _, expected_rule2sample_id = get_test_data
    assert get_rules_sample_ids(rule_matches_z) == expected_rule2sample_id
def test_get_cw_data_test(get_cw_data_test):
    """The test split must contain exactly the samples matched by the held-out rules."""
    for inputs, probs, rule_ids, rule2sample, exp_x, exp_y, exp_ids in get_cw_data_test:
        got_x, got_y, got_ids = get_samples_labels_idx_by_rule_id(
            inputs, probs, rule_ids, rule2sample
        )
        assert torch.equal(got_x.tensors[0], exp_x)
        np.testing.assert_array_equal(got_y, exp_y)
        np.testing.assert_array_equal(got_ids, exp_ids)
def test_get_cw_data_train(get_cw_data_train):
    """The training split must drop held-out samples and include the no-match ones."""
    for inputs, probs, rule_ids, rule2sample, held_out, exp_x, exp_y, exp_ids in get_cw_data_train:
        got_x, got_y, got_ids = get_samples_labels_idx_by_rule_id(
            inputs, probs, rule_ids, rule2sample, held_out
        )
        assert torch.equal(got_x.tensors[0], exp_x)
        np.testing.assert_array_equal(got_y, exp_y)
        np.testing.assert_array_equal(got_ids, exp_ids)
def test_random_check(get_cw_data_train):
    """Spot-check one random row of the split against the expected tensors."""
    for data in get_cw_data_train:
        samples, labels, ids = get_samples_labels_idx_by_rule_id(
            data[0], data[1], data[2], data[3], data[4]
        )
        row = np.random.randint(0, samples.tensors[0].shape[0])  # random row to compare

        assert torch.equal(samples.tensors[0][row, :], data[5][row, :])
        # labels may be 1-d (hard) or 2-d (probabilistic); index accordingly
        got_label = labels[row, :] if len(labels.shape) > 1 else labels[row]
        exp_label = data[6][row, :] if len(data[6].shape) > 1 else data[6][row]
        np.testing.assert_array_equal(got_label, exp_label)
        np.testing.assert_array_equal(ids[row], data[7][row])
| 5,521 | 39.903704 | 119 | py |
knodle-develop | knodle-develop/tests/trainer/snorkel/test_utils.py | import numpy as np
from scipy import sparse as ss
import torch
from torch.utils.data import TensorDataset
from knodle.trainer.snorkel.utils import (
z_t_matrix_to_snorkel_matrix,
prepare_empty_rule_matches,
add_labels_for_empty_examples
)
def test_z_t_matrix_to_snorkel_matrix():
    """Conversion must produce the same snorkel matrix for dense and sparse z/t."""
    z_rows = [
        [0, 1, 0, 0],
        [0, 0, 1, 1]
    ]
    t_rows = [
        [1, 0],
        [0, 1],
        [1, 0],
        [0, 1]
    ]
    # -1 marks "rule did not fire"; otherwise the rule's class id
    snorkel_gold = np.array([
        [-1, 1, -1, -1],
        [-1, -1, 0, 1]
    ])
    for to_matrix in (np.array, ss.csr_matrix):
        snorkel_test = z_t_matrix_to_snorkel_matrix(to_matrix(z_rows), to_matrix(t_rows))
        np.testing.assert_equal(snorkel_gold, snorkel_test)
def test_label_model_data():
    """Rows of z without any rule match must be masked out."""
    n_samples, n_rules = 5, 6
    rule_matches_z = np.ones((n_samples, n_rules))
    rule_matches_z[[1, 4]] = 0  # samples 1 and 4 have no matches

    non_zero_mask, reduced_rule_matches = prepare_empty_rule_matches(rule_matches_z)

    np.testing.assert_equal(non_zero_mask, np.array([True, False, True, True, False]))
    np.testing.assert_equal(reduced_rule_matches, np.ones((3, n_rules)))
def test_other_class_labels():
    """Non-empty rows get a zero column appended; empty rows get probability 1 on the other class."""
    label_probs_gen = np.array([
        [0.3, 0.6, 0.0, 0.1],
        [0.2, 0.2, 0.2, 0.4],
        [1.0, 0.0, 0.0, 0.0]
    ])
    output_classes, other_class_id = 5, 4
    other_row = [0.0, 0.0, 0.0, 0.0, 1.0]
    padded = [list(row) + [0.0] for row in label_probs_gen.tolist()]

    # Case 1: no empty rows -- only the extra class column is appended
    non_zero_mask = np.array([True, True, True])
    label_probs = add_labels_for_empty_examples(
        label_probs_gen, non_zero_mask, output_classes, other_class_id)
    np.testing.assert_equal(label_probs, np.array(padded))

    # Case 2: empty rows receive full probability on the other class
    non_zero_mask = np.array([True, False, False, True, True])
    expected_probs = np.array([padded[0], other_row, other_row, padded[1], padded[2]])
    label_probs = add_labels_for_empty_examples(
        label_probs_gen, non_zero_mask, output_classes, other_class_id)
    np.testing.assert_equal(label_probs, expected_probs)
| 2,626 | 24.019048 | 111 | py |
knodle-develop | knodle-develop/tests/transformation/test_filter.py | import numpy as np
import torch
from torch.utils.data import TensorDataset
from knodle.transformation.filter import filter_empty_probabilities, filter_probability_threshold
def test_filter_empty_probabilities():
    """Samples whose class probabilities are all zero must be dropped from the dataset."""
    ids = np.ones((3, 4))
    ids[0, 0] = 0
    mask = np.ones((3, 4))
    mask[1, 1] = 0
    probs = np.array([[0.5, 0.5], [0.3, 0.7], [0.0, 0.0]])
    dataset = TensorDataset(torch.from_numpy(ids), torch.from_numpy(mask))

    filtered_dataset, filtered_probs = filter_empty_probabilities(dataset, probs)

    # expected: only the first two samples survive (the third has all-zero probabilities)
    expected_ids = np.ones((2, 4))
    expected_ids[0, 0] = 0
    expected_mask = np.ones((2, 4))
    expected_mask[1, 1] = 0
    expected_probs = np.array([[0.5, 0.5], [0.3, 0.7]])

    assert np.array_equal(filtered_dataset.tensors[0].detach().numpy(), expected_ids)
    assert np.array_equal(filtered_dataset.tensors[1].detach().numpy(), expected_mask)
    assert np.array_equal(filtered_probs, expected_probs)
def test_filter_probability_threshold():
    """Only samples whose best class probability reaches the threshold survive."""
    ids = np.ones((3, 4))
    ids[0, 0] = 0
    mask = np.ones((3, 4))
    mask[1, 1] = 0
    probs = np.array([[0.5, 0.5], [0.3, 0.7], [0.0, 0.0]])
    dataset = TensorDataset(torch.from_numpy(ids), torch.from_numpy(mask))

    filtered_dataset, filtered_probs = filter_probability_threshold(dataset, probs, probability_threshold=0.6)

    # only the second sample (max probability 0.7 >= 0.6) remains
    expected_ids = np.ones((1, 4))
    expected_mask = np.ones((1, 4))
    expected_mask[0, 1] = 0
    expected_probs = np.array([[0.3, 0.7]])

    assert np.array_equal(filtered_dataset.tensors[0].detach().numpy(), expected_ids)
    assert np.array_equal(filtered_dataset.tensors[1].detach().numpy(), expected_mask)
    assert np.array_equal(filtered_probs, expected_probs)
| 1,837 | 28.174603 | 118 | py |
knodle-develop | knodle-develop/tests/transformation/generic.py | import pytest
import torch
from torch.utils.data import TensorDataset
import numpy as np
@pytest.fixture
def filter_input():
    """A tiny TensorDataset plus class probabilities (last row all-zero) for filter tests."""
    ids = np.ones((3, 4))
    ids[0, 0] = 0
    mask = np.ones((3, 4))
    mask[1, 1] = 0
    probs = np.array([[0.5, 0.5], [0.3, 0.7], [0.0, 0.0]])
    dataset = TensorDataset(torch.from_numpy(ids), torch.from_numpy(mask))
    return dataset, probs
@pytest.fixture
def majority_input():
    """Small z/t matrices with the expected majority-vote probabilities and labels."""
    z = np.array([
        [1, 0, 1, 0],
        [1, 1, 1, 1],
        [0, 1, 0, 0],
        [0, 0, 0, 0],
    ], dtype=np.float64)
    t = np.array([
        [1, 0],
        [0, 1],
        [0, 1],
        [0, 1],
    ], dtype=np.float64)
    gold_probs = np.array([
        [0.5, 0.5],
        [0.25, 0.75],
        [0, 1],
        [0, 0]
    ])
    # -1 marks ties and rows without any rule match
    gold_labels = np.array([-1, 1, 1, -1])
    return z, t, gold_probs, gold_labels
knodle-develop | knodle-develop/tests/transformation/torch_input.py | import numpy as np
from numpy.testing import assert_array_equal
from torch import Tensor, equal
from torch.utils.data import TensorDataset
from knodle.transformation.torch_input import input_labels_to_tensordataset
def test_input_labels_to_tensordataset():
    """The labels must be appended as the last tensor of the combined dataset."""
    features = TensorDataset(Tensor(np.ones((4, 4))), Tensor(np.ones((4, 3))))
    labels = np.ones((4,))
    combined = input_labels_to_tensordataset(features, labels)
    assert len(combined.tensors) == 3
    assert_array_equal(combined.tensors[-1].cpu().detach().numpy(), labels)
knodle-develop | knodle-develop/knodle/trainer/auto_trainer.py | from typing import Callable
import numpy as np
from torch.utils.data import TensorDataset
from knodle.trainer.trainer import Trainer
class AutoTrainer:
    """Factory wrapper that instantiates and delegates to a registered Trainer.

    Trainer classes register themselves under a string key via the ``register``
    class decorator; ``AutoTrainer(name, ...)`` then builds the matching trainer
    and forwards ``train``/``test`` calls to it.
    See https://medium.com/@geoffreykoh/implementing-the-factory-
    pattern-via-dynamic-registry-and-python-decorators-479fc1537bbe
    """

    # Internal registry mapping trainer names to trainer classes.
    registry = {}

    def __init__(self, name, **kwargs):
        self.trainer = self.create_trainer(name, **kwargs)

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """Delegate training to the wrapped trainer."""
        return self.trainer.train(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)

    def test(self, test_features: TensorDataset, test_labels: TensorDataset):
        """Delegate evaluation to the wrapped trainer."""
        return self.trainer.test(test_features, test_labels)

    @classmethod
    def create_trainer(cls, name: str, **kwargs) -> Trainer:
        """Look up *name* in the registry and instantiate the trainer with **kwargs."""
        return cls.registry[name](**kwargs)

    @classmethod
    def register(cls, name: str) -> Callable:
        """Class decorator registering a Trainer subclass under *name*."""
        def add_to_registry(wrapped_class: Trainer) -> Callable:
            cls.registry[name] = wrapped_class
            return wrapped_class
        return add_to_registry
| 1,469 | 30.956522 | 102 | py |
knodle-develop | knodle-develop/knodle/trainer/config.py | import pathlib
from typing import Callable, Dict
import os
import logging
from snorkel.classification import cross_entropy_with_probs
import torch
from torch import Tensor
from torch.optim import SGD
from torch.optim.optimizer import Optimizer
from knodle.trainer.utils.utils import check_and_return_device, set_seed
logger = logging.getLogger(__name__)
class TrainerConfig:
    """Minimal configuration container shared by all Knodle trainer classes."""

    def __init__(
            self,
            criterion: Callable[[Tensor, Tensor], float] = cross_entropy_with_probs,
            batch_size: int = 32,
            optimizer: Optimizer = None,
            lr: float = 0.01,
            output_classes: int = 2,
            class_weights: Tensor = None,
            epochs: int = 3,
            seed: int = None,
            grad_clipping: float = None,
            device: str = None,
            caching_folder: str = os.path.join(pathlib.Path().absolute(), "cache"),
            caching_suffix: str = "",
            saved_models_dir: str = None,
            multi_label: bool = False,
            multi_label_threshold: float = None
    ):
        """
        A default and minimum sufficient configuration of a Trainer instance.
        :param criterion: a usual PyTorch criterion; computes a gradient according to a given loss function
        :param batch_size: a usual PyTorch batch_size; the number of training examples utilized in one training iteration
        :param optimizer: a usual PyTorch optimizer class; if None, SGD is used
        :param lr: a usual PyTorch learning rate; tuning parameter in an optimization algorithm that determines the step
        size at each iteration while moving toward a minimum of a loss function
        :param output_classes: the number of classes used in classification
        :param class_weights: introduce the weight of each class. By default, all classes have the same weights 1.0.
        :param epochs: the number of epochs the classification model will be trained; must be positive
        :param seed: the desired seed for generating random numbers
        :param grad_clipping: if set (a number), the gradient norm of the model parameters will be clipped to this value
        :param device: what device the model will be trained on (CPU/CUDA); auto-detected when None
        :param caching_folder: a path to the folder where cache will be saved (default: root/cache)
        :param caching_suffix: a specific index that could be added to the caching file name (e.g. in WSCrossWeigh for
        sample weights calculated in different iterations and stored in different files.)
        :param saved_models_dir: a path to the folder where trained models will be stored. If None, the caching folder
        is used instead.
        :param multi_label: a boolean value whether the classification is multi-label
        :param multi_label_threshold: probability threshold above which a label is predicted in the multi-label
        setting (defaults to 0.5 when None)
        """
        self.seed = seed
        if self.seed is not None:
            set_seed(seed)
        self.caching_suffix = caching_suffix
        self.caching_folder = caching_folder
        os.makedirs(self.caching_folder, exist_ok=True)
        logger.info(f"The cache will be saved to {self.caching_folder} folder")
        # create directory where saved models will be stored
        if saved_models_dir:
            self.saved_models_dir = saved_models_dir
            os.makedirs(self.saved_models_dir, exist_ok=True)
        else:
            # no dedicated directory given: fall back to the caching folder
            self.saved_models_dir = caching_folder
        logger.info(f"The trained models will be saved to the {self.saved_models_dir} directory.")
        self.criterion = criterion
        self.lr = lr
        self.batch_size = batch_size
        self.output_classes = output_classes
        self.grad_clipping = grad_clipping
        self.device = torch.device(device) if device is not None else check_and_return_device()
        logger.info(f"Model will be trained on {self.device}")
        if epochs <= 0:
            raise ValueError("Epochs needs to be positive")
        self.epochs = epochs
        if optimizer is None:
            logger.info(f"Defaulting to SGD optimizer as none specified in the config.")
            self.optimizer = SGD
        else:
            self.optimizer = optimizer
        if class_weights is None:
            # every class gets equal weight by default
            self.class_weights = torch.tensor([1.0] * self.output_classes)
        else:
            if len(class_weights) != self.output_classes:
                raise Exception("Wrong class sample_weights initialisation!")
            self.class_weights = class_weights
        self.multi_label = multi_label
        if multi_label_threshold is None:
            self.multi_label_threshold = 0.5
        else:
            self.multi_label_threshold = multi_label_threshold
class BaseTrainerConfig(TrainerConfig):
    """Trainer configuration extended with the handling of samples without any matching rule."""

    def __init__(
            self,
            filter_non_labelled: bool = True,
            other_class_id: int = None,
            evaluate_with_other_class: bool = False,
            ids2labels: Dict = None,
            max_rules: int = None,
            min_coverage: float = None,
            drop_rules: bool = False,
            **kwargs
    ):
        """
        Extends TrainerConfig with parameters for data samples that no rule matched
        (either filter them out OR assign them to a dedicated "other" class, and optionally
        evaluate with respect to that class).
        :param filter_non_labelled: if True, the samples with no rule matched will be filtered out from the dataset
        :param other_class_id: id of the negative class; if set, the samples with no rule matched will be assigned to it
        :param evaluate_with_other_class: if set to True, the evaluation will be done with respect to the negative class
        (for more details please see knodle/evaluation/other_class_metrics.py file)
        :param ids2labels: dictionary {label id: label}, which is needed to perform evaluation with the negative class
        :param max_rules: forwarded to rule reduction (see knodle.transformation.rule_reduction)
        :param min_coverage: forwarded to rule reduction (see knodle.transformation.rule_reduction)
        :param drop_rules: forwarded to rule reduction (see knodle.transformation.rule_reduction)
        """
        super().__init__(**kwargs)
        self.filter_non_labelled = filter_non_labelled
        self.other_class_id = other_class_id
        self.evaluate_with_other_class = evaluate_with_other_class
        self.ids2labels = ids2labels
        # filtering and the "other" class are mutually exclusive strategies
        if self.filter_non_labelled and self.other_class_id is not None:
            raise ValueError("You can either filter samples with no weak labels or add them to 'other_class_id'")
        logger.debug(f"{self.evaluate_with_other_class} and {self.ids2labels}")
        if self.evaluate_with_other_class and self.ids2labels is None:
            # without the id->label mapping the other_class evaluation cannot run;
            # fall back to the standard sklearn metrics instead
            logging.warning(
                "Labels to label ids correspondence is needed to make other_class specific evaluation. Since it is "
                "absent now, the standard sklearn metrics will be calculated instead."
            )
            self.evaluate_with_other_class = False
        self.max_rules = max_rules
        self.min_coverage = min_coverage
        self.drop_rules = drop_rules
| 6,907 | 44.447368 | 121 | py |
knodle-develop | knodle-develop/knodle/trainer/multi_trainer.py | import copy
import logging
from typing import Callable, List, Dict
import numpy as np
from torch.utils.data import TensorDataset
from knodle.trainer import AutoTrainer
from knodle.trainer.utils import log_section
logger = logging.getLogger(__name__)
class MultiTrainer:
    """Builds several registered trainers (via AutoTrainer) and runs them on the same data.

    See https://medium.com/@geoffreykoh/implementing-the-factory-
    pattern-via-dynamic-registry-and-python-decorators-479fc1537bbe
    """

    registry = {}
    """ Internal registry for available trainers """

    def __init__(self, name: List, **kwargs):
        self.trainer, configs = [], []
        if "trainer_config" in kwargs:
            # one config per trainer name; deep-copied so trainers do not share mutable state
            configs = copy.deepcopy(kwargs["trainer_config"])
        for i, n in enumerate(name):
            if configs:
                kwargs["trainer_config"] = configs[i]
            self.trainer.append(copy.deepcopy(AutoTrainer(n, **kwargs).trainer))

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """Train every wrapped trainer in turn.

        BUGFIX: the optional training/dev data is now forwarded to each trainer
        (matching AutoTrainer.train); previously the arguments were accepted but
        silently ignored, so per-call data overrides never took effect.
        """
        for trainer in self.trainer:
            trainer_name = type(trainer).__name__
            log_section(f"Training {trainer_name}", logger)
            trainer.train(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)

    def test(self, test_features: TensorDataset, test_labels: TensorDataset) -> Dict:
        """Evaluate every wrapped trainer; returns {trainer class name: metrics}."""
        metrics = {}
        for trainer in self.trainer:
            metrics[type(trainer).__name__] = trainer.test(test_features, test_labels)
        return metrics
| 1,699 | 33.693878 | 92 | py |
knodle-develop | knodle-develop/knodle/trainer/trainer.py | import logging
import os
from abc import ABC, abstractmethod
from typing import Union, Dict, Tuple, List
import numpy as np
import skorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report
from torch import Tensor
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.utils.data import TensorDataset, DataLoader
from tqdm.auto import tqdm
from knodle.evaluation.multi_label_metrics import evaluate_multi_label, encode_to_binary
from knodle.evaluation.other_class_metrics import classification_report_other_class
from knodle.evaluation.plotting import draw_loss_accuracy_plot
from knodle.trainer.config import TrainerConfig, BaseTrainerConfig
from knodle.trainer.utils.checks import check_other_class_id
from knodle.trainer.utils.utils import log_section, accuracy_of_probs
from knodle.transformation.rule_reduction import reduce_rule_matches
from knodle.transformation.torch_input import input_labels_to_tensordataset, dataset_to_numpy_input
logger = logging.getLogger(__name__)
class Trainer(ABC):
    """Abstract base class every Knodle trainer derives from."""

    def __init__(
            self,
            model: Module,
            mapping_rules_labels_t: np.ndarray,
            model_input_x: TensorDataset,
            rule_matches_z: np.ndarray,
            dev_model_input_x: TensorDataset = None,
            dev_gold_labels_y: TensorDataset = None,
            trainer_config: TrainerConfig = None,
    ):
        """
        Constructor for each Trainer.
            Args:
                model: PyTorch model which will be used for final classification.
                mapping_rules_labels_t: Mapping of rules to labels, binary encoded. Shape: rules x classes
                model_input_x: Input tensors. These tensors will be fed to the provided model.
                rule_matches_z: Binary encoded array of which rules matched. Shape: instances x rules
                dev_model_input_x: optional development input tensors.
                dev_gold_labels_y: optional development gold labels.
                trainer_config: Config for different parameters like loss function, optimizer, batch size.
        """
        self.model = model
        self.mapping_rules_labels_t = mapping_rules_labels_t
        self.model_input_x = model_input_x
        self.rule_matches_z = rule_matches_z
        self.dev_model_input_x = dev_model_input_x
        self.dev_gold_labels_y = dev_gold_labels_y
        if trainer_config is None:
            # BUGFIX: TrainerConfig takes no model argument (its first positional
            # parameter is the criterion); passing `model` here handed the network
            # over as the loss function. Use the default config instead.
            self.trainer_config = TrainerConfig()
        else:
            self.trainer_config = trainer_config

    @abstractmethod
    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """Train the classifier; subclasses may override the stored training data via the arguments."""
        pass

    @abstractmethod
    def test(self, test_features: TensorDataset, test_labels: TensorDataset):
        """Evaluate the trained classifier on the given test data."""
        pass

    def initialise_optimizer(self):
        """Instantiate the configured optimizer class for the model's parameters."""
        try:
            return self.trainer_config.optimizer(params=self.model.parameters(), lr=self.trainer_config.lr)
        except TypeError:
            logger.info("Wrong optimizer parameters. Optimizer should belong to torch.optim class or be PyTorch "
                        "compatible.")
class BaseTrainer(Trainer):
    """Shared functionality for concrete trainers: default config, data loading,
    the training and prediction loops, loss computation, and evaluation."""

    def __init__(
            self,
            model: Module,
            mapping_rules_labels_t: np.ndarray,
            model_input_x: TensorDataset,
            rule_matches_z: np.ndarray,
            **kwargs):
        # fall back to the default config if the caller did not provide one
        if kwargs.get("trainer_config", None) is None:
            kwargs["trainer_config"] = BaseTrainerConfig()
        super().__init__(model, mapping_rules_labels_t, model_input_x, rule_matches_z, **kwargs)
        # validate other_class_id (if set) against the t matrix
        check_other_class_id(self.trainer_config, self.mapping_rules_labels_t)

    def _load_train_params(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """Override the stored train/dev data with per-call arguments, when both of a pair are given."""
        if model_input_x is not None and rule_matches_z is not None:
            self.model_input_x = model_input_x
            self.rule_matches_z = rule_matches_z
        if dev_model_input_x is not None and dev_gold_labels_y is not None:
            self.dev_model_input_x = dev_model_input_x
            self.dev_gold_labels_y = dev_gold_labels_y

    def _apply_rule_reduction(self):
        """Reduce the rule set (z and t matrices) according to the config's
        drop_rules / max_rules / min_coverage settings."""
        reduced_dict = reduce_rule_matches(
            rule_matches_z=self.rule_matches_z, mapping_rules_labels_t=self.mapping_rules_labels_t,
            drop_rules=self.trainer_config.drop_rules, max_rules=self.trainer_config.max_rules,
            min_coverage=self.trainer_config.min_coverage)
        self.rule_matches_z = reduced_dict["train_rule_matches_z"]
        self.mapping_rules_labels_t = reduced_dict["mapping_rules_labels_t"]

    def _make_dataloader(
            self, dataset: TensorDataset, shuffle: bool = True
    ) -> DataLoader:
        """Wrap a dataset in a DataLoader with the configured batch size."""
        dataloader = DataLoader(
            dataset,
            batch_size=self.trainer_config.batch_size,
            drop_last=False,
            shuffle=shuffle,
        )
        return dataloader

    def _load_batch(self, batch):
        """Move a batch to the configured device; the last tensor is treated as the labels."""
        input_batch = [inp.to(self.trainer_config.device) for inp in batch[0: -1]]
        label_batch = batch[-1].to(self.trainer_config.device)
        return input_batch, label_batch

    def _train_loop(
            self, feature_label_dataloader: DataLoader, use_sample_weights: bool = False, draw_plot: bool = False
    ):
        """Run the full training loop over the configured number of epochs.

        When ``use_sample_weights`` is True, the second-to-last tensor of each batch
        is interpreted as per-sample weights. Dev loss/accuracy are tracked whenever
        dev data is set; models are checkpointed per epoch to saved_models_dir.
        """
        log_section("Training starts", logger)
        # multi-label training requires a binary cross-entropy criterion
        if self.trainer_config.multi_label and self.trainer_config.criterion not in [nn.BCELoss, nn.BCEWithLogitsLoss]:
            raise ValueError(
                "Criterion for multi-label classification should be Binary Cross-Entropy "
                "(BCELoss or BCEWithLogitsLoss in Pytorch.) "
            )
        self.model.to(self.trainer_config.device)
        self.model.train()
        train_losses, train_acc = [], []
        if self.dev_model_input_x is not None:
            dev_losses, dev_acc = [], []
        for current_epoch in range(self.trainer_config.epochs):
            logger.info("Epoch: {}".format(current_epoch))
            epoch_loss, epoch_acc, steps = 0.0, 0.0, 0
            for batch in tqdm(feature_label_dataloader):
                input_batch, label_batch = self._load_batch(batch)
                steps += 1
                if use_sample_weights:
                    # split off the per-sample weights appended to the inputs
                    input_batch, sample_weights = input_batch[:-1], input_batch[-1]
                # forward pass
                self.trainer_config.optimizer.zero_grad()
                outputs = self.model(*input_batch)
                # some models (e.g. transformers) return a tuple whose first element is the logits
                if isinstance(outputs, torch.Tensor):
                    logits = outputs
                else:
                    logits = outputs[0]
                if use_sample_weights:
                    loss = self.calculate_loss_with_sample_weights(logits, label_batch, sample_weights)
                else:
                    loss = self.calculate_loss(logits, label_batch)
                # backward pass
                loss.backward()
                if isinstance(self.trainer_config.grad_clipping, (int, float)):
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.trainer_config.grad_clipping)
                self.trainer_config.optimizer.step()
                acc = accuracy_of_probs(logits, label_batch)
                epoch_loss += loss.detach().item()
                epoch_acc += acc.item()
                # print epoch loss and accuracy after each 10% of training is done
                try:
                    if steps % (int(round(len(feature_label_dataloader) / 10))) == 0:
                        logger.info(f"Train loss: {epoch_loss / steps:.3f}, Train accuracy: {epoch_acc / steps:.3f}")
                except ZeroDivisionError:
                    # fewer than ~10 batches: skip the intermediate progress log
                    continue
            avg_loss = epoch_loss / len(feature_label_dataloader)
            avg_acc = epoch_acc / len(feature_label_dataloader)
            train_losses.append(avg_loss)
            train_acc.append(avg_acc)
            logger.info("Epoch train loss: {}".format(avg_loss))
            logger.info("Epoch train accuracy: {}".format(avg_acc))
            if self.dev_model_input_x is not None:
                dev_clf_report, dev_loss = self.test(
                    self.dev_model_input_x, self.dev_gold_labels_y, loss_calculation=True
                )
                dev_losses.append(dev_loss)
                if dev_clf_report["accuracy"]:
                    dev_acc.append(dev_clf_report["accuracy"])
                logger.info("Epoch development accuracy: {}".format(dev_clf_report["accuracy"]))
            # saving model
            if self.trainer_config.saved_models_dir is not None:
                model_path = os.path.join(
                    self.trainer_config.saved_models_dir,
                    f"model_state_dict_epoch_{current_epoch}.pt"
                )
                # checkpoint on CPU, then move the model back to the training device
                torch.save(self.model.cpu().state_dict(), model_path)
                self.model.to(self.trainer_config.device)
        log_section("Training done", logger)
        if draw_plot:
            if self.dev_model_input_x:
                draw_loss_accuracy_plot(
                    {"train loss": train_losses, "train acc": train_acc, "dev loss": dev_losses, "dev acc": dev_acc}
                )
            else:
                draw_loss_accuracy_plot({"train loss": train_losses, "train acc": train_acc})
        self.model.eval()

    def _prediction_loop(
            self, feature_label_dataloader: DataLoader, loss_calculation: bool = False
    ) -> Tuple[np.ndarray, float]:
        """Predict labels for all batches; optionally accumulate the loss.

        Returns the concatenated argmax predictions and the (summed) loss
        (0.0 when loss_calculation is False).
        """
        self.model.to(self.trainer_config.device)
        self.model.eval()
        # NOTE(review): dev_acc and i are assigned but never used in this method
        predictions_list, label_list = [], []
        dev_loss, dev_acc = 0.0, 0.0
        i = 0
        # Loop over predictions
        with torch.no_grad():
            for batch in tqdm(feature_label_dataloader):
                input_batch, label_batch = self._load_batch(batch)
                # forward pass
                self.trainer_config.optimizer.zero_grad()
                outputs = self.model(*input_batch)
                if isinstance(outputs, torch.Tensor):
                    prediction_vals = outputs
                else:
                    prediction_vals = outputs[0]
                if loss_calculation:
                    dev_loss += self.calculate_loss(prediction_vals, label_batch.long())
                # add predictions and labels
                predictions = np.argmax(prediction_vals.detach().cpu().numpy(), axis=-1)
                predictions_list.append(predictions)
                label_list.append(label_batch.detach().cpu().numpy())
        predictions = np.squeeze(np.hstack(predictions_list))
        return predictions, dev_loss

    def test(
            self, features_dataset: TensorDataset, labels: Union[TensorDataset, List], loss_calculation: bool = False
    ) -> Tuple[Dict, Union[float, None]]:
        """Evaluate the model on the given data and return (classification report, avg loss or None).

        Supports multi-label evaluation, evaluation with respect to the "other" class,
        and the standard sklearn classification report.
        """
        if type(labels) is list:
            # multi-label case: raw label lists are binarized first
            gold_labels = encode_to_binary(labels, self.trainer_config.output_classes)
        else:
            gold_labels = labels.tensors[0].cpu().numpy()

        if isinstance(self.model, skorch.NeuralNetClassifier):
            # when the pytorch model is wrapped as a sklearn model (e.g. cleanlab)
            # NOTE(review): in this branch dev_loss and feature_label_dataloader are never
            # assigned, so calling test(..., loss_calculation=True) on a skorch model
            # would raise NameError below — confirm callers never do that.
            predictions = self.model.predict(dataset_to_numpy_input(features_dataset))
        else:
            feature_label_dataset = input_labels_to_tensordataset(features_dataset, gold_labels)
            feature_label_dataloader = self._make_dataloader(feature_label_dataset, shuffle=False)
            predictions, dev_loss = self._prediction_loop(feature_label_dataloader, loss_calculation)

        if self.trainer_config.multi_label:
            clf_report = evaluate_multi_label(
                y_true=gold_labels, y_pred=predictions, threshold=self.trainer_config.multi_label_threshold,
                num_classes=self.trainer_config.output_classes
            )
        elif self.trainer_config.evaluate_with_other_class:
            clf_report = classification_report_other_class(
                y_true=gold_labels, y_pred=predictions, ids2labels=self.trainer_config.ids2labels,
                other_class_id=self.trainer_config.other_class_id
            )
        else:
            clf_report = classification_report(y_true=gold_labels, y_pred=predictions, output_dict=True)

        if loss_calculation:
            return clf_report, dev_loss / len(feature_label_dataloader)
        else:
            return clf_report, None

    def calculate_loss_with_sample_weights(self, logits: Tensor, gold_labels: Tensor, sample_weights: Tensor) -> Tensor:
        """Compute the per-sample loss, scale it by sample_weights, and return the mean.

        Handles both criterion classes (subclasses of torch.nn._Loss, instantiated with
        reduction="none") and plain criterion functions that accept weight/reduction kwargs.
        """
        if isinstance(self.trainer_config.criterion, type) and issubclass(self.trainer_config.criterion, _Loss):
            criterion = self.trainer_config.criterion(
                weight=self.trainer_config.class_weights, reduction="none"
            ).cuda() if self.trainer_config.device == torch.device("cuda") else self.trainer_config.criterion(
                weight=self.trainer_config.class_weights, reduction="none"
            )
            loss_no_reduction = criterion(logits, gold_labels)
        else:
            loss_no_reduction = self.trainer_config.criterion(
                logits, gold_labels, weight=self.trainer_config.class_weights, reduction="none"
            )
        return (loss_no_reduction * sample_weights).mean()

    def calculate_loss(self, logits: Tensor, gold_labels: Tensor) -> Tensor:
        """Compute the loss with the configured criterion and class weights.

        For function-style criteria, 1-D integer labels are one-hot encoded first
        (as expected by e.g. cross_entropy_with_probs).
        """
        if isinstance(self.trainer_config.criterion, type) and issubclass(self.trainer_config.criterion, _Loss):
            criterion = self.trainer_config.criterion(weight=self.trainer_config.class_weights).cuda() \
                if self.trainer_config.device == torch.device("cuda") \
                else self.trainer_config.criterion(weight=self.trainer_config.class_weights)
            return criterion(logits, gold_labels)
        else:
            if len(gold_labels.shape) == 1:
                gold_labels = torch.nn.functional.one_hot(
                    gold_labels.to(torch.int64), num_classes=self.trainer_config.output_classes
                )
            return self.trainer_config.criterion(logits, gold_labels, weight=self.trainer_config.class_weights)
| 14,273 | 42.386018 | 119 | py |
knodle-develop | knodle-develop/knodle/trainer/baseline/majority.py | import logging
import numpy as np
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.transformation.majority import input_to_majority_vote_input
from knodle.transformation.torch_input import input_labels_to_tensordataset
from knodle.trainer.trainer import BaseTrainer
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.baseline.config import MajorityConfig
from knodle.transformation.filter import filter_probability_threshold
logger = logging.getLogger(__name__)
@AutoTrainer.register('majority')
class MajorityVoteTrainer(BaseTrainer):
    """
    Baseline trainer for weakly supervised data: noisy labels are derived from
    the rule matches via a simple majority vote, and the provided model is then
    trained on them.
    """

    def __init__(self, **kwargs):
        # provide a sensible default config when the caller supplies none
        if kwargs.get("trainer_config") is None:
            kwargs["trainer_config"] = MajorityConfig(optimizer=SGD, lr=0.001)
        super().__init__(**kwargs)

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """
        Derive majority-vote labels from the z/t matrices and train the model on them.
        """
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()

        # initialise optimizer
        self.trainer_config.optimizer = self.initialise_optimizer()

        cfg = self.trainer_config
        # compute noisy labels; depending on the config, uncovered samples are
        # filtered out, thresholded, or mapped to the "other" class
        self.model_input_x, noisy_labels, self.rule_matches_z = input_to_majority_vote_input(
            self.rule_matches_z,
            self.mapping_rules_labels_t,
            self.model_input_x,
            use_probabilistic_labels=cfg.use_probabilistic_labels,
            filter_non_labelled=cfg.filter_non_labelled,
            probability_threshold=cfg.probability_threshold,
            other_class_id=cfg.other_class_id,
            multi_label=cfg.multi_label,
            multi_label_threshold=cfg.multi_label_threshold
        )

        train_loader = self._make_dataloader(
            input_labels_to_tensordataset(self.model_input_x, noisy_labels)
        )
        self._train_loop(train_loader)
| 2,436 | 37.68254 | 100 | py |
knodle-develop | knodle-develop/knodle/trainer/cleanlab/cleanlab.py | import logging
import numpy as np
from cleanlab.classification import LearningWithNoisyLabels
from skorch import NeuralNetClassifier
from torch.utils.data import TensorDataset
from knodle.trainer import MajorityVoteTrainer
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.cleanlab.config import CleanLabConfig
from knodle.trainer.cleanlab.latent_estimation import estimate_cv_predicted_probabilities_split_by_rules, \
estimate_cv_predicted_probabilities_split_by_signatures
from knodle.transformation.majority import input_to_majority_vote_input
from knodle.transformation.torch_input import dataset_to_numpy_input
logger = logging.getLogger(__name__)
@AutoTrainer.register('cleanlab')
class CleanLabTrainer(MajorityVoteTrainer):
    """Trainer that denoises majority-vote labels with Cleanlab before fitting the model."""

    def __init__(self, **kwargs):
        if kwargs.get("trainer_config", None) is None:
            kwargs["trainer_config"] = CleanLabConfig()
        super().__init__(**kwargs)

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ) -> None:
        """Compute majority-vote labels, estimate out-of-sample probabilities (psx)
        and let Cleanlab prune noisy samples while fitting the wrapped model."""
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()

        if dev_model_input_x is not None and dev_gold_labels_y is not None:
            logger.info("Validation data is not used during Cleanlab training")

        cfg = self.trainer_config

        # Cleanlab accepts only sklearn-compliant classifiers, so wrap the PyTorch model
        self.model = NeuralNetClassifier(
            self.model,
            criterion=cfg.criterion,
            optimizer=cfg.optimizer,
            lr=cfg.lr,
            max_epochs=cfg.epochs,
            batch_size=cfg.batch_size,
            train_split=None,
            callbacks="disable",
            device=cfg.device
        )

        # calculate labels based on t and z; perform additional filtering if applicable
        self.model_input_x, noisy_y_train, self.rule_matches_z = input_to_majority_vote_input(
            self.rule_matches_z, self.mapping_rules_labels_t, self.model_input_x,
            use_probabilistic_labels=cfg.use_probabilistic_labels,
            filter_non_labelled=cfg.filter_non_labelled,
            other_class_id=cfg.other_class_id,
            multi_label=cfg.multi_label,
            multi_label_threshold=cfg.multi_label_threshold
        )

        # turn input to the CL-compatible format
        model_input_x_numpy = dataset_to_numpy_input(self.model_input_x)

        # pick the psx estimator according to the configured fold-splitting strategy
        method = cfg.psx_calculation_method
        if method == "rules":
            psx_estimator = estimate_cv_predicted_probabilities_split_by_rules
        elif method == "signatures":
            psx_estimator = estimate_cv_predicted_probabilities_split_by_signatures
        elif method == "random":
            # leave psx computation to Cleanlab itself (random folder splitting)
            psx_estimator = None
        else:
            raise ValueError("Unknown psx calculation method.")

        psx = None
        if psx_estimator is not None:
            psx = psx_estimator(
                self.model_input_x, noisy_y_train, self.rule_matches_z, self.model, cfg.output_classes,
                seed=cfg.seed, cv_n_folds=cfg.cv_n_folds
            )

        # CL denoising and training
        rp = LearningWithNoisyLabels(
            clf=self.model, seed=cfg.seed,
            cv_n_folds=cfg.cv_n_folds,
            prune_method=cfg.prune_method,
            converge_latent_estimates=cfg.converge_latent_estimates,
            pulearning=cfg.pulearning,
            n_jobs=cfg.n_jobs
        )
        rp.fit(model_input_x_numpy, noisy_y_train, psx=psx)
        logging.info("Training is done.")
| 4,376 | 44.59375 | 119 | py |
knodle-develop | knodle-develop/knodle/trainer/wscrossweigh/wscrossweigh_weights_calculator.py | import copy
import logging
import os
import torch
from torch.utils.data import DataLoader
from joblib import dump
from knodle.trainer.baseline.majority import MajorityVoteTrainer
from knodle.trainer.utils import log_section
from knodle.trainer.wscrossweigh.data_splitting_by_rules import k_folds_splitting_by_rules
from knodle.transformation.filter import filter_empty_probabilities
from knodle.transformation.majority import z_t_matrices_to_majority_vote_probs
logger = logging.getLogger(__name__)
torch.set_printoptions(edgeitems=100)
class WSCrossWeighWeightsCalculator(MajorityVoteTrainer):
    """Computes per-sample weights with the WSCrossWeigh denoising scheme.

    The training data is split into folds by rules; for each partition/fold a fresh
    model is trained on the remaining folds and evaluated on the hold-out fold.
    Samples whose predictions disagree with the weak labels get their weight reduced.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # save the copy of the original model; each fold is trained from scratch
        # starting from this pristine copy
        self.wscrossweigh_model = copy.deepcopy(self.model).to(self.trainer_config.device)
        self.sample_weights = torch.empty(0)

    def calculate_weights(self) -> torch.FloatTensor:
        """
        This function calculates the sample_weights for samples using WSCrossWeigh method
        :return matrix of the sample sample_weights
        """
        # initialize optimizer
        self.trainer_config.optimizer = self.initialise_optimizer()

        if self.trainer_config.folds < 2:
            raise ValueError("Number of folds should be at least 2 to perform WSCrossWeigh denoising")

        logger.info("======= Denoising with WSCrossWeigh is started =======")
        os.makedirs(self.trainer_config.caching_folder, exist_ok=True)

        noisy_y_train = z_t_matrices_to_majority_vote_probs(
            self.rule_matches_z, self.mapping_rules_labels_t, self.trainer_config.other_class_id
        )

        if self.trainer_config.filter_non_labelled:
            self.model_input_x, noisy_y_train, self.rule_matches_z = filter_empty_probabilities(
                self.model_input_x, noisy_y_train, self.rule_matches_z
            )

        # initialise sample weights
        self.sample_weights = self.initialise_sample_weights()

        train_datasets, test_datasets = \
            k_folds_splitting_by_rules(
                self.model_input_x,
                noisy_y_train,
                self.rule_matches_z,
                self.trainer_config.partitions,
                self.trainer_config.folds,
                self.trainer_config.other_class_id
            )

        for iter, (train_dataset, test_dataset) in enumerate(zip(train_datasets, test_datasets)):
            log_section(
                f"WSCrossWeigh Iteration {iter + 1}/{self.trainer_config.partitions * self.trainer_config.folds}:",
                logger
            )
            # for each fold the model is trained from scratch
            self.model = copy.deepcopy(self.wscrossweigh_model).to(self.trainer_config.device)
            test_loader = self._make_dataloader(test_dataset)
            train_loader = self._make_dataloader(train_dataset)
            self._train_loop(train_loader)
            self.cw_test(test_loader)
            log_section(f"WSCrossWeigh Partition {iter + 1} is done", logger)

        dump(self.sample_weights, os.path.join(
            self.trainer_config.caching_folder, f"sample_weights_{self.trainer_config.caching_suffix}.lib"))
        logger.info("======= Denoising with WSCrossWeigh is completed =======")
        return self.sample_weights

    def cw_test(self, test_loader: DataLoader) -> None:
        """
        Evaluates the freshly trained fold model on its hold-out fold, compares the
        predicted labels with the ones got with weak supervision, and reduces the
        sample_weights of disagreed samples.

        BUGFIX: predictions are now made with ``self.model`` (the model just trained
        on the remaining folds in calculate_weights); previously the never-trained
        template ``self.wscrossweigh_model`` was evaluated, so the disagreement-based
        weights reflected an untrained network.

        :param test_loader: loader with the data which is used for testing (hold-out fold);
            the last feature tensor of each batch holds the original sample indices
        """
        self.model.eval()
        correct_predictions, wrong_predictions = 0, 0
        with torch.no_grad():
            for batch in test_loader:
                features, labels = self._load_batch(batch)
                data_features, data_indices = features[:-1], features[-1]
                outputs = self.model(*data_features)
                outputs = outputs[0] if not isinstance(outputs, torch.Tensor) else outputs
                _, predicted = torch.max(outputs.data, -1)
                predictions = predicted.tolist()
                # hoisted out of the per-sample loop: was recomputed on every iteration
                gold_labels = labels.tolist()
                for curr_pred in range(len(predictions)):
                    gold = gold_labels[curr_pred]
                    gold_classes = gold.index(max(gold))
                    guess = predictions[curr_pred]
                    if guess != gold_classes:  # todo: what if more than one class could be predicted? e.g. conll
                        wrong_predictions += 1
                        curr_id = data_indices[curr_pred].tolist()
                        self.sample_weights[curr_id] *= self.trainer_config.weight_reducing_rate
                    else:
                        correct_predictions += 1
        logger.info("Correct predictions: {:.3f}%, wrong predictions: {:.3f}%".format(
            correct_predictions * 100 / (correct_predictions + wrong_predictions),
            wrong_predictions * 100 / (correct_predictions + wrong_predictions)))

    def initialise_sample_weights(self) -> torch.FloatTensor:
        """ Initialise a sample_weights matrix (num_samples x 1): weights for all samples equal sample_start_weights """
        return torch.FloatTensor([self.trainer_config.samples_start_weights] * self.model_input_x.tensors[0].shape[0])
| 5,533 | 43.99187 | 120 | py |
knodle-develop | knodle-develop/knodle/trainer/wscrossweigh/utils.py | import logging
import random
from typing import Dict
import numpy as np
import torch
from torch.utils.data import TensorDataset
logger = logging.getLogger(__name__)
def get_labels_randomly(
    rule_matches_z: np.ndarray, rule_assignments_t: np.ndarray
) -> np.ndarray:
    """Derive one label per sample from the z and t matrices, picking uniformly at random among the matched labels."""
    if rule_matches_z.shape[1] != rule_assignments_t.shape[0]:
        raise ValueError("Dimensions mismatch!")
    label_hits = rule_matches_z.dot(rule_assignments_t)
    label_hits[label_hits > 0] = 1
    chosen = []
    for row in label_hits:
        candidates = np.where(row == 1)[0]
        chosen.append(np.random.choice(candidates, 1)[0])
    return np.stack(chosen, axis=0)
def vocab_and_vectors(filename: str, special_tokens: list) -> (dict, np.ndarray):
    """Read a word2vec-style text embedding file into a vocabulary and an embedding matrix.

    The first line must hold "<num_vectors> <dim>". Special tokens get the first ids and keep
    all-zero word vectors.

    :param filename: path to the embedding file (UTF-8, word2vec text format)
    :param special_tokens: tokens (e.g. padding/unknown) prepended to the vocabulary
    :return: (word -> id mapping, embedding matrix of shape (num_vecs, dim))
    """
    with open(filename, encoding="UTF-8") as in_file:
        parts = in_file.readline().strip().split(" ")
        # reserve extra rows so the special tokens get their own (zero) vectors
        num_vecs = int(parts[0]) + len(special_tokens)
        dim = int(parts[1])
        matrix = np.zeros((num_vecs, dim))
        word_to_id = dict()
        nextword_id = 0
        for token in special_tokens:
            word_to_id[token] = nextword_id
            nextword_id += 1
        for line in in_file:
            parts = line.strip().split(" ")
            word = parts[0]
            if word not in word_to_id:
                emb = [float(v) for v in parts[1:]]
                matrix[nextword_id] = emb
                word_to_id[word] = nextword_id
                nextword_id += 1
    return word_to_id, matrix
def get_embedding_matrix(pretrained_embedding_file: str) -> np.ndarray:
    """Load pretrained glove/word2vec text embeddings into a numpy matrix, dropping the word column."""
    with open(pretrained_embedding_file, encoding="UTF-8") as in_file:
        header = in_file.readline().strip().split(" ")
        vectors = [
            [float(value) for value in line.strip().split(" ")[1:]]
            for line in in_file
        ]
    emb_matrix = np.array(vectors)
    # the header announces the expected matrix dimensions
    assert emb_matrix.shape[0] == int(header[0]) and emb_matrix.shape[1] == int(header[1])
    return emb_matrix
def set_device(enable_cuda: bool):
    """Choose the torch device: CUDA when requested and available, otherwise CPU."""
    use_gpu = enable_cuda and torch.cuda.is_available()
    if use_gpu:
        logger.info("Using GPU")
        return torch.device("cuda")
    logger.info("Using CPU")
    return torch.device("cpu")
# deprecated
def check_splitting(
    tst_samples: TensorDataset,
    tst_labels: np.ndarray,
    tst_idx: np.ndarray,
    samples: torch.Tensor,
    labels: np.ndarray,
) -> None:
    """Sanity-check a WSCrossWeigh train/test split by spot-checking one random test sample.

    :param tst_samples: encoded samples of the test (hold-out) set
    :param tst_labels: labels of the test-set samples
    :param tst_idx: indices of the test samples in the original (full) training data
    :param samples: encoded samples of the original training data
    :param labels: labels of the original training data
    :raises RuntimeError: if the sampled test instance or its label does not match the original data
    """
    rnd_tst = np.random.randint(0, tst_samples.tensors[0].shape[0])  # take some random index
    tst_sample = tst_samples.tensors[0][rnd_tst, :]
    tst_idx = tst_idx[rnd_tst]
    tst_label = tst_labels[rnd_tst, :] if len(tst_labels.shape) > 1 else tst_labels[rnd_tst]
    # bug fix: the reference label must come from the original labels (the 1-D branch previously
    # re-read tst_labels, so the comparison below was always trivially true)
    tst_label_true = labels[tst_idx, :] if len(labels.shape) > 1 else labels[tst_idx]
    if not torch.equal(tst_sample, samples[tst_idx, :]):
        raise RuntimeError(
            "The splitting of original training set into cw train and test sets have been done "
            "incorrectly! A sample does not correspond to one in original dataset"
        )
    if not np.array_equal(tst_label, tst_label_true):
        raise RuntimeError(
            "The splitting of original training set into cw train and test sets have been done "
            "incorrectly! A sample label does not correspond to one in original dataset"
        )
def return_unique(where_to_find: np.ndarray, what_to_find: np.ndarray) -> np.ndarray:
    """Drop from ``where_to_find`` every entry that also occurs in ``what_to_find``."""
    _, shared_positions, _ = np.intersect1d(where_to_find, what_to_find, return_indices=True)
    return np.delete(where_to_find, shared_positions.tolist())
| 4,180 | 36.666667 | 119 | py |
knodle-develop | knodle-develop/knodle/trainer/wscrossweigh/config.py | from torch.optim import Optimizer
from knodle.trainer.baseline.config import MajorityConfig
from knodle.trainer.auto_config import AutoConfig
@AutoConfig.register("wscrossweigh")
class WSCrossWeighConfig(MajorityConfig):
    def __init__(
            self,
            partitions: int = 2,
            folds: int = 10,
            weight_reducing_rate: float = 0.5,
            samples_start_weights: float = 2.0,
            cw_epochs: int = None,
            cw_batch_size: int = None,
            cw_optimizer: Optimizer = None,
            cw_lr: float = 0.1,
            cw_filter_non_labelled: bool = None,
            cw_other_class_id: int = None,
            cw_grad_clipping: int = None,
            cw_seed: int = None,
            draw_plot: bool = False,
            **kwargs
    ):
        """
        A default configuration of WSCrossWeigh Trainer.
        :param partitions: number of times the sample weights calculation procedure will be performed
            (with different folds splitting)
        :param folds: number of folds the samples will be splitted into in each sample weights calculation iteration
        :param weight_reducing_rate: a value the sample weight is reduced by each time the sample is misclassified
        :param samples_start_weights: a start weight of all samples
        :param cw_epochs: number of epochs WSCrossWeigh models are to be trained
        :param cw_batch_size: batch size for WSCrossWeigh models training
        :param cw_optimizer: optimizer for WSCrossWeigh models training
        :param cw_lr: learning rate for WSCrossWeigh models training
        :param cw_filter_non_labelled: whether the samples where no rule matched are to be filtered out in WSCrossWeigh
        :param cw_other_class_id: id of the negative class; if set, the samples with no rule matched will be assigned to
            it in WSCrossWeigh
        :param cw_grad_clipping: if set, gradient norm of an iterable of parameters will be clipped in WSCrossWeigh
        :param cw_seed: the desired seed for generating random numbers in WSCrossWeigh
        :param draw_plot: draw a plot of development data (accuracy & loss)
        """
        super().__init__(**kwargs)
        self.draw_plot = draw_plot
        self.partitions = partitions
        self.folds = folds
        self.weight_reducing_rate = weight_reducing_rate
        self.samples_start_weights = samples_start_weights
        # every cw_* knob falls back to the corresponding base-trainer setting when not given
        self.cw_grad_clipping = self.grad_clipping if cw_grad_clipping is None else cw_grad_clipping
        self.cw_epochs = self.epochs if cw_epochs is None else cw_epochs
        self.cw_batch_size = self.batch_size if cw_batch_size is None else cw_batch_size
        self.cw_optimizer = self.optimizer if cw_optimizer is None else cw_optimizer
        if cw_filter_non_labelled is None and cw_other_class_id is None:
            # neither is given: inherit both together to keep them consistent
            self.cw_filter_non_labelled = self.filter_non_labelled
            self.cw_other_class_id = self.other_class_id
        else:
            self.cw_filter_non_labelled = cw_filter_non_labelled
            self.cw_other_class_id = cw_other_class_id
        self.cw_seed = cw_seed
        self.cw_lr = cw_lr
| 3,386 | 39.807229 | 123 | py |
knodle-develop | knodle-develop/knodle/trainer/wscrossweigh/data_splitting_by_rules.py | import logging
import random
from typing import List, Dict, Union, Tuple
import scipy.sparse as sp
import numpy as np
import torch
from torch.utils.data import TensorDataset
from knodle.trainer.wscrossweigh.utils import return_unique
from knodle.transformation.torch_input import input_info_labels_to_tensordataset, input_labels_to_tensordataset
logger = logging.getLogger(__name__)
def k_folds_splitting_by_rules(
        data_features: TensorDataset, labels: np.ndarray, rule_matches_z: np.ndarray, partitions: int, num_folds: int,
        seed: int = None, other_class_id: int = None
) -> Tuple[List, List]:
    """
    This function allows to perform the splitting of data instances into k folds according to the rules matched
    in them. The splitting could be performed in several iterations (defined by "partition" parameter).
    The logic is the following:
    for each partition:
        - the rules are shuffled
        - the rules are splitted into k folds
        - each fold iteratively becomes a hold-out fold
        - the samples that matched the rules from the hold-out fold are added to the hold-out test set
        - other samples are added to the train set
    The train and test sets do not intersect. If a sample matched several rules and one of the rules is included
    in the test set (and therefore the sample as well), this sample won't be included in the training set.
    Correspondingly, such samples (where multiple rules matched) are included into several hold-out folds depending on
    which of the relevant rules is selected to the hold-out fold in the current splitting.
    :param data_features: encoded data samples
    :param labels: array of labels (num_samples x 1)
    :param rule_matches_z: matrix of rule matches (num_samples x num_rules)
    :param partitions: number of partitions that are to be performed; in each partition the dataset will be splitted
        into k folds
    :param num_folds: number of folds the data instances are to be slitted into in each partition
    :param seed: optionally, the seed could be fixed in order to provide reproducibility
    :param other_class_id: if you don't want to include the negative samples (the ones that belong to the other class)
        to the test set, but only to the training set, you can pass the id of other class
    :return: two lists; the first contains the training sets, the second contains the test (hold-out) sets.
    """
    # seed the global RNG only when reproducibility is requested; the previous version drew (and
    # discarded) a random number in the unseeded case, pointlessly advancing the RNG state
    if seed is not None:
        random.seed(seed)
    rule_id2samples_ids = get_rules_sample_ids(rule_matches_z)
    return compose_train_n_test_datasets(
        data_features, rule_id2samples_ids, labels, num_folds, partitions, other_class_id
    )
def k_folds_splitting_by_signatures(
        data_features: np.ndarray, labels: np.ndarray, rule_matches_z: np.ndarray, partitions: int, num_folds: int,
        seed: int = None, other_class_id: int = None
) -> Tuple[List, List]:
    """
    This function allows to perform the splitting of data instances into k folds according to the signatures.
    The sample signature is composed from the rule matched in the sample. For example, if rules with ids 1, 5, 7 matched
    in the sample, the sample signature is 1_5_7. Thus, the signature serves as sample identifier.
    The logic is the following:
    - the sample signatures are calculated
    for each partition:
        - the sample signatures are shuffled
        - the sample signatures are splitted into k folds
        - each fold iteratively becomes a hold-out fold
        - the samples with signatures from the hold-out fold are added to the hold-out test set
        - other samples are added to the train set
    The train and test sets do not intersect.
    :param data_features: encoded data samples (num_samples x num_features)
    :param labels: array of labels (num_samples x 1)
    :param rule_matches_z: matrix of rule matches (num_samples x num_rules)
    :param partitions: number of partitions that are to be performed; in each partition the dataset will be splitted
        into k folds
    :param num_folds: number of folds the data instances are to be slitted into in each partition
    :param seed: optionally, the seed could be fixed in order to provide reproducibility
    :param other_class_id: if you don't want to include the negative samples (the ones that belong to the other class)
        to the test set, but only to the training set, you can pass the id of other class
    :return: two lists; the first contains the training sets, the second contains the test (hold-out) sets.
    """
    # seed the global RNG only when reproducibility is requested; the previous version drew (and
    # discarded) a random number in the unseeded case, pointlessly advancing the RNG state
    if seed is not None:
        random.seed(seed)
    signature2samples = get_signature_sample_ids(rule_matches_z)
    return compose_train_n_test_datasets(
        data_features, signature2samples, labels, num_folds, partitions, other_class_id
    )
def get_rules_sample_ids(rule_matches_z: Union[np.ndarray, sp.csr_matrix]) -> Dict[int, set]:
    """
    This function creates a dictionary {rule id : ids of samples where this rule matched}. The dictionary is
    needed as a support tool for faster calculation of train and test sets.
    :param rule_matches_z: matrix of rule matches as a numpy or sparse matrix (num_samples x num_rules)
    :return: dictionary {rule_id: {sample_id1, sample_id2, ...}}
    """
    # one (possibly empty) entry per rule, regardless of matrix format
    rule2sample_id = {rule_id: set() for rule_id in range(rule_matches_z.shape[1])}
    if isinstance(rule_matches_z, sp.csr_matrix):
        for row_id, col_id in zip(*rule_matches_z.nonzero()):  # row_id = sample id, col_id = rule id
            rule2sample_id[col_id].add(row_id)
    else:
        for row_id, row in enumerate(rule_matches_z):
            for rule_id in np.where(row == 1)[0].tolist():
                rule2sample_id[rule_id].add(row_id)
    return rule2sample_id
def get_signature_sample_ids(rule_matches_z: Union[np.ndarray, sp.csr_matrix]) -> Dict[str, List[int]]:
    """
    This function calculates the signature for each sample (e.g. if rules with ids 1, 5, 7 matched in a sample, its
    signature is "1_5_7") and groups the sample ids by signature.
    :param rule_matches_z: matrix of rule matches as a numpy or sparse matrix (num_samples x num_rules)
    :return: dictionary {signature: [sample_id1, sample_id2, ...]}
    """
    signature2samples = {}
    if isinstance(rule_matches_z, sp.csr_matrix):
        samples_id_rules_dict = {key: [] for key in range(rule_matches_z.shape[0])}
        for row_id, col_id in zip(*rule_matches_z.nonzero()):
            samples_id_rules_dict[row_id].append(col_id)
    else:
        samples_id_rules_dict = dict((i, set()) for i in range(0, rule_matches_z.shape[0]))
        for row_id, row in enumerate(rule_matches_z):
            samples_id_rules_dict[row_id] = np.where(row == 1)[0].tolist()
    for sample_id, rules in samples_id_rules_dict.items():
        # sort so the signature is canonical regardless of match order
        signature = "_".join(map(str, sorted(list(rules))))
        signature2samples.setdefault(signature, []).append(sample_id)
    return signature2samples
def compose_train_n_test_datasets(
        data_features: TensorDataset, rule2samples: Dict, labels: np.ndarray, num_folds: int,
        partitions: int, other_class_id: int = None
) -> Tuple[List, List]:
    """
    This function creates train and test datasets for k-folds cross-validation.
    :param data_features: encoded data samples
    :param rule2samples: {rule: [sample_id1, sample_id2, ...]}. Rule in this context means anything basing on
        which splitting is performed (matched rule, sample signature, ...).
    :param labels: array of labels (num_samples x num_classes)
    :param num_folds: number of folds the data instances are to be slitted into in each partition
    :param partitions: number of partitions that are to be performed
    :param other_class_id: if you don't want to include the negative samples (the ones that belong to the other class)
        to the test set, but only to the training set, you can pass the id of other class
    :return: list of train sets and list of corresponding test (hold-out) sets
    """
    # calculate ids of all samples that belong to the 'other_class'
    # "is not None" is required: a plain truthiness test silently skipped this lookup for class id 0
    other_sample_ids = np.where(labels[:, other_class_id] == 1)[0].tolist() if other_class_id is not None else None
    # make a list of rule ids to shuffle them later
    rule_ids = list(rule2samples.keys())
    train_datasets, test_datasets = [], []
    for partition in range(partitions):
        logger.info(f"Partition {partition + 1}/{partitions}:")
        random.shuffle(rule_ids)  # shuffle anew for each splitting
        for fold_id in range(num_folds):
            train_dataset, test_dataset = get_train_test_datasets_by_rule_indices(
                data_features, rule_ids, rule2samples, labels, fold_id, num_folds, other_sample_ids
            )
            train_datasets.append(train_dataset)
            test_datasets.append(test_dataset)
    return train_datasets, test_datasets
def get_train_test_datasets_by_rule_indices(
        data_features: TensorDataset, rules_ids: List[int], rule2samples: Dict, labels: np.ndarray, fold_id: int,
        num_folds: int, other_sample_ids: List[int]
) -> Tuple[TensorDataset, TensorDataset]:
    """
    Builds the train and hold-out datasets for one fold of the k-fold WSCrossWeigh splitting. Each dataset
    comprises encoded samples, labels and (for the hold-out set) sample indices in the original matrices.
    :param data_features: encoded data samples
    :param rules_ids: list of shuffled rules indices
    :param rule2samples: mapping from rule ids to the ids of samples where the rule matched. Rule in this context
        means anything basing on which splitting is performed (matched rule, sample signature, ...)
    :param labels: labels of all training samples (num_samples, num_classes)
    :param fold_id: number of a current hold-out fold
    :param num_folds: the whole number of folds the data should be splitted into
    :param other_sample_ids: a list of sample ids that belong to the other class. They won't be included in the test
        set, but only in the training set.
    :return: datasets for cw training and testing
    """
    rules_train, rules_hold_out = calculate_rules_indices(rules_ids, fold_id, num_folds)
    # the hold-out fold is assembled first so that its sample ids can be excluded from the train fold
    hold_out_samples, hold_out_labels, hold_out_idx = get_samples_labels_idx_by_rule_id(
        data_features, labels, rules_hold_out, rule2samples,
        check_intersections=None, other_sample_ids=other_sample_ids
    )
    train_samples, train_labels, _ = get_samples_labels_idx_by_rule_id(
        data_features, labels, rules_train, rule2samples,
        check_intersections=hold_out_idx, other_sample_ids=other_sample_ids
    )
    train_dataset = input_labels_to_tensordataset(train_samples, train_labels)
    test_dataset = input_info_labels_to_tensordataset(hold_out_samples, hold_out_idx, hold_out_labels)
    logger.info(
        f"Fold {fold_id} Rules in training set: {len(rules_train)}, rules in test set: {len(rules_hold_out)}, "
        f"samples in training set: {len(train_samples)}, samples in test set: {len(hold_out_samples)}"
    )
    return train_dataset, test_dataset
def calculate_rules_indices(rules_idx: list, fold_id: int, num_folds: int) -> Tuple[list, list]:
    """
    Splits shuffled rule indices into train rules and hold-out (test) rules for one fold: every
    ``num_folds``-th rule starting at ``fold_id`` goes to the hold-out fold, the rest are train rules.
    :param rules_idx: all rules indices (shuffled) that are to be splitted into cw training & cw test set rules
    :param fold_id: number of a current hold-out fold
    :param num_folds: the whole number of folds the data should be splitted into
    :return: two lists with the rule indices for cw training and cw test set accordingly
    """
    hold_out_rules = rules_idx[fold_id::num_folds]
    hold_out_set = set(hold_out_rules)
    remaining_rules = [rule for rule in rules_idx if rule not in hold_out_set]
    if not hold_out_set.isdisjoint(remaining_rules):
        raise ValueError("Splitting into train and test rules is done incorrectly.")
    return remaining_rules, hold_out_rules
def get_samples_labels_idx_by_rule_id(
        data_features: TensorDataset, labels: np.ndarray, indices: list, rule2samples: Dict,
        check_intersections: np.ndarray = None, other_sample_ids: list = None
) -> Tuple[TensorDataset, np.ndarray, np.ndarray]:
    """
    Extracts the samples and labels matched by the given rules from the original matrices. If
    ``check_intersections`` holds another set of sample ids, those ids are excluded from the result.
    :param data_features: encoded data samples
    :param labels: all training samples labels (num_samples, num_classes)
    :param indices: indices of rules; samples where these rules matched (and their labels) are to be included
    :param rule2samples: mapping from rule ids to the ids of samples where the rule matched
    :param check_intersections: optional sample ids already taken (used to exclude sentences from the training
        set which are already in the test set)
    :param other_sample_ids: a list of sample ids that belong to the other class; always added to the result
    :return: samples, labels and indices in the original matrix
    """
    collected = set()
    for rule_idx in indices:
        collected.update(rule2samples.get(rule_idx))
    if other_sample_ids is not None:
        collected.update(other_sample_ids)
    sample_ids = list(collected)
    if check_intersections is not None:
        sample_ids = return_unique(np.array(sample_ids), check_intersections)
    samples_dataset = TensorDataset(*(tensor[sample_ids] for tensor in data_features.tensors))
    samples_labels = np.array(labels[sample_ids])
    samples_idx = np.array(sample_ids)
    return samples_dataset, samples_labels, samples_idx
| 14,185 | 49.483986 | 120 | py |
knodle-develop | knodle-develop/knodle/trainer/wscrossweigh/wscrossweigh.py | import logging
import os
from copy import copy
import numpy as np
import torch
from joblib import load
from torch.nn import Module
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.baseline.majority import MajorityVoteTrainer
from knodle.trainer.wscrossweigh.config import WSCrossWeighConfig
from knodle.trainer.wscrossweigh.wscrossweigh_weights_calculator import WSCrossWeighWeightsCalculator
from knodle.transformation.filter import filter_empty_probabilities
from knodle.transformation.majority import z_t_matrices_to_majority_vote_probs
from knodle.transformation.torch_input import input_info_labels_to_tensordataset
torch.set_printoptions(edgeitems=100)
logger = logging.getLogger(__name__)
logging.getLogger('matplotlib.font_manager').disabled = True
@AutoTrainer.register('wscrossweigh')
class WSCrossWeighTrainer(MajorityVoteTrainer):
    """Majority-vote trainer that reweights noisy training samples with WSCrossWeigh before fitting."""

    def __init__(
            self,
            cw_model: Module = None,
            cw_model_input_x: TensorDataset = None,
            cw_rule_matches_z: np.ndarray = None,
            run_classifier: bool = True,  # set to False if you want only the calculation of the sample weights
            use_weights: bool = True,  # set to False if you want to use weights = 1 (baseline)
            **kwargs
    ):
        """
        :param cw_model: model used for the sample weights calculation; defaults to the end model
        :param cw_model_input_x: encoded samples for the weights calculation; defaults to the end-model input
        :param cw_rule_matches_z: rule matches for the weights calculation; defaults to the end-model z matrix
        """
        # explicit "is not None" checks are required: truthiness raises ValueError for multi-element
        # numpy arrays and wrongly falls back to kwargs for an empty (falsy) TensorDataset
        self.cw_model = cw_model if cw_model is not None else kwargs.get("model")
        self.cw_model_input_x = cw_model_input_x if cw_model_input_x is not None else kwargs.get("model_input_x")
        self.cw_rule_matches_z = cw_rule_matches_z if cw_rule_matches_z is not None else kwargs.get("rule_matches_z")
        if kwargs.get("trainer_config") is None:
            kwargs["trainer_config"] = WSCrossWeighConfig(
                optimizer=SGD,
                cw_optimizer=SGD,
                lr=0.001,
                cw_lr=0.001,
            )
        super().__init__(**kwargs)
        self.run_classifier = run_classifier
        self.use_weights = use_weights
        logger.info("CrossWeigh Config is used: {}".format(self.trainer_config.__dict__))

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """ This function sample_weights the samples with WSCrossWeigh method and train the model """
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()
        # initialise optimizer
        self.trainer_config.optimizer = self.initialise_optimizer()
        train_labels = self.calculate_labels()
        sample_weights = self._get_sample_weights() if self.use_weights \
            else torch.FloatTensor([1] * len(self.model_input_x))
        if not self.run_classifier:
            logger.info("No classifier is to be trained")
            return
        logger.info("Classifier training is started")
        train_loader = self._make_dataloader(
            input_info_labels_to_tensordataset(self.model_input_x, sample_weights.cpu().detach().numpy(), train_labels)
        )
        self._train_loop(train_loader, use_sample_weights=True, draw_plot=self.trainer_config.draw_plot)

    def calculate_labels(self) -> np.ndarray:
        """ This function calculates label probabilities and filter out non labelled samples, when needed """
        train_labels = z_t_matrices_to_majority_vote_probs(
            self.rule_matches_z, self.mapping_rules_labels_t, self.trainer_config.other_class_id
        )
        if self.trainer_config.filter_non_labelled:
            self.model_input_x, train_labels, self.rule_matches_z = filter_empty_probabilities(
                self.model_input_x, train_labels, self.rule_matches_z
            )
        if train_labels.shape[1] != self.trainer_config.output_classes:
            raise ValueError(
                f"The number of output classes {self.trainer_config.output_classes} do not correspond to labels "
                f"probabilities dimension {train_labels.shape[1]}"
            )
        return train_labels

    def _get_sample_weights(self) -> torch.FloatTensor:
        """ This function checks whether there are accessible already pretrained sample weights. If yes, return
        them. If not, calculates sample weights calling method of WSCrossWeighWeightsCalculator class"""
        if os.path.isfile(os.path.join(
                self.trainer_config.caching_folder, f"sample_weights_{self.trainer_config.caching_suffix}.lib")
        ):
            logger.info("Already pretrained samples sample_weights will be used.")
            sample_weights = load(os.path.join(
                self.trainer_config.caching_folder, f"sample_weights_{self.trainer_config.caching_suffix}.lib")
            )
        else:
            logger.info("No pretrained sample weights are found, they will be calculated now")
            sample_weights = WSCrossWeighWeightsCalculator(
                model=self.cw_model,
                mapping_rules_labels_t=self.mapping_rules_labels_t,
                model_input_x=self.cw_model_input_x,
                rule_matches_z=self.cw_rule_matches_z,
                trainer_config=self.get_denoising_config(),
            ).calculate_weights()
            logger.info(f"Sample weights are calculated and saved to {self.trainer_config.caching_folder} folder")
        return sample_weights

    def get_denoising_config(self):
        """ Get a config for WSCrossWeigh sample weights calculation """
        # copy the trainer config and overwrite the training knobs with their cw_* counterparts
        weights_calculation_config = copy(self.trainer_config)
        weights_calculation_config.epochs = self.trainer_config.cw_epochs
        weights_calculation_config.optimizer = self.trainer_config.cw_optimizer
        weights_calculation_config.lr = self.trainer_config.cw_lr
        weights_calculation_config.batch_size = self.trainer_config.cw_batch_size
        weights_calculation_config.filter_non_labelled = self.trainer_config.cw_filter_non_labelled
        weights_calculation_config.other_class_id = self.trainer_config.cw_other_class_id
        weights_calculation_config.grad_clipping = self.trainer_config.cw_grad_clipping
        weights_calculation_config.seed = self.trainer_config.cw_seed
        weights_calculation_config.saved_models_dir = None
        return weights_calculation_config
| 6,409 | 45.115108 | 119 | py |
knodle-develop | knodle-develop/knodle/trainer/knn_aggregation/knn.py | import os
import logging
import joblib
import numpy as np
from torch.optim import SGD
from torch.utils.data import TensorDataset
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from annoy import AnnoyIndex
from knodle.transformation.majority import input_to_majority_vote_input
from knodle.transformation.torch_input import input_labels_to_tensordataset
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.baseline.majority import MajorityVoteTrainer
from knodle.trainer.knn_aggregation.config import KNNConfig
from knodle.trainer.utils.denoise import activate_neighbors
logger = logging.getLogger(__name__)
@AutoTrainer.register('knn')
class KNNAggregationTrainer(MajorityVoteTrainer):
    """Majority-vote trainer that first densifies the rule-match matrix via k-nearest-neighbor activation."""

    def __init__(
            self,
            knn_feature_matrix: np.ndarray = None,
            **kwargs
    ):
        """
        :param knn_feature_matrix: features used for the neighbor search; defaults to the end-model input features
        """
        if kwargs.get("trainer_config") is None:
            kwargs["trainer_config"] = KNNConfig(optimizer=SGD, lr=0.001)
        super().__init__(**kwargs)
        if knn_feature_matrix is None:
            self.knn_feature_matrix = csr_matrix(self.model_input_x.tensors[0].numpy())
        else:
            self.knn_feature_matrix = knn_feature_matrix

    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        """Denoises the z matrix with kNN activation, derives majority-vote labels and trains the model."""
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()
        # initialise optimizer
        self.trainer_config.optimizer = self.initialise_optimizer()
        self.rule_matches_z = self.rule_matches_z.astype(np.int8)
        self.mapping_rules_labels_t = self.mapping_rules_labels_t.astype(np.int8)
        self._knn_denoise_rule_matches()
        self.model_input_x, noisy_input_y, self.rule_matches_z = input_to_majority_vote_input(
            self.rule_matches_z, self.mapping_rules_labels_t.astype(np.int64), self.model_input_x,
            use_probabilistic_labels=self.trainer_config.use_probabilistic_labels,
            filter_non_labelled=self.trainer_config.filter_non_labelled,
            probability_threshold=self.trainer_config.probability_threshold,
            other_class_id=self.trainer_config.other_class_id,
            multi_label=self.trainer_config.multi_label,
            multi_label_threshold=self.trainer_config.multi_label_threshold
        )
        feature_label_dataset = input_labels_to_tensordataset(self.model_input_x, noisy_input_y)
        feature_label_dataloader = self._make_dataloader(feature_label_dataset)
        self._train_loop(feature_label_dataloader)

    def _knn_denoise_rule_matches(self) -> np.ndarray:
        """
        Denoises the applied weak supervision source.
        Args:
            rule_matches_z: Matrix with all applied weak supervision sources. Shape: (Instances x Rules)
        Returns: Denoised / Improved applied labeling function matrix. Shape: (Instances x Rules)
        """
        k = self.trainer_config.k
        if k == 1:
            return self.rule_matches_z
        # load cached data, if available
        if self.trainer_config.caching_folder:
            cache_file = self.trainer_config.get_cache_file()
            if os.path.isfile(cache_file):
                logger.info(f"Loaded knn matrix from cache: {cache_file}")
                return joblib.load(cache_file)
        logger.info(f"Start denoising labeling functions with k: {k}.")
        # ignore zero-match rows for knn construction & activation
        if self.trainer_config.activate_no_match_instances:
            # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the correct dtype here
            ignore = np.zeros((self.knn_feature_matrix.shape[0],), dtype=bool)
        else:
            ignore = self.rule_matches_z.sum(-1) == 0
        # Set up data structure, to quickly find nearest neighbors
        if self.trainer_config.use_approximation:
            # use annoy fast ANN
            if k is not None:
                knn_matrix_shape = self.knn_feature_matrix.shape
                logger.info("Creating annoy index...")
                t = AnnoyIndex(knn_matrix_shape[1], 'dot')
                for i, v in enumerate(self.knn_feature_matrix):
                    if not ignore[i]:
                        t.add_item(i, v)
                t.build(10, n_jobs=self.trainer_config.n_jobs)
                # the feature matrix is now held by the annoy index; free the original to save memory
                self.knn_feature_matrix = None
                logger.info("Retrieving neighbor indices...")
                indices = (  # make a generator: no memory is allocated at this moment
                    np.array(t.get_nns_by_item(i, k, search_k=-1, include_distances=False))
                    if not ignore[i] else np.array([])
                    for i in range(knn_matrix_shape[0])
                )
            else:
                # possible radius implementation; delete error in config then
                pass
        else:
            # use standard precise kNN
            if k is not None:
                logger.info("Creating NN index...")
                neighbors = NearestNeighbors(n_neighbors=k, n_jobs=self.trainer_config.n_jobs)\
                    .fit(self.knn_feature_matrix)
                logger.info("Retrieving neighbor indices...")
                indices = neighbors.kneighbors(self.knn_feature_matrix, n_neighbors=k, return_distance=False)
            else:
                logger.info("Creating NN index...")
                neighbors = NearestNeighbors(radius=self.trainer_config.radius, n_jobs=self.trainer_config.n_jobs)\
                    .fit(self.knn_feature_matrix)
                logger.info("Retrieving neighbor indices...")
                indices = neighbors.radius_neighbors(self.knn_feature_matrix, return_distance=False)
        # activate matches.
        logger.info("Activating neighbors...")
        self.rule_matches_z = activate_neighbors(self.rule_matches_z, indices)
        # save data for caching
        if self.trainer_config.caching_folder:
            os.makedirs(self.trainer_config.caching_folder, exist_ok=True)
            joblib.dump(self.rule_matches_z, cache_file)
        return self.rule_matches_z

    def print_step_update(self, step: int, max_steps: int):
        """Logs training progress every 40 batches."""
        if step % 40 == 0 and not step == 0:
            logger.info(f"  Batch {step}  of  {max_steps}.")
| 6,376 | 40.953947 | 115 | py |
knodle-develop | knodle-develop/knodle/trainer/utils/utils.py | import random
import logging
import torch
from torch import Tensor, argmax
from torch.utils.data import TensorDataset
import numpy as np
import matplotlib.pyplot as plt
def log_section(text: str, logger: logging.Logger, additional_info: dict = None) -> None:
    """
    Prints a section banner to the log, optionally followed by key/value pairs.
    Args:
        text: Text to print
        logger: Logger object
        additional_info: optional mapping of extra key/value pairs to log
    Returns:
        None
    """
    # The previous annotations were wrong: the module object "logging" was used
    # as a type, and "{}" (a dict literal) as the annotation for additional_info.
    logger.info("======================================")
    logger.info(text)
    if additional_info:
        for key, value in additional_info.items():
            logger.info("{}: {}".format(key, value))
    logger.info("======================================")
def accuracy_of_probs(predictions: Tensor, ground_truth: Tensor):
    """
    Compute classification accuracy from probability outputs.
    Args:
        predictions: Predictions, shape: instances x labels
        ground_truth: Ground truth, shape one of (instances x 1) or (instances x labels)
    Returns: Accuracy
    """
    # Collapse one-hot / probabilistic ground truth to class indices if needed.
    if len(ground_truth.shape) != 1:
        ground_truth = argmax(ground_truth, dim=-1)
    hard_predictions = argmax(predictions, dim=-1)
    hits = (hard_predictions == ground_truth).float()
    return hits.sum() / len(hits)
def extract_tensor_from_dataset(dataset: TensorDataset, tensor_index: int) -> Tensor:
    """Return the tensor stored at position ``tensor_index`` of ``dataset``.
    Args:
        dataset: Dataset to extract tensor from
        tensor_index: Which tensor to extract
    Returns: Tensor
    """
    all_tensors = dataset.tensors
    return all_tensors[tensor_index]
def check_and_return_device() -> torch.device:
    """
    Pick the torch device to run on: GPU ("cuda") when available, else CPU.
    """
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_seed(seed: int) -> None:
    """ Fix seed for all shuffle processes in order to get the reproducible result """
    # Seed every RNG source in one pass: stdlib, NumPy, torch CPU, torch CUDA.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
| 2,163 | 26.392405 | 88 | py |
knodle-develop | knodle-develop/knodle/trainer/snorkel/snorkel.py | from typing import Tuple
import numpy as np
from snorkel.labeling.model import LabelModel
from torch.optim import SGD
from torch.utils.data import TensorDataset
from knodle.transformation.torch_input import input_labels_to_tensordataset
from knodle.trainer.auto_trainer import AutoTrainer
from knodle.trainer.baseline.majority import MajorityVoteTrainer
from knodle.trainer.knn_aggregation.knn import KNNAggregationTrainer
from knodle.trainer.snorkel.config import SnorkelConfig, SnorkelKNNConfig
from knodle.trainer.snorkel.utils import (
z_t_matrix_to_snorkel_matrix,
prepare_empty_rule_matches,
add_labels_for_empty_examples
)
from knodle.transformation.filter import filter_tensor_dataset_by_indices
@AutoTrainer.register('snorkel')
class SnorkelTrainer(MajorityVoteTrainer):
    """Provides a wrapper around the Snorkel system. See https://github.com/snorkel-team/snorkel for more details.
    Formally, a generative model P(Y, Y') is learned, with Y' = Z * T, followed by a discriminative model specified
    by the user.
    """
    def __init__(self, **kwargs):
        # Fall back to a default SGD-based Snorkel configuration when none is supplied.
        if kwargs.get("trainer_config", None) is None:
            kwargs["trainer_config"] = SnorkelConfig(optimizer=SGD, lr=0.001)
        super().__init__(**kwargs)
    def _snorkel_denoising(
            self, model_input_x: TensorDataset, rule_matches_z: np.ndarray
    ) -> Tuple[TensorDataset, np.ndarray]:
        """
        Trains the generative model.
        Premise:
            Snorkel can not make use of rule-unlabeled examples (no rule matches).
            The generative LabelModel assigns such examples a uniform distribution over all available labels,
            which contradicts the desired behaviour. Such examples should be either filtered or assigned an
            "other class id".
        Filtering / other class strategy:
            filter_non_labelled = True:
                Drop the unlabeled examples completely prior to LabelModel.
            filter_non_labelled = False:
                However, we might want to keep negative examples in the training data, but we should not pass them
                through the LabelModel. Therefore, the rule-unlabeled part of the data skips the LabelModel step
                and is added directly to the output data with manually assigned "other class id".
        Args:
            model_input_x: feature input to the classifier
            rule_matches_z: input rule matches
        Returns:
            eventually filtered model input,
            corresponding probability distributions over labels generated by Snorkel. Shape: (#Instances x #Labels)
        """
        # initialise optimizer
        self.trainer_config.optimizer = self.initialise_optimizer()
        # create Snorkel matrix; rows without any rule match are split off first
        non_empty_mask, rule_matches_z = prepare_empty_rule_matches(rule_matches_z)
        L_train = z_t_matrix_to_snorkel_matrix(rule_matches_z, self.mapping_rules_labels_t)
        # train LabelModel (the generative step) on the rule-labeled subset only
        label_model = LabelModel(cardinality=self.mapping_rules_labels_t.shape[1], verbose=True)
        fitting_kwargs = {}
        # only forward a seed when one is configured, to keep Snorkel's default otherwise
        if self.trainer_config.seed is not None:
            fitting_kwargs["seed"] = self.trainer_config.seed
        label_model.fit(
            L_train,
            n_epochs=self.trainer_config.label_model_num_epochs,
            log_freq=self.trainer_config.label_model_log_freq,
            **fitting_kwargs
        )
        label_probs_gen = label_model.predict_proba(L_train)
        if self.trainer_config.filter_non_labelled:
            # filter out respective input irrevocably from all data
            model_input_x = filter_tensor_dataset_by_indices(dataset=model_input_x, filter_ids=non_empty_mask)
            label_probs = label_probs_gen
        else:
            # add "other class" labels for empty examples in model input x, that were not given to Snorkel
            label_probs = add_labels_for_empty_examples(
                label_probs_gen=label_probs_gen, non_zero_mask=non_empty_mask,
                output_classes=self.trainer_config.output_classes,
                other_class_id=self.trainer_config.other_class_id)
        return model_input_x, label_probs
    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()
        # generative step: denoise the weak labels with Snorkel's LabelModel
        model_input_x, label_probs = self._snorkel_denoising(self.model_input_x, self.rule_matches_z)
        # Standard training (discriminative step on the denoised labels)
        feature_label_dataset = input_labels_to_tensordataset(model_input_x, label_probs)
        feature_label_dataloader = self._make_dataloader(feature_label_dataset)
        self._train_loop(feature_label_dataloader)
@AutoTrainer.register('snorkel_knn')
class SnorkelKNNAggregationTrainer(SnorkelTrainer, KNNAggregationTrainer):
    """Calls k-NN denoising, before the Snorkel generative and discriminative training is started.
    """
    def __init__(self, **kwargs):
        # Default to a SnorkelKNNConfig with SGD when no configuration is given.
        if kwargs.get("trainer_config", None) is None:
            kwargs["trainer_config"] = SnorkelKNNConfig(optimizer=SGD, lr=0.001)
        super().__init__(**kwargs)
    def train(
            self,
            model_input_x: TensorDataset = None, rule_matches_z: np.ndarray = None,
            dev_model_input_x: TensorDataset = None, dev_gold_labels_y: TensorDataset = None
    ):
        self._load_train_params(model_input_x, rule_matches_z, dev_model_input_x, dev_gold_labels_y)
        self._apply_rule_reduction()
        # k-NN denoising of the rule matches, then Snorkel's generative step
        denoised_rule_matches_z = self._knn_denoise_rule_matches()
        model_input_x, label_probs = self._snorkel_denoising(self.model_input_x, denoised_rule_matches_z)
        # Standard training
        feature_label_dataset = input_labels_to_tensordataset(model_input_x, label_probs)
        feature_label_dataloader = self._make_dataloader(feature_label_dataset)
        self._train_loop(feature_label_dataloader)
| 6,133 | 43.129496 | 115 | py |
knodle-develop | knodle-develop/knodle/model/logistic_regression_model.py | import torch
from torch import nn
class LogisticRegressionModel(nn.Module):
    """Plain logistic regression: a single linear layer over flat input features."""
    def __init__(self, input_dim: int, output_classes: int):
        super(LogisticRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(input_dim, output_classes)
    def forward(self, x):
        # Cast to float so integer feature tensors are accepted as well.
        return self.linear(x.float())
| 365 | 25.142857 | 64 | py |
knodle-develop | knodle-develop/knodle/model/logisitc_regression_with_emb_layer.py | import torch
from torch import nn
import numpy as np
class LogisticRegressionModel(nn.Module):
    """Logistic regression over frozen pretrained word embeddings.

    Token ids are embedded, flattened per sample, and fed into one linear layer.
    """
    def __init__(
        self,
        input_size: int,
        word_input_dim: int,
        word_output_dim: int,
        word_embedding_matrix: np.ndarray,
        output_classes: int,
    ):
        super(LogisticRegressionModel, self).__init__()
        self.word_embedding = nn.Embedding(
            word_input_dim, word_output_dim, padding_idx=0
        )
        # Load the pretrained embedding matrix and freeze it (no fine-tuning).
        self.word_embedding.weight = nn.Parameter(
            torch.tensor(word_embedding_matrix, dtype=torch.float32)
        )
        self.word_embedding.weight.requires_grad = False
        self.linear = nn.Linear(input_size * word_output_dim, output_classes)
    def forward(self, x):
        embedded = self.word_embedding(x)
        flattened = embedded.view(x.shape[0], -1)
        return self.linear(flattened)
| 1,040 | 30.545455 | 78 | py |
knodle-develop | knodle-develop/knodle/model/bidirectional_lstm_model.py | import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
class BidirectionalLSTM(nn.Module):
    # One-layer bidirectional LSTM classifier over frozen pretrained word embeddings.
    def __init__(
        self,
        word_input_dim,
        word_output_dim,
        word_embedding_matrix,
        num_classes,
        size_factor=200,
    ):
        super(BidirectionalLSTM, self).__init__()
        self.word_input_dim = word_input_dim
        self.word_output_dim = word_output_dim
        self.word_embedding_matrix = word_embedding_matrix
        self.size_factor = size_factor
        self.num_classes = num_classes
        self.word_embedding = nn.Embedding(
            word_input_dim, word_output_dim, padding_idx=0
        )
        # Load the pretrained embedding matrix and freeze it (no fine-tuning).
        self.word_embedding.weight = nn.Parameter(
            torch.tensor(word_embedding_matrix, dtype=torch.float32)
        )
        self.word_embedding.weight.requires_grad = False
        # NOTE(review): type_linear is never used in forward() -- confirm it is dead code.
        self.type_linear = nn.Linear(20, size_factor * 2)
        # Per-time-step projection of embeddings down to the LSTM input size.
        self.td_dense = nn.Linear(word_output_dim, size_factor)
        self.biLSTM = nn.LSTM(
            size_factor, size_factor, bidirectional=True, batch_first=True
        )
        self.predict = nn.Linear(size_factor * 2, num_classes)
        self.init_weights()
    def forward(self, x):
        word_embeddings = self.word_embedding(x)
        td_dense = self.td_dense(word_embeddings)
        biLSTM, (h_n, c_n) = self.biLSTM(td_dense)
        # NOTE(review): flatten_parameters() is called after the LSTM pass; it is
        # usually called before -- confirm this placement is intentional.
        self.biLSTM.flatten_parameters()
        # Reshape the final hidden state into (layers, directions, batch, hidden)
        # and take the last layer.
        final_state = h_n.view(1, 2, x.shape[0], self.size_factor)[-1]
        h_1, h_2 = final_state[0], final_state[1]  # forward & backward pass
        concat = torch.cat((h_1, h_2), 1)  # concatenate both states
        final = self.predict(concat)
        return final
    def init_weights(self):
        """
        Here we reproduce Keras default initialization weights to initialize Embeddings/LSTM weights
        """
        torch.manual_seed(12345)
        # Collect parameter groups: input-hidden weights, hidden-hidden weights, biases.
        ih = (
            param.data for name, param in self.named_parameters() if "weight_ih" in name
        )
        hh = (
            param.data for name, param in self.named_parameters() if "weight_hh" in name
        )
        b = (param.data for name, param in self.named_parameters() if "bias" in name)
        # Keras defaults: Glorot/Xavier for input-hidden, orthogonal for hidden-hidden, zero biases.
        for t in ih:
            nn.init.xavier_uniform_(t)
        for t in hh:
            nn.init.orthogonal_(t)
        for t in b:
            nn.init.constant_(t, 0)
| 2,448 | 31.653333 | 100 | py |
knodle-develop | knodle-develop/knodle/model/EarlyStopping/__init__.py | import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation loss doesn't improve after a given patience."""
    def __init__(self, patience=7, verbose=False, delta=0, name="checkpoint"):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            name (str): Base file name of the checkpoint stored under "models/".
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.inf (lowercase): the np.Inf alias was removed in NumPy 2.0.
        self.val_loss_min = np.inf
        self.delta = delta
        self.name = name
    def __call__(self, val_loss, model):
        # Higher score == better; score is the negated validation loss.
        score = -val_loss
        if self.best_score is None:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score + self.delta:
            # No sufficient improvement: bump the patience counter.
            self.counter += 1
            print(f"EarlyStopping counter: {self.counter} out of {self.patience}")
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0
    def save_checkpoint(self, val_loss, model):
        """Saves model when validation loss decrease."""
        import os  # local import keeps the module's import surface unchanged
        if self.verbose:
            print(
                f"Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ..."
            )
        # Create the target directory on demand so saving never fails on a fresh checkout.
        os.makedirs("models", exist_ok=True)
        torch.save(model.state_dict(), "models/" + self.name + ".pt")
        self.val_loss_min = val_loss
| 1,854 | 34.673077 | 108 | py |
knodle-develop | knodle-develop/knodle/transformation/filter.py | from typing import Tuple, Union, List
import numpy as np
from torch.utils.data import TensorDataset
def filter_tensor_dataset_by_indices(dataset: TensorDataset, filter_ids: Union[np.ndarray, List[int]]) -> TensorDataset:
    """Restrict every tensor of a TensorDataset to the rows listed in "filter_ids".
    Args:
        dataset: TensorDataset with a list of tensors, each having first dimension N
        filter_ids: A list of K indices to be kept, K <= N
    Returns: TensorDataset with filtered indices
    """
    return TensorDataset(*(tensor[filter_ids] for tensor in dataset.tensors))
def filter_empty_probabilities(
        input_data_x: TensorDataset, class_probas_y: np.ndarray, rule_matches_z: np.ndarray = None
) -> Union[Tuple[TensorDataset, np.ndarray, np.ndarray], Tuple[TensorDataset, np.ndarray]]:
    """Drop all samples whose label probabilities sum to 0 (i.e. no rule matched).
    Args:
        input_data_x: A TensorDataset serving as input to a model
        class_probas_y: Array, holding class probabilities, shape=num_samples, num_classes
        rule_matches_z: optional array with rules matched in samples. If given, will also be filtered
    :return: Modified TensorDataset's
    """
    if class_probas_y.ndim != 2:
        raise ValueError("y_probs needs to be a matrix of dimensions num_samples x num_classes")
    keep_ids = np.where(class_probas_y.sum(axis=-1) != 0)[0]
    filtered_x = filter_tensor_dataset_by_indices(dataset=input_data_x, filter_ids=keep_ids)
    if rule_matches_z is None:
        return filtered_x, class_probas_y[keep_ids]
    return filtered_x, class_probas_y[keep_ids], rule_matches_z[keep_ids]
def filter_probability_threshold(
        input_data_x: TensorDataset, class_probas_y: np.ndarray, rule_matches_z: np.ndarray = None,
        probability_threshold: float = 0.7
) -> Union[Tuple[TensorDataset, np.ndarray, np.ndarray], Tuple[TensorDataset, np.ndarray]]:
    """Keep only samples whose highest class probability reaches "probability_threshold".
    """
    confident_ids = np.where(class_probas_y.max(axis=-1) >= probability_threshold)[0]
    filtered_x = filter_tensor_dataset_by_indices(dataset=input_data_x, filter_ids=confident_ids)
    if rule_matches_z is None:
        return filtered_x, class_probas_y[confident_ids]
    return filtered_x, class_probas_y[confident_ids], rule_matches_z[confident_ids]
| 2,543 | 39.380952 | 120 | py |
knodle-develop | knodle-develop/knodle/transformation/majority.py | import logging
import random
import warnings
import numpy as np
import scipy.sparse as sp
from torch.utils.data import TensorDataset
from knodle.transformation.filter import filter_empty_probabilities, filter_probability_threshold
logger = logging.getLogger(__name__)
def probabilities_to_majority_vote(
        probs: np.ndarray, choose_random_label: bool = True, other_class_id: int = None
) -> int:
    """Collapse a probability vector to a single label by majority vote.

    A unique maximum wins outright. On a tie, either one of the tied classes is
    picked at random or the configured "other" class id is returned.
    Args:
        probs: Vector of probabilities for 1 sample. Shape: classes x 1
        choose_random_label: Choose a random label, if there's no clear majority.
        other_class_id: Class ID being used, if there's no clear majority
    Returns: An array of classes.
    """
    if choose_random_label and other_class_id is not None:
        raise ValueError("You can either choose a random class, or transform undefined cases to an other class.")
    top = np.max(probs)
    tied_ids = np.where(probs == top)[0]
    if len(tied_ids) == 1:
        return int(tied_ids[0])
    if choose_random_label:
        return int(np.random.choice(tied_ids))
    if other_class_id is not None:
        return other_class_id
    raise ValueError("Specify a way how to resolve unclear majority votes.")
def z_t_matrices_to_majority_vote_probs(
        rule_matches_z: np.ndarray,
        mapping_rules_labels_t: np.ndarray,
        other_class_id: int = None,
        normalization: str = "softmax"
) -> np.ndarray:
    """
    This function calculates a majority vote probability for all rule_matches_z. The difference from simple
    get_majority_vote_probs function is the following: samples, where no rules matched (that is, all elements in
    the corresponding raw in rule_matches_z matrix equal 0), are assigned to no_match_class (that is, a value in the
    corresponding column in rule_counts_probs matrix is changed to 1).
    Args:
        rule_matches_z: Binary encoded array of which rules matched. Shape: instances x rules
        mapping_rules_labels_t: Mapping of rules to labels, binary encoded. Shape: rules x classes
        other_class_id: Class which is chosen, if no function is hitting.
        normalization: The way how the vectors will be normalized. Currently, there are two supported normalizations:
            - softmax
            - sigmoid
    Returns: Array with majority vote probabilities. Shape: instances x classes
    """
    if rule_matches_z.shape[1] != mapping_rules_labels_t.shape[0]:
        raise ValueError(f"Dimensions mismatch! Z matrix has shape {rule_matches_z.shape}, while "
                         f"T matrix has shape {mapping_rules_labels_t.shape}")
    # Per-sample per-class rule-hit counts: Z (instances x rules) @ T (rules x classes).
    if isinstance(rule_matches_z, sp.csr_matrix):
        rule_counts = rule_matches_z.dot(mapping_rules_labels_t)
        if isinstance(rule_counts, sp.csr_matrix):
            rule_counts = rule_counts.toarray()
    else:
        rule_counts = np.matmul(rule_matches_z, mapping_rules_labels_t)
    # NOTE(review): this truthiness check skips the whole branch when other_class_id == 0 --
    # confirm whether id 0 should really be treated as "no other class".
    if other_class_id:
        if other_class_id < 0:
            raise RuntimeError("Label for negative samples should be greater than 0 for correct matrix multiplication")
        if other_class_id < mapping_rules_labels_t.shape[1] - 1:
            warnings.warn(f"Negative class {other_class_id} is already present in data")
        # Assign rows with no rule match a single count in the "other" class.
        if rule_counts.shape[1] == other_class_id:
            # the other class sits one column past the existing ones: append it
            rule_counts = np.hstack((rule_counts, np.zeros([rule_counts.shape[0], 1])))
            rule_counts[~rule_counts.any(axis=1), other_class_id] = 1
        elif rule_counts.shape[1] >= other_class_id:
            rule_counts[~rule_counts.any(axis=1), other_class_id] = 1
        else:
            raise ValueError("Other class id is incorrect")
    if normalization == "softmax":
        # Despite the name, this is count normalization (counts / row sum), not exp-softmax.
        rule_counts_probs = rule_counts / rule_counts.sum(axis=1).reshape(-1, 1)
    elif normalization == "sigmoid":
        rule_counts_probs = 1 / (1 + np.exp(-rule_counts))
        zeros = np.where(rule_counts == 0)  # the values that were 0s (= no LF from this class matched) should remain 0s
        rule_counts_probs[zeros] = rule_counts[zeros]
    else:
        raise ValueError(
            "Unknown label probabilities normalization; currently softmax and sigmoid normalization are supported"
        )
    # Rows with zero total counts divide by zero above and become NaN -> reset to 0.
    rule_counts_probs[np.isnan(rule_counts_probs)] = 0
    return rule_counts_probs
def z_t_matrices_to_majority_vote_labels(
        rule_matches_z: np.ndarray, mapping_rules_labels_t: np.ndarray, choose_random_label: bool = True,
        other_class_id: int = None
) -> np.array:
    """Computes the majority labels. If no clear "winner" is found, other_class_id is used instead.
    Args:
        rule_matches_z: Binary encoded array of which rules matched. Shape: instances x rules
        mapping_rules_labels_t: Mapping of rules to labels, binary encoded. Shape: rules x classes
        choose_random_label: Whether a random label is chosen, if there's no clear majority vote.
        other_class_id: the id of other class, i.e. the class of negative samples
    Returns: Decision per sample. Shape: (instances, )
    """
    vote_probs = z_t_matrices_to_majority_vote_probs(rule_matches_z, mapping_rules_labels_t)
    return np.apply_along_axis(
        probabilities_to_majority_vote,
        axis=1,
        arr=vote_probs,
        choose_random_label=choose_random_label,
        other_class_id=other_class_id,
    )
def input_to_majority_vote_input(
        rule_matches_z: np.ndarray,
        mapping_rules_labels_t: np.ndarray,
        model_input_x: TensorDataset,
        use_probabilistic_labels: bool = True,
        filter_non_labelled: bool = True,
        probability_threshold: int = None,
        other_class_id: int = None,
        multi_label: bool = False,
        multi_label_threshold: float = None
) -> tuple:
    """
    Calculates noisy labels y_hat from the Knodle Z and T matrices.
    :param rule_matches_z: binary encoded array of which rules matched. Shape: instances x rules
    :param mapping_rules_labels_t: mapping of rules to labels, binary encoded. Shape: rules x classes
    :param model_input_x: feature input to the classifier; filtered alongside the labels
    :param use_probabilistic_labels: whether the output labels should be probability distributions
        or single values.
    :param filter_non_labelled: whether samples without any rule match should be filtered out.
    :param probability_threshold: if set, samples whose best class probability is below it are filtered.
    :param other_class_id: the id of the other class, i.e. the class of unmatched samples, if they are kept.
    :param multi_label: whether the classification is multi-label
    :param multi_label_threshold: in the multi-label case, classes with probability above this threshold
        are selected as true ones
    :return: (possibly filtered model input, noisy labels, possibly filtered rule matches)
    """
    # Validate mutually exclusive options up front (fail fast, before any computation).
    if other_class_id is not None and filter_non_labelled:
        raise ValueError("You can either filter samples with no weak labels or add them to the other class.")
    if filter_non_labelled and probability_threshold is not None:
        raise ValueError("You can either filter all non labeled samples or those that have probabilities below "
                         "some threshold.")
    if multi_label:
        logger.info("The multi-label scenario demands the sigmoid normalization")
        normalization = "sigmoid"
    else:
        logger.info("The softmax normalization will be used")
        normalization = "softmax"
    noisy_y_train = z_t_matrices_to_majority_vote_probs(
        rule_matches_z,
        mapping_rules_labels_t,
        other_class_id,
        normalization=normalization
    )
    # filter out samples where no pattern matched
    if filter_non_labelled:
        model_input_x, noisy_y_train, rule_matches_z = filter_empty_probabilities(
            model_input_x, noisy_y_train, rule_matches_z
        )
    # filter out samples that have probabilities below the threshold
    elif probability_threshold is not None:
        # BUG FIX: rule_matches_z is now filtered together with the input, so all
        # three returned arrays stay row-aligned (it was previously returned unfiltered).
        model_input_x, noisy_y_train, rule_matches_z = filter_probability_threshold(
            model_input_x, noisy_y_train, rule_matches_z, probability_threshold=probability_threshold
        )
    if not use_probabilistic_labels:
        # convert labels represented as a prob distribution to single/binary labels.
        # BUG FIX: choose_random_label is only enabled when no other_class_id is given;
        # passing both made probabilities_to_majority_vote raise a ValueError.
        if multi_label:
            kwargs = {
                "choose_random_label": other_class_id is None,
                "other_class_id": other_class_id,
                "threshold": multi_label_threshold,
            }
            noisy_y_train = np.apply_along_axis(
                probabilities_to_binary_multi_labels, axis=1, arr=noisy_y_train, **kwargs
            )
        else:
            kwargs = {"choose_random_label": other_class_id is None, "other_class_id": other_class_id}
            noisy_y_train = np.apply_along_axis(
                probabilities_to_majority_vote, axis=1, arr=noisy_y_train, **kwargs
            )
    return model_input_x, noisy_y_train, rule_matches_z
def probabilities_to_binary_multi_labels(
        probs: np.ndarray, choose_random_label: bool = True, other_class_id: int = None, threshold: float = 0.5
) -> np.ndarray:
    """Binarize a probability vector for multi-label classification.

    Every class with probability >= ``threshold`` is set to 1, the rest to 0.
    If no class passes the threshold, either one random class is activated or
    the configured "other" class is used.

    Args:
        probs: Vector of probabilities for 1 sample. Shape: classes x 1
        choose_random_label: Choose a random label, if no class passes the threshold.
        other_class_id: Class ID being used, if no class passes the threshold.
        threshold: a class with probability >= threshold is selected as a true one
    Returns: Binary label vector of the same shape as ``probs`` (modified in place).
    """
    probs[probs >= threshold] = 1
    probs[probs < threshold] = 0
    if np.all(probs == 0):
        # BUG FIX: "probs" is a 1-D row here (np.apply_along_axis passes single
        # rows), so the previous 2-D indexing probs[:, idx] raised an IndexError.
        if choose_random_label:
            probs[random.randrange(probs.shape[0])] = 1
        elif other_class_id is not None:
            probs[other_class_id] = 1
    return probs
| 10,110 | 46.027907 | 125 | py |
knodle-develop | knodle-develop/knodle/transformation/torch_input.py | import numpy as np
import torch
from torch.utils.data import TensorDataset
def input_labels_to_tensordataset(model_input_x: TensorDataset, labels: np.ndarray) -> TensorDataset:
    """
    Bundle the feature tensors of "model_input_x" together with "labels"
    (converted to a tensor) into a single TensorDataset.
    """
    return TensorDataset(*model_input_x.tensors, torch.from_numpy(labels))
def input_info_labels_to_tensordataset(
        model_input_x: TensorDataset, input_info: np.ndarray, labels: np.ndarray
) -> TensorDataset:
    """
    Combine the feature tensors with auxiliary per-sample information (e.g.
    sample weights or indices) and the labels into one TensorDataset.
    """
    extras = (torch.from_numpy(input_info), torch.from_numpy(labels))
    return TensorDataset(*model_input_x.tensors, *extras)
def dataset_to_numpy_input(model_input_x: TensorDataset) -> np.ndarray:
    """Convert a single-tensor TensorDataset to a NumPy array.

    Raises:
        ValueError: if the dataset holds more than one feature tensor.
    """
    if len(model_input_x.tensors) != 1:
        # BUG FIX: the message previously reported len(tensors) + 1, overstating
        # the actual number of input tensors by one.
        raise ValueError(f"Selected denoising method accepts input features encoded with one tensor only, while "
                         f"{len(model_input_x.tensors)} input tensors were given. Please use another input "
                         f"encoding or another denoising method.")
    return model_input_x.tensors[0].detach().cpu().numpy()
FantasticNetworksNoData | FantasticNetworksNoData-main/train.py | from __future__ import print_function
import os
import json
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
import datetime
from datetime import datetime
import numpy as np
import copy
from torchvision import datasets, transforms, models
from models.resnet import *
from loss_utils import *
from utils import *
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
#Foolbox required
from foolbox import PyTorchModel, accuracy, samples
from foolbox.attacks import L2DeepFoolAttack
# Command-line hyperparameters.
# BUG FIX: --log-interval and --test-batch-size are added because train() reads
# args.log_interval and main() reads args.test_batch_size, which previously
# crashed with AttributeError since neither argument was ever defined.
parser = argparse.ArgumentParser(description='PyTorch CIFAR + proximity training')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
                    help='input batch size for testing (default: 128)')
parser.add_argument('--weight-decay', '--wd', default=5e-4,
                    type=float, metavar='W')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='SGD momentum')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--model',default="ResNet18",
                    help='network to use')
parser.add_argument('--dataset', default="CIFAR10",
                    help='which dataset to use, CIFAR10, CIFAR100, TINYIN')
parser.add_argument('--anneal', default="cosine",
                    help='type of LR schedule stairstep, cosine, or cyclic')
parser.add_argument('--grad-clip', default = 1, type=int,
                    help='clip model weight gradients to 0.5')
parser.add_argument('--runs', default=5, type=int,
                    help='number of random intializations of prototypes')
parser.add_argument('--image-step', default=0.1, type=float,
                    help='learning rate for prototype image update')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
                    help='base learning rate')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
                    help='number of epochs to train')
parser.add_argument('--channel-norm', type=int, default=0,
                    help='whether to use specific whitening transforms per channel')
args = parser.parse_args()
# Extra keyword arguments filled in later (e.g. num_classes per dataset).
kwargsUser = {}
# settings
# Only ResNet18 is recognized; any other value just prints a warning here
# (execution continues -- NOTE(review): confirm whether this should abort instead).
if (args.model == "ResNet18"):
    network_string = "ResNet18"
else:
    print ("Invalid model architecture")
def get_datetime():
    """Return the current local time formatted as 'MMDD_HH_MM_SS'."""
    return datetime.now().strftime("%m%d_%H_%M_%S")
# Append the full run configuration to a log file for reproducibility.
with open('commandline_args.txt', 'a') as f:
    json.dump(args.__dict__, f, indent=2)
    f.close()  # redundant inside "with"; the context manager closes the file
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
# DataLoader keyword arguments; worker processes only when running on GPU.
kwargs = {'num_workers': 4, 'pin_memory': False} if use_cuda else {}
torch.cuda.empty_cache()
print ("cuda: ", use_cuda)
def train_image_nodata(args, model, device, epoch, par_images, targets, iterations=200, transformDict={}, **kwargs):
    """Optimize the prototype images "par_images" in-place so the (frozen) model
    classifies them as "targets", via L2-normalized pixel-gradient descent.
    Returns the per-sample loss tensor from the last iteration.
    NOTE(review): transformDict={} is a mutable default argument -- safe only if
    callers always pass it explicitly; confirm.
    """
    model.multi_out=1
    model.eval()
    image_lr = args.image_step
    for batch_idx in range(iterations):
        # Re-attach the prototype pixels to the autograd graph each step.
        _par_images_opt = par_images.clone().detach().requires_grad_(True).to(device)
        _par_images_opt_norm = transformDict['norm'](_par_images_opt)
        L2_img, logits_img = model(_par_images_opt_norm)
        loss = F.cross_entropy(logits_img, targets, reduction='none')
        # Per-sample backward: seed each loss element with gradient 1.
        loss.backward(gradient=torch.ones_like(loss))
        with torch.no_grad():
            gradients_unscaled = _par_images_opt.grad.clone()
            # L2-normalize each image's gradient so every image takes a fixed-size step.
            grad_mag = gradients_unscaled.view(gradients_unscaled.shape[0], -1).norm(2, dim=-1)
            image_gradients = image_lr*gradients_unscaled / grad_mag.view(-1, 1, 1, 1)
            if (torch.mean(loss)>1e-7):
                # Descend on the loss and keep pixels in the valid [0, 1] range.
                par_images.add_(-image_gradients)
                par_images.clamp_(0.0,1.0)
            _par_images_opt.grad.zero_()
    model.multi_out=0
    return loss
def train(args, model, device, cur_loader, optimizer, epoch, scheduler=0.0, max_steps = 0, transformDict={}, **kwargs):
    """One epoch of standard cross-entropy training.
    NOTE(review): scheduler=0.0 is a numeric placeholder -- it must be a real LR
    scheduler whenever args.anneal == "cosine". transformDict={} is a mutable
    default argument; callers are expected to pass it explicitly.
    """
    print ('Training model')
    for batch_idx, (data, target) in enumerate(cur_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        data_norm = transformDict['norm'](data)
        Z = model(data_norm)
        loss = F.cross_entropy(Z, target)
        loss.backward()
        if (args.grad_clip):
            # Clip parameter gradients element-wise to [-0.5, 0.5].
            nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
        optimizer.step()
        if args.anneal == "cosine":
            # Cosine schedule advances per batch, capped at max_steps steps.
            if batch_idx < max_steps:
                scheduler.step()
        # print progress
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(cur_loader.dataset),
                100. * batch_idx / len(cur_loader), loss.item()))
def eval_train(args, model, device, train_loader, transformDict):
    """Evaluate "model" on the training set.
    Returns:
        (average cross-entropy loss, accuracy) over the whole dataset.
    """
    model.eval()
    train_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            data = transformDict['norm'](data)
            output = model(data)
            # BUG FIX: reduction='sum' replaces size_average=False, which was
            # deprecated and then removed from torch.nn.functional.cross_entropy.
            train_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    train_loss /= len(train_loader.dataset)
    print('Training: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        train_loss, correct, len(train_loader.dataset),
        100. * correct / len(train_loader.dataset)))
    training_accuracy = correct / len(train_loader.dataset)
    return train_loss, training_accuracy
def eval_test(args, model, device, test_loader, transformDict):
    """Evaluate "model" on the test set.
    Returns:
        (average cross-entropy loss, accuracy) over the whole dataset.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            data = transformDict['norm'](data)
            output = model(data)
            # BUG FIX: reduction='sum' replaces the deprecated/removed
            # size_average=False alias of torch.nn.functional.cross_entropy.
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    test_accuracy = correct / len(test_loader.dataset)
    return test_loss, test_accuracy
def adjust_learning_rate(optimizer, epoch):
    """decrease the learning rate"""
    # Stair-step schedule: 10x drop at half of training, 100x at three quarters.
    if epoch >= (0.75 * args.epochs):
        new_lr = args.lr * 0.01
    elif epoch >= (0.5 * args.epochs):
        new_lr = args.lr * 0.1
    else:
        new_lr = args.lr
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def main():
    """Run the full data-scaling experiment.

    Trains a single model on a sequence of stratified subsets of the
    training data (25% ... 100%), evaluating after every epoch.  After each
    stage the model is probed without data: class prototypes are optimized
    from random noise, and two data-free metrics are reported -- Mg (mean
    pairwise latent dissimilarity of the prototypes) and Madv (latent shift
    of the prototypes under an L2 DeepFool attack).

    Relies on module-level `args`, `device`, `kwargs`, `kwargsUser`,
    `network_string`, `train`, `train_image_nodata` and the model factories
    (`ResNet18`, `ResNet18Tiny`).
    """
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    # Normalization constants: 0.5/0.5 unless per-channel dataset
    # statistics are requested via --channel_norm.
    MEAN = [0.5] * 3
    STD = [0.5] * 3
    if args.dataset == "CIFAR10":
        if args.channel_norm:
            MEAN = [0.4914, 0.4822, 0.4465]
            STD = [0.2471, 0.2435, 0.2616]
    elif args.dataset == "CIFAR100":
        if args.channel_norm:
            MEAN = [0.5071, 0.4865, 0.4409]
            STD = [0.2673, 0.2564, 0.2762]
    elif args.dataset == "TINYIN":
        if args.channel_norm:
            MEAN = [0.4802, 0.4481, 0.3975]
            STD = [0.2302, 0.2265, 0.2262]
    else:
        print("ERROR dataset not found")
    # Train-time augmentation; evaluation only converts to tensors.
    if args.dataset in ["CIFAR10", "CIFAR100"]:
        train_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.RandomHorizontalFlip(),
             transforms.RandomCrop(32, padding=4)])
        gen_transform_test = transforms.Compose(
            [transforms.ToTensor()])
    elif args.dataset in ["TINYIN"]:
        train_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.RandomHorizontalFlip(),
             transforms.RandomCrop(64, padding=4)])
        gen_transform_test = transforms.Compose(
            [transforms.ToTensor()])
    else:
        print("ERROR setting transforms")
    # Datasets plus per-dataset bookkeeping (class count, channels, size).
    if args.dataset == "CIFAR10":
        trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=train_transform)
        testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=gen_transform_test)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        kwargsUser['num_classes'] = 10
        nclass = 10
        nchannels = 3
        H, W = 32, 32
    elif args.dataset == "CIFAR100":
        trainset = torchvision.datasets.CIFAR100(root='../data', train=True, download=True, transform=train_transform)
        testset = torchvision.datasets.CIFAR100(root='../data', train=False, download=True, transform=gen_transform_test)
        kwargsUser['num_classes'] = 100
        nclass = 100
        nchannels = 3
        H, W = 32, 32
    elif args.dataset == "TINYIN":
        trainset = datasets.ImageFolder(
            './tiny-imagenet-200/train',
            transform=train_transform)
        testset = datasets.ImageFolder(
            './tiny-imagenet-200/val/images',
            transform=gen_transform_test)
        kwargsUser['num_classes'] = 200
        nclass = 200
        nchannels = 3
        H, W = 64, 64
    else:
        print("Error getting dataset")
    transformDict = {}
    transformDict['norm'] = transforms.Compose([transforms.Normalize(MEAN, STD)])
    # Stratified subsets of growing size.  train_test_split's *test* fold
    # is the part we keep, so test_size is the kept fraction.  The six
    # copy-pasted calls are collapsed into one loop (same seed, same sizes).
    splits = []
    all_inds = np.arange(len(trainset.targets))
    for keep_frac in [0.25, 0.4, 0.6, 0.7, 0.8, 0.9]:
        _, keep_inds, _, _ = train_test_split(
            all_inds, trainset.targets, test_size=keep_frac,
            random_state=42, stratify=trainset.targets)
        splits.append(keep_inds)
    # add 100% training
    splits.append(all_inds)
    for j in range(len(splits)):
        subtrain = torch.utils.data.Subset(trainset, splits[j])
        print(len(subtrain))
        print('------------training no---------{}----------------------'.format(j))
        cur_loader = torch.utils.data.DataLoader(subtrain, batch_size=args.batch_size, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, **kwargs)
        if j == 0:
            # Build the model once; it keeps training across the
            # progressively larger splits.
            if args.model == "ResNet18":
                if args.dataset in ["CIFAR10", "CIFAR100"]:
                    # BUG FIX: these assignments used the name `model`,
                    # leaving `cur_model` undefined (NameError below).
                    cur_model = ResNet18(nclass=nclass, scale=1.0, channels=nchannels, **kwargsUser).to(device)
                elif args.dataset in ["TINYIN"]:
                    cur_model = ResNet18Tiny(nclass=nclass, scale=1.0, channels=nchannels, **kwargsUser).to(device)
                else:
                    print("Error matching model to dataset")
            else:
                print("Invalid model architecture")
            with torch.no_grad():
                # One batch of random "prototype" images per run; each is
                # later optimized towards its class (data-free probing).
                prototype_batches = []
                targets_onehot = torch.arange(nclass, dtype=torch.long, device=device)
                total_runs = args.runs
                for tr in range(total_runs):
                    par_images_glob = torch.rand([nclass, nchannels, H, W], dtype=torch.float, device=device)
                    par_images_glob.clamp_(0.0, 1.0)
                    prototype_batches.append(par_images_glob.clone().detach())
        if args.anneal in ["stairstep", "cosine"]:
            lr_i = args.lr
        else:
            print("Error setting learning rate")
        optimizer = optim.SGD(cur_model.parameters(), lr=lr_i, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
        scheduler = 0.0
        steps_per_epoch = int(np.ceil(len(cur_loader.dataset) / args.batch_size))
        if args.anneal == "stairstep":
            # Handled per-epoch by adjust_learning_rate().
            pass
        elif args.anneal == "cosine":
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs*len(cur_loader), eta_min=0.0000001, last_epoch=-1, verbose=False)
        else:
            print("ERROR making scheduler")
        # start training
        for epoch in range(1, args.epochs + 1):
            if args.anneal == "stairstep":
                adjust_learning_rate(optimizer, epoch)
            cur_model.multi_out = 0
            cur_model.train()
            train(args, cur_model, device, cur_loader, optimizer, epoch, scheduler=scheduler, max_steps=steps_per_epoch, transformDict=transformDict, **kwargsUser)
            cur_model.eval()
            print('================================================================')
            loss_train, acc_train = eval_train(args, cur_model, device, cur_loader, transformDict)
            loss_test, acc_test = eval_test(args, cur_model, device, test_loader, transformDict)
            print('================================================================')
            if epoch == args.epochs:
                torch.save(cur_model.state_dict(), 'model-{}-epoch{}-training{}.pt'.format(network_string, epoch, j))
        # Data-independent assessment of the latest model.
        cur_model.eval()
        last_losses = []
        for run in range(total_runs):
            # Optimize this run's prototypes towards their classes; keep
            # the final mean cross-entropy as a convergence measure.
            last_loss = train_image_nodata(args, cur_model, device, epoch, prototype_batches[run], targets=targets_onehot, iterations=250, transformDict=transformDict, **kwargsUser)
            last_losses.append(torch.mean(last_loss).clone())
        cos_matrix_means = []
        df_latent_means = []
        for proto in prototype_batches:
            # Mg: mean pairwise latent dissimilarity of the prototypes.
            cur_model.multi_out = 1
            proto_image_norm = transformDict['norm'](proto)
            latent_p, logits_p = cur_model(proto_image_norm)
            cos_mat_latent_temp = torch.zeros(nclass, nclass, dtype=torch.float)
            cos_sim = nn.CosineSimilarity(dim=0, eps=1e-6)
            for i in range(len(latent_p)):
                for q in range(len(latent_p)):
                    if i != q:
                        cos_mat_latent_temp[i, q] = cos_sim(latent_p[i].view(-1), latent_p[q].view(-1))
            cos_matrix_means.append(1.0 - torch.mean(cos_mat_latent_temp))
            # Madv: latent shift of the prototypes under a DeepFool attack.
            cur_model.eval()
            cur_model.multi_out = 0
            attack = L2DeepFoolAttack(overshoot=0.02)
            preprocessing = dict(mean=MEAN, std=STD, axis=-3)
            fmodel = PyTorchModel(cur_model, bounds=(0, 1), preprocessing=preprocessing)
            raw, X_new_torch, is_adv = attack(fmodel, proto, targets_onehot, epsilons=100)
            cur_model.multi_out = 1
            with torch.no_grad():
                X_new_torch_norm = transformDict['norm'](X_new_torch)
                latent_p_adv, logits_p_adv = cur_model(X_new_torch_norm)
                CS_df_latent = F.cosine_similarity(latent_p_adv.view(nclass, -1), latent_p.view(nclass, -1))
                df_latent_means.append(1.0 - torch.mean(CS_df_latent))
        print("Run Summary\n")
        print('================================================================\n')
        print("Train Accuracy: {0:4.3f}\n".format(acc_train))
        print("Test Accuracy: {0:4.3f}\n".format(acc_test))
        loss_mean = torch.mean(torch.stack(last_losses, dim=0), dim=0)
        print("Mean Prototype Xent Loss: {0:4.6f}\n".format(loss_mean.item()))
        cos_mat_std, cos_mat_moms = torch.std_mean(torch.stack(cos_matrix_means, dim=0), dim=0)
        print("Mg Mean: {0:4.3f}".format(cos_mat_moms.item()))
        print("Mg Mean Std: {0:4.3f}".format(cos_mat_std.item()))
        df_std, df_moms = torch.std_mean(torch.stack(df_latent_means, dim=0), dim=0)
        print("Madv Mean: {0:4.3f}".format(df_moms.item()))
        print("Madv Mean Std: {0:4.3f}".format(df_std.item()))
# Script entry point: run the full training / data-free probing sweep.
if __name__ == '__main__':
    main()
| 18,369 | 35.161417 | 180 | py |
FantasticNetworksNoData | FantasticNetworksNoData-main/models/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import linalg as LA
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1, 3x3 kernels)."""
    expansion = 1

    def __init__(self, in_planes, planes, HW, stride=1):
        super(BasicBlock, self).__init__()
        # Main path: two 3x3 convolutions, each followed by batch norm.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the spatial size or channel count
        # changes; then a 1x1 projection matches the dimensions.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class ResNet(nn.Module):
    """ResNet backbone for 32x32 inputs (CIFAR-style 3x3 stem).

    `multi_out` toggles the return value: 0 -> logits only,
    1 -> (pooled latent vector, logits).
    """

    def __init__(self, block, num_blocks, nclass=10, scale=1, channels=3, **kwargs):
        super(ResNet, self).__init__()
        self.in_planes = int(64 * scale)
        self.channels = channels
        # 3x3 stride-1 stem keeps the full 32x32 resolution.
        self.conv1 = nn.Conv2d(self.channels, int(64 * scale), kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(int(64 * scale))
        self.layer1 = self._make_layer(block, int(64 * scale), num_blocks[0], HW=32, stride=1)
        self.layer2 = self._make_layer(block, int(128 * scale), num_blocks[1], HW=16, stride=2)
        self.layer3 = self._make_layer(block, int(256 * scale), num_blocks[2], HW=8, stride=2)
        self.layer4 = self._make_layer(block, int(512 * scale), num_blocks[3], HW=4, stride=2)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(int(512 * scale) * block.expansion, nclass)
        self.multi_out = 0

    def _make_layer(self, block, planes, num_blocks, HW, stride):
        # Only the first block of a stage may downsample.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, HW, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        p = self.gap(h).view(h.size(0), -1)
        logits = self.linear(p)
        return (p, logits) if self.multi_out else logits
class ResNetTiny(nn.Module):
    """ResNet backbone for 64x64 inputs (ImageNet-style 7x7/stride-2 stem).

    `multi_out` toggles the return value: 0 -> logits only,
    1 -> (pooled latent vector, logits).
    """

    def __init__(self, block, num_blocks, nclass=10, scale=1, channels=3, **kwargs):
        super(ResNetTiny, self).__init__()
        self.in_planes = int(64 * scale)
        self.channels = channels
        # 7x7 stride-2 stem halves the 64x64 input to 32x32.
        self.conv1 = nn.Conv2d(self.channels, int(64 * scale), kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(int(64 * scale))
        self.layer1 = self._make_layer(block, int(64 * scale), num_blocks[0], HW=32, stride=1)
        self.layer2 = self._make_layer(block, int(128 * scale), num_blocks[1], HW=16, stride=2)
        self.layer3 = self._make_layer(block, int(256 * scale), num_blocks[2], HW=8, stride=2)
        self.layer4 = self._make_layer(block, int(512 * scale), num_blocks[3], HW=4, stride=2)
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(int(512 * scale) * block.expansion, nclass)
        self.multi_out = 0

    def _make_layer(self, block, planes, num_blocks, HW, stride):
        # Only the first block of a stage may downsample.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, HW, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        p = self.gap(h).view(h.size(0), -1)
        logits = self.linear(p)
        return (p, logits) if self.multi_out else logits
#Model definitions
def ResNet18(nclass, scale, channels, **kwargs):
    """Build an 18-layer ResNet: four stages of two BasicBlocks each."""
    stage_depths = [2, 2, 2, 2]
    return ResNet(BasicBlock, stage_depths, nclass, scale, channels, **kwargs)
def ResNet18Tiny(nclass, scale, channels, **kwargs):
    """Build an 18-layer ResNet for Tiny-ImageNet-sized (64x64) inputs."""
    stage_depths = [2, 2, 2, 2]
    return ResNetTiny(BasicBlock, stage_depths, nclass, scale, channels, **kwargs)
def set_bn_eval(m):
    """`model.apply` hook: switch any batch-norm module into eval mode."""
    is_bn = isinstance(m, nn.modules.batchnorm._BatchNorm)
    if is_bn:
        m.eval()
def set_bn_train(m):
    """`model.apply` hook: switch any batch-norm module into train mode."""
    is_bn = isinstance(m, nn.modules.batchnorm._BatchNorm)
    if is_bn:
        m.train()
| 5,019 | 32.691275 | 110 | py |
brain_sas_baseline | brain_sas_baseline-main/baseline.py | import argparse
from multiprocessing import Pool
import os
from time import time
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import Dataset
from utils.data_utils import DatasetHandler, load_mr_scan
from utils import evaluation, utils
class DataPreloader(Dataset):
    """Eagerly loads a list of MR scans (and their segmentations) into RAM.

    Loading is parallelized over CPU processes with multiprocessing.

    Attributes:
        samples: list of tensors, one per scan, shape [1, slices, h, w].
        segmentations: parallel list of segmentation tensors.
        brain_masks: reserved; currently always empty.
    """

    def __init__(self, paths, img_size, slices_lower_upper):
        super().__init__()
        self.samples, self.segmentations, self.brain_masks = [], [], []
        self.load_to_ram(paths, img_size, slices_lower_upper)

    def __len__(self):
        return len(self.samples)

    @staticmethod
    def load_batch(paths, img_size, slices_lower_upper):
        """Worker: load one batch of paths; returns a dict of lists."""
        samples = []
        segmentations = []
        for p in paths:
            # Samples are shape [1, slices, height, width]
            sample, segmentation = load_mr_scan(
                p, img_size, equalize=True,
                slices_lower_upper=slices_lower_upper
            )
            samples.append(sample)
            segmentations.append(segmentation)
        return {
            'samples': samples,
            'segmentations': segmentations,
        }

    def load_to_ram(self, paths, img_size, slices_lower_upper):
        """Load all `paths` in parallel and store the results."""
        # Leave a few cores free, but never request fewer than one process:
        # os.cpu_count() - 4 is <= 0 on small machines and would make
        # multiprocessing.Pool raise a ValueError.
        num_cpus = max(1, (os.cpu_count() or 1) - 4)
        # Split list into batches
        batches = [list(p) for p in np.array_split(
            paths, num_cpus) if len(p) > 0]
        # Start multiprocessing
        with Pool(processes=num_cpus) as pool:
            temp = pool.starmap(
                self.load_batch,
                zip(batches, [img_size for _ in batches], [slices_lower_upper for _ in batches])
            )
        # Collect results
        self.samples = [s for t in temp for s in t['samples']]
        self.segmentations = [s for t in temp for s in t['segmentations']]

    def __getitem__(self, idx):
        return self.samples[idx], self.segmentations[idx]
# Script entry point: load a test dataset, treat the (equalized) image
# intensities themselves as the anomaly map, evaluate against the ground
# truth segmentations, and save a qualitative comparison figure.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Data params
    parser.add_argument("--img_size", type=int, default=128,
                        help="Image resolution")
    parser.add_argument("--test_ds", type=str, required=True,
                        choices=["BraTS", "MSLUB", "WMH", "MSSEG2015"])
    parser.add_argument("--weighting", type=str, default="FLAIR",
                        choices=["T1", "t1", "T2", "t2", "FLAIR", "flair"])
    parser.add_argument("--test_prop", type=float, default=1.0,
                        help="Fraction of data to evaluate on")
    parser.add_argument("--slices_lower_upper", nargs='+', type=int,
                        default=[15, 125],
                        help="Upper and lower bound for MRI slices. Use "
                             "[15, 125] for experiment 1 and [84, 88] for "
                             "experiment 2")
    # Logging params
    parser.add_argument("--n_images_log", type=int, default=30)
    parser.add_argument("--save_dir", type=str, default="./logs/baseline/")
    args = parser.parse_args()
    # Encode resolution and slice range into the output directory name.
    args.save_dir = f"{args.save_dir}{args.img_size}_" \
                    f"{args.slices_lower_upper[0]}-" \
                    f"{args.slices_lower_upper[1]}/"
    # Get train and test paths
    ds_handler = DatasetHandler()
    paths, _ = ds_handler(args.test_ds, args.weighting)
    # Keep only the last test_prop fraction of the scan paths.
    paths = paths[-int(len(paths) * args.test_prop):]
    # Load data to RAM
    print("Loading data")
    t_data_start = time()
    ds = DataPreloader(paths, args.img_size, args.slices_lower_upper)
    print(f"Finished loading data in {time() - t_data_start:.2f}s, found {len(ds)} samples.")
    # Concatenate all scans along the slice axis; the raw intensities act
    # as the per-voxel anomaly scores for this baseline.
    anomaly_maps = torch.cat(ds.samples, 0)
    segmentations = torch.cat(ds.segmentations, 0)
    auroc, aupr, dice, th = evaluation.evaluate(
        predictions=anomaly_maps,
        targets=segmentations,
        # auroc=False,
        # auprc=False,
        proauc=False,
    )
    # Binarize anomaly_maps
    bin_map = torch.where(anomaly_maps > th, 1., 0.)
    # Connected component filtering
    bin_map = utils.connected_components_3d(bin_map)
    print("Saving some images")
    # Pick the central slice of the kept range for visualization.
    c = (args.slices_lower_upper[1] - args.slices_lower_upper[0]) // 2
    images = [
        anomaly_maps[:, c][:, None],
        bin_map[:, c][:, None],
        segmentations[:, c][:, None]
    ]
    titles = ['Anomaly map', 'Binarized map', 'Ground truth']
    fig = evaluation.plot_results(images, titles, n_images=args.n_images_log)
    os.makedirs(args.save_dir, exist_ok=True)
    plt.savefig(f"{args.save_dir}{args.test_ds}_{args.weighting}_samples.png")
| 4,617 | 34.79845 | 96 | py |
brain_sas_baseline | brain_sas_baseline-main/utils/utils.py | import matplotlib.pyplot as plt
import numpy as np
from skimage.exposure import equalize_hist
from skimage.measure import label, regionprops
import torch
import torch.nn.functional as F
from torchvision.utils import make_grid, save_image
from tqdm import tqdm
""" General utilities """
def torch2np_img(img: torch.Tensor):
    """Convert a CHW float tensor into a uint8 HWC numpy image.

    Args:
        img (torch.Tensor): dtype torch.float32, shape [C, H, W]; values
            are multiplied by 255 before the uint8 cast.

    Returns:
        np.array: dtype np.uint8, shape (H, W, C)
    """
    scaled = img.permute(1, 2, 0).numpy() * 255.
    return scaled.astype(np.uint8)
def plot_img(img):
    """Display a tensor/array of shape [n, c, h, w], [c, h, w] or [h, w]."""
    if not torch.is_tensor(img):
        img = torch.from_numpy(img)
    img = img.detach().cpu()
    # Promote 2D -> 3D -> 4D so make_grid always sees a batch.
    if img.ndim == 2:
        img = img[None]
    if img.ndim == 3:
        img = img[None]
    grid = make_grid(img, normalize=False, scale_each=False)
    plt.imshow(grid.permute(1, 2, 0))
    plt.show()
def save_img(img, f):
    """Save a tensor/array of shape [n, c, h, w], [c, h, w] or [h, w] to
    file `f`, normalizing each image for display.
    """
    if not torch.is_tensor(img):
        img = torch.from_numpy(img)
    img = img.detach().cpu()
    # Promote 2D -> 3D -> 4D so make_grid always sees a batch.
    if img.ndim == 2:
        img = img[None]
    if img.ndim == 3:
        img = img[None]
    grid = make_grid(img, normalize=True, scale_each=True)
    save_image(grid, f)
""" Data normalization and augmentation functions """
class CenterCrop3D:
    """Center crop a volume of shape [channels, slices, height, width] so
    height and width become a square (the shorter side wins).
    """

    @staticmethod
    def __call__(volume):
        _, _, h, w = volume.shape
        if h == w:
            # Already square: nothing to do.
            return volume
        side = min(h, w)
        half = side // 2
        # For odd sides the extra row/column goes to the upper end.
        extra = side - half
        if h < w:
            mid = w // 2
            return volume[:, :, :, mid - half:mid + extra]
        mid = h // 2
        return volume[:, :, mid - half:mid + extra, :]
class ResizeGray:
    """Resample a tensor of shape [c, slices, h, w] (or [c, h, w]) to `size`.

    Arguments mirror torch.nn.functional.interpolate, minus the batch and
    channel dimensions.  The input dtype is preserved only for
    mode='nearest'; other modes return float32.

    Example:
        volume = torch.randn(1, 189, 197, 197)
        out = ResizeGray(size=[189, 120, 120])(volume)
        # out.shape == (1, 189, 120, 120)
    """

    def __init__(self, size, mode='nearest', align_corners=None):
        self.size = size
        self.mode = mode
        self.align_corners = align_corners

    def __call__(self, volume):
        original_dtype = volume.dtype
        resized = F.interpolate(
            volume[None].float(), size=self.size, mode=self.mode,
            align_corners=self.align_corners)[0]
        if self.mode == 'nearest':
            resized = resized.type(original_dtype)
        return resized
def histogram_equalization(img):
    """Histogram-equalize an image/volume, ignoring the zero background.

    Accepts numpy arrays or torch tensors, with or without a leading batch
    dimension; the input container type and shape are restored on return.
    """
    # Take care of torch tensors
    batch_dim = img.ndim == 4
    is_torch = torch.is_tensor(img)
    if batch_dim:
        img = img.squeeze(0)
    if is_torch:
        img = img.numpy()
    # Create equalization mask (foreground only; background stays 0)
    mask = np.zeros_like(img)
    mask[img > 0] = 1
    # Equalize.  BUG FIX: np.long was deprecated in NumPy 1.20 and removed
    # in 1.24; np.int64 is the equivalent dtype.
    img = equalize_hist(img.astype(np.int64), nbins=256, mask=mask)
    # Assure that background still is 0
    img *= mask
    # Take care of torch tensors again
    if is_torch:
        img = torch.Tensor(img)
    if batch_dim:
        img = img.unsqueeze(0)
    return img
""" Others """
def connected_components_3d(volume):
    """Remove connected components with <= 20 voxels from a binary volume.

    Args:
        volume: binary numpy array or torch tensor of shape [b, d, h, w]
            or [d, h, w]; each batch element is filtered independently.

    Returns:
        The filtered volume, in the same container type/shape as the input.
    """
    is_batch = True
    is_torch = torch.is_tensor(volume)
    if is_torch:
        volume = volume.numpy()
    if volume.ndim == 3:
        # BUG FIX: `volume` is a numpy array at this point, which has no
        # `.unsqueeze`; add the batch axis with numpy-style indexing.
        volume = volume[None]
        is_batch = False
    # shape [b, d, h, w], treat every sample in batch independently
    pbar = tqdm(range(len(volume)), desc="Connected components")
    for i in pbar:
        cc_volume = label(volume[i], connectivity=3)
        props = regionprops(cc_volume)
        for prop in props:
            if prop['filled_area'] <= 20:
                volume[i, cc_volume == prop['label']] = 0
    if not is_batch:
        volume = volume[0]
    if is_torch:
        volume = torch.from_numpy(volume)
    return volume
| 4,772 | 28.103659 | 79 | py |
brain_sas_baseline | brain_sas_baseline-main/utils/data_utils.py | from glob import glob
import os
import nibabel as nib
import numpy as np
import torch
from torchvision.datasets.folder import IMG_EXTENSIONS
from warnings import warn
from utils.utils import CenterCrop3D, ResizeGray, histogram_equalization
DATAROOT = str(os.environ.get('DATAROOT'))
DICOM_EXT = ('.dcm', )
NIFTI_EXT = ('.nii', '.nii.gz')
ALL_EXT = IMG_EXTENSIONS + DICOM_EXT + NIFTI_EXT
RSNA_TRAIN_PATHS = None
RSNA_TRAIN_LABELS = None
RSNA_TEST_PATHS = None
RSNA_TEST_LABELS = None
class DatasetHandler:
    """Maps a dataset name to (paths, labels) of anomaly MR scans.

    Usage:
        paths, labels = DatasetHandler()('name of dataset', weighting=...)

    All lookups are rooted at the module-level DATAROOT; every returned
    label is 1 (all scans are anomalous cases).
    """

    def __init__(self):
        pass

    def __call__(self, dataset_name, weighting=None):
        dispatch = {
            'BraTS': self.returnBraTS,
            'MSLUB': self.returnMSLUB,
            'WMH': self.returnWMH,
            'MSSEG2015': self.returnMSSEG,
        }
        return dispatch[dataset_name](weighting=weighting)

    @staticmethod
    def returnBraTS(**kwargs):
        """BraTS 2020 training data, registered to a common space."""
        root = os.path.join(DATAROOT, 'BraTS/MICCAI_BraTS2020_TrainingData')
        pattern = f"{root}/*/*{kwargs['weighting'].lower()}_registered.nii.gz"
        paths = glob(pattern)
        if not paths:
            raise RuntimeWarning(f"No files found for BraTS")
        return paths, [1 for _ in paths]

    @staticmethod
    def returnMSLUB(**kwargs):
        """MSLUB lesion data; T1/T2 weightings carry a 'W' suffix on disk."""
        w = kwargs['weighting']
        if w != 'FLAIR':
            w += 'W'
        root = os.path.join(DATAROOT, 'MSLUB/lesion')
        pattern = f"{root}/*/*{w.upper()}_stripped_registered.nii.gz"
        paths = glob(pattern)
        if not paths:
            raise RuntimeWarning(f"No files found for MSLUB")
        return paths, [1 for _ in paths]

    @staticmethod
    def returnMSSEG(**kwargs):
        """MSSEG 2015 training data; T1 scans are stored as 'mprage'."""
        w = kwargs['weighting']
        if w.lower() == 't1':
            w = 'mprage'
        root = os.path.join(DATAROOT, 'MSSEG2015/training')
        pattern = f"{root}/training*/training*/*{w.lower()}_pp_registered.nii"
        paths = glob(pattern)
        if not paths:
            raise RuntimeWarning(f"No files found for MSSEG2015")
        return paths, [1 for _ in paths]

    @staticmethod
    def returnWMH(**kwargs):
        """White Matter Hyperintensities challenge data."""
        w = kwargs['weighting']
        root = os.path.join(DATAROOT, 'WMH')
        pattern = f"{root}/*/*/orig/{w.upper()}_stripped_registered.nii.gz"
        paths = glob(pattern)
        if not paths:
            raise RuntimeWarning(f"No files found for WMH")
        return paths, [1 for _ in paths]
def load_mr_scan(path: str, img_size: int = None, equalize: bool = False, slices_lower_upper: tuple = None):
    """Load a Nifti MR image plus its anomaly segmentation if available.

    The segmentation is looked up as 'anomaly_segmentation.nii.gz' in the
    scan's folder; when missing, an all-zero map is returned (with a
    warning).

    Args:
        path (str): path to the MR image.
        img_size (int): optional size of the loaded image.
        equalize (bool): apply histogram equalization to the scan.
        slices_lower_upper (tuple): optional lower/upper slice indices.

    Returns:
        sample (torch.tensor): loaded MR image, shape [c, h, d]
        segmentation (torch.tensor): segmentation map, shape [c, h, d]
    """
    sample = nii_loader(path, dtype='float32', size=img_size)
    seg_path = f"{path[:path.rfind('/')]}/anomaly_segmentation.nii.gz"
    if os.path.exists(seg_path):
        segmentation = nii_loader(seg_path, dtype='float32', size=img_size)
        # Binarize at a 0.9 threshold (see paper, Section 3.2
        # Pre-processing).
        segmentation = torch.where(segmentation > 0.9, 1., 0.)
    else:
        segmentation = torch.zeros_like(sample)
        warn(f"No segmentation found at {seg_path}")
    if equalize:
        sample = histogram_equalization(sample)
    # Volumes are [1, slices, height, width]; optionally keep a slice range.
    if slices_lower_upper is not None:
        keep = slice(*slices_lower_upper)
        sample = sample[:, keep]
        segmentation = segmentation[:, keep]
    return sample, segmentation
def nii_loader(path: str, dtype: str = 'float32', size: int = None):
    """Load a neuroimaging file with nibabel.

    https://nipy.org/nibabel/reference/nibabel.html

    Args:
        dtype (str): optional datatype for the returned volume.
        size (int): optional output size for h and w (square output only).

    Returns:
        volume (torch.Tensor): shape [1, slices, h, w]
    """
    nii = nib.load(path, keep_file_open=False)
    volume = nii.get_fdata(
        caching='unchanged', dtype=np.float32).astype(np.dtype(dtype))
    # Drop a trailing singleton dim, then reorder to slices-first with a
    # leading channel axis: [1, slices, h, w].
    if volume.ndim == 4:
        volume = volume.squeeze(-1)
    volume = torch.Tensor(volume).permute(2, 0, 1).unsqueeze(0)
    if size is not None:
        # Square-crop first (a no-op for 240x240 SRI-space data), then
        # resample height and width.
        volume = CenterCrop3D()(volume)
        volume = ResizeGray(size=[volume.shape[1], size, size])(volume)
    return volume
| 5,620 | 31.304598 | 108 | py |
brain_sas_baseline | brain_sas_baseline-main/utils/evaluation.py | import warnings
import matplotlib.pyplot as plt
import numpy as np
from skimage import measure
from sklearn.metrics import auc, precision_recall_curve, roc_auc_score, roc_curve
import torch
from tqdm import tqdm
from torchvision.utils import make_grid
from utils.utils import connected_components_3d, torch2np_img
def plot_roc(fpr, tpr, auroc, title=""):
    """Plot a receiver operating characteristic (ROC) curve.

    Args:
        fpr (array): false positive rate per threshold
        tpr (array): true positive rate per threshold
        auroc (float): area under the ROC curve
        title (str): title of the plot

    Returns:
        fig (matplotlib.figure.Figure): the finished plot
    """
    fig = plt.figure()
    fig.add_subplot(1, 1, 1)
    plt.title(title)
    # ROC curve plus the chance diagonal for reference.
    plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % auroc)
    plt.plot([0, 1], [0, 1], 'r--')
    plt.legend(loc='lower right')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    return fig
def plot_results(images: list, titles: list, n_images: int = 25):
    """Plot several image batches side by side, one titled column each.

    Args:
        images: list of image batches (tensors of shape [b, c, h, w]).
        titles: one title per entry in `images`.
        n_images: how many samples of each batch to stack vertically.

    Returns:
        fig (matplotlib.figure.Figure): the finished plot
    """
    if len(images) != len(titles):
        raise RuntimeError("Not the same number of images and titles")
    # Render each batch into a single tall grid image (numpy, HWC).
    grids = {}
    for img, title in zip(images, titles):
        grid = make_grid(
            img[:n_images].float(), nrow=1, normalize=True, scale_each=True)
        grids[title] = torch2np_img(grid)
    n = len(images)
    fig = plt.figure(figsize=(3 * n, 1 * n_images))
    plt.axis("off")
    for i, (title, grid) in enumerate(grids.items()):
        ax = fig.add_subplot(1, n, i + 1)
        plt.imshow(grid)
        ax.set_title(title)
    return fig
def compute_auroc(predictions, targets):
    """Area under the receiver operating characteristic curve.

    Works for detection (scalar labels) as well as segmentation maps,
    since both tensors are flattened before scoring.

    Args:
        predictions (torch.tensor): predicted anomaly map, shape [b, c, h, w]
        targets (torch.tensor): label [b] or segmentation map [b, c, h, w]

    Returns:
        auroc (float)
    """
    # roc_auc_score is undefined when there is no positive target.
    if targets.sum() == 0:
        warnings.warn("Can't compute auroc with only negative target values, "
                      "returning 0.5")
        return 0.5
    return roc_auc_score(targets.view(-1), predictions.view(-1))
def compute_roc(predictions, targets):
    """Receiver operating characteristic curve over flattened inputs.

    Works for detection (scalar labels) as well as segmentation maps.

    Args:
        predictions (torch.tensor): predicted anomaly map, shape [b, c, h, w]
        targets (torch.tensor): label [b] or segmentation map [b, c, h, w]

    Returns:
        fpr (np.array): false positive rate
        tpr (np.array): true positive rate
        thresholds (np.array)
    """
    flat_targets = targets.view(-1)
    flat_preds = predictions.view(-1)
    return roc_curve(flat_targets, flat_preds)
def compute_best_dice(preds, targets, n_thresh=100):
    """Grid search for the binarization threshold maximizing DICE.

    Args:
        preds (torch.tensor): predicted anomaly map, shape [b, c, h, w]
        targets (torch.tensor): label [b] or segmentation map [b, c, h, w]
        n_thresh (int): number of candidate thresholds to evaluate

    Returns:
        max_dice (float): best dice score (recomputed after
            connected-component filtering at the winning threshold)
        max_thresh (float): threshold achieving that score
    """
    if targets.ndim == 1:
        warnings.warn("Can't compute a meaningful dice score with only"
                      "labels, returning 0.")
        return 0., 0.
    candidates = np.linspace(preds.min(), preds.max(), n_thresh)
    scores = []
    for t in tqdm(candidates, desc="DICE search"):
        scores.append(compute_dice(torch.where(preds > t, 1., 0.), targets))
    scores = torch.stack(scores, 0)
    max_thresh = candidates[int(scores.argmax())]
    # Re-binarize at the winner and drop tiny components before the final
    # score, mirroring the evaluation pipeline.
    bin_preds = connected_components_3d(
        torch.where(preds > max_thresh, 1., 0.))
    max_dice = compute_dice(bin_preds, targets)
    return max_dice, max_thresh
def compute_dice_fpr(preds, targets, max_fprs=[0.01, 0.05, 0.1]):
    """DICE scores at thresholds chosen to cap the false positive rate.

    For each cap in `max_fprs` the lowest threshold whose fpr stays below
    the cap is selected, predictions are binarized and filtered for small
    connected components, and the resulting DICE score is collected.
    """
    fprs, _, thresholds = compute_roc(preds, targets)
    dices = []
    for max_fpr in max_fprs:
        # thresholds are ordered by decreasing value, so the last entry
        # below the fpr cap is the loosest admissible threshold.
        th = thresholds[fprs < max_fpr][-1]
        binarized = torch.where(preds > th, 1., 0.)
        binarized = connected_components_3d(binarized)
        dice = compute_dice(binarized, targets)
        dices.append(dice)
        print(f"DICE{int(max_fpr * 100)}: {dice:.4f}, threshold: {th:.4f}")
    return dices
def compute_dice(predictions, targets) -> float:
    """DICE score between two binary tensors of identical size.

    Both inputs must contain only 0/1 values; otherwise a RuntimeError is
    raised.  Everything is flattened, so any matching layout works.

    Args:
        predictions (torch.tensor): binary anomaly map, shape [b, c, h, w]
        targets (torch.tensor): label [b] or segmentation map [b, c, h, w]

    Returns:
        dice (float)
    """
    if (predictions - predictions.int()).sum() > 0.:
        raise RuntimeError("predictions for DICE score must be binary")
    if (targets - targets.int()).sum() > 0.:
        raise RuntimeError("targets for DICE score must be binary")
    flat_pred = predictions.view(-1).float()
    flat_targ = targets.view(-1).float()
    overlap = flat_pred @ flat_targ
    return (2 * overlap) / (flat_pred.sum() + flat_targ.sum())
def compute_pro_auc(predictions, targets, expect_fpr=0.3, max_steps=300):
    """Compute the per-region overlap (PRO) AUC and the best mean IOU.

    Sweeps ``max_steps`` thresholds from the highest to the lowest prediction
    score. At every threshold the predictions are binarized and compared to
    the ground truth, measuring (a) the overlap with each connected
    ground-truth region (PRO) and (b) the per-image IOU. The mean PRO values
    are integrated over the false-positive-rate axis up to ``expect_fpr``.

    Code adapted from:
    https://github.com/YoungGod/DFR/blob/master/DFR-source/anoseg_dfr.py

    Args:
        predictions: anomaly score maps, shape [b, 1, h, w] (tensor or array).
        targets: segmentation maps, shape [b, 1, h, w]; binarized at 0.5.
            If only per-sample labels are given (``targets.ndim == 1``) the
            score is undefined and 0. is returned.
        expect_fpr (float): FPR cutoff used for the AUC integration.
        max_steps (int): number of thresholds to evaluate.

    Returns:
        Tuple ``(pro_auc_score, best_miou)``, or ``0.`` when targets are
        labels only.
    """
    if targets.ndim == 1:
        warnings.warn("Can't compute a meaningful pro score with only "
                      "labels, returning 0.")
        return 0.
    def rescale(x):
        # Min-max normalize to [0, 1].
        return (x - x.min()) / (x.max() - x.min())
    if torch.is_tensor(predictions):
        predictions = predictions.numpy()
    if torch.is_tensor(targets):
        targets = targets.numpy()
    # Squeeze away channel dimension
    predictions = predictions.squeeze(1)
    targets = targets.squeeze(1)
    # Binarize target segmentations
    targets[targets <= 0.5] = 0
    targets[targets > 0.5] = 1
    # np.bool was removed in NumPy 1.24; the builtin bool is the documented
    # replacement and behaves identically here.
    targets = targets.astype(bool)
    # Maximum and minimum thresholds of the sweep
    max_th = predictions.max()
    min_th = predictions.min()
    delta = (max_th - min_th) / max_steps
    ious_mean = []
    ious_std = []
    pros_mean = []
    pros_std = []
    threds = []
    fprs = []
    binary_score_maps = np.zeros_like(predictions, dtype=bool)
    for step in tqdm(range(max_steps), desc="PRO AUC"):
        thred = max_th - step * delta
        # segmentation
        binary_score_maps[predictions <= thred] = 0
        binary_score_maps[predictions > thred] = 1
        # Connected component analysis
        # binary_score_maps = connected_components_3d(binary_score_maps)
        pro = []  # per region overlap
        iou = []  # per image iou
        # pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region
        # iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map
        for i in range(len(binary_score_maps)):  # for i th image
            # pro (per region level)
            label_map = measure.label(targets[i], connectivity=2)
            props = measure.regionprops(label_map)
            for prop in props:
                # find the bounding box of an anomaly region
                x_min, y_min, x_max, y_max = prop.bbox
                cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max]
                # cropped_mask = targets[i][x_min:x_max, y_min:y_max]  # bug!
                cropped_targets = prop.filled_image  # corrected!
                intersection = np.logical_and(
                    cropped_pred_label, cropped_targets).astype(np.float32).sum()
                pro.append(intersection / prop.area)
            # iou (per image level)
            intersection = np.logical_and(
                binary_score_maps[i], targets[i]).astype(np.float32).sum()
            union = np.logical_or(
                binary_score_maps[i], targets[i]).astype(np.float32).sum()
            if targets[i].any() > 0:  # when the gt have no anomaly pixels, skip it
                iou.append(intersection / union)
        # against steps and average metrics on the testing data
        ious_mean.append(np.array(iou).mean())
        # print("per image mean iou:", np.array(iou).mean())
        ious_std.append(np.array(iou).std())
        pros_mean.append(np.array(pro).mean())
        pros_std.append(np.array(pro).std())
        # fpr for pro-auc
        targets_neg = ~targets
        fpr = np.logical_and(
            targets_neg, binary_score_maps).sum() / targets_neg.sum()
        fprs.append(fpr)
        threds.append(thred)
    # as array
    threds = np.array(threds)
    pros_mean = np.array(pros_mean)
    pros_std = np.array(pros_std)
    fprs = np.array(fprs)
    ious_mean = np.array(ious_mean)
    ious_std = np.array(ious_std)
    # best per image iou
    best_miou = ious_mean.max()
    print(f"Best IOU: {best_miou:.4f}")
    # default 30% fpr vs pro, pro_auc
    # find the indexs of fprs that is less than expect_fpr (default 0.3)
    idx = fprs <= expect_fpr
    fprs_selected = fprs[idx]
    fprs_selected = rescale(fprs_selected)  # rescale fpr [0,0.3] -> [0, 1]
    pros_mean_selected = pros_mean[idx]
    pro_auc_score = auc(fprs_selected, pros_mean_selected)
    print(f"pro auc ({int(expect_fpr*100)}% FPR): {pro_auc_score:.4f}")
    return pro_auc_score, best_miou
def compute_aupr(predictions, targets):
    """Compute the area under the precision-recall curve.

    Args:
        predictions (torch.tensor): Anomaly scores
        targets (torch.tensor): Segmentation map, must be binary

    Returns:
        Area under the PR curve computed on the flattened inputs.
    """
    flat_labels = targets.view(-1)
    flat_scores = predictions.view(-1)
    precision, recall, _ = precision_recall_curve(flat_labels, flat_scores)
    return auc(recall, precision)
def evaluate(predictions, targets, auroc=True, dice=True, auprc=True, proauc=True, n_thresh_dice=100):
    """Run the selected segmentation metrics and print a short report.

    Args:
        predictions: anomaly score maps.
        targets: binary ground-truth maps.
        auroc, dice, auprc, proauc (bool): which metrics to compute.
        n_thresh_dice (int): number of thresholds tried in the DICE search.

    Returns:
        Tuple ``(auroc, auprc, dice, best_dice_threshold)``; disabled metrics
        come back as 0.0 and the threshold as None when DICE is skipped.
    """
    auroc_score = 0.0
    if auroc:
        auroc_score = compute_auroc(predictions, targets)
        print(f"AUROC: {auroc_score:.4f}")
    auprc_score = 0.0
    if auprc:
        auprc_score = compute_aupr(predictions, targets)
        print(f"AUPRC: {auprc_score:.4f}")
    dice_score, best_th = 0.0, None
    if dice:
        dice_score, best_th = compute_best_dice(predictions, targets, n_thresh=n_thresh_dice)
        print(f"DICE: {dice_score:.4f}, best threshold: {best_th:.4f}")
    if proauc:
        # PRO-AUC expects [b, 1, h, w] inputs; flatten any leading dims.
        h, w = predictions.shape[-2:]
        compute_pro_auc(
            predictions=predictions.view(-1, 1, h, w),
            targets=targets.view(-1, 1, h, w),
        )
    return auroc_score, auprc_score, dice_score, best_th
| 11,908 | 34.655689 | 118 | py |
clinicalBERT | clinicalBERT-master/downstream_tasks/run_classifier.py | # Code is adapted from the PyTorch pretrained BERT repo - See copyright & license below.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import csv
import logging
import os
import random
import sys
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
#added
import json
from random import shuffle
import math
# Configure root logging for the whole script: timestamped INFO messages.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
class InputFeatures(object):
    """Container for one fully featurized input sequence."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # input_ids: token ids padded to max_seq_length.
        # input_mask: 1 for real tokens, 0 for padding.
        # segment_ids: 0 for sentence A tokens, 1 for sentence B tokens.
        # label_id: integer id of the example's label.
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Create one example.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For
                single sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second
                sequence. Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should
                be specified for train and dev examples, but not for test
                examples.
        """
        self.guid = guid
        self.text_a, self.text_b = text_a, text_b
        self.label = label
class DataProcessor(object):
    """Abstract base class for task-specific dataset readers."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the test set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of rows (lists of cells)."""
        with open(input_file, "r") as handle:
            rows = []
            for row in csv.reader(handle, delimiter="\t", quotechar=quotechar):
                if sys.version_info[0] == 2:
                    # Decode byte strings when running under Python 2.
                    row = [unicode(cell, 'utf-8') for cell in row]
                rows.append(row)
            return rows
class MrpcProcessor(DataProcessor):
    """Reader for the MRPC paraphrase data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into `InputExample`s, skipping the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=row[3], text_b=row[4], label=row[0])
            for idx, row in enumerate(lines)
            if idx != 0
        ]
class MnliProcessor(DataProcessor):
    """Reader for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into `InputExample`s, skipping the header row."""
        return [
            InputExample(guid="%s-%s" % (set_type, row[0]),
                         text_a=row[8], text_b=row[9], label=row[-1])
            for idx, row in enumerate(lines)
            if idx != 0
        ]
class ColaProcessor(DataProcessor):
    """Reader for the CoLA single-sentence data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn raw TSV rows into single-sentence `InputExample`s (no header row)."""
        return [
            InputExample(guid="%s-%s" % (set_type, idx),
                         text_a=row[3], text_b=None, label=row[1])
            for idx, row in enumerate(lines)
        ]
class MedNLIProcessor(DataProcessor):
    """Reader for the MedNLI natural-language-inference data set (jsonl files)."""

    def _chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for start in range(0, len(l), n):
            yield l[start:start + n]

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        return self._create_examples(os.path.join(data_dir, "mli_train_v1.jsonl"))

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        return self._create_examples(os.path.join(data_dir, "mli_dev_v1.jsonl"))

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the test set."""
        return self._create_examples(os.path.join(data_dir, "mli_test_v1.jsonl"))

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, file_path):
        """Parse one json object per line into an `InputExample`."""
        with open(file_path, "r") as handle:
            records = [json.loads(line) for line in handle]
        return [
            InputExample(guid=rec['pairID'], text_a=rec['sentence1'],
                         text_b=rec['sentence2'], label=rec['gold_label'])
            for rec in records
        ]
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Tokenizes each `InputExample`, truncates/pads it to `max_seq_length`, and
    packs token ids, attention mask, segment ids and the numeric label into an
    `InputFeatures` object. The first three examples are logged for
    inspection, and the longest pre-truncation sequence length is printed at
    the end.
    """
    label_map = {label : i for i, label in enumerate(label_list)}
    features = []
    # Longest sequence length observed BEFORE truncation (reported at the end).
    max_len = 0
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            seq_len = len(tokens_a) + len(tokens_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            seq_len = len(tokens_a)
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        if seq_len > max_len:
            max_len = seq_len
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = label_map[example.label]
        # Log a few fully processed examples so the pipeline can be sanity-checked.
        if ex_index < 3:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    print('Max Sequence Length: %d' %max_len)
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
    """Count rows of logits `out` whose argmax equals the corresponding label."""
    return (np.argmax(out, axis=1) == labels).sum()
def setup_parser():
    """Build the command-line interface for this fine-tuning script.

    Returns:
        argparse.ArgumentParser configured with all required and optional flags.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # --- Required arguments -------------------------------------------------
    add("--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    add("--bert_model", default=None, type=str, required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
             "bert-base-multilingual-cased, bert-base-chinese, biobert.")
    add("--task_name", default=None, type=str, required=True,
        help="The name of the task to train.")
    add("--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.")

    # --- Optional arguments -------------------------------------------------
    add("--cache_dir", default="", type=str,
        help="Where do you want to store the pre-trained models downloaded from s3")
    add("--max_seq_length", default=128, type=int,
        help="The maximum total input sequence length after WordPiece tokenization. \n"
             "Sequences longer than this will be truncated, and sequences shorter \n"
             "than this will be padded.")
    add("--do_train", action='store_true', help="Whether to run training.")
    add("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    add("--do_test", action='store_true', help="Whether to run eval on the test set.")
    add("--do_lower_case", action='store_true',
        help="Set this flag if you are using an uncased model.")
    add("--train_batch_size", default=32, type=int, help="Total batch size for training.")
    add("--eval_batch_size", default=8, type=int, help="Total batch size for eval.")
    add("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    add("--num_train_epochs", default=3.0, type=float,
        help="Total number of training epochs to perform.")
    add("--warmup_proportion", default=0.1, type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
             "E.g., 0.1 = 10%% of training.")
    add("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
    add("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    add("--seed", type=int, default=42, help="random seed for initialization")
    add("--gradient_accumulation_steps", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    add("--fp16", action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    add("--loss_scale", type=float, default=0,
        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
             "0 (default value): dynamic loss scaling.\n"
             "Positive power of 2: static loss scaling value.\n")
    add("--server_ip", type=str, default='', help="Can be used for distant debugging.")
    add("--server_port", type=str, default='', help="Can be used for distant debugging.")
    add("--model_loc", type=str, default='',
        help="Specify the location of the bio or clinical bert model")
    return parser
def main():
    """Fine-tune a BERT-style classifier and optionally evaluate/test it."""
    parser = setup_parser()
    args = parser.parse_args()
    # specifies the path where the biobert or clinical bert model is saved
    if args.bert_model == 'biobert' or args.bert_model == 'clinical_bert':
        args.bert_model = args.model_loc
    print(args.bert_model)
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Task registry: data processor class and label count for each supported task.
    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "mednli": MedNLIProcessor
    }
    num_labels_task = {
        "cola": 2,
        "mnli": 3,
        "mrpc": 2,
        "mednli": 3
    }
    # Device setup: single process (possibly multi-GPU via DataParallel) when
    # local_rank is -1, otherwise one GPU per distributed process.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))
    # The per-step batch shrinks so the effective batch stays at the CLI value.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    # Seed every RNG for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    # Sanity check: print the first three examples of each split.
    print('TRAIN')
    train = processor.get_train_examples(args.data_dir)
    print([(train[i].text_a,train[i].text_b, train[i].label) for i in range(3)])
    print('DEV')
    dev = processor.get_dev_examples(args.data_dir)
    print([(dev[i].text_a,dev[i].text_b, dev[i].label) for i in range(3)])
    print('TEST')
    test = processor.get_test_examples(args.data_dir)
    print([(test[i].text_a,test[i].text_b, test[i].label) for i in range(3)])
    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, 'distributed_{}'.format(args.local_rank))
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
              cache_dir=cache_dir,
              num_labels = num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm parameters are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
    if args.do_train:
        # Save a trained model and the associated configuration
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        with open(output_config_file, 'w') as f:
            f.write(model_to_save.config.to_json_string())
        # Load a trained model and config that you have fine-tuned
        config = BertConfig(output_config_file)
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(torch.load(output_model_file))
    else:
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    model.to(device)
    # Evaluation on the dev split (main process only when distributed).
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)
            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step,
                  'loss': loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    # Final evaluation on the held-out test split (mirrors the dev-set loop).
    if args.do_test and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        test_examples = processor.get_test_examples(args.data_dir)
        test_features = convert_examples_to_features(
            test_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running testing *****")
        logger.info("  Num examples = %d", len(test_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
        test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
        model.eval()
        test_loss, test_accuracy = 0, 0
        nb_test_steps, nb_test_examples = 0, 0
        for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader, desc="Testing"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            with torch.no_grad():
                tmp_test_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_test_accuracy = accuracy(logits, label_ids)
            test_loss += tmp_test_loss.mean().item()
            test_accuracy += tmp_test_accuracy
            nb_test_examples += input_ids.size(0)
            nb_test_steps += 1
        test_loss = test_loss / nb_test_steps
        test_accuracy = test_accuracy / nb_test_examples
        loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'test_loss': test_loss,
                  'test_accuracy': test_accuracy,
                  'global_step': global_step,
                  'loss': loss}
        output_test_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_file, "w") as writer:
            logger.info("***** Test results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
# Standard script entry point.
if __name__ == "__main__":
    main()
| 33,551 | 41.57868 | 139 | py |
IDEC-toy | IDEC-toy-master/DEC.py | """
Toy implementation for Deep Embedded Clustering as described in the paper:
Junyuan Xie, Ross Girshick, and Ali Farhadi. Unsupervised deep embedding for clustering analysis. ICML 2016.
Main differences with original code at https://github.com/piiswrong/dec.git:
        1. The autoencoder is pretrained in an end-to-end manner, while the original code uses greedy layer-wise pretraining.
Usage:
No pretrained autoencoder weights available:
python DEC.py mnist
python DEC.py usps
python DEC.py reutersidf10k --n_clusters 4
Weights of Pretrained autoencoder for mnist are in './ae_weights/mnist_ae_weights.h5':
python DEC.py mnist --ae_weights ./ae_weights/mnist_ae_weights.h5
Author:
Xifeng Guo. 2017.4.30
"""
from time import time
import numpy as np
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input
from keras.models import Model
from keras.optimizers import SGD
from keras.utils.vis_utils import plot_model
from sklearn.cluster import KMeans
from sklearn import metrics
def cluster_acc(y_true, y_pred):
    """
    Calculate clustering accuracy. Requires scipy installed.

    # Arguments
        y_true: true labels, numpy.array with shape `(n_samples,)`
        y_pred: predicted labels, numpy.array with shape `(n_samples,)`
    # Return
        accuracy, in [0,1]
    """
    y_true = y_true.astype(np.int64)
    assert y_pred.size == y_true.size
    D = max(y_pred.max(), y_true.max()) + 1
    # w[i, j] counts samples predicted as cluster i with true label j.
    w = np.zeros((D, D), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    # Fix: sklearn.utils.linear_assignment_ was removed in scikit-learn >= 0.23;
    # use the equivalent Hungarian solver from scipy instead.
    from scipy.optimize import linear_sum_assignment
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() * 1.0 / y_pred.size
def autoencoder(dims, act='relu'):
    """
    Build a symmetric fully connected auto-encoder.

    Arguments:
        dims: list of layer widths for the encoder; dims[0] is the input
            dimension and dims[-1] the bottleneck size. The decoder mirrors
            the encoder, so the full model has 2*len(dims)-1 layers.
        act: activation for internal layers (not applied to input, bottleneck
            or output layers).
    return:
        Keras Model mapping input to its reconstruction.
    """
    n_stacks = len(dims) - 1

    inp = Input(shape=(dims[0],), name='input')
    out = inp

    # Encoder stack (all but the bottleneck use the given activation).
    for i in range(n_stacks - 1):
        out = Dense(dims[i + 1], activation=act, name='encoder_%d' % i)(out)

    # Linear bottleneck: features are extracted from this layer.
    out = Dense(dims[-1], name='encoder_%d' % (n_stacks - 1))(out)

    # Decoder stack, mirroring the encoder.
    for i in reversed(range(1, n_stacks)):
        out = Dense(dims[i], activation=act, name='decoder_%d' % i)(out)

    # Linear reconstruction layer.
    out = Dense(dims[0], name='decoder_0')(out)
    return Model(inputs=inp, outputs=out)
class ClusteringLayer(Layer):
    """
    Converts an input sample (feature vector) to a soft cluster assignment:
    a vector of probabilities of the sample belonging to each cluster,
    computed with Student's t-distribution (as in t-SNE).

    # Example
    ```
        model.add(ClusteringLayer(n_clusters=10))
    ```
    # Arguments
        n_clusters: number of clusters.
        weights: list of one Numpy array with shape `(n_clusters, n_features)`
            which represents the initial cluster centers.
        alpha: degrees of freedom of Student's t-distribution. Default 1.0.
    # Input shape
        2D tensor with shape: `(n_samples, n_features)`.
    # Output shape
        2D tensor with shape: `(n_samples, n_clusters)`.
    """

    def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
        # Allow Sequential-style construction via `input_dim`.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(ClusteringLayer, self).__init__(**kwargs)
        self.n_clusters = n_clusters
        self.alpha = alpha
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

    def build(self, input_shape):
        assert len(input_shape) == 2
        feat_dim = input_shape[1]
        self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, feat_dim))
        # Trainable cluster centers, one row per cluster.
        self.clusters = self.add_weight((self.n_clusters, feat_dim), initializer='glorot_uniform', name='clusters')
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, inputs, **kwargs):
        """ Student's t-distribution kernel, as used in t-SNE.
                 q_ij = 1/(1+dist(x_i, u_j)^2), then normalized over clusters.
        Arguments:
            inputs: the variable containing data, shape=(n_samples, n_features)
        Return:
            q: soft cluster assignments, shape=(n_samples, n_clusters)
        """
        # Squared Euclidean distance from every sample to every center.
        sq_dist = K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2)
        kernel = 1.0 / (1.0 + sq_dist / self.alpha)
        kernel **= (self.alpha + 1.0) / 2.0
        # Normalize each row to sum to one.
        return K.transpose(K.transpose(kernel) / K.sum(kernel, axis=1))

    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape) == 2
        return input_shape[0], self.n_clusters

    def get_config(self):
        config = {'n_clusters': self.n_clusters}
        base_config = super(ClusteringLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class DEC(object):
    """Deep Embedded Clustering (Xie et al., ICML 2016).

    An autoencoder is pretrained end-to-end; its encoder output feeds a
    ClusteringLayer whose soft assignments are refined by minimizing KL
    divergence against a self-sharpened target distribution.
    """

    def __init__(self,
                 dims,
                 n_clusters=10,
                 alpha=1.0):
        # dims: autoencoder layer widths; dims[0] is the input dimension.
        super(DEC, self).__init__()

        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1

        self.n_clusters = n_clusters
        self.alpha = alpha
        self.autoencoder = autoencoder(self.dims)

        # Encoder sub-model: input -> bottleneck features.
        hidden = self.autoencoder.get_layer(name='encoder_%d' % (self.n_stacks - 1)).output
        self.encoder = Model(inputs=self.autoencoder.input, outputs=hidden)

        # prepare DEC model: clustering layer on top of the encoder output.
        clustering_layer = ClusteringLayer(self.n_clusters, alpha=self.alpha, name='clustering')(hidden)
        self.model = Model(inputs=self.autoencoder.input, outputs=clustering_layer)

        self.pretrained = False
        self.centers = []
        self.y_pred = []

    def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam'):
        """Pretrain the autoencoder end-to-end on reconstruction (MSE) loss."""
        print('...Pretraining...')
        self.autoencoder.compile(loss='mse', optimizer=optimizer)  # SGD(lr=0.01, momentum=0.9),
        self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs)
        self.autoencoder.save_weights('ae_weights.h5')
        print('Pretrained weights are saved to ./ae_weights.h5')
        self.pretrained = True

    def load_weights(self, weights_path):  # load weights of DEC model
        self.model.load_weights(weights_path)

    def extract_feature(self, x):  # extract features from before clustering layer
        return self.encoder.predict(x)

    def predict_clusters(self, x):  # predict cluster labels using the output of clustering layer
        q = self.model.predict(x, verbose=0)
        return q.argmax(1)

    @staticmethod
    def target_distribution(q):  # target distribution P which enhances the discrimination of soft label Q
        weight = q ** 2 / q.sum(0)
        return (weight.T / weight.sum(1)).T

    def compile(self, loss='kld', optimizer='adam'):
        self.model.compile(loss=loss, optimizer=optimizer)

    def fit(self, x, y=None, batch_size=256, maxiter=2e4, tol=1e-3, update_interval=140,
            ae_weights=None, save_dir='./results/dec'):
        """Run DEC training; returns the final hard cluster assignments for x.

        y is optional and used only to log ACC/NMI/ARI during training.
        Training stops when fewer than `tol` of the labels change between
        consecutive target-distribution updates, or after `maxiter` batches.
        """
        print('Update interval', update_interval)
        save_interval = int(x.shape[0] / batch_size) * 5  # 5 epochs
        print('Save interval', save_interval)

        # Step 1: pretrain (or load pretrained autoencoder weights)
        if not self.pretrained and ae_weights is None:
            print('...pretraining autoencoders using default hyper-parameters:')
            print(' optimizer=\'adam\'; epochs=200')
            self.pretrain(x, batch_size)
            self.pretrained = True
        elif ae_weights is not None:
            self.autoencoder.load_weights(ae_weights)
            print('ae_weights is loaded successfully.')

        # Step 2: initialize cluster centers using k-means on encoder features
        print('Initializing cluster centers with k-means.')
        kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
        self.y_pred = kmeans.fit_predict(self.encoder.predict(x))
        y_pred_last = np.copy(self.y_pred)
        self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])

        # Step 3: deep clustering
        # logging file
        import csv, os
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        logfile = open(save_dir + '/dec_log.csv', 'w')
        logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'acc', 'nmi', 'ari', 'L'])
        logwriter.writeheader()

        loss = 0
        index = 0
        for ite in range(int(maxiter)):
            # Refresh the target distribution p every `update_interval` batches
            # (always hit at ite == 0, so p is defined before first train step).
            if ite % update_interval == 0:
                q = self.model.predict(x, verbose=0)
                p = self.target_distribution(q)  # update the auxiliary target distribution p

                # evaluate the clustering performance
                self.y_pred = q.argmax(1)
                if y is not None:
                    acc = np.round(cluster_acc(y, self.y_pred), 5)
                    nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5)
                    ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5)
                    loss = np.round(loss, 5)
                    logwriter.writerow(dict(iter=ite, acc=acc, nmi=nmi, ari=ari, L=loss))
                    print('Iter-%d: ACC= %.4f, NMI= %.4f, ARI= %.4f; L= %.5f'
                          % (ite, acc, nmi, ari, loss))

                # check stop criterion: fraction of samples whose label changed
                delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0]
                y_pred_last = np.copy(self.y_pred)
                if ite > 0 and delta_label < tol:
                    print('delta_label ', delta_label, '< tol ', tol)
                    print('Reached tolerance threshold. Stopping training.')
                    logfile.close()
                    break

            # train on batch (wrap around to the start once x is exhausted)
            if (index + 1) * batch_size > x.shape[0]:
                loss = self.model.train_on_batch(x=x[index * batch_size::],
                                                 y=p[index * batch_size::])
                index = 0
            else:
                loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size],
                                                 y=p[index * batch_size:(index + 1) * batch_size])
                index += 1

            # save intermediate model
            if ite % save_interval == 0:
                # save DEC model checkpoints
                print('saving model to: ' + save_dir + '/DEC_model_' + str(ite) + '.h5')
                self.model.save_weights(save_dir + '/DEC_model_' + str(ite) + '.h5')

            # NOTE(review): this increment is a no-op — `for` rebinds `ite`
            # on each iteration.
            ite += 1

        # save the trained model
        logfile.close()
        print('saving model to: ' + save_dir + '/DEC_model_final.h5')
        self.model.save_weights(save_dir + '/DEC_model_final.h5')
        return self.y_pred
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dataset', default='mnist', choices=['mnist', 'usps', 'reutersidf10k', 'pendigits'])
parser.add_argument('--n_clusters', default=10, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--maxiter', default=2e4, type=int)
parser.add_argument('--pretrain_epochs', default=200, type=int)
parser.add_argument('--gamma', default=0.1, type=float,
help='coefficient of clustering loss')
parser.add_argument('--update_interval', default=0, type=int)
parser.add_argument('--tol', default=0.001, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--save_dir', default='results/dec')
args = parser.parse_args()
print(args)
# load dataset
optimizer = 'adam' # SGD(lr=0.01, momentum=0.99)
from datasets import load_mnist, load_reuters, load_usps, load_pendigits
if args.dataset == 'mnist': # recommends: n_clusters=10, update_interval=140
x, y = load_mnist()
elif args.dataset == 'usps': # recommends: n_clusters=10, update_interval=30
x, y = load_usps('data/usps')
elif args.dataset == 'pendigits':
x, y = load_pendigits('data/pendigits')
elif args.dataset == 'reutersidf10k': # recommends: n_clusters=4, update_interval=20
x, y = load_reuters('data/reuters')
if args.update_interval == 0: # one epoch. A smaller value may correspond to higher performance
args.update_interval = int(x.shape[0]/args.batch_size)
# Define DEC model
dec = DEC(dims=[x.shape[-1], 500, 500, 2000, 10], n_clusters=args.n_clusters)
plot_model(dec.model, to_file='dec_model.png', show_shapes=True)
dec.model.summary()
t0 = time()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
dec.pretrain(x, batch_size=args.batch_size, epochs=args.pretrain_epochs, optimizer=optimizer)
# begin clustering, time not include pretraining part.
dec.compile(loss='kld', optimizer=optimizer)
dec.fit(x, y=y, batch_size=args.batch_size, tol=args.tol, maxiter=args.maxiter,
update_interval=args.update_interval, ae_weights=args.ae_weights, save_dir=args.save_dir)
# Show the final results
y_pred = dec.y_pred
print('acc:', cluster_acc(y, y_pred))
print('clustering time: %d seconds.' % int(time() - t0))
| 13,998 | 39.459538 | 120 | py |
IDEC-toy | IDEC-toy-master/datasets.py | import numpy as np
def load_mnist():
    """Load the full MNIST dataset (train + test), flattened and scaled to [0, 1]."""
    # the data, shuffled and split between train and test sets
    from keras.datasets import mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    features = np.concatenate((x_train, x_test))
    labels = np.concatenate((y_train, y_test))
    features = features.reshape((features.shape[0], -1))
    features = np.divide(features, 255.)  # normalize as it does in DEC paper
    print('MNIST samples', features.shape)
    return features, labels
def load_usps(data_path='./data/usps'):
    """Load the USPS digits dataset, downloading and unpacking it on first use."""
    import os
    if not os.path.exists(data_path+'/usps_train.jf'):
        if not os.path.exists(data_path+'/usps_train.jf.gz'):
            os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_train.jf.gz -P %s' % data_path)
            os.system('wget http://www-i6.informatik.rwth-aachen.de/~keysers/usps_test.jf.gz -P %s' % data_path)
        os.system('gunzip %s/usps_train.jf.gz' % data_path)
        os.system('gunzip %s/usps_test.jf.gz' % data_path)

    def _read_jf(path):
        # Each .jf file has a header line and a trailing terminator line;
        # column 0 is the label, the rest are pixel features.
        with open(path) as f:
            rows = f.readlines()[1:-1]
        arr = np.array([list(map(float, row.split())) for row in rows])
        return arr[:, 1:], arr[:, 0]

    data_train, labels_train = _read_jf(data_path + '/usps_train.jf')
    data_test, labels_test = _read_jf(data_path + '/usps_test.jf')

    x = np.concatenate((data_train, data_test)).astype('float64')
    y = np.concatenate((labels_train, labels_test))
    print('USPS samples', x.shape)
    return x, y
def load_pendigits(data_path='./data/pendigits'):
    """Load the UCI pendigits dataset, downloading it on first use."""
    import os
    if not os.path.exists(data_path + '/pendigits.tra'):
        os.system('wget http://mlearn.ics.uci.edu/databases/pendigits/pendigits.tra -P %s' % data_path)
        os.system('wget http://mlearn.ics.uci.edu/databases/pendigits/pendigits.tes -P %s' % data_path)
        os.system('wget http://mlearn.ics.uci.edu/databases/pendigits/pendigits.names -P %s' % data_path)

    def _read_csv(path):
        # Comma-separated rows; the last column is the digit label.
        with open(path) as f:
            rows = [list(map(float, line.split(','))) for line in f.readlines()]
        arr = np.array(rows).astype(np.float32)
        return arr[:, :-1], arr[:, -1]

    # load training data
    data_train, labels_train = _read_csv(data_path + '/pendigits.tra')
    print('data_train shape=', data_train.shape)

    # load testing data
    data_test, labels_test = _read_csv(data_path + '/pendigits.tes')
    print('data_test shape=', data_test.shape)

    x = np.concatenate((data_train, data_test)).astype('float32')
    y = np.concatenate((labels_train, labels_test))
    x /= 100.  # coordinates are in [0, 100]; rescale to [0, 1]
    print('pendigits samples:', x.shape)
    return x, y
def load_reuters(data_path='./data/reuters'):
    """Load the 10k-sample Reuters TF-IDF dataset, building the .npy on first use.

    Returns (x, y) where x is a float32 feature matrix and y the flat label
    vector; the stored data has already been shuffled.
    """
    import os
    if not os.path.exists(os.path.join(data_path, 'reutersidf10k.npy')):
        print('making reuters idf features')
        make_reuters_data(data_path)
        print('reutersidf saved to ' + data_path)
    # Fix: the file stores a pickled dict (object array). Since NumPy 1.16.3
    # np.load defaults to allow_pickle=False, which raises ValueError here.
    data = np.load(os.path.join(data_path, 'reutersidf10k.npy'), allow_pickle=True).item()
    # has been shuffled
    x = data['data']
    y = data['label']
    x = x.reshape((x.shape[0], -1)).astype('float32')
    y = y.reshape((y.size,))
    print('REUTERSIDF10K samples', x.shape)
    return x, y
def make_reuters_data(data_dir):
    """Build the 10k-sample Reuters TF-IDF feature file `reutersidf10k.npy`.

    Expects the RCV1-v2 topic qrels and token .dat files in `data_dir`.
    Keeps only documents assigned exactly one of the four top categories,
    vectorizes the top 2000 terms, applies TF-IDF, and saves a shuffled
    10k-sample subset as a {'data', 'label'} dict.
    """
    np.random.seed(1234)
    from sklearn.feature_extraction.text import CountVectorizer
    from os.path import join
    did_to_cat = {}
    cat_list = ['CCAT', 'GCAT', 'MCAT', 'ECAT']
    # Map document id -> list of top-level categories it belongs to.
    with open(join(data_dir, 'rcv1-v2.topics.qrels')) as fin:
        for line in fin.readlines():
            line = line.strip().split(' ')
            cat = line[0]
            did = int(line[1])
            if cat in cat_list:
                did_to_cat[did] = did_to_cat.get(did, []) + [cat]
    # Discard multi-label documents so each sample has a single class.
    for did in list(did_to_cat.keys()):
        if len(did_to_cat[did]) > 1:
            del did_to_cat[did]

    dat_list = ['lyrl2004_tokens_test_pt0.dat',
                'lyrl2004_tokens_test_pt1.dat',
                'lyrl2004_tokens_test_pt2.dat',
                'lyrl2004_tokens_test_pt3.dat',
                'lyrl2004_tokens_train.dat']
    data = []
    target = []
    cat_to_cid = {'CCAT': 0, 'GCAT': 1, 'MCAT': 2, 'ECAT': 3}
    # Delete `did` so the "'did' in locals()" check below is False until the
    # first '.I' header has been seen.
    del did
    for dat in dat_list:
        with open(join(data_dir, dat)) as fin:
            for line in fin.readlines():
                if line.startswith('.I'):
                    # New document header: flush the previously accumulated doc.
                    if 'did' in locals():
                        assert doc != ''
                        if did in did_to_cat:
                            data.append(doc)
                            target.append(cat_to_cid[did_to_cat[did][0]])
                    did = int(line.strip().split(' ')[1])
                    doc = ''
                elif line.startswith('.W'):
                    # '.W' marks the start of the body; doc must still be empty.
                    assert doc == ''
                else:
                    doc += line

    # NOTE(review): the very last document of the last file is never flushed
    # by the loop above; this assert presumably holds because that document
    # is multi-label (absent from did_to_cat) — verify against the corpus.
    assert len(data) == len(did_to_cat)

    x = CountVectorizer(dtype=np.float64, max_features=2000).fit_transform(data)
    y = np.asarray(target)

    from sklearn.feature_extraction.text import TfidfTransformer
    x = TfidfTransformer(norm='l2', sublinear_tf=True).fit_transform(x)
    x, y = x[:10000], y[:10000]
    # Scale so feature magnitudes do not shrink with dimensionality.
    x = np.asarray(x.todense()) * np.sqrt(x.shape[1])
    print('todense succeed')

    # Shuffle samples with a fixed seed (set above) for reproducibility.
    p = np.random.permutation(x.shape[0])
    x, y = x[p], y[p]
    print('permutation finished')

    assert x.shape[0] == y.shape[0]
    x = x.reshape((x.shape[0], -1))
    np.save(join(data_dir, 'reutersidf10k.npy'), {'data': x, 'label': y})
| 5,688 | 36.675497 | 113 | py |
IDEC-toy | IDEC-toy-master/IDEC.py | """
Toy implementation for Improved Deep Embedded Clustering as described in paper:
Xifeng Guo, Long Gao, Xinwang Liu, Jianping Yin. Improved Deep Embedded Clustering with Local Structure
Preservation. IJCAI 2017.
The Autoencoder is pretrained directly in an end-to-end manner, NOT with greedy layer-wise training. So the results
differ from what is reported in the paper.
Usage:
No pretrained autoencoder weights available:
python IDEC.py mnist
python IDEC.py usps
python IDEC.py reutersidf10k --n_clusters 4
Weights of Pretrained autoencoder for mnist are in './ae_weights/mnist_ae_weights.h5':
python IDEC.py mnist --ae_weights ./ae_weights/mnist_ae_weights.h5
Author:
Xifeng Guo. 2017.4.30
"""
from time import time
import numpy as np
from keras.models import Model
from keras.optimizers import SGD
from keras.utils.vis_utils import plot_model
from sklearn.cluster import KMeans
from sklearn import metrics
from DEC import cluster_acc, ClusteringLayer, autoencoder
class IDEC(object):
    """Improved Deep Embedded Clustering (Guo et al., IJCAI 2017).

    Like DEC, but the decoder is kept during clustering: the model jointly
    optimizes the clustering KL loss and the autoencoder reconstruction
    (MSE) loss, preserving local structure of the data.
    """

    def __init__(self,
                 dims,
                 n_clusters=10,
                 alpha=1.0):
        # dims: autoencoder layer widths; dims[0] is the input dimension.
        super(IDEC, self).__init__()

        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1

        self.n_clusters = n_clusters
        self.alpha = alpha
        self.autoencoder = autoencoder(self.dims)
        # Encoder sub-model: input -> bottleneck features.
        hidden = self.autoencoder.get_layer(name='encoder_%d' % (self.n_stacks - 1)).output
        self.encoder = Model(inputs=self.autoencoder.input, outputs=hidden)

        # prepare IDEC model: two outputs — soft assignments and reconstruction.
        clustering_layer = ClusteringLayer(self.n_clusters, alpha=self.alpha, name='clustering')(hidden)
        self.model = Model(inputs=self.autoencoder.input,
                           outputs=[clustering_layer, self.autoencoder.output])

        self.pretrained = False
        self.centers = []
        self.y_pred = []

    def pretrain(self, x, batch_size=256, epochs=200, optimizer='adam'):
        """Pretrain the autoencoder end-to-end on reconstruction (MSE) loss."""
        print('...Pretraining...')
        self.autoencoder.compile(loss='mse', optimizer=optimizer)  # SGD(lr=0.01, momentum=0.9),
        self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs)
        self.autoencoder.save_weights('ae_weights.h5')
        print('Pretrained weights are saved to ./ae_weights.h5')
        self.pretrained = True

    def load_weights(self, weights_path):  # load weights of IDEC model
        self.model.load_weights(weights_path)

    def extract_feature(self, x):  # extract features from before clustering layer
        return self.encoder.predict(x)

    def predict_clusters(self, x):  # predict cluster labels using the output of clustering layer
        q, _ = self.model.predict(x, verbose=0)
        return q.argmax(1)

    @staticmethod
    def target_distribution(q):  # target distribution P which enhances the discrimination of soft label Q
        weight = q ** 2 / q.sum(0)
        return (weight.T / weight.sum(1)).T

    def compile(self, loss=['kld', 'mse'], loss_weights=[1, 1], optimizer='adam'):
        # NOTE(review): mutable default arguments; harmless here only because
        # they are never mutated inside the method.
        self.model.compile(loss=loss, loss_weights=loss_weights, optimizer=optimizer)

    def fit(self, x, y=None, batch_size=256, maxiter=2e4, tol=1e-3, update_interval=140,
            ae_weights=None, save_dir='./results/idec'):
        """Run IDEC training; returns the final hard cluster assignments for x.

        y is optional and used only to log ACC/NMI/ARI during training.
        Training stops when fewer than `tol` of the labels change between
        consecutive target-distribution updates, or after `maxiter` batches.
        """
        print('Update interval', update_interval)
        save_interval = int(x.shape[0] / batch_size) * 5  # 5 epochs
        print('Save interval', save_interval)

        # Step 1: pretrain (or load pretrained autoencoder weights)
        if not self.pretrained and ae_weights is None:
            print('...pretraining autoencoders using default hyper-parameters:')
            print(' optimizer=\'adam\'; epochs=200')
            self.pretrain(x, batch_size)
            self.pretrained = True
        elif ae_weights is not None:
            self.autoencoder.load_weights(ae_weights)
            print('ae_weights is loaded successfully.')

        # Step 2: initialize cluster centers using k-means on encoder features
        print('Initializing cluster centers with k-means.')
        kmeans = KMeans(n_clusters=self.n_clusters, n_init=20)
        self.y_pred = kmeans.fit_predict(self.encoder.predict(x))
        y_pred_last = np.copy(self.y_pred)
        self.model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])

        # Step 3: deep clustering
        # logging file
        import csv, os
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        logfile = open(save_dir + '/idec_log.csv', 'w')
        logwriter = csv.DictWriter(logfile, fieldnames=['iter', 'acc', 'nmi', 'ari', 'L', 'Lc', 'Lr'])
        logwriter.writeheader()

        # loss = [total, clustering (KL), reconstruction (MSE)]
        loss = [0, 0, 0]
        index = 0
        for ite in range(int(maxiter)):
            # Refresh the target distribution p every `update_interval` batches
            # (always hit at ite == 0, so p is defined before first train step).
            if ite % update_interval == 0:
                q, _ = self.model.predict(x, verbose=0)
                p = self.target_distribution(q)  # update the auxiliary target distribution p

                # evaluate the clustering performance
                self.y_pred = q.argmax(1)
                if y is not None:
                    acc = np.round(cluster_acc(y, self.y_pred), 5)
                    nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5)
                    ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5)
                    loss = np.round(loss, 5)
                    logwriter.writerow(dict(iter=ite, acc=acc, nmi=nmi, ari=ari, L=loss[0], Lc=loss[1], Lr=loss[2]))
                    print('Iter-%d: ACC= %.4f, NMI= %.4f, ARI= %.4f; L= %.5f, Lc= %.5f, Lr= %.5f'
                          % (ite, acc, nmi, ari, loss[0], loss[1], loss[2]))

                # check stop criterion: fraction of samples whose label changed
                delta_label = np.sum(self.y_pred != y_pred_last).astype(np.float32) / self.y_pred.shape[0]
                y_pred_last = np.copy(self.y_pred)
                if ite > 0 and delta_label < tol:
                    print('delta_label ', delta_label, '< tol ', tol)
                    print('Reached tolerance threshold. Stopping training.')
                    logfile.close()
                    break

            # train on batch: targets are [soft assignments p, input x itself]
            if (index + 1) * batch_size > x.shape[0]:
                loss = self.model.train_on_batch(x=x[index * batch_size::],
                                                 y=[p[index * batch_size::], x[index * batch_size::]])
                index = 0
            else:
                loss = self.model.train_on_batch(x=x[index * batch_size:(index + 1) * batch_size],
                                                 y=[p[index * batch_size:(index + 1) * batch_size],
                                                    x[index * batch_size:(index + 1) * batch_size]])
                index += 1

            # save intermediate model
            if ite % save_interval == 0:
                # save IDEC model checkpoints
                print('saving model to: ' + save_dir + '/IDEC_model_' + str(ite) + '.h5')
                self.model.save_weights(save_dir + '/IDEC_model_' + str(ite) + '.h5')

            # NOTE(review): this increment is a no-op — `for` rebinds `ite`
            # on each iteration.
            ite += 1

        # save the trained model
        logfile.close()
        print('saving model to: ' + save_dir + '/IDEC_model_final.h5')
        self.model.save_weights(save_dir + '/IDEC_model_final.h5')
        return self.y_pred
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('dataset', default='mnist', choices=['mnist', 'usps', 'reutersidf10k', 'pendigits'])
parser.add_argument('--n_clusters', default=10, type=int)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--maxiter', default=2e4, type=int)
parser.add_argument('--pretrain_epochs', default=200, type=int)
parser.add_argument('--gamma', default=0.1, type=float,
help='coefficient of clustering loss')
parser.add_argument('--update_interval', default=0, type=int)
parser.add_argument('--tol', default=0.001, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--save_dir', default='results/idec')
args = parser.parse_args()
print(args)
# load dataset
optimizer = 'adam' # SGD(lr=0.01, momentum=0.99)
from datasets import load_mnist, load_reuters, load_usps, load_pendigits
if args.dataset == 'mnist': # recommends: n_clusters=10, update_interval=140
x, y = load_mnist()
elif args.dataset == 'usps': # recommends: n_clusters=10, update_interval=30
x, y = load_usps('data/usps')
elif args.dataset == 'pendigits':
x, y = load_pendigits('data/pendigits')
elif args.dataset == 'reutersidf10k': # recommends: n_clusters=4, update_interval=20
x, y = load_reuters('data/reuters')
if args.update_interval == 0: # one epoch
args.update_interval = int(x.shape[0]/args.batch_size)
# Define IDEC model
idec = IDEC(dims=[x.shape[-1], 500, 500, 2000, 10], n_clusters=args.n_clusters)
plot_model(idec.model, to_file='idec_model.png', show_shapes=True)
idec.model.summary()
t0 = time()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
idec.pretrain(x, batch_size=args.batch_size, epochs=args.pretrain_epochs, optimizer=optimizer)
# begin clustering, time not include pretraining part.
idec.compile(loss=['kld', 'mse'], loss_weights=[args.gamma, 1], optimizer=optimizer)
idec.fit(x, y=y, batch_size=args.batch_size, tol=args.tol, maxiter=args.maxiter,
update_interval=args.update_interval, ae_weights=args.ae_weights, save_dir=args.save_dir)
# Show the final results
y_pred = idec.y_pred
print('acc:', cluster_acc(y, y_pred))
print('clustering time: %d seconds.' % int(time() - t0))
| 9,906 | 42.073913 | 116 | py |
CE-OCL | CE-OCL-main/src/run_MLmodels.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# module load python/3.6.3
# module load sloan/python/modules/3.6
# srun --pty --mem=16G -p sched_mit_sloan_interactive python3
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn import tree
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
import matplotlib
# matplotlib.use('Agg')
# import shap
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import pickle
import random
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from pathlib import Path
import opticl
def r_squared(y_true, y_pred, y_mean):
    """Coefficient of determination R^2, with a caller-supplied baseline mean."""
    residual_ss = ((y_true - y_pred) ** 2).sum()
    total_ss = ((y_true - y_mean) ** 2).sum()
    return 1 - residual_ss / total_ss
def sens_spec(y_true, y_pred, threshold):
    """Print and return [sensitivity, specificity] of y_pred binarized at `threshold`."""
    binarized = (y_pred > threshold) * 1
    tn, fp, fn, tp = metrics.confusion_matrix(y_true, binarized).ravel()
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    print("Sensitivity: " + str(sensitivity))
    print("Specificity: " + str(specificity))
    return [sensitivity, specificity]
def create_and_save_pickle(gs, pickle_path):
    """Serialize a fitted search/estimator to `pickle_path`.

    Grid searches are stored as their best estimator plus the winning and
    searched parameters; ElasticNetCV keeps the full object. Any estimator
    lacking those attributes falls back to storing the raw model only.
    """
    try:
        if type(gs).__name__ == 'ElasticNetCV':
            exp = {'model': gs,
                   'best_params': gs.best_params_,
                   'param_grid': gs.param_grid}
        else:
            exp = {'model': gs.best_estimator_,
                   'best_params': gs.best_params_,
                   'param_grid': gs.param_grid}
    except AttributeError:
        # Fix: was a bare `except:` that swallowed every exception (including
        # KeyboardInterrupt); only missing grid-search attributes are expected.
        exp = {'model': gs}
    with open(pickle_path, 'wb') as handle:
        pickle.dump(exp, handle, protocol=4)
    return
def shap_summary(model, X, col_names, save_path, filetype='.pdf'):
    """Write a SHAP violin summary plot and a per-feature importance CSV.

    NOTE(review): relies on a module-level `shap` import, which is commented
    out at the top of this file — confirm `shap` is importable before calling.
    Outputs: `<save_path>_importance.csv` and `<save_path>_summary_plot<filetype>`.
    """
    plt.close()
    explainer = shap.TreeExplainer(model,
                                   # data=test_x, model_output="probability",
                                   );
    shap_values = explainer.shap_values(X);
    # Binary classifiers return one array per class; keep the positive class.
    if len(shap_values) == 2:
        shap_values = shap_values[1]
    # Mean absolute SHAP value per feature, sorted descending.
    importance = pd.DataFrame(list(zip(col_names, np.mean(abs(shap_values), axis=0))),
                              columns=['Feature', 'Importance']).sort_values(by='Importance', ascending=False)
    importance.to_csv(save_path + '_importance.csv', index=False)
    shap.summary_plot(shap_values, X, show=False,
                      max_display=10,
                      plot_size=(10, 5),
                      plot_type="violin",
                      feature_names=list(col_names))
    f = plt.gcf()
    plt.xlabel('SHAP value (impact on model output)')
    f.savefig(save_path + '_summary_plot' + filetype,
              bbox_inches='tight'
              )
    plt.clf()
    plt.close()
def initialize_model(model_choice, task, cv_folds, parameter_grid, gs_metric, seed, mlp_solver='adam'):
    """Construct an unfitted hyper-parameter search for the chosen algorithm.

    Parameters
    ----------
    model_choice : str
        One of 'linear', 'cart', 'rf', 'rf_shallow', 'gbm', 'xgb', 'iai',
        'iai-single', 'svm', 'mlp'.
    task : str
        'binary', 'multiclass' or 'continuous'.
    cv_folds : int
        Number of cross-validation folds for GridSearchCV.
    parameter_grid : dict or None
        User-supplied search grid; when None a default grid is used.
    gs_metric : str or None
        GridSearchCV scoring metric; when None a task-appropriate default
        is chosen ('roc_auc', 'roc_auc_ovr', or 'neg_mean_squared_error').
    seed : int
        Random seed for the underlying estimator.
    mlp_solver : str
        Solver for MLP models ('adam' or 'lbfgs').

    Returns
    -------
    GridSearchCV (or iai.GridSearch for the 'iai' choices), ready to fit.
    """
    ## select scoring metric (fix: `is None`, not `== None`)
    if gs_metric is None:
        if task == 'binary':
            gs_metric = 'roc_auc'
        elif task == 'multiclass':
            gs_metric = 'roc_auc_ovr'
        elif task == 'continuous':
            gs_metric = 'neg_mean_squared_error'
            # gs_metric = 'r2'

    if model_choice == "linear":
        if task == 'binary':
            from sklearn.linear_model import LogisticRegression
            # Fix: honor a user-supplied parameter_grid here too — previously
            # the binary case ignored it, unlike every other branch.
            param_grid = parameter_grid if parameter_grid is not None else \
                {'C': np.arange(0.001, 1, 0.05), 'penalty': ['l2', 'l1']}
            est = LogisticRegression(random_state=seed, solver='saga', max_iter=1e4)
        elif task == 'multiclass':
            from sklearn.linear_model import LogisticRegression
            param_grid = parameter_grid if parameter_grid is not None else {'C': np.arange(0.001, 1, 0.05),
                                                                            'penalty': ['l2', 'l1'], 'max_iter': [1e4]}
            est = LogisticRegression(random_state=seed, multi_class='multinomial', solver='saga', max_iter=1e4)
        elif task == 'continuous':
            from sklearn.linear_model import ElasticNet
            param_grid = parameter_grid if parameter_grid is not None else {'alpha': [0.1, 1, 10, 100, 1000],
                                                                            'l1_ratio': np.arange(0.1, 1.0, 0.1)}
            est = ElasticNet(random_state=seed, max_iter=1e4)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice == "cart":
        param_grid = parameter_grid if parameter_grid is not None else {"max_depth": [3, 4, 5, 6, 7, 8, 9, 10],
                                                                        'min_samples_leaf': [0.02, 0.04, 0.06],
                                                                        "max_features": [0.4, 0.6, 0.8, 1.0]}
        if task in ['binary', 'multiclass']:
            from sklearn.tree import DecisionTreeClassifier
            est = DecisionTreeClassifier(random_state=seed, criterion='gini')
        elif task == 'continuous':
            from sklearn.tree import DecisionTreeRegressor
            est = DecisionTreeRegressor(random_state=seed)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice in ["rf", "rf_shallow"]:
        # 'rf' searches deeper/larger forests; 'rf_shallow' keeps them small.
        if model_choice == "rf":
            param_grid = parameter_grid if parameter_grid is not None else {
                'n_estimators': [250, 500],
                'max_features': ['auto'],
                'max_depth': [6, 7, 8]
            }
        else:
            param_grid = parameter_grid if parameter_grid is not None else {
                'n_estimators': [10, 25],
                'max_features': ['auto'],
                'max_depth': [2, 3, 4]
            }
        if task in ['binary', 'multiclass']:
            from sklearn.ensemble import RandomForestClassifier
            est = RandomForestClassifier(random_state=seed, criterion='gini')
        elif task == 'continuous':
            from sklearn.ensemble import RandomForestRegressor
            est = RandomForestRegressor(random_state=seed)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice == 'gbm':
        param_grid = parameter_grid if parameter_grid is not None else {
            "learning_rate": [0.01, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2],
            "max_depth": [2, 3, 4, 5],
            "n_estimators": [20]
        }
        if task in ['binary', 'multiclass']:
            from sklearn.ensemble import GradientBoostingClassifier
            est = GradientBoostingClassifier(random_state=seed, init='zero')
        elif task == 'continuous':
            from sklearn.ensemble import GradientBoostingRegressor
            est = GradientBoostingRegressor(random_state=seed)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice == "xgb":
        param_grid = parameter_grid if parameter_grid is not None else {
            'min_child_weight': [1, 5, 10],
            'gamma': [0.5, 1, 2, 5, 10],
            'subsample': [0.8, 1.0],
            'colsample_bytree': [0.8, 1.0],
            'max_depth': [4, 5, 6],
            'n_estimators': [250]
        }
        if task == 'binary':
            from xgboost import XGBClassifier
            est = XGBClassifier(random_state=seed, objective='binary:logistic')
        elif task == 'multiclass':
            from xgboost import XGBClassifier
            est = XGBClassifier(random_state=seed, objective='multi:softmax')
        elif task == 'continuous':
            from xgboost import XGBRegressor
            est = XGBRegressor(random_state=seed)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice == "iai":
        # Interpretable AI optimal trees with hyperplane splits.
        from julia import Julia
        Julia(compiled_modules=False)
        from interpretableai import iai
        if task in ['binary', 'multiclass']:
            est = iai.OptimalTreeClassifier(
                random_seed=seed,
                ls_num_hyper_restarts=5,  # 5 is default
                fast_num_support_restarts=10,
                hyperplane_config={'sparsity': 'sqrt'}
            )
        elif task == 'continuous':
            est = iai.OptimalTreeRegressor(
                random_seed=seed,
                ls_num_hyper_restarts=5,  # 5 is default
                fast_num_support_restarts=10,
                hyperplane_config={'sparsity': 'sqrt'}
            )
        gs = iai.GridSearch(est,
                            max_depth=range(2, 6), minbucket=[5, 10]
                            )

    elif model_choice == "iai-single":
        # Interpretable AI optimal trees with axis-aligned (single-feature) splits.
        from julia import Julia
        Julia(compiled_modules=False)
        from interpretableai import iai
        if task in ['binary', 'multiclass']:
            est = iai.OptimalTreeClassifier(
                random_seed=seed,
            )
        elif task == 'continuous':
            est = iai.OptimalTreeRegressor(
                random_seed=seed,
            )
        gs = iai.GridSearch(est,
                            max_depth=range(2, 6), minbucket=[.01, .02, .05]
                            )

    elif model_choice == "svm":
        param_grid = parameter_grid if parameter_grid is not None else {
            'C': [.1, 1, 10, 100]
        }
        if task in ['binary', 'multiclass']:
            from sklearn.svm import LinearSVC
            est = LinearSVC(max_iter=int(1e5), dual=False, penalty='l2')
        elif task == 'continuous':
            from sklearn.svm import LinearSVR
            est = LinearSVR(max_iter=int(1e5), dual=False, loss='squared_epsilon_insensitive')
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    elif model_choice == "mlp":
        param_grid = parameter_grid if parameter_grid is not None else {
            'hidden_layer_sizes': [(10,), (20,), (50,), (100,)]
        }
        if task in ['binary', 'multiclass']:
            assert task == 'binary', 'sorry, the multiclass is under development'
            from sklearn.neural_network import MLPClassifier
            est = MLPClassifier(random_state=seed, solver=mlp_solver, max_iter=10000)
        elif task == 'continuous':
            from sklearn.neural_network import MLPRegressor
            est = MLPRegressor(random_state=seed, solver=mlp_solver, max_iter=10000)
        gs = GridSearchCV(estimator=est, param_grid=param_grid, scoring=gs_metric, cv=cv_folds)

    return gs
def run_model(train_x, y_train, test_x, y_test, model_choice, outcome, task, cv_folds=3,
              seed=1, save_path='../results/', save=False, shap=False,
              parameter_grid=None,
              metric=None
              ):
    """Grid-search `model_choice` on the training split and evaluate it.

    Parameters
    ----------
    train_x, y_train : training features / labels.
    test_x, y_test : held-out features / labels.
    model_choice : str
        One of 'linear', 'cart', 'rf', 'rf_shallow', 'xgb', 'gbm', 'iai',
        'iai-single', 'svm', 'mlp'.
    outcome : str
        Label used to build output file names.
    task : str
        'multiclass', 'binary' or 'continuous'.
    cv_folds : int
        Cross-validation folds for the grid search.
    seed : int
        Random seed for numpy / estimators.
    save_path : str
        Prefix for model-specific artifacts (trees, coefficients, SHAP).
    save : bool
        When True, write the performance CSV and artifacts.
    shap : bool
        When True (and no model-specific artifact branch applies), save a
        SHAP summary plot.
    parameter_grid : dict or None
        Custom search grid; per-model defaults are used when None.
    metric : str or None
        Scoring metric handed to the sklearn grid search.

    Returns
    -------
    (model, performance)
        The best fitted estimator and a one-row DataFrame of
        validation/train/test scores.
    """
    assert task in ['multiclass', 'binary', 'continuous']
    assert model_choice in ['linear', 'cart', 'rf', 'rf_shallow', 'xgb', 'gbm', 'iai', 'iai-single', 'svm', 'mlp']
    print("------------- Initialize grid ----------------")
    # lbfgs converges better on small samples; adam scales to larger ones
    mlp_solver = 'adam' if train_x.shape[0] >= 1000 else 'lbfgs'
    gs = initialize_model(model_choice, task, cv_folds,
                          parameter_grid, metric, seed, mlp_solver=mlp_solver)
    print("------------- Running model ----------------")
    print(f"Algorithm = {model_choice}, metric = {metric}")
    np.random.seed(seed)
    is_iai = model_choice in ('iai', 'iai-single')
    if is_iai:
        # IAI's GridSearch exposes its own fit/validation API
        metric = 'mse' if task == 'continuous' else 'auc'
        gs.fit_cv(train_x, y_train, n_folds=cv_folds, validation_criterion=metric)
    else:
        gs.fit(train_x, y_train)
        # BUG FIX: only sklearn-style searches expose best_estimator_; the
        # pickle save used to run unconditionally and crashed for IAI models.
        # Also ensure the output directory exists before opening the file.
        filename = 'results/' + model_choice + '_' + outcome + '_trained.pkl'
        os.makedirs('results', exist_ok=True)
        with open(filename, 'wb') as f:
            print(f'saving trained model to {filename}')
            pickle.dump(gs.best_estimator_, f)
    # Pull out the winning configuration from whichever search object we used
    if is_iai:
        grid_result = gs.get_grid_results()
        valid_score = grid_result.query('rank_valid_score == 1')['mean_valid_score'].values[0]
        best_params = gs.get_best_params()
        param_grid = {'minbucket': grid_result['minbucket'].unique(),
                      'max_depth': grid_result['max_depth'].unique()}
        model = gs.get_learner()
    else:
        valid_score = gs.best_score_
        best_params = gs.best_params_
        param_grid = gs.param_grid
        model = gs.best_estimator_
    print("------------- Model evaluation ----------------")
    if task == 'binary':
        if model_choice != 'svm':
            print("-------------------training evaluation-----------------------")
            train_pred = np.array(gs.predict_proba(train_x))[::, 1]
            train_score = metrics.roc_auc_score(y_train, train_pred)
            print("Train Score: " + str(train_score))
            print("-------------------testing evaluation-----------------------")
            test_pred = np.array(gs.predict_proba(test_x))[::, 1]
            test_score = metrics.roc_auc_score(y_test, test_pred)
            print("Test Score: " + str(test_score))
            preds_train = pd.DataFrame({'true': y_train, 'pred': train_pred})
            preds_test = pd.DataFrame({'true': y_test, 'pred': test_pred})
        else:
            # LinearSVC has no predict_proba; use hard predictions + accuracy
            print("-------------------training evaluation-----------------------")
            train_pred = gs.predict(train_x)
            train_score = gs.score(train_x, y_train)
            print("Train Score: " + str(train_score))
            print("-------------------testing evaluation-----------------------")
            test_pred = gs.predict(test_x)
            test_score = gs.score(test_x, y_test)
            print("Test Score: " + str(test_score))
            preds_train = pd.DataFrame({'true': y_train, 'pred': train_pred})
            preds_test = pd.DataFrame({'true': y_test, 'pred': test_pred})
        # both sub-branches share the same summary row
        performance_dict = {'save_path': save_path, 'seed': seed,
                            'cv_folds': cv_folds, 'task': task, 'parameters': param_grid,
                            'best_params': best_params,
                            'valid_score': valid_score, 'train_score': train_score, 'test_score': test_score}
    elif task == 'multiclass':
        print("-------------------training evaluation-----------------------")
        train_pred = gs.predict_proba(train_x)
        train_score = metrics.roc_auc_score(y_train, train_pred, multi_class='ovr')
        print("Train Score: " + str(train_score))
        print("-------------------testing evaluation-----------------------")
        test_pred = gs.predict_proba(test_x)
        test_score = metrics.roc_auc_score(y_test, test_pred, multi_class='ovr')
        print("Test Score: " + str(test_score))
        preds_train = pd.DataFrame(train_pred, columns=gs.classes_)
        preds_train['true'] = y_train
        preds_test = pd.DataFrame(test_pred, columns=gs.classes_)
        preds_test['true'] = y_test
        performance_dict = {'save_path': save_path, 'seed': seed,
                            'cv_folds': cv_folds, 'task': task, 'parameters': param_grid, 'best_params': best_params,
                            'valid_score': valid_score, 'train_score': train_score, 'test_score': test_score}
    elif task == 'continuous':
        print("-------------------training evaluation-----------------------")
        train_pred = gs.predict(train_x)
        train_mse = metrics.mean_squared_error(y_train, train_pred)
        print("Train MSE: " + str(train_mse))
        train_r2 = r_squared(y_train, train_pred, y_train.mean())
        print("Train R2: " + str(train_r2))
        print("-------------------testing evaluation-----------------------")
        test_pred = gs.predict(test_x)
        test_mse = metrics.mean_squared_error(y_test, test_pred)
        print("Test MSE: " + str(test_mse))
        # test R2 uses the TRAIN mean as baseline (out-of-sample R2)
        test_r2 = r_squared(y_test, test_pred, y_train.mean())
        print("Test R2: " + str(test_r2))
        preds_train = pd.DataFrame({'true': y_train, 'pred': train_pred})
        preds_test = pd.DataFrame({'true': y_test, 'pred': test_pred})
        performance_dict = {'save_path': save_path, 'seed': seed,
                            'cv_folds': cv_folds,
                            'task': task, 'parameters': param_grid, 'best_params': best_params,
                            'valid_score': valid_score,
                            'train_score': train_mse, 'train_r2': train_r2,
                            'test_score': test_mse, 'test_r2': test_r2}
    performance = pd.DataFrame([list(performance_dict.values())], columns=performance_dict.keys(), index=[0])
    if save:
        print("------------- Save results ----------------")
        if not os.path.exists('results/%s/' % model_choice):
            os.makedirs('results/%s/' % model_choice)
        performance.to_csv('results/%s/%s_performance.csv' % (model_choice, outcome), index=False)
        if model_choice == 'cart':
            fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(15, 15), dpi=200)
            tree.plot_tree(model,
                           feature_names=train_x.columns,
                           filled=True)
            fig.savefig(save_path + '_tree.png')
            plt.clf()
            plt.close()
        elif model_choice == 'iai':
            df_A, node_info = extract_tree_info(model, train_x.columns, save_path)
            model.write_html(save_path + '_tree.html')
            model.write_png(save_path + '_tree.png')
        elif model_choice == 'linear':
            coef = pd.DataFrame(model.coef_.transpose(),
                                columns=gs.classes_, index=train_x.columns)
            coef.to_csv(save_path + "_coefficients.csv", index=True)
        elif shap:
            # only reached when no model-specific artifact branch matched
            shap_summary(model, train_x, train_x.columns, save_path)
    return model, performance
def train_ml_models(outcome_list, version, s = 1, bootstrap_proportion = 0.5, save_models = False, save_path = 'results/'):
    """Train one model per (outcome, algorithm, bootstrap sample) and collect metrics.

    Parameters
    ----------
    outcome_list : dict
        Maps an outcome name to a spec dict with keys 'alg_list', 'task_type',
        'bootstrap_iterations', 'X_train', 'y_train', 'X_test', 'y_test' and
        optionally 'lb'/'ub' thresholds for a secondary AUC evaluation.
    version : str
        Version tag embedded in output file names.
    s : int
        Random seed passed down to `run_model`.
    bootstrap_proportion : float
        Fraction of the training set drawn (with replacement) per bootstrap.
    save_models : bool
        Currently unused; kept for interface compatibility.
    save_path : str
        Root directory for model/constraint/performance CSVs.

    Returns
    -------
    pd.DataFrame
        One row per trained model with its performance metrics.
    """
    performance = pd.DataFrame()
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for outcome_main in outcome_list.keys():
        print(f'Learning a model for {outcome_main}')
        outcome_specs = outcome_list[outcome_main]
        alg_list = outcome_specs['alg_list']
        task_type = outcome_specs['task_type']
        bootstrap_iterations = outcome_specs['bootstrap_iterations']
        # BUG FIX: was `True if ... else 0`, which mixed bool and int
        bootstrap_yn = bootstrap_iterations > 0
        ## Iterate over bootstrap iterations (or single loop if none)
        X_train_all = outcome_specs['X_train']
        y_train_all = outcome_specs['y_train']
        X_test = outcome_specs['X_test']
        y_test = outcome_specs['y_test']
        for i in range(max(1, bootstrap_iterations)):
            if not bootstrap_yn:
                print("No bootstrap - training on full training data")
                outcome = outcome_main
                X_train = X_train_all
                y_train = y_train_all
            else:
                print("Bootstrap iteration %d of %d" % (i + 1, bootstrap_iterations))
                ## If bootstrapping, save outcome with subscript
                outcome = outcome_main + '_s%d' % i
                bs_sample = int(bootstrap_proportion * X_train_all.shape[0])
                X_train, y_train = resample(X_train_all, y_train_all,
                                            replace=True, n_samples=bs_sample, random_state=i)
            for alg in alg_list:
                print(f'training {outcome} with {alg}')
                if not os.path.exists(save_path + alg + '/'):
                    os.makedirs(save_path + alg + '/')
                ## Run shallow/small version of RF
                alg_run = 'rf_shallow' if alg == 'rf' else alg
                m, perf = run_model(X_train, y_train, X_test, y_test, alg_run, outcome, task=task_type,
                                    seed=s, cv_folds=5,
                                    save=False
                                    )
                ## Save model constraints for downstream optimization
                constraintL = opticl.ConstraintLearning(X_train, y_train, m, alg)
                constraint_add = constraintL.constraint_extrapolation(task_type)
                constraint_add.to_csv(save_path + '/%s/%s_%s_model.csv' % (alg, version, outcome), index=False)
                ## Extract performance metrics; AUC-at-threshold is best-effort
                ## (spec may lack lb/ub, or the thresholded labels may be degenerate)
                try:
                    threshold = outcome_specs['lb'] or outcome_specs['ub']
                    perf['auc_threshold'] = threshold
                    perf['auc_train'] = metrics.roc_auc_score(y_train >= threshold, m.predict(X_train))
                    perf['auc_test'] = metrics.roc_auc_score(y_test >= threshold, m.predict(X_test))
                except Exception:
                    perf['auc_threshold'] = np.nan
                    perf['auc_train'] = np.nan
                    perf['auc_test'] = np.nan
                perf['seed'] = s
                perf['outcome'] = outcome_main
                perf['outcome_label'] = outcome
                perf['alg'] = alg
                perf['bootstrap_iteration'] = i
                perf['save_path'] = save_path + '%s/%s_%s_model.csv' % (alg, version, outcome)
                perf.to_csv(save_path + '%s/%s_%s_performance.csv' % (alg, version, outcome), index=False)
                # DataFrame.append was removed in pandas 2.x; use concat instead
                performance = pd.concat([performance, perf])
                print()
    print('Saving the performance...')
    print('Done!')
    return performance
| 22,467 | 46.004184 | 123 | py |
Ranger-Deep-Learning-Optimizer | Ranger-Deep-Learning-Optimizer-master/setup.py | #!/usr/bin/env python
import os
from setuptools import find_packages, setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    full_path = os.path.join(os.path.dirname(__file__), fname)
    with open(full_path) as handle:
        return handle.read()
# Package registration; `read` pulls the long description straight from README.md.
setup(
    name='ranger',
    version='0.1.dev0',
    # Ship every package in the repo except test packages.
    packages=find_packages(
        exclude=['tests', '*.tests', '*.tests.*', 'tests.*']
    ),
    package_dir={'ranger': os.path.join('.', 'ranger')},
    description='Ranger - a synergistic optimizer using RAdam '
                '(Rectified Adam) and LookAhead in one codebase ',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    author='Less Wright',
    license='Apache',
    install_requires=['torch']  # the optimizer itself only depends on PyTorch
)
| 696 | 24.814815 | 67 | py |
Ranger-Deep-Learning-Optimizer | Ranger-Deep-Learning-Optimizer-master/ranger/rangerqh.py | # RangerQH - @lessw2020 github
# Combines Quasi Hyperbolic momentum with Hinton Lookahead.
# https://arxiv.org/abs/1810.06801v4 (QH paper)
# #Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
# Some portions = Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim.optimizer import Optimizer
#from ..common import param_conv
class RangerQH(Optimizer):
    r"""Implements the QHAdam optimization algorithm `(Ma and Yarats, 2019)`_,
    combined with the Hinton/Zhang Lookahead mechanism.

    Args:
        params (iterable):
            iterable of parameters to optimize or dicts defining parameter
            groups
        lr (float, optional): learning rate (:math:`\alpha` from the paper)
            (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its square
            (default: (0.9, 0.999))
        nus (Tuple[float, float], optional): immediate discount factors used to
            estimate the gradient and its square
            (default: (0.7, 1.0))
        weight_decay (float, optional): weight decay (default: 0.0)
        k (int, optional): lookahead synchronization period in steps
            (default: 6)
        alpha (float, optional): lookahead interpolation factor
            (default: 0.5)
        decouple_weight_decay (bool, optional): whether to decouple the weight
            decay from the gradient-based optimization step
            (default: False)
        eps (float, optional): term added to the denominator to improve
            numerical stability
            (default: 1e-8)

    Example:
        >>> optimizer = RangerQH(
        ...     model.parameters(),
        ...     lr=3e-4, nus=(0.8, 1.0), betas=(0.99, 0.999))
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    .. _`(Ma and Yarats, 2019)`: https://arxiv.org/abs/1810.06801
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        nus=(.7, 1.0),
        weight_decay=0.0,
        k=6,
        alpha=.5,
        decouple_weight_decay=False,
        eps=1e-8,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = {
            "lr": lr,
            "betas": betas,
            "nus": nus,
            "weight_decay": weight_decay,
            "decouple_weight_decay": decouple_weight_decay,
            "eps": eps,
        }
        super().__init__(params, defaults)

        # lookahead params
        self.alpha = alpha
        self.k = k

    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional):
                A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            lr = group["lr"]
            beta1, beta2 = group["betas"]
            nu1, nu2 = group["nus"]
            weight_decay = group["weight_decay"]
            decouple_weight_decay = group["decouple_weight_decay"]
            eps = group["eps"]

            for p in group["params"]:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if d_p.is_sparse:
                    raise RuntimeError("QHAdam does not support sparse gradients")

                if weight_decay != 0:
                    if decouple_weight_decay:
                        p.data.mul_(1 - lr * weight_decay)
                    else:
                        # FIX: modern keyword form; the old positional-alpha
                        # Tensor.add_(scalar, tensor) overload was removed.
                        d_p.add_(p.data, alpha=weight_decay)

                d_p_sq = d_p.mul(d_p)

                # lazily init per-param state (also holds lookahead weights)
                param_state = self.state[p]
                if len(param_state) == 0:
                    param_state["beta1_weight"] = 0.0
                    param_state["beta2_weight"] = 0.0
                    param_state['step'] = 0
                    param_state["exp_avg"] = torch.zeros_like(p.data)
                    param_state["exp_avg_sq"] = torch.zeros_like(p.data)
                    # look ahead weight storage now in state dict
                    param_state['slow_buffer'] = torch.empty_like(p.data)
                    param_state['slow_buffer'].copy_(p.data)

                param_state['step'] += 1
                param_state["beta1_weight"] = 1.0 + beta1 * param_state["beta1_weight"]
                param_state["beta2_weight"] = 1.0 + beta2 * param_state["beta2_weight"]

                beta1_weight = param_state["beta1_weight"]
                beta2_weight = param_state["beta2_weight"]
                exp_avg = param_state["exp_avg"]
                exp_avg_sq = param_state["exp_avg_sq"]

                # unbiased EMA coefficients (QHAdam's running normalization)
                beta1_adj = 1.0 - (1.0 / beta1_weight)
                beta2_adj = 1.0 - (1.0 / beta2_weight)
                exp_avg.mul_(beta1_adj).add_(d_p, alpha=1.0 - beta1_adj)
                exp_avg_sq.mul_(beta2_adj).add_(d_p_sq, alpha=1.0 - beta2_adj)

                # quasi-hyperbolic blend of EMA and raw gradient
                avg_grad = exp_avg.mul(nu1)
                if nu1 != 1.0:
                    avg_grad.add_(d_p, alpha=1.0 - nu1)

                avg_grad_rms = exp_avg_sq.mul(nu2)
                if nu2 != 1.0:
                    avg_grad_rms.add_(d_p_sq, alpha=1.0 - nu2)
                avg_grad_rms.sqrt_()
                if eps != 0.0:
                    avg_grad_rms.add_(eps)

                p.data.addcdiv_(avg_grad, avg_grad_rms, value=-lr)

                # integrated lookahead, at the param level instead of group level
                if param_state['step'] % self.k == 0:
                    slow_p = param_state['slow_buffer']  # slow weights tensor
                    # slow += alpha * (fast - slow)
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    p.data.copy_(slow_p)  # copy interpolated weights back

        return loss

    @classmethod
    def _params_to_dict(cls, params):
        return {"lr": params.alpha, "nus": (params.nu1, params.nu2), "betas": (params.beta1, params.beta2)}
| 6,752 | 35.901639 | 107 | py |
Ranger-Deep-Learning-Optimizer | Ranger-Deep-Learning-Optimizer-master/ranger/ranger913A.py | # Ranger deep learning optimizer - RAdam + Lookahead + calibrated adaptive LR combined.
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# Ranger has now been used to capture 12 records on the FastAI leaderboard.
#This version = 9.13.19A
#Credits:
#RAdam --> https://github.com/LiyuanLucasLiu/RAdam
#Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
#Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
# Calibrated anisotropic adaptive learning rates - https://arxiv.org/abs/1908.00700v2
#summary of changes:
#full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
#supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
#changes 8/31/19 - fix references to *self*.N_sma_threshold;
#changed eps to 1e-5 as better default than 1e-8.
import math
import torch
from torch.optim.optimizer import Optimizer, required
import itertools as it
class RangerVA(Optimizer):
    """Ranger913A: RAdam + Lookahead + calibrated anisotropic adaptive LRs.

    Combines the RAdam bias handling with Hinton/Zhang Lookahead and the
    calibrated adaptive learning-rate transformer of
    https://arxiv.org/abs/1908.00700v2 (Softplus-smoothed denominator).
    """

    def __init__(self, params, lr=1e-3, alpha=0.5, k=6, n_sma_threshhold=5, betas=(.95, 0.999),
                 eps=1e-5, weight_decay=0, amsgrad=True, transformer='softplus', smooth=50,
                 grad_transformer='square'):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')

        # beta1 (momentum) of .95 tends to work better than .90, and an
        # N_sma threshold of 5 better than 4 — worth testing per dataset.
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,
                        n_sma_threshhold=n_sma_threshhold, eps=eps, weight_decay=weight_decay,
                        smooth=smooth, transformer=transformer, grad_transformer=grad_transformer,
                        amsgrad=amsgrad)
        super().__init__(params, defaults)

        # adjustable threshold (kept on the instance for step())
        self.n_sma_threshhold = n_sma_threshhold

        # lookahead params
        self.alpha = alpha
        self.k = k

        # radam buffer for state (cached per step-mod-10)
        self.radam_buffer = [[None, None, None] for ind in range(10)]

    def __setstate__(self, state):
        print("set state called")
        super(RangerVA, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Note: `closure` is accepted for API compatibility but never invoked
        (the original code passed the loss back as a float elsewhere).
        """
        loss = None

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')

                amsgrad = group['amsgrad']
                smooth = group['smooth']
                grad_transformer = group['grad_transformer']

                p_data_fp32 = p.data.float()

                state = self.state[p]  # per-param state dict
                if len(state) == 0:
                    # first update for this param: init moments + lookahead buffer
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        # maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                    # lookahead weight storage lives in the state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']

                # second-moment moving average (note: updated a second time
                # below via grad_tmp — preserving the original algorithm)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # first-moment moving average
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                # gradient transformer; only 'square' and 'abs' are supported
                if grad_transformer == 'square':
                    grad_tmp = grad ** 2
                elif grad_transformer == 'abs':
                    grad_tmp = grad.abs()

                exp_avg_sq.mul_(beta2).add_((1 - beta2) * grad_tmp)
                if amsgrad:
                    # keep the maximum of all 2nd-moment running averages
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denomc = max_exp_avg_sq.clone()
                else:
                    denomc = exp_avg_sq.clone()
                if grad_transformer == 'square':
                    denomc.sqrt_()

                state['step'] += 1
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['transformer'] == 'softplus':
                    # calibrated adaptive LR: smooth the denominator via Softplus
                    sp = torch.nn.Softplus(smooth)
                    denomf = sp(denomc)
                    p_data_fp32.addcdiv_(exp_avg, denomf, value=-step_size)
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    # BUG FIX: step_size already contains group['lr']; the old
                    # code multiplied by lr a second time in this branch.
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                p.data.copy_(p_data_fp32)

                # integrated lookahead, at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # slow weights tensor
                    # slow += alpha * (fast - slow)
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    p.data.copy_(slow_p)  # copy interpolated weights back

        return loss
| 8,362 | 39.400966 | 133 | py |
Ranger-Deep-Learning-Optimizer | Ranger-Deep-Learning-Optimizer-master/ranger/ranger.py | # Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer.
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# and/or
# https://github.com/lessw2020/Best-Deep-Learning-Optimizers
# Ranger has now been used to capture 12 records on the FastAI leaderboard.
# This version = 20.4.11
# Credits:
# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization
# RAdam --> https://github.com/LiyuanLucasLiu/RAdam
# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
# summary of changes:
# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init.
# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
# changes 8/31/19 - fix references to *self*.N_sma_threshold;
# changed eps to 1e-5 as better default than 1e-8.
import math
import torch
from torch.optim.optimizer import Optimizer, required
class Ranger(Optimizer):
    """Ranger: RAdam + Lookahead + optional Gradient Centralization.

    GC reference: https://arxiv.org/abs/2004.01461v2
    RAdam: https://github.com/LiyuanLucasLiu/RAdam
    Lookahead: MZhang, G. Hinton, https://arxiv.org/abs/1907.08610
    """

    def __init__(self, params, lr=1e-3,  # lr
                 alpha=0.5, k=6, N_sma_threshhold=5,  # Ranger options
                 betas=(.95, 0.999), eps=1e-5, weight_decay=0,  # Adam options
                 # Gradient centralization on or off, applied to conv layers only or conv + fc layers
                 use_gc=True, gc_conv_only=False
                 ):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')

        # beta1 (momentum) of .95 tends to work better than .90, and an
        # N_sma threshold of 5 better than 4 — worth testing per dataset.
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,
                        N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # adjustable threshold for switching rectified/unrectified updates
        self.N_sma_threshhold = N_sma_threshhold

        # lookahead params
        self.alpha = alpha
        self.k = k

        # radam buffer for state (cached per step-mod-10)
        self.radam_buffer = [[None, None, None] for ind in range(10)]

        # gc on or off; threshold 3 = conv-only (4D+), 1 = conv + fc (2D+)
        self.use_gc = use_gc
        self.gc_gradient_threshold = 3 if gc_conv_only else 1

        print(
            f"Ranger optimizer loaded. \nGradient Centralization usage = {self.use_gc}")
        if (self.use_gc and self.gc_gradient_threshold == 1):
            print(f"GC applied to both conv and fc layers")
        elif (self.use_gc and self.gc_gradient_threshold == 3):
            print(f"GC applied to conv layers only")

    def __setstate__(self, state):
        print("set state called")
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Note: `closure` is accepted for API compatibility but never invoked
        (the original code passed the loss back as a float elsewhere).
        """
        loss = None

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()

                if grad.is_sparse:
                    raise RuntimeError(
                        'Ranger optimizer does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]  # per-param state dict
                if len(state) == 0:
                    # first update for this param: init moments + lookahead buffer
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    # lookahead weight storage lives in the state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # GC operation: subtract the per-slice gradient mean
                # (conv layers only, or conv + fc, per the threshold)
                if grad.dim() > self.gc_gradient_threshold:
                    grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))

                state['step'] += 1

                # second-moment moving average (modern keyword form; the old
                # positional-alpha overloads were removed from PyTorch)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # first-moment moving average
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                # RAdam variance rectification, cached per step-mod-10
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * \
                        state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
                            N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # variance not yet tractable: fall back to SGD-with-momentum
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # apply lr
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(
                        exp_avg, denom, value=-step_size * group['lr'])
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])

                p.data.copy_(p_data_fp32)

                # integrated lookahead, at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # slow weights tensor
                    # slow += alpha * (fast - slow)
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    # copy interpolated weights back to the RAdam param tensor
                    p.data.copy_(slow_p)

        return loss
| 7,915 | 41.789189 | 169 | py |
Ranger-Deep-Learning-Optimizer | Ranger-Deep-Learning-Optimizer-master/ranger/ranger2020.py | # Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer.
# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer
# and/or
# https://github.com/lessw2020/Best-Deep-Learning-Optimizers
# Ranger has been used to capture 12 records on the FastAI leaderboard.
# This version = 2020.9.4
# Credits:
# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization
# RAdam --> https://github.com/LiyuanLucasLiu/RAdam
# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.
# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610
# summary of changes:
# 9/4/20 - updated addcmul_ signature to avoid warning. Integrates latest changes from GC developer (he did the work for this), and verified on performance on private dataset.
# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init.
# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),
# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.
# changes 8/31/19 - fix references to *self*.N_sma_threshold;
# changed eps to 1e-5 as better default than 1e-8.
import math
import torch
from torch.optim.optimizer import Optimizer, required
def centralized_gradient(x, use_gc=True, gc_conv_only=False):
    '''Gradient Centralization: subtract the per-slice mean from *x* in place.

    credit - https://github.com/Yonghongwei/Gradient-Centralization
    With gc_conv_only, only tensors of rank > 3 (conv weights) are touched;
    otherwise any tensor of rank > 1 (conv + fc) is centralized.
    '''
    if not use_gc:
        return x
    min_rank = 3 if gc_conv_only else 1
    rank = x.dim()
    if rank > min_rank:
        reduce_dims = tuple(range(1, rank))
        x.add_(-x.mean(dim=reduce_dims, keepdim=True))
    return x
class Ranger(Optimizer):
def __init__(self, params, lr=1e-3, # lr
alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options
betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options
# Gradient centralization on or off, applied to conv layers only or conv + fc layers
use_gc=True, gc_conv_only=False, gc_loc=True
):
# parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
# parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
# N_sma_threshold of 5 seems better in testing than 4.
# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
# prep defaults and init torch.optim base
defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas,
N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
# adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
# look ahead params
self.alpha = alpha
self.k = k
# radam buffer for state
self.radam_buffer = [[None, None, None] for ind in range(10)]
# gc on or off
self.gc_loc = gc_loc
self.use_gc = use_gc
self.gc_conv_only = gc_conv_only
# level of gradient centralization
#self.gc_gradient_threshold = 3 if gc_conv_only else 1
print(
f"Ranger optimizer loaded. \nGradient Centralization usage = {self.use_gc}")
if (self.use_gc and self.gc_conv_only == False):
print(f"GC applied to both conv and fc layers")
elif (self.use_gc and self.gc_conv_only == True):
print(f"GC applied to conv layers only")
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        Combines a RAdam update (rectified adaptive learning rate with a
        per-step buffer cache) with Lookahead (slow/fast weight interpolation
        every ``k`` steps) and optional gradient centralization.

        Args:
            closure: Accepted for API compatibility with ``torch.optim`` but
                intentionally unused (see note below).

        Returns:
            Always ``None`` in this implementation (``loss`` is never set).
        """
        loss = None
        # note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
        # Uncomment if you need to use the actual closure...
        # if closure is not None:
        #loss = closure()
        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 even if the parameter is half precision.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'Ranger optimizer does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]  # get state dict for this param
                if len(state) == 0:  # if first time to run...init dictionary with our desired entries
                    # if self.first_run_check==0:
                    #   self.first_run_check=1
                    #print("Initializing slow buffer...should not see this at load from saved model!")
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    # Keep moving averages in the same dtype as the fp32 copy.
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
                        p_data_fp32)
                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # GC operation for Conv layers and FC layers
                # if grad.dim() > self.gc_gradient_threshold:
                #    grad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))
                if self.gc_loc:
                    # Centralize the raw gradient before the moment updates.
                    grad = centralized_gradient(grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)
                state['step'] += 1
                # compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # RAdam caches (N_sma, step_size) per step modulo 10 so the
                # scalar math is only redone once per distinct step value.
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * \
                        state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        # Variance-rectified step size (RAdam, Liu et al. 2019).
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
                            N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # Too few samples: fall back to un-adapted (SGD-like) step.
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                # if group['weight_decay'] != 0:
                #    p_data_fp32.add_(-group['weight_decay']
                #                     * group['lr'], p_data_fp32)
                # apply lr
                if N_sma > self.N_sma_threshhold:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    G_grad = exp_avg / denom
                else:
                    G_grad = exp_avg
                # Decoupled-style weight decay folded into the update direction.
                if group['weight_decay'] != 0:
                    G_grad.add_(p_data_fp32, alpha=group['weight_decay'])
                # GC operation
                if self.gc_loc == False:
                    G_grad = centralized_gradient(G_grad, use_gc=self.use_gc, gc_conv_only=self.gc_conv_only)
                p_data_fp32.add_(G_grad, alpha=-step_size * group['lr'])
                p.data.copy_(p_data_fp32)
                # integrated look ahead...
                # we do it at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    # get access to slow param tensor
                    slow_p = state['slow_buffer']
                    # (fast weights - slow weights) * alpha
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    # copy interpolated weights to RAdam param tensor
                    p.data.copy_(slow_p)
        return loss
| 9,051 | 42.311005 | 176 | py |
RADio | RADio-main/dart/preprocess/nlp.py | import datetime
# if that doesn't work, do pip install -U numpy
# https://discuss.pytorch.org/t/valueerror-and-importerror-occurred-when-import-torch/5818
import nl_core_news_sm
import pandas as pd
import en_core_web_sm
from textblob import TextBlob
import textstat
import json
import os
import dart.Util
# Module-level setup: pick the spaCy pipeline and textstat language from the
# project configuration. The dependency parser is disabled because only the
# NER output (doc.ents) is consumed downstream.
config = dart.Util.read_config_file()
language = config["language"]
if language == 'english':
    nlp = en_core_web_sm.load(disable=['parser'])
    textstat.set_lang('en')
elif language == 'dutch':
    nlp = nl_core_news_sm.load(disable=['parser'])
    textstat.set_lang('nl')
else:
    raise Exception("Sorry, language not implemented yet")
def process(df):
    """Annotate unprocessed articles with named entities, sentiment and complexity.

    Rows whose ``entities`` column is null are run through the spaCy pipeline
    in ~100 chunks; for each document the function writes ``entities_base``
    (list of entity dicts), ``sentiment`` (TextBlob polarity) and
    ``complexity`` (Flesch reading ease) back into *df*, and checkpoints the
    whole frame to ``data/rtl/annotated.json`` after every chunk.

    NOTE(review): only ``entities_base`` is filled, never ``entities`` — so
    the null filter below will select the same rows again on a rerun; confirm
    whether ``entities`` was meant to be set here.

    Args:
        df: pandas DataFrame with at least ``text`` and ``entities`` columns.

    Returns:
        The same DataFrame, mutated in place with the new annotations.
    """
    to_process = df[df.entities.isnull()]
    split = 100
    # Integer chunk size; when fewer than `split` rows remain this is 0 and
    # the loop body is skipped entirely.
    chunk_size = int(to_process.shape[0] / split)
    if chunk_size > 0:
        for start in range(0, to_process.shape[0], chunk_size):
            print("\t{:.0f}/{}".format(start / chunk_size, split))
            df_subset = to_process.iloc[start:start + chunk_size]
            # Batch the texts through spaCy for throughput.
            enrich = list(nlp.pipe(df_subset.text))
            for i, doc in enumerate(enrich):
                # .name of the row Series is the original DataFrame index.
                index = df_subset.iloc[i].name
                entities = [{
                    'text': e.text,
                    'start_char': e.start_char,
                    'end_char': e.end_char,
                    'label': e.label_
                } for e in doc.ents]
                df.at[index, "entities_base"] = entities
                blob = TextBlob(doc.text)
                df.at[index, "sentiment"] = blob.polarity
                complexity = textstat.flesch_reading_ease(doc.text)
                df.at[index, "complexity"] = complexity
            # Checkpoint after each chunk so a crash loses at most one chunk.
            df.to_json("data/rtl/annotated.json")
    return df
def resolve_dates(df):
    """Normalize the raw ``date`` strings and derive a ``publication_date`` column.

    Strips surrounding whitespace from ``date`` and parses it with the strict
    ``%m/%d/%Y`` format, raising on any value that does not match.
    """
    cleaned = df['date'].apply(lambda value: str(value).strip())
    df['date'] = cleaned
    df['publication_date'] = pd.to_datetime(cleaned, format='%m/%d/%Y', errors='raise')
    return df
def execute():
    """Load (or resume) the RTL article frame and run the NLP annotation pass.

    If a previous checkpoint exists at ``data/rtl/annotated.json`` it is
    resumed; otherwise the raw CSV is loaded, rows with missing/"not found"
    text are dropped, and the annotation columns are initialized.

    Returns:
        The annotated pandas DataFrame.
    """
    if os.path.exists("data/rtl/annotated.json"):
        texts = pd.read_json("data/rtl/annotated.json")
    else:
        texts = pd.read_csv("data/rtl/article_text.csv").set_index('ID')
        # Drop scrape failures and rows without any text.
        texts.drop(texts[texts.text == 'not found'].index, inplace=True)
        texts = texts.dropna(subset=['text'])
    if 'entities' not in texts:
        # First run: create the columns `process` fills in.
        texts["entities"] = None
        texts["entities_base"] = None
        texts["sentiment"] = None
        texts["complexity"] = None
    texts = process(texts)
    # NOTE(review): copies the raw string column rather than the parsed
    # datetimes from `resolve_dates` — confirm this is intentional.
    texts['publication_date'] = texts['date']
    return texts
| 2,500 | 28.081395 | 90 | py |
acoustic-images-distillation | acoustic-images-distillation-master/setup.py | from setuptools import setup
# Packaging metadata for the `codebase` library (data loaders, trainers,
# models). Pinned to tensorflow-gpu 1.4.0 — the TF1/slim APIs used elsewhere
# in this repository are not compatible with TF2.
setup(
    name='codebase',
    version='0.0.1',
    packages=['codebase'],
    install_requires=['librosa', 'numpy', 'tensorflow-gpu==1.4.0', 'torchfile'],
    url='https://gitlab.iit.it/aperez/acoustic-images-distillation',
    license='',
    author='Andres Perez',
    author_email='andres.perez@iit.it',
    description='A Python package with a sample data loader, trainers and models'
)
| 423 | 29.285714 | 81 | py |
acoustic-images-distillation | acoustic-images-distillation-master/codebase/models/soundnet.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import torchfile
def soundnet_arg_scope(is_training=True,
                       weight_decay=0.0001):
    """Defines the SoundNet arg scope.

    Args:
        is_training: Boolean flag indicating whether we are in training or not.
        weight_decay: The l2 regularization coefficient.

    Returns:
        An arg_scope.
    """
    # Convolutions default to VALID padding with no activation/normalizer:
    # the network functions below apply batch_norm (which carries the ReLU)
    # explicitly after each conv.
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.regularizers.l2_regularizer(weight_decay),
            padding='VALID',
            activation_fn=None,
            normalizer_fn=None,
            normalizer_params=None,
            weights_initializer=tf.truncated_normal_initializer(0.0, stddev=0.01),
            biases_initializer=tf.constant_initializer(0.0)):
        # batch_norm is configured with scale + fused ReLU; the scope must be
        # returned from inside both `with` blocks so it captures both layers'
        # defaults.
        with slim.arg_scope([slim.batch_norm],
                            scale=True,
                            activation_fn=slim.nn_ops.relu,
                            is_training=is_training) as arg_sc:
            return arg_sc
def soundnet5(inputs,
              num_classes=None,
              spatial_squeeze=False,
              scope='SoundNet'):
    """
    Builds a SoundNet 5-Layers network.

    All kernels are [k, 1], i.e. the network convolves along the first
    spatial axis only; the expected input layout is
    [batch, samples, 1, 1] (see soundnet5.default_size).

    Args:
        inputs: Raw waveform tensor.
        num_classes: If None, build the two original SoundNet output heads
            (conv5a: 1000 scene units, conv5b: 401 object units) and return
            them as a tuple; otherwise build a conv5/conv6 feature trunk.
        spatial_squeeze: If True (and num_classes is set), squeeze the two
            singleton spatial dimensions of the output.
        scope: Variable scope name.

    Returns:
        (net, end_points) — `net` is a tensor, or a (conv5a, conv5b) tuple
        when num_classes is None.
    """
    with tf.variable_scope(scope, 'SoundNet', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for convolution2d and max_pool2d
        with slim.arg_scope([slim.layers.conv2d, slim.batch_norm, slim.layers.max_pool2d],
                            outputs_collections=[end_points_collection]):
            # Explicit symmetric padding before each VALID conv mirrors the
            # padding of the original Torch SoundNet model.
            # ----------- 1st layer group ---------------
            net = tf.pad(inputs, [[0, 0], [32, 32], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 32, [64, 1], [2, 1], scope='conv1')
            net = slim.batch_norm(net, scope='conv1/norm')
            net = slim.max_pool2d(net, [8, 1], [8, 1], scope='pool1')
            # ----------- 2nd layer group ---------------
            net = tf.pad(net, [[0, 0], [16, 16], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 64, [32, 1], [2, 1], scope='conv2')
            net = slim.batch_norm(net, scope='conv2/norm')
            net = slim.max_pool2d(net, [8, 1], [8, 1], scope='pool2')
            # ----------- 3rd layer group ---------------
            net = tf.pad(net, [[0, 0], [8, 8], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 128, [16, 1], [2, 1], scope='conv3')
            net = slim.batch_norm(net, scope='conv3/norm')
            net = slim.max_pool2d(net, [8, 1], [8, 1], scope='pool3')
            # ----------- 4th layer group ---------------
            net = tf.pad(net, [[0, 0], [4, 4], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 256, [8, 1], [2, 1], scope='conv4')
            net = slim.batch_norm(net, scope='conv4/norm')
            # ----------- 5th layer group ---------------
            net = tf.pad(net, [[0, 0], [4, 4], [0, 0], [0, 0]], 'CONSTANT')
            if num_classes is None:
                # Original pretrained heads (no batch norm / activation).
                conv5a = slim.conv2d(net, 1000, [16, 1], [12, 1], scope='conv5a')
                conv5b = slim.conv2d(net, 401, [16, 1], [12, 1], scope='conv5b')
                net = (conv5a, conv5b)
            else:
                net = slim.conv2d(net, 1024, [16, 1], [12, 1], scope='conv5')
                net = slim.batch_norm(net, scope='conv5/norm')
                net = slim.conv2d(net, 1024, 1, scope='conv6', activation_fn=slim.nn_ops.relu)
        # Convert end_points_collection into a end_point dictionary
        end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None and spatial_squeeze:
            # Apply spatial squeezing
            net = tf.squeeze(net, [1, 2], name='conv6/squeezed')
            # Add squeezed fc1 to the collection of end points
            end_points[sc.name + '/conv6'] = net
        return net, end_points


# Default input shape: 5 seconds of 22.05 kHz mono audio, [samples, 1, 1].
soundnet5.default_size = [22050 * 5, 1, 1]
def soundnet8(inputs,
              num_classes=None,
              spatial_squeeze=True,
              scope='SoundNet'):
    """
    Builds a SoundNet 8-Layers network.

    All kernels are [k, 1]; the expected input layout is
    [batch, samples, 1, 1] (see soundnet8.default_size).

    Args:
        inputs: Raw waveform tensor.
        num_classes: If None, build the two original SoundNet output heads
            (conv8a: 1000 scene units, conv8b: 401 object units) and return
            them as a tuple; otherwise build a conv8/conv9 feature trunk.
        spatial_squeeze: If True (and num_classes is set), squeeze the two
            singleton spatial dimensions of the output.
        scope: Variable scope name.

    Returns:
        (net, end_points) — `net` is a tensor, or a (conv8a, conv8b) tuple
        when num_classes is None.
    """
    with tf.variable_scope(scope, 'SoundNet', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for convolution2d and max_pool2d
        with slim.arg_scope([slim.layers.conv2d, slim.batch_norm, slim.layers.max_pool2d],
                            outputs_collections=[end_points_collection]):
            # Explicit symmetric padding before each VALID conv mirrors the
            # padding of the original Torch SoundNet model.
            # ----------- 1st layer group ---------------
            net = tf.pad(inputs, [[0, 0], [32, 32], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 16, [64, 1], [2, 1], scope='conv1')
            net = slim.batch_norm(net, scope='conv1/norm')
            net = slim.max_pool2d(net, [8, 1], [8, 1], scope='pool1')
            # ----------- 2nd layer group ---------------
            net = tf.pad(net, [[0, 0], [16, 16], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 32, [32, 1], [2, 1], scope='conv2')
            net = slim.batch_norm(net, scope='conv2/norm')
            net = slim.max_pool2d(net, [8, 1], [8, 1], scope='pool2')
            # ----------- 3rd layer group ---------------
            net = tf.pad(net, [[0, 0], [8, 8], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 64, [16, 1], [2, 1], scope='conv3')
            net = slim.batch_norm(net, scope='conv3/norm')
            # ----------- 4th layer group ---------------
            net = tf.pad(net, [[0, 0], [4, 4], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 128, [8, 1], [2, 1], scope='conv4')
            net = slim.batch_norm(net, scope='conv4/norm')
            # ----------- 5th layer group ---------------
            net = tf.pad(net, [[0, 0], [2, 2], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 256, [4, 1], [2, 1], scope='conv5')
            net = slim.batch_norm(net, scope='conv5/norm')
            net = slim.max_pool2d(net, [4, 1], [4, 1], scope='pool5')
            # ----------- 6th layer group ---------------
            net = tf.pad(net, [[0, 0], [2, 2], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 512, [4, 1], [2, 1], scope='conv6')
            net = slim.batch_norm(net, scope='conv6/norm')
            # ----------- 7th layer group ---------------
            net = tf.pad(net, [[0, 0], [2, 2], [0, 0], [0, 0]], 'CONSTANT')
            net = slim.conv2d(net, 1024, [4, 1], [2, 1], scope='conv7')
            net = slim.batch_norm(net, scope='conv7/norm')
            # ----------- 8th layer group ---------------
            if num_classes is None:
                # Original pretrained heads (no batch norm / activation).
                conv8a = slim.conv2d(net, 1000, [8, 1], [2, 1], scope='conv8a')
                conv8b = slim.conv2d(net, 401, [8, 1], [2, 1], scope='conv8b')
                net = (conv8a, conv8b)
            else:
                net = slim.conv2d(net, 1024, [8, 1], [2, 1], scope='conv8')
                net = slim.batch_norm(net, scope='conv8/norm')
                net = slim.conv2d(net, 1024, 1, scope='conv9', activation_fn=slim.nn_ops.relu)
        # Convert end_points_collection into a end_point dictionary
        end_points = slim.layers.utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None and spatial_squeeze:
            # Apply spatial squeezing
            net = tf.squeeze(net, [1, 2], name='conv9/squeezed')
            # Add squeezed fc1 to the collection of end points
            end_points[sc.name + '/conv9'] = net
        return net, end_points


# Default input shape: 5 seconds of 22.05 kHz mono audio, [samples, 1, 1].
soundnet8.default_size = [22050 * 5, 1, 1]
def soundnet5_model_params(model_filename=None, num_classes=None, scope='SoundNet'):
    """Load SoundNet-5 parameters from a pretrained Torch model file.

    Maps the sequential Torch module indices onto the TF-Slim variable names
    used by `soundnet5`. With ``model_filename=None`` the same key set is
    produced with every value ``None`` (useful as a name template).

    Args:
        model_filename: Path to the Torch (.t7) model file, or None.
        num_classes: If None, also extract the original conv5a/conv5b output
            heads (stored under Torch module index 15).
        scope: Variable-name prefix matching the TF variable scope.

    Returns:
        Dict mapping TF variable names to numpy arrays (or None).
    """
    def retrieve(local_net, layer_num, var_name):
        # Index into the flat Torch `modules` list; None-safe for the
        # template (no file) case.
        return None if local_net is None else local_net['modules'][layer_num][var_name]

    def transpose(x):
        # Torch conv weights are (out, in, h, w); TF expects (h, w, in, out).
        return None if x is None else x.transpose((2, 3, 1, 0))

    net = None if model_filename is None else torchfile.load(model_filename)
    # Module 15 holds the parallel (conv5a, conv5b) output heads.
    sub_net = None if net is None else net['modules'][15]

    net_params = {}
    # Each conv layer sits at a fixed Torch module index, with its batch norm
    # immediately after it (index + 1).
    for conv_name, idx in (('conv1', 0), ('conv2', 4), ('conv3', 8), ('conv4', 12)):
        prefix = scope + '/' + conv_name
        net_params[prefix + '/weights'] = transpose(retrieve(net, idx, 'weight'))
        net_params[prefix + '/biases'] = retrieve(net, idx, 'bias')
        net_params[prefix + '/norm/gamma'] = retrieve(net, idx + 1, 'weight')
        net_params[prefix + '/norm/beta'] = retrieve(net, idx + 1, 'bias')
        net_params[prefix + '/norm/moving_mean'] = retrieve(net, idx + 1, 'running_mean')
        net_params[prefix + '/norm/moving_variance'] = retrieve(net, idx + 1, 'running_var')
    if num_classes is None:
        # Output heads have no batch norm: weights and biases only.
        net_params[scope + '/conv5a/weights'] = transpose(retrieve(sub_net, 0, 'weight'))
        net_params[scope + '/conv5a/biases'] = retrieve(sub_net, 0, 'bias')
        net_params[scope + '/conv5b/weights'] = transpose(retrieve(sub_net, 1, 'weight'))
        net_params[scope + '/conv5b/biases'] = retrieve(sub_net, 1, 'bias')
    return net_params
def soundnet8_model_params(model_filename=None, num_classes=None, scope='SoundNet'):
    """Load SoundNet-8 parameters from a pretrained Torch model file.

    Maps the sequential Torch module indices onto the TF-Slim variable names
    used by `soundnet8`. With ``model_filename=None`` the same key set is
    produced with every value ``None`` (useful as a name template).

    Args:
        model_filename: Path to the Torch (.t7) model file, or None.
        num_classes: If None, also extract the original conv8a/conv8b output
            heads (stored under Torch module index 24).
        scope: Variable-name prefix matching the TF variable scope.

    Returns:
        Dict mapping TF variable names to numpy arrays (or None).
    """
    def retrieve(local_net, layer_num, var_name):
        # Index into the flat Torch `modules` list; None-safe for the
        # template (no file) case.
        return None if local_net is None else local_net['modules'][layer_num][var_name]

    def transpose(x):
        # Torch conv weights are (out, in, h, w); TF expects (h, w, in, out).
        return None if x is None else x.transpose((2, 3, 1, 0))

    net = None if model_filename is None else torchfile.load(model_filename)
    # Module 24 holds the parallel (conv8a, conv8b) output heads.
    sub_net = None if net is None else net['modules'][24]

    net_params = {}
    # Each conv layer sits at a fixed Torch module index, with its batch norm
    # immediately after it (index + 1). The indices are irregular because of
    # interleaved pooling/activation modules in the Torch graph.
    for conv_name, idx in (('conv1', 0), ('conv2', 4), ('conv3', 8), ('conv4', 11),
                           ('conv5', 14), ('conv6', 18), ('conv7', 21)):
        prefix = scope + '/' + conv_name
        net_params[prefix + '/weights'] = transpose(retrieve(net, idx, 'weight'))
        net_params[prefix + '/biases'] = retrieve(net, idx, 'bias')
        net_params[prefix + '/norm/gamma'] = retrieve(net, idx + 1, 'weight')
        net_params[prefix + '/norm/beta'] = retrieve(net, idx + 1, 'bias')
        net_params[prefix + '/norm/moving_mean'] = retrieve(net, idx + 1, 'running_mean')
        net_params[prefix + '/norm/moving_variance'] = retrieve(net, idx + 1, 'running_var')
    if num_classes is None:
        # Output heads have no batch norm: weights and biases only.
        net_params[scope + '/conv8a/weights'] = transpose(retrieve(sub_net, 0, 'weight'))
        net_params[scope + '/conv8a/biases'] = retrieve(sub_net, 0, 'bias')
        net_params[scope + '/conv8b/weights'] = transpose(retrieve(sub_net, 1, 'weight'))
        net_params[scope + '/conv8b/biases'] = retrieve(sub_net, 1, 'bias')
    return net_params
| 15,910 | 51.167213 | 113 | py |
acoustic-images-distillation | acoustic-images-distillation-master/codebase/models/resnet_utils.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains building blocks for various versions of Residual Networks.
Residual networks (ResNets) were proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
More variants were introduced in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
We can obtain different ResNet variants by changing the network depth, width,
and form of residual unit. This module implements the infrastructure for
building them. Concrete ResNet units and full ResNet networks are implemented in
the accompanying resnet_v1.py and resnet_v2.py modules.
Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
implementation we subsample the output activations in the last residual unit of
each block, instead of subsampling the input activations in the first residual
unit of each block. The two implementations give identical results but our
implementation is more memory efficient.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import ipdb
import collections
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
# Immutable description of one ResNet block; consumed by stack_blocks_dense
# and friends below.
class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
    """A named tuple describing a ResNet block.

    Its parts are:
        scope: The scope of the `Block`.
        unit_fn: The ResNet unit function which takes as input a `Tensor` and
            returns another `Tensor` with the output of the ResNet unit.
        args: A list of length equal to the number of units in the `Block`. The list
            contains one (depth, depth_bottleneck, stride) tuple for each unit in the
            block to serve as argument to unit_fn.
    """
def subsample(inputs, factor, scope=None):
    """Subsample the input along its spatial dimensions.

    Args:
        inputs: A `Tensor` of size [batch, height_in, width_in, channels].
        factor: The subsampling factor; 1 means no subsampling.
        scope: Optional variable_scope.

    Returns:
        The input unchanged when factor == 1, otherwise the input max-pooled
        with a 1x1 window and stride `factor` (pure striding, no pooling
        aggregation).
    """
    if factor == 1:
        return inputs
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
    """Strided 2-D convolution with 'SAME'-style padding.

    For stride == 1 this is an ordinary 'SAME' convolution. For stride > 1,
    explicit zero-padding followed by a 'VALID' convolution is used instead,
    because `slim.conv2d(..., stride=stride, padding='SAME')` gives different
    results when the input height/width is even.

    Args:
        inputs: A 4-D tensor of size [batch, height_in, width_in, channels].
        num_outputs: An integer, the number of output filters.
        kernel_size: An int with the kernel_size of the filters.
        stride: An integer, the output stride.
        rate: An integer, rate for atrous convolution.
        scope: Scope.

    Returns:
        A 4-D tensor of size [batch, height_out, width_out, channels] with
        the convolution output.
    """
    if stride == 1:
        return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
                           padding='SAME', scope=scope)
    # Effective kernel extent once atrous holes are accounted for.
    effective_kernel = kernel_size + (kernel_size - 1) * (rate - 1)
    total_pad = effective_kernel - 1
    pad_front = total_pad // 2
    pad_back = total_pad - pad_front
    padded = tf.pad(inputs,
                    [[0, 0], [pad_front, pad_back], [pad_front, pad_back], [0, 0]])
    return slim.conv2d(padded, num_outputs, kernel_size, stride=stride,
                       rate=rate, padding='VALID', scope=scope)
def conv_temp2(inputs, nr_frames, scope=None):
    """Temporal convolution across frames, initialized as the identity.

    Reshapes a frame-flattened batch into [clips, nr_frames, X, Y, C], applies
    a 3x1x1 (temporal) conv3d whose weights start as [0, I, 0] — i.e. an
    identity mapping over channels at init, so training begins equivalent to
    a no-op — and flattens back to [batch, X, Y, C].

    Assumes the leading batch dimension is clips * nr_frames (frames of each
    clip stored contiguously) — TODO confirm against callers.
    """
    # inputs - Batch.X.Y.C
    spatial_x = inputs.shape[1].value
    spatial_y = inputs.shape[2].value
    channels = inputs.shape[3].value
    # Recover the temporal axis: [clips, frames, X, Y, C].
    inputs = tf.reshape(inputs, tf.stack([-1, nr_frames, spatial_x, spatial_y, channels]))
    # Build the [3, 1, 1, C, C] identity-in-the-middle initializer.
    value = np.eye(channels, channels)
    value = np.expand_dims(value, 0)
    value = np.expand_dims(value, 0)
    value = np.stack([np.zeros((1, 1, channels, channels)), value, np.zeros((1, 1, channels, channels))], 0)
    init = tf.constant_initializer(value)
    feats = slim.conv3d(inputs, channels, [3, 1, 1], stride=1, rate=1, padding='SAME', scope=scope,
                        weights_initializer=init)
    # Flatten frames back into the batch dimension.
    feats = tf.reshape(feats, [-1, spatial_x, spatial_y, channels])
    return feats
@slim.add_arg_scope
def stack_blocks_dense(net, blocks, nr_frames, output_stride=None,
                       outputs_collections=None, conn_args=None):
    """Stacks ResNet `Blocks` and controls output feature density.

    First, this function creates scopes for the ResNet in the form of
    'block_name/unit_1', 'block_name/unit_2', etc.

    Second, it allows the user to explicitly control the ResNet output_stride
    (ratio of input to output spatial resolution) via atrous convolution,
    which is useful for dense prediction tasks.

    This variant additionally threads `nr_frames` through every unit and
    passes temporal=True to the second unit (i == 1) of each block only.

    Args:
        net: A `Tensor` of size [batch, height, width, channels].
        blocks: A list of ResNet `Block` objects describing the units.
        nr_frames: Number of frames per clip, forwarded to each unit_fn.
        output_stride: If `None`, compute at the nominal network stride;
            otherwise the requested input/output resolution ratio, which must
            equal a product of unit strides.
        outputs_collections: Collection to add the ResNet block outputs.
        conn_args: Unused here; kept for signature compatibility.

    Returns:
        net: Output tensor with stride equal to the specified output_stride.

    Raises:
        ValueError: If the target output_stride is not valid.
    """
    # current_stride tracks the effective stride of the activations so far;
    # once it reaches output_stride, further units switch to atrous (rate > 1)
    # convolution with stride 1.
    current_stride = 1
    # The atrous convolution rate parameter.
    rate = 1
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError(
                        'The target output_stride cannot be reached.')
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    # If we have reached the target output_stride, employ
                    # atrous convolution with stride=1 and accumulate the
                    # unit's stride into the atrous rate instead.
                    if output_stride is not None and current_stride == output_stride:
                        net = block.unit_fn(
                            net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        if i == 1:
                            # unit #2, use temporal conv
                            net = block.unit_fn(
                                net, nr_frames=nr_frames, temporal=True, rate=1, **unit)
                        else:
                            net = block.unit_fn(
                                net, nr_frames=nr_frames, temporal=False, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
            net = slim.utils.collect_named_outputs(
                outputs_collections, sc.name, net)
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return net
@slim.add_arg_scope
def stack_blocks_dense_injected(net, blocks, nr_frames, end_points_other, other_scope_name, output_stride=None,
                                outputs_collections=None, conn_args=None):
    """Like `stack_blocks_dense`, but injects activations from another network.

    For each block, unit 1 (i == 0) is asked to also return its pre-ReLU
    output; unit 2 (i == 1) then receives both that tensor and the matching
    end point of a second network (looked up in `end_points_other` under
    `other_scope_name`) as a multiplicative interaction, alongside the
    temporal convolution.

    Args:
        net: A `Tensor` of size [batch, height, width, channels].
        blocks: A list of ResNet `Block` objects describing the units.
        nr_frames: Number of frames per clip, forwarded to each unit_fn.
        end_points_other: End-point dict of the other (donor) network.
        other_scope_name: Scope prefix used to address entries in
            `end_points_other`.
        output_stride: See `stack_blocks_dense`.
        outputs_collections: Collection to add the ResNet block outputs.
        conn_args: Unused here; kept for signature compatibility.

    Returns:
        net: Output tensor with stride equal to the specified output_stride.

    Raises:
        ValueError: If the target output_stride is not valid.
    """
    current_stride = 1
    rate = 1
    for block in blocks:
        with tf.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError(
                        'The target output_stride cannot be reached.')
                with tf.variable_scope('unit_%d' % (i + 1), values=[net]):
                    # If we have reached the target output_stride, employ
                    # atrous convolution with stride=1 and accumulate the
                    # unit's stride into the atrous rate instead.
                    if output_stride is not None and current_stride == output_stride:
                        net = block.unit_fn(
                            net, rate=rate, **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)
                    else:
                        if i == 0:
                            # Unit 1 also exposes its pre-activation tensor
                            # for the interaction in unit 2.
                            net, net_before_relu = block.unit_fn(
                                net, nr_frames=nr_frames, temporal=False, rate=1, unit_id=0, **unit)
                        elif i == 1:
                            unit_i = '/unit_%d' % (i)
                            # which is equal to ReLU(conv3+shortcut)
                            this_end_point = other_scope_name + block.scope + unit_i + '/bottleneck_v1'
                            # unit #2, inject temporal conv and interaction
                            net = block.unit_fn(
                                net, nr_frames=nr_frames, temporal=True, rate=1, multiplier=end_points_other[this_end_point], net_before_relu=net_before_relu, **unit)
                        else:
                            net = block.unit_fn(
                                net, nr_frames=nr_frames, temporal=False, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
            net = slim.utils.collect_named_outputs(
                outputs_collections, sc.name, net)
    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')
    return net
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True,
                     activation_fn=tf.nn.relu,
                     use_batch_norm=True):
    """Defines the default ResNet arg scope.

    TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

    Args:
        weight_decay: The weight decay to use for regularizing the model.
        batch_norm_decay: The moving average decay when estimating layer activation
            statistics in batch normalization.
        batch_norm_epsilon: Small constant to prevent division by zero when
            normalizing activations by their variance in batch normalization.
        batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
            activations in the batch normalization layer.
        activation_fn: The activation function which is used in ResNet.
        use_batch_norm: Whether or not to use batch normalization.

    Returns:
        An `arg_scope` to use for the resnet models.
    """
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }
    # The scope must be returned from inside all three nested `with` blocks
    # so it captures the conv2d, batch_norm and max_pool2d defaults together.
    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=activation_fn,
            normalizer_fn=slim.batch_norm if use_batch_norm else None,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            # The following implies padding='SAME' for pool1, which makes feature
            # alignment easier for dense prediction tasks. This is also used in
            # https://github.com/facebook/fb.resnet.torch. However the accompanying
            # code of 'Deep Residual Learning for Image Recognition' uses
            # padding='VALID' for pool1. You can switch to that choice by setting
            # slim.arg_scope([slim.max_pool2d], padding='VALID').
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
| 14,617 | 43.567073 | 166 | py |
synorim-merged | synorim-merged/pytorch/evaluate.py | import torch
import bdb, traceback, pdb
import importlib
import argparse
from pathlib import Path
from utils import exp
import numpy as np
from tqdm import tqdm
from dataset.base import DatasetSpec
def visualize(test_result, data):
    """Display each estimated pairwise scene flow with Open3D.

    For every (view_i, view_j) pair in `test_result`, renders the source
    cloud (red), the source cloud warped by the predicted flow (green), the
    target cloud (blue), and line segments connecting each source point to
    its warped position.

    Args:
        test_result: Dict mapping (view_i, view_j) index pairs to per-point
            flow tensors of shape [N, 3] — presumably one flow vector per
            point of view_i; TODO confirm against the model's test_step.
        data: Batch dict; data[DatasetSpec.PC][v][0] is the point cloud of
            view v for the first (only) batch element.
    """
    try:
        import open3d as o3d
    except ImportError:
        # Visualization is optional tooling; bail out if open3d is missing.
        print("Please import open3d for visualization!")
        exit()
    for (view_i, view_j) in test_result.keys():
        # Batch size is 1 during evaluation, hence the [0] indexing.
        pc_i = data[DatasetSpec.PC][view_i][0].cpu().numpy()
        pc_j = data[DatasetSpec.PC][view_j][0].cpu().numpy()
        flow_ij = test_result[(view_i, view_j)].cpu().numpy()
        base_pcd = o3d.geometry.PointCloud()
        base_pcd.points = o3d.utility.Vector3dVector(pc_i)
        base_pcd.paint_uniform_color((1.0, 0., 0.))
        final_pcd = o3d.geometry.PointCloud()
        final_pcd.points = o3d.utility.Vector3dVector(pc_i + flow_ij)
        final_pcd.paint_uniform_color((0., 1.0, 0.))
        dest_pcd = o3d.geometry.PointCloud()
        dest_pcd.points = o3d.utility.Vector3dVector(pc_j)
        dest_pcd.paint_uniform_color((0., 0., 1.0))
        # Stack [pc_i; pc_i + flow] and connect row k to row k + N.
        corres_lineset = o3d.geometry.LineSet(
            points=o3d.utility.Vector3dVector(np.vstack([pc_i, pc_i + flow_ij])),
            lines=o3d.utility.Vector2iVector(np.arange(2 * flow_ij.shape[0]).reshape((2, -1)).T))
        print(f"Visualizing scene flow computed from view {view_i} to view {view_j}.")
        o3d.visualization.draw([base_pcd, final_pcd, dest_pcd, corres_lineset])
def test_epoch():
    """Run one full pass over the test set and return the accumulated metrics.

    Uses the module-level ``net_model``, ``test_loader`` and ``args``; when
    ``args.visualize`` is set, each prediction is also rendered.
    """
    net_model.eval()
    net_model.hparams.is_training = False
    meter = exp.AverageMeter()
    progress = tqdm(test_loader, desc='Test')
    for step_idx, sample in enumerate(progress):
        sample = exp.to_target_device(sample, args.device)
        with torch.no_grad():
            result, metrics = net_model.test_step(sample, step_idx)
        if args.visualize:
            visualize(result, sample)
        meter.append_loss(metrics)
    return meter
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Synorim Evaluation script')
    parser.add_argument('config', type=str, help='Path to the config file.')
    parser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cuda', help='Device to run on.')
    parser.add_argument('--visualize', action='store_true', help='Whether or not to visualize.')
    args = parser.parse_args()
    # Fix RNG seeds for a reproducible evaluation run.
    exp.seed_everything(0)
    # Instantiate the model class named by the config (models.<name>.Model).
    model_args = exp.parse_config_yaml(Path(args.config))
    net_module = importlib.import_module("models." + model_args.model).Model
    net_model = net_module(model_args)
    # Load dataset
    test_loader = net_model.test_dataloader()
    # Move to target device
    args.device = torch.device(args.device)
    net_model = exp.to_target_device(net_model, args.device)
    net_model.device = args.device
    net_model.update_device()
    # Run test
    try:
        test_meter = test_epoch()
    except Exception as ex:
        # Drop into the post-mortem debugger on unexpected failures; a BdbQuit
        # (user left the debugger) skips the traceback and just exits.
        if not isinstance(ex, bdb.BdbQuit):
            traceback.print_exc()
            pdb.post_mortem(ex.__traceback__)
        exit()
    # Print metrics
    res = test_meter.get_mean_loss_dict()
    print("Test metrics:")
    # First block: metrics over non-occluded (masked) points; second: all points.
    print("+ Non-occluded:")
    print(f" + EPE3D: \t {res[f'epe3d-avg'] * 100:.2f}\t+/-\t{res[f'epe3d-std'] * 100:.2f}")
    print(f" + AccS (%): \t {res[f'acc3d_strict-avg'] * 100:.1f}\t+/-\t{res[f'acc3d_strict-std'] * 100:.1f}")
    print(f" + AccR (%): \t {res[f'acc3d_relax-avg'] * 100:.1f}\t+/-\t{res[f'acc3d_relax-std'] * 100:.1f}")
    print(f" + Outlier: \t {res[f'outlier-avg'] * 100:.1f}\t+/-\t{res[f'outlier-std'] * 100:.1f}")
    print("+ Full:")
    print(f" + EPE3D: \t {res[f'epe3d-full-avg'] * 100:.2f}\t+/-\t{res[f'epe3d-full-std'] * 100:.2f}")
    print(f" + AccS (%): \t {res[f'acc3d_strict-full-avg'] * 100:.1f}\t+/-\t{res[f'acc3d_strict-full-std'] * 100:.1f}")
    print(f" + AccR (%): \t {res[f'acc3d_relax-full-avg'] * 100:.1f}\t+/-\t{res[f'acc3d_relax-full-std'] * 100:.1f}")
    print(f" + Outlier: \t {res[f'outlier-full-avg'] * 100:.1f}\t+/-\t{res[f'outlier-full-std'] * 100:.1f}")
| 4,097 | 37.299065 | 120 | py |
synorim-merged | synorim-merged/pytorch/train.py | import argparse
import bdb
import importlib
import pdb
import shutil
import traceback
from pathlib import Path
import torch
from omegaconf import OmegaConf
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils import exp
def train_epoch():
    """Run one optimization pass over the training set.

    Uses the module-level ``net_model``, ``train_loader``, ``optimizer``,
    ``scheduler``, ``writer``, ``args`` and ``global_step``.
    """
    global global_step
    net_model.train()
    net_model.hparams.is_training = True
    pbar = tqdm(train_loader, desc='Training')
    for batch_idx, data in enumerate(pbar):
        data = exp.to_target_device(data, args.device)
        optimizer.zero_grad()
        loss = net_model.training_step(data, batch_idx)
        loss.backward()
        # Model-specific post-backward hook (gradient sanitization).
        net_model.on_after_backward()
        optimizer.step()
        # Scheduler is stepped per batch ('interval' == 'step').
        scheduler.step()
        net_model.log('learning_rate', scheduler.get_last_lr()[0])
        pbar.set_postfix_str(f"Loss = {loss.item():.2f}")
        # Flush cached scalars to tensorboard.
        net_model.write_log(writer, global_step)
        global_step += 1
def validate_epoch():
    """Run one pass over the validation set and checkpoint the model.

    Saves 'newest.pth' every epoch and additionally 'best.pth' whenever the
    validation loss improves on the best seen so far.
    """
    global metric_val_best
    net_model.eval()
    net_model.hparams.is_training = False
    pbar = tqdm(val_loader, desc='Validation')
    for batch_idx, data in enumerate(pbar):
        data = exp.to_target_device(data, args.device)
        with torch.no_grad():
            net_model.validation_step(data, batch_idx)
    # Flush validation scalars; the model is expected to have logged 'val_loss'.
    log = net_model.write_log(writer, global_step)
    metric_val = log['val_loss']
    # NOTE(review): `epoch_idx` is read from the script's global scope (set by
    # the training loop in __main__).
    model_state = {
        'state_dict': net_model.state_dict(),
        'epoch': epoch_idx, 'val_loss': metric_val
    }
    if metric_val < metric_val_best:
        metric_val_best = metric_val
        torch.save(model_state, train_log_dir / f"best.pth")
    torch.save(model_state, train_log_dir / f"newest.pth")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Synorim Training script')
    parser.add_argument('config', type=str, help='Path to the config file.')
    parser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cuda', help='Device to run on.')
    parser.add_argument('--epochs', type=int, default=100, help='Number of epochs to train.')
    args = parser.parse_args()
    # Fix RNG seeds for a reproducible run.
    exp.seed_everything(0)
    # Instantiate the model class named by the config (models.<name>.Model).
    model_args = exp.parse_config_yaml(Path(args.config))
    net_module = importlib.import_module("models." + model_args.model).Model
    net_model = net_module(model_args)
    # Per-experiment output directory: out/<name>.
    train_log_dir = Path("out") / model_args.name
    train_log_dir.mkdir(exist_ok=True, parents=True)
    print(" >>>> ======= MODEL HYPER-PARAMETERS ======= <<<< ")
    print(OmegaConf.to_yaml(net_model.hparams, resolve=True))
    print("Save Directory is in:", train_log_dir)
    print(" >>>> ====================================== <<<< ")
    # Copy the model definition and config.
    shutil.copy(f"models/{model_args.model.replace('.', '/')}.py", train_log_dir / "model.py")
    OmegaConf.save(model_args, train_log_dir / "config.yaml")
    # Load dataset
    train_loader = net_model.train_dataloader()
    val_loader = net_model.val_dataloader()
    # Load training specs
    optimizers, schedulers = net_model.configure_optimizers()
    # This script only supports a single optimizer with a per-step scheduler.
    assert len(optimizers) == 1 and len(schedulers) == 1
    optimizer, scheduler = optimizers[0], schedulers[0]
    assert scheduler['interval'] == 'step'
    scheduler = scheduler['scheduler']
    # TensorboardX writer
    tb_logdir = train_log_dir / "tensorboard"
    tb_logdir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=tb_logdir)
    # Move to target device
    args.device = torch.device(args.device)
    net_model = exp.to_target_device(net_model, args.device)
    net_model.device = args.device
    # Train and validate within a protected loop.
    global_step = 0
    metric_val_best = 1e6
    try:
        for epoch_idx in range(args.epochs):
            train_epoch()
            validate_epoch()
    except Exception as ex:
        # Drop into the post-mortem debugger unless the user quit pdb (BdbQuit).
        if not isinstance(ex, bdb.BdbQuit):
            traceback.print_exc()
            pdb.post_mortem(ex.__traceback__)
| 3,917 | 31.114754 | 112 | py |
synorim-merged | synorim-merged/pytorch/metric.py | import torch
class PairwiseFlowMetric:
    """Scene-flow error metrics between a predicted and a ground-truth flow field."""

    def __init__(self, batch_mean: bool = False, compute_epe3d: bool = True, compute_acc3d_outlier: bool = False,
                 scene_level: bool = False):
        """
        :param batch_mean: Whether to return an array with size (B, ) or a single scalar (mean)
        :param compute_epe3d: compute EPE3D metric
        :param compute_acc3d_outlier: compute Acc3d-strict, Acc3d-relax and outlier metric
        :param scene_level: whether use the scene threshold as proposed in FlowNet3D.
        """
        self.batch_mean = batch_mean
        self.compute_epe3d = compute_epe3d
        self.compute_acc3d_outlier = compute_acc3d_outlier
        self.scene_level = scene_level

    def evaluate(self, gt_flow: torch.Tensor, pd_flow: torch.Tensor, valid_mask: torch.Tensor = None):
        """
        Compute the pairwise flow metric; batch dimension will not be reduced. (Unit will be the same as input)
        :param gt_flow: (..., N, 3)
        :param pd_flow: (..., N, 3)
        :param valid_mask: (..., N)
        :return: metrics dict.
        """
        assert gt_flow.size(-1) == pd_flow.size(-1) == 3
        assert gt_flow.size(-2) == pd_flow.size(-2)
        n_point = gt_flow.size(-2)
        # Collapse all leading dims into a single batch axis.
        gt = gt_flow.reshape(-1, n_point, 3)
        pd = pd_flow.reshape(-1, n_point, 3)
        if valid_mask is None:
            mask = torch.ones((gt.size(0), n_point), dtype=bool, device=gt.device)
        else:
            mask = valid_mask.reshape(-1, n_point)
        denom = mask.sum(-1) + 1e-6

        def masked_mean(values):
            # Mean over the point axis, restricted to valid points.
            return (values * mask).sum(-1) / denom

        err = torch.norm(pd - gt, dim=-1)  # (B, N) endpoint error
        metrics = {}
        if self.compute_epe3d:
            metrics['epe3d'] = masked_mean(err)
        if self.compute_acc3d_outlier:
            gt_norm = torch.norm(gt, dim=-1)  # (B, N)
            rel = err / (gt_norm + 1e-4)      # (B, N) relative error
            if self.scene_level:
                strict_mask = torch.logical_or(err < 0.05, rel < 0.05).float()
                relax_mask = torch.logical_or(err < 0.1, rel < 0.1).float()
                outlier_mask = torch.logical_or(err > 0.3, rel > 0.1).float()
            else:
                strict_mask = torch.logical_or(err < 0.02, rel < 0.05).float()
                relax_mask = torch.logical_or(err < 0.05, rel < 0.1).float()
                outlier_mask = (rel > 0.3).float()
            metrics['acc3d_strict'] = masked_mean(strict_mask)
            metrics['acc3d_relax'] = masked_mean(relax_mask)
            metrics['outlier'] = masked_mean(outlier_mask)
        if self.batch_mean:
            metrics = {key: torch.mean(val) for key, val in metrics.items()}
        return metrics
| 3,005 | 45.246154 | 113 | py |
synorim-merged | synorim-merged/pytorch/dataset/base.py | import collections
import multiprocessing
import torch
from numpy.random import RandomState
from torch.utils.data import Dataset
import zlib, json
from enum import Enum
class DatasetSpec(Enum):
    """Keys identifying the pieces of data a dataset sample can carry."""
    # Identifier of the sample's source file.
    FILENAME = 100
    # Point clouds, one per view.
    PC = 200
    # Flow and masks are dictionary with key (view_i, view_j).
    FULL_FLOW = 300
    FULL_MASK = 400
    # Quantized coordinates from MinkowskiEngine.
    QUANTIZED_COORDS = 500
def deterministic_hash(data):
    """Map any JSON-serializable value to a stable 32-bit integer hash.

    The value is serialized to canonical JSON (sorted keys, compact
    separators) so equal values always produce the same digest, independent
    of process or PYTHONHASHSEED — unlike the built-in ``hash``.

    :param data: Any JSON-serializable value.
    :return: a deterministic hash value of integer type (32bit)
    """
    canonical = json.dumps(
        data, ensure_ascii=False, sort_keys=True,
        indent=None, separators=(',', ':'))
    return zlib.adler32(canonical.encode('utf-8'))
class RandomSafeDataset(Dataset):
    """
    A dataset class that provides a deterministic random seed.
    However, in order to have consistent validation set, we need to set is_val=True for validation/test sets.
    """
    def __init__(self, seed: int, _is_val: bool = False):
        """
        :param seed: base seed all per-sample RNGs are derived from.
        :param _is_val: if True, every get_rng call returns an identically
            seeded RNG so validation/test sampling is fully repeatable.
        """
        self._seed = seed
        self._is_val = _is_val
        if not self._is_val:
            # A Manager-backed dict is shared across DataLoader worker
            # processes, keeping per-sample read counts consistent when
            # num_workers > 0.
            self._manager = multiprocessing.Manager()
            self._read_count = self._manager.dict()

    def get_rng(self, idx):
        """Return a RandomState for sample `idx`.

        Validation: always seeded with the base seed (stable draws).
        Training: seeded from (idx, times-read-so-far, base seed), so repeated
        reads of the same sample yield different but reproducible draws.
        """
        if self._is_val:
            return RandomState(self._seed)
        if idx not in self._read_count:
            self._read_count[idx] = 0
        rng = RandomState(deterministic_hash((idx, self._read_count[idx], self._seed)))
        self._read_count[idx] += 1
        return rng
def list_collate(batch):
    """
    Collate function that keeps the batch dimension as Python lists instead of
    stacking into a single tensor.

    Tensors (and strings) are returned as the unmodified list; numpy arrays are
    converted to tensors element-wise; scalars are packed into a 1-D tensor;
    mappings and sequences are collated recursively. A batch of all-None
    entries is returned unchanged.

    :raises RuntimeError: if sequence elements have inconsistent lengths.
    :raises NotImplementedError: for unsupported element types.
    """
    # Use the first non-None element to decide how to collate.
    exemplar = next((item for item in batch if item is not None), None)
    exemplar_type = type(exemplar)
    if isinstance(exemplar, torch.Tensor):
        return batch
    if exemplar_type.__module__ == 'numpy' and exemplar_type.__name__ != 'str_' \
            and exemplar_type.__name__ != 'string_':
        if exemplar_type.__name__ in ('ndarray', 'memmap'):
            # Convert each array to a tensor (keeping Nones), then re-collate.
            return list_collate(
                [None if item is None else torch.as_tensor(item) for item in batch])
        if exemplar.shape == ():  # numpy scalars
            return torch.as_tensor(batch)
    elif isinstance(exemplar, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(exemplar, int):
        return torch.tensor(batch)
    elif isinstance(exemplar, str):
        return batch
    elif isinstance(exemplar, collections.abc.Mapping):
        return {key: list_collate([item[key] for item in batch]) for key in exemplar}
    elif isinstance(exemplar, collections.abc.Sequence):
        # All sequences in the batch must have the same length.
        if len({len(item) for item in batch}) != 1:
            raise RuntimeError('each element in list of batch should be of equal size')
        return [list_collate(group) for group in zip(*batch)]
    elif exemplar is None:
        return batch
    raise NotImplementedError
| 3,050 | 32.527473 | 109 | py |
synorim-merged | synorim-merged/pytorch/models/base_model.py | import functools
import importlib
import tempfile
from pathlib import Path
from typing import Mapping, Any, Optional, Callable, Union
import numpy as np
import torch
from torch import nn
from omegaconf import OmegaConf
from torch.optim.lr_scheduler import LambdaLR
from utils.exp import AverageMeter, parse_config_yaml
def lambda_lr_wrapper(it, lr_config, batch_size):
    """LR lambda for ``LambdaLR``: stepwise exponential decay with a floor.

    The decay exponent counts how many 'decay_step' units of samples
    (``it * batch_size``) have been consumed; the multiplier never drops
    below ``clip / init``.

    :param it: current scheduler step.
    :param lr_config: dict with 'init', 'decay_mult', 'decay_step', 'clip'.
    :param batch_size: number of samples consumed per step.
    :return: multiplier applied to the initial learning rate.
    """
    num_decays = int(it * batch_size / lr_config['decay_step'])
    decayed = lr_config['decay_mult'] ** num_decays
    floor = lr_config['clip'] / lr_config['init']
    return max(decayed, floor)
class BaseModel(nn.Module):
    """Common scaffolding shared by all models: checkpoint loading, optimizer
    and scheduler construction, gradient sanitization, and scalar logging."""

    def __init__(self, hparams):
        super().__init__()
        # Config namespace; also carries runtime flags such as `is_training`.
        self.hparams = hparams
        # Accumulates logged scalars until `write_log` flushes them.
        self.log_cache = AverageMeter()

    @staticmethod
    def load_module(spec_path):
        """
        Load a module given spec_path
        :param spec_path: Path to a model ckpt.
        :return: the instantiated model, possibly with weights loaded.
        """
        spec_path = Path(spec_path)
        # The checkpoint's directory must hold the config it was trained with.
        config_args = parse_config_yaml(spec_path.parent / "config.yaml")
        net_module = importlib.import_module("models." + config_args.model).Model
        net_model = net_module(config_args)
        # A filename containing 'none.pth' means "architecture only": skip weights.
        if "none.pth" not in spec_path.name:
            ckpt_data = torch.load(spec_path)
            net_model.load_state_dict(ckpt_data['state_dict'])
            print(f"Checkpoint loaded from {spec_path}.")
        return net_model

    def configure_optimizers(self):
        """Build the optimizer and per-step LR scheduler from `hparams`.

        :return: ([optimizer], [{'scheduler': LambdaLR, 'interval': 'step'}])
        :raises NotImplementedError: for unknown `hparams.optimizer` values.
        """
        lr_config = self.hparams.learning_rate
        if self.hparams.optimizer == 'SGD':
            optimizer = torch.optim.SGD(self.parameters(), lr=lr_config['init'], momentum=0.9,
                                        weight_decay=self.hparams.weight_decay)
        elif self.hparams.optimizer == 'Adam':
            # The learning rate here is the maximum rate we can reach for each parameter.
            optimizer = torch.optim.AdamW(self.parameters(), lr=lr_config['init'],
                                          weight_decay=self.hparams.weight_decay, amsgrad=True)
        else:
            raise NotImplementedError
        scheduler = LambdaLR(optimizer,
                             lr_lambda=functools.partial(
                                 lambda_lr_wrapper, lr_config=lr_config, batch_size=self.hparams.batch_size))
        return [optimizer], [{'scheduler': scheduler, 'interval': 'step'}]

    def on_after_backward(self):
        """Clip gradient values and zero out NaN gradient entries in place."""
        grad_clip_val = self.hparams.get('grad_clip', 1000.)
        torch.nn.utils.clip_grad_value_(self.parameters(), clip_value=grad_clip_val)
        # Also remove nan values if any.
        has_nan_value = False
        for p in filter(lambda p: p.grad is not None, self.parameters()):
            pdata = p.grad.data
            # NaN != NaN, so this marks exactly the NaN entries.
            grad_is_nan = pdata != pdata
            if torch.any(grad_is_nan):
                has_nan_value = True
                pdata[grad_is_nan] = 0.
        if has_nan_value:
            print(f"Warning: Gets a nan-gradient but set to 0.")

    def log(self, key, value):
        """Cache a scalar for the next `write_log` flush.

        During training, each key may only be logged once per step (asserted).
        Tensors are converted to Python scalars via .item().
        """
        if self.hparams.is_training:
            assert key not in self.log_cache.loss_dict
        self.log_cache.append_loss({
            key: value.item() if isinstance(value, torch.Tensor) else value
        })

    def log_dict(self, dictionary: Mapping[str, Any]):
        """Cache every (key, value) pair via `log`."""
        for k, v in dictionary.items():
            self.log(str(k), v)

    def write_log(self, writer, it):
        """Flush cached scalar means to tensorboard.

        During training this only happens every 10 iterations; outside of
        training it always flushes.
        :return: dict of the scalars actually written (empty if skipped).
        """
        logs_written = {}
        if not self.hparams.is_training or it % 10 == 0:
            for k, v in self.log_cache.get_mean_loss_dict().items():
                writer.add_scalar(k, v, it)
                logs_written[k] = v
            self.log_cache.clear()
        return logs_written
| 3,644 | 37.776596 | 109 | py |
synorim-merged | synorim-merged/pytorch/models/basis_net_self.py | from collections import defaultdict
import torch
from torch.utils.data import DataLoader
from dataset.base import DatasetSpec as DS, list_collate
from dataset.flow_dataset import FlowDataset
from models.basis_net import Model as BaseModel
from models.desc_net_self import Model as DescModel
class Model(BaseModel):
    """
    Self-supervised setting of training the basis network.
    """
    def compute_loss(self, batch, basis_output, all_sels, compute_metric=False):
        """Self-supervised loss: map cycle-consistency plus per-direction
        self-supervised flow terms.

        :param batch: collated batch with two views per sample.
        :param basis_output: pair of sparse tensors (basis features, descriptor
            features), indexed per (sample, view) via features_at.
        :param all_sels: per (sample, view) indices of the voxelized subset.
        :param compute_metric: unused in this subclass; kept for interface
            compatibility.
        :return: (loss dict, empty metric dict)
        """
        num_batches = len(batch[DS.PC][0])
        loss_dict = defaultdict(list)
        loss_config = self.hparams.self_supervised_loss
        robust_kernel, robust_iter = self.get_robust_kernel()
        for batch_idx in range(num_batches):
            cur_pc0, cur_pc1 = batch[DS.PC][0][batch_idx], batch[DS.PC][1][batch_idx]
            cur_sel0, cur_sel1 = all_sels[batch_idx * 2 + 0], all_sels[batch_idx * 2 + 1]
            cur_sub_pc0, cur_sub_pc1 = cur_pc0[cur_sel0], cur_pc1[cur_sel1]
            # Basis / descriptor features for the two views of this sample.
            cur_basis0 = basis_output[0].features_at(batch_idx * 2 + 0)
            cur_basis1 = basis_output[0].features_at(batch_idx * 2 + 1)
            cur_desc0 = basis_output[1].features_at(batch_idx * 2 + 0)
            cur_desc1 = basis_output[1].features_at(batch_idx * 2 + 1)
            # Align the two bases using putative descriptor correspondences.
            aligned_basis0, aligned_basis1 = self.align_basis_via_pd_test(
                cur_basis0, cur_basis1, cur_desc0, cur_desc1, thres=0.6)
            # Fit functional maps in both directions with a robust kernel.
            c01 = self.solve_optimal_maps(aligned_basis0, aligned_basis1,
                                          robust_kernel=robust_kernel, num_iter=robust_iter)
            c10 = self.solve_optimal_maps(aligned_basis1, aligned_basis0,
                                          robust_kernel=robust_kernel, num_iter=robust_iter)
            if self.hparams.ctc_weight:
                # Cycle consistency: composing both maps should give identity.
                ctc_err = c01 @ c10 - torch.eye(c01.size(0), device=self.device)
                loss_dict['ctc'].append(self.hparams.ctc_weight * torch.sum(ctc_err ** 2))
            coords0 = batch[DS.QUANTIZED_COORDS][0][0][batch_idx]
            coords1 = batch[DS.QUANTIZED_COORDS][1][0][batch_idx]
            flow01 = self.compute_flow_from_maps(cur_basis0, cur_basis1, c01,
                                                 cur_sub_pc0, cur_sub_pc1, sparse_coords=coords0)
            flow10 = self.compute_flow_from_maps(cur_basis1, cur_basis0, c10,
                                                 cur_sub_pc1, cur_sub_pc0, sparse_coords=coords1)
            # Apply the self-supervised criterion to the two flow variants
            # returned by compute_flow_from_maps ('final' weighted 1.0 and
            # 'f' weighted by flow_f_weight), in both directions.
            for flow_type, flow_weight in zip(['final', 'f'], [1.0, self.hparams.flow_f_weight]):
                if flow_weight <= 0.0:
                    continue
                cur_loss_unsup01 = DescModel.compute_self_sup_loss(
                    cur_sub_pc0, cur_sub_pc1, flow01[flow_type], loss_config)
                cur_loss_unsup10 = DescModel.compute_self_sup_loss(
                    cur_sub_pc1, cur_sub_pc0, flow10[flow_type], loss_config)
                for ss_lkey in cur_loss_unsup01.keys():
                    loss_dict[f'self-{flow_type}-{ss_lkey}'].append(flow_weight * cur_loss_unsup01[ss_lkey])
                    loss_dict[f'self-{flow_type}-{ss_lkey}'].append(flow_weight * cur_loss_unsup10[ss_lkey])
        # Average each loss term over the batch (and both directions).
        loss_dict = {k: sum(v) / len(v) for k, v in loss_dict.items()}
        return loss_dict, {}

    def get_dataset_spec(self):
        # No ground-truth flow/mask is needed for self-supervision.
        return [DS.QUANTIZED_COORDS, DS.PC]

    def train_dataloader(self):
        # NOTE(review): augmentation is disabled here (augmentor=None), unlike
        # the supervised setting.
        train_set = FlowDataset(**self.hparams.train_kwargs, spec=self.get_dataset_spec(),
                                hparams=self.hparams, augmentor=None)
        torch.manual_seed(0)  # Ensure shuffle order is reproducible.
        return DataLoader(train_set, batch_size=self.hparams.batch_size, shuffle=True,
                          num_workers=4, collate_fn=list_collate)
| 3,726 | 50.763889 | 108 | py |
synorim-merged | synorim-merged/pytorch/models/desc_net.py | import torch
import MinkowskiEngine as ME
from torch.nn import Parameter
from torch.utils.data import DataLoader
from dataset.base import DatasetSpec as DS, list_collate
from dataset.flow_dataset import FlowDataset, DataAugmentor
from metric import PairwiseFlowMetric
from models.spconv import ResUNet
from models.base_model import BaseModel
import numpy as np
from utils.point import propagate_features
class Model(BaseModel):
    """
    This model trains the descriptor network. This is the 1st stage of training.
    """
    def __init__(self, hparams):
        super().__init__(hparams)
        self.backbone_args = self.hparams.backbone_args
        # Sparse-conv U-Net; input features are raw xyz (3 channels) and the
        # output descriptors are normalized (normalize_feature=True).
        self.backbone = ResUNet(self.backbone_args,
                                in_channels=3,
                                out_channels=self.backbone_args.out_channels,
                                normalize_feature=True,
                                conv1_kernel_size=3)
        # Learnable temperature for the softmax over descriptor distances,
        # floored at td_min in compute_loss.
        self.td = Parameter(torch.tensor(np.float32(self.hparams.td_init)), requires_grad=True)

    def forward(self, batch):
        """
        Forward descriptor network.
        As the backbone quantized point cloud into voxels (by selecting one point for each voxel),
        we also return the selected point indices.

        :param batch: collated batch with QUANTIZED_COORDS and PC entries.
        :return: (sparse descriptor tensor, list of per-(sample, view) indices);
            the flattened order is [s0-v0, s0-v1, s1-v0, ...].
        """
        num_batches = len(batch[DS.QUANTIZED_COORDS][0][0])
        num_views = len(batch[DS.QUANTIZED_COORDS])
        all_coords, all_feats, all_sels = [], [], []
        for batch_idx in range(num_batches):
            for view_idx in range(num_views):
                all_coords.append(batch[DS.QUANTIZED_COORDS][view_idx][0][batch_idx])
                cur_sel = batch[DS.QUANTIZED_COORDS][view_idx][1][batch_idx]
                all_sels.append(cur_sel)
                # Use the selected points' xyz coordinates as input features.
                all_feats.append(batch[DS.PC][view_idx][batch_idx][cur_sel])
        coords_batch, feats_batch = ME.utils.sparse_collate(all_coords, all_feats, device=self.device)
        sinput = ME.SparseTensor(feats_batch, coordinates=coords_batch)
        soutput = self.backbone(sinput)
        return soutput, all_sels

    def compute_loss(self, batch, desc_output, all_sels, compute_metric=False):
        """Supervised soft-correspondence flow loss (two views per sample).

        :param batch: collated batch with PC, FULL_FLOW and FULL_MASK entries.
        :param desc_output: sparse descriptor tensor from forward().
        :param all_sels: per (sample, view) voxel-subset indices.
        :param compute_metric: also compute full-resolution EPE3D metrics.
        :return: ({'flow': loss}, metric dict)
        """
        num_batches = len(batch[DS.PC][0])
        all_flow_loss = []
        all_epe3d = []
        all_epe3d_full = []
        metric = PairwiseFlowMetric(compute_epe3d=True, compute_acc3d_outlier=False)
        for batch_idx in range(num_batches):
            cur_pc0, cur_pc1 = batch[DS.PC][0][batch_idx], batch[DS.PC][1][batch_idx]
            cur_sel0, cur_sel1 = all_sels[batch_idx * 2 + 0], all_sels[batch_idx * 2 + 1]
            cur_gt0, cur_gt1 = batch[DS.FULL_FLOW][(0, 1)][batch_idx], batch[DS.FULL_FLOW][(1, 0)][batch_idx]
            cur_mask0, cur_mask1 = batch[DS.FULL_MASK][(0, 1)][batch_idx], batch[DS.FULL_MASK][(1, 0)][batch_idx]
            cur_feat0 = desc_output.features_at(batch_idx * 2 + 0)
            cur_feat1 = desc_output.features_at(batch_idx * 2 + 1)
            # Temperature-scaled descriptor distances; the learnable
            # temperature self.td is floored at td_min.
            dist_mat = torch.cdist(cur_feat0, cur_feat1) / torch.maximum(
                torch.tensor(np.float32(self.hparams.td_min), device=self.device), self.td)
            # Soft correspondence: softmax over rows/columns gives weighted
            # target positions; subtracting the source yields the flow.
            cur_pd0 = torch.softmax(-dist_mat, dim=1) @ cur_pc1[cur_sel1] - cur_pc0[cur_sel0]
            cur_pd1 = torch.softmax(-dist_mat, dim=0).transpose(-1, -2) @ cur_pc0[cur_sel0] - cur_pc1[cur_sel1]
            # Supervised endpoint loss, restricted to valid (masked) points.
            if cur_gt0 is not None:
                flow_loss01 = torch.linalg.norm(cur_pd0 - cur_gt0[cur_sel0], dim=-1)[cur_mask0[cur_sel0]].mean()
                all_flow_loss.append(flow_loss01)
            if cur_gt1 is not None:
                flow_loss10 = torch.linalg.norm(cur_pd1 - cur_gt1[cur_sel1], dim=-1)[cur_mask1[cur_sel1]].mean()
                all_flow_loss.append(flow_loss10)
            if compute_metric:
                with torch.no_grad():
                    # Propagate sub-sampled flow to full resolution, then
                    # compute EPE3D both masked and unmasked.
                    if cur_gt0 is not None:
                        pd_full_flow01 = propagate_features(cur_pc0[cur_sel0], cur_pc0, cur_pd0, batched=False)
                        epe3d01 = metric.evaluate(cur_gt0, pd_full_flow01, cur_mask0)['epe3d']
                        epe3d01_full = metric.evaluate(cur_gt0, pd_full_flow01)['epe3d']
                        all_epe3d.append(epe3d01.item())
                        all_epe3d_full.append(epe3d01_full.item())
                    if cur_gt1 is not None:
                        pd_full_flow10 = propagate_features(cur_pc1[cur_sel1], cur_pc1, cur_pd1, batched=False)
                        epe3d10 = metric.evaluate(cur_gt1, pd_full_flow10, cur_mask1)['epe3d']
                        epe3d10_full = metric.evaluate(cur_gt1, pd_full_flow10)['epe3d']
                        all_epe3d.append(epe3d10.item())
                        all_epe3d_full.append(epe3d10_full.item())
        flow_loss = sum(all_flow_loss) / len(all_flow_loss)
        if compute_metric:
            metric_dict = {'epe3d': np.mean(all_epe3d), 'epe3d_full': np.mean(all_epe3d_full)}
        else:
            metric_dict = {}
        return {'flow': flow_loss}, metric_dict

    def training_step(self, batch, batch_idx):
        """One training iteration; returns the summed loss for backprop."""
        desc_output, all_sels = self(batch)
        loss_dict, metric_dict = self.compute_loss(batch, desc_output, all_sels, compute_metric=False)
        for metric_name, metric_val in metric_dict.items():
            self.log(f'train_loss/{metric_name}', metric_val)
        for loss_name, loss_val in loss_dict.items():
            self.log(f'train_loss/{loss_name}', loss_val)
        loss_sum = sum([t for t in loss_dict.values()])
        self.log('train_loss/sum', loss_sum)
        return loss_sum

    def validation_step(self, batch, batch_idx):
        """One validation iteration; logs losses and metrics under val_loss/."""
        desc_output, all_sels = self(batch)
        loss_dict, metric_dict = self.compute_loss(batch, desc_output, all_sels, compute_metric=True)
        for metric_name, metric_val in metric_dict.items():
            self.log(f'val_loss/{metric_name}', metric_val)
        for loss_name, loss_val in loss_dict.items():
            self.log(f'val_loss/{loss_name}', loss_val)
        loss_sum = sum([t for t in loss_dict.values()])
        self.log('val_loss', loss_sum)
        return loss_sum

    def get_dataset_spec(self):
        # Supervised training needs ground-truth flow and occlusion masks.
        return [DS.FILENAME, DS.QUANTIZED_COORDS, DS.PC, DS.FULL_FLOW, DS.FULL_MASK]

    def train_dataloader(self):
        train_set = FlowDataset(**self.hparams.train_kwargs, spec=self.get_dataset_spec(),
                                hparams=self.hparams, augmentor=DataAugmentor(self.hparams.train_augmentation))
        torch.manual_seed(0)  # Ensure shuffle is consistent.
        return DataLoader(train_set, batch_size=self.hparams.batch_size, shuffle=True,
                          num_workers=4, collate_fn=list_collate)

    def val_dataloader(self):
        # No augmentation at validation time.
        val_set = FlowDataset(**self.hparams.val_kwargs, spec=self.get_dataset_spec(), hparams=self.hparams)
        return DataLoader(val_set, batch_size=self.hparams.batch_size, shuffle=False,
                          num_workers=4, collate_fn=list_collate)
| 6,939 | 51.180451 | 113 | py |
synorim-merged | synorim-merged/pytorch/models/full_sync.py | from collections import defaultdict
import torch.linalg
from torch.utils.data import DataLoader
from dataset.base import DatasetSpec as DS, list_collate
from dataset.flow_dataset import FlowDataset
from metric import PairwiseFlowMetric
from models.base_model import BaseModel
import numpy as np
from utils.point import propagate_features
class Model(BaseModel):
    """
    This model runs the full test of our model, taking multiple point clouds as input.
    """
    def __init__(self, hparams):
        super().__init__(hparams)
        # Pre-trained basis network; it internally holds the descriptor net.
        self.basis_net = self.load_module(self.hparams.basis_checkpoint)
        self.desc_net = self.basis_net.desc_net
        # Reuse the voxel size the basis network was trained with.
        self.hparams.voxel_size = self.basis_net.hparams.voxel_size

    def update_device(self):
        # Propagate the device attribute to sub-networks after moving the model.
        self.basis_net.device = self.device
        self.desc_net.device = self.device

    def forward(self, batch):
        # basis_net returns ((basis features, descriptor features), selections).
        s_basis_desc, s_sels = self.basis_net(batch)
        return s_basis_desc[0], s_basis_desc[1], s_sels

    def optimize_map(self, old_c_dict, ca_dict, cb_dict):
        """
        Optimize for the best C that satisfies C* = argmin sum |ca @ c - cb| + |c @ hl - hk|
        :param old_c_dict: (k, l) --> (MxM) Fmap dict
        :param ca_dict: data dictionary for each (k, l) pair
        :param cb_dict: data dictionary for each (k, l) pair
        :return: optimized Fmap dict.
        """
        all_keys = sorted(list(old_c_dict.keys()))
        # NOTE(review): max(*[...]) needs at least two pairs; this holds here
        # because test_step inserts both (i, j) and (j, i) for every pair.
        num_frames = max(*[max(t) for t in all_keys]) + 1
        # Basis dimensionality of each view, read off the pairwise map shapes.
        view_sizes = {}
        for fid in range(num_frames - 1):
            view_sizes[fid] = old_c_dict[(fid, fid + 1)].size(0)
            view_sizes[fid + 1] = old_c_dict[(fid, fid + 1)].size(1)
        # Prefix offsets of each view's rows inside the stacked H matrix.
        var_sizes_T = np.cumsum([0] + [view_sizes[i] for i in range(num_frames)])
        cur_device = old_c_dict[all_keys[0]].device
        num_universe = max(view_sizes.values()) - self.hparams.num_v_sub
        consistent_weight = self.hparams.cycle_weight
        C_star = {k: v for k, v in old_c_dict.items()}
        # Reference norms used to measure the relative change for convergence.
        C_init_scale = {k: torch.linalg.norm(v.flatten()) for k, v in C_star.items()}
        robust_kernel, _ = self.basis_net.get_robust_kernel()
        # Alternating optimization between the {H} and {C} blocks.
        for iter_i in range(self.hparams.sync_iter):
            """
            1. Solve for {H} matrices, fixing {C}
            """
            # Assemble the block matrix whose eigenvectors give the per-view H
            # blocks; only the upper triangle is filled (eigh uses UPLO='U').
            h_rows = []
            for fid_i in range(num_frames):
                h_cols = []
                for fid_j in range(num_frames):
                    sum_matrices = [torch.zeros((view_sizes[fid_i], view_sizes[fid_j]), device=cur_device)]
                    if fid_i < fid_j:
                        if (fid_i, fid_j) in all_keys:
                            sum_matrices.append(-C_star[(fid_i, fid_j)])
                        if (fid_j, fid_i) in all_keys:
                            sum_matrices.append(-C_star[(fid_j, fid_i)].transpose(-1, -2))
                    elif fid_i == fid_j:
                        # Diagonal block: identity per outgoing map plus
                        # X^T X per incoming map.
                        for fid_k in range(num_frames):
                            if (fid_i, fid_k) in all_keys:
                                sum_matrices.append(torch.eye(view_sizes[fid_i], device=cur_device))
                            if (fid_k, fid_i) in all_keys:
                                X_ji = C_star[(fid_k, fid_i)]
                                sum_matrices.append(X_ji.transpose(-1, -2) @ X_ji)
                    h_cols.append(sum(sum_matrices))
                h_rows.append(torch.cat(h_cols, dim=-1))
            full_h_matrix = torch.cat(h_rows, dim=0)
            # torch.linalg.eigh returns eigenvalues in ascending order, so the
            # first columns are the eigenvectors of the smallest eigenvalues.
            _, h_star = torch.linalg.eigh(full_h_matrix, UPLO='U')
            h_star = h_star[..., :num_universe]
            """
            2. Solve for {C} matrices, fixing {H}
            """
            change_scales = []
            for (mid, nid) in all_keys:
                C_ij_star = self.basis_net.solve_optimal_maps(
                    ca_dict[(mid, nid)], cb_dict[(mid, nid)],
                    robust_kernel=robust_kernel,
                    k_i=h_star[var_sizes_T[mid]: var_sizes_T[mid + 1]],
                    k_j=h_star[var_sizes_T[nid]: var_sizes_T[nid + 1]],
                    sqrt_mu=np.sqrt(consistent_weight),
                    c_init=C_star[(mid, nid)],
                )
                change_scale = torch.linalg.norm((C_ij_star - C_star[(mid, nid)]).flatten()) / C_init_scale[(mid, nid)]
                change_scales.append(change_scale.item())
                C_star[(mid, nid)] = C_ij_star
            # Stop early once the maps stop changing significantly.
            rel_change = np.mean(change_scales)
            if rel_change < self.hparams.sync_converge_rel:
                break
        return C_star

    def test_step(self, batch, batch_idx):
        """Full multi-view inference: pairwise functional maps, map
        synchronization, flow extraction, and error evaluation.

        :return: (dict of full-resolution flows keyed by (view_i, view_j),
            dict of aggregated error metrics)
        """
        # Forward descriptor and basis networks.
        basis_output, desc_output, all_sels = self(batch)
        # Generate indices pairs and obtain data from batch.
        num_views = len(batch[DS.QUANTIZED_COORDS])
        iters_ij, iters_upper_ij = [], []
        for view_i in range(num_views):
            for view_j in range(num_views):
                if view_i == view_j:
                    continue
                if view_i < view_j:
                    iters_upper_ij.append((view_i, view_j))
                iters_ij.append((view_i, view_j))
        # sub_pc is a subset of the full point cloud, selected by ME quantization
        full_pc, sub_pc = {}, {}
        for view_i in range(num_views):
            full_pc[view_i] = batch[DS.PC][view_i][0]
            sub_pc[view_i] = full_pc[view_i][all_sels[view_i]]
        # Basis pre-conditioning for scale normalization.
        normalized_basis, basis_multiplier = {}, {}
        for view_i in range(num_views):
            basis_origin = basis_output.features_at(view_i)
            # Orthonormalize via SVD: basis = U @ (S V^T); keep U as the basis
            # and remember the right multiplier to undo the normalization.
            svd_res = torch.svd(basis_origin)
            right_multiplier = torch.diag_embed(svd_res.S) @ svd_res.V.transpose(-1, -2)
            normalized_basis[view_i], basis_multiplier[view_i] = svd_res.U, right_multiplier
        # Get descriptor
        sub_desc = {}
        for view_i in range(num_views):
            sub_desc[view_i] = desc_output.features_at(view_i)
        # Compute pairwise feature descriptors phi_k^kl, phi_l^kl for each pair
        phi_i_all, phi_j_all = {}, {}
        for (view_i, view_j) in iters_upper_ij:
            phi_i_all[(view_i, view_j)], phi_j_all[(view_i, view_j)] = self.basis_net.align_basis_via_pd_test(
                normalized_basis[view_i], normalized_basis[view_j],
                sub_desc[view_i], sub_desc[view_j],
                thres=self.hparams.gpd_thres
            )
            # The reverse direction reuses the same alignment, swapped.
            phi_i_all[(view_j, view_i)], phi_j_all[(view_j, view_i)] = \
                phi_j_all[(view_i, view_j)], phi_i_all[(view_i, view_j)]
        # Computes initial maps.
        maps_init = {}
        robust_kernel, robust_iter = self.basis_net.get_robust_kernel()
        for (view_i, view_j) in iters_ij:
            maps_init[(view_i, view_j)] = self.basis_net.solve_optimal_maps(
                phi_i_all[(view_i, view_j)], phi_j_all[(view_i, view_j)],
                robust_kernel=robust_kernel, num_iter=robust_iter
            )
        # Optimize maps via synchronization (Sec.5).
        maps_optimized = self.optimize_map(maps_init, phi_i_all, phi_j_all)
        # Generate flow.
        final_flows = {}
        for (view_i, view_j) in iters_ij:
            final_flows[(view_i, view_j)] = self.basis_net.compute_flow_from_maps(
                normalized_basis[view_i], normalized_basis[view_j], maps_optimized[(view_i, view_j)],
                sub_pc[view_i], sub_pc[view_j], pcond_multiplier=basis_multiplier[view_j],
                sparse_coords=batch[DS.QUANTIZED_COORDS][view_i][0][0]
            )['final']
        # Measure errors.
        full_final_flows = self.propagte_to_full_flow(batch, final_flows)
        error = self.evaluate_flow_error(batch, full_final_flows)
        return full_final_flows, error

    def propagte_to_full_flow(self, batch, sub_flows):
        """
        Propagate from flows at the sub-sampled positions to the input resolution.

        NOTE(review): method name carries a typo ('propagte'); kept as-is
        because it is part of the class interface.
        """
        eval_pairs = list(sub_flows.keys())
        output_full_flow = {}
        for (view_i, view_j) in eval_pairs:
            full_pc_i = batch[DS.PC][view_i][0]
            # Voxel-grid coordinates converted back to metric units.
            voxelized_coords = batch[DS.QUANTIZED_COORDS][view_i][0][0] * self.hparams.voxel_size
            pd_flow_ij = propagate_features(
                voxelized_coords,
                full_pc_i, sub_flows[(view_i, view_j)], batched=False, nk=self.hparams.flow_k)
            output_full_flow[(view_i, view_j)] = pd_flow_ij
        return output_full_flow

    @staticmethod
    def evaluate_flow_error(batch, pd_flows):
        """Aggregate per-pair flow metrics into mean/std over all view pairs.

        Metrics are computed both masked ('epe3d', ...) and unmasked
        ('epe3d-full', ...); pairs without ground truth are skipped.
        """
        eval_pairs = list(batch[DS.FULL_FLOW].keys())
        err_acc_dict = defaultdict(list)
        for (view_i, view_j) in eval_pairs:
            gt_flow_ij = batch[DS.FULL_FLOW][(view_i, view_j)][0]
            gt_mask_ij = batch[DS.FULL_MASK][(view_i, view_j)][0]
            # If ground-truth does not exist for this pair, then ignore it.
            if gt_flow_ij is None:
                continue
            err_dict = PairwiseFlowMetric(compute_epe3d=True, compute_acc3d_outlier=True).evaluate(
                gt_flow_ij, pd_flows[(view_i, view_j)], valid_mask=gt_mask_ij)
            err_full_dict = PairwiseFlowMetric(compute_epe3d=True, compute_acc3d_outlier=True).evaluate(
                gt_flow_ij, pd_flows[(view_i, view_j)])
            err_dict = {k: v.item() for k, v in err_dict.items()}
            err_full_dict = {k: v.item() for k, v in err_full_dict.items()}
            # Put all metrics into list
            for err_name in err_dict.keys():
                err_acc_dict[err_name].append(err_dict[err_name])
                err_acc_dict[err_name + "-full"].append(err_full_dict[err_name])
        err_acc_final_dict = {}
        for mkey, marray in err_acc_dict.items():
            err_acc_final_dict[f"{mkey}-avg"] = np.mean(marray)
            # This won't be ill-posed as ensured in dataset.
            err_acc_final_dict[f"{mkey}-std"] = np.std(marray)
        return err_acc_final_dict

    def test_dataloader(self):
        # Evaluation runs one scene at a time (batch_size=1).
        test_set = FlowDataset(**self.hparams.test_kwargs, spec=[
            DS.FILENAME, DS.QUANTIZED_COORDS, DS.PC, DS.FULL_FLOW, DS.FULL_MASK], hparams=self.hparams)
        return DataLoader(test_set, batch_size=1, shuffle=False, num_workers=4, collate_fn=list_collate)
| 10,385 | 41.740741 | 119 | py |
synorim-merged | synorim-merged/pytorch/models/desc_net_self.py | from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import DataLoader
from dataset.base import DatasetSpec as DS, list_collate
from dataset.flow_dataset import FlowDataset
from models.desc_net import Model as BaseModel
class Model(BaseModel):
    """
    Self-supervised setting of training the descriptor network.

    Reuses the supervised descriptor model but replaces the supervised loss
    with chamfer / laplacian / smoothness terms that need no ground-truth flow.
    """

    @staticmethod
    def compute_self_sup_loss(pc0, pc1, pd_flow01, loss_config):
        # Self-supervised losses for a predicted flow pd_flow01 warping pc0 toward pc1.
        # NOTE(review): assumes pc0/pc1 are (N, 3) point clouds and pd_flow01 is
        # (N, 3) -- confirm against the caller below.
        pc0_warpped = pc0 + pd_flow01
        dist01 = torch.cdist(pc0_warpped, pc1)
        loss_dict = {}
        if loss_config.chamfer_weight > 0.0:
            # Symmetric chamfer: nearest-neighbor distance in both directions.
            chamfer01 = torch.min(dist01, dim=-1).values
            chamfer10 = torch.min(dist01, dim=-2).values
            loss_dict['chamfer'] = loss_config.chamfer_weight * (chamfer01.mean() + chamfer10.mean())
        if loss_config.laplacian_weight > 0.0:
            # Compare local laplacian (umbrella) vectors of the warped source
            # with those of the target, interpolated at the warped positions.
            dist11 = torch.cdist(pc1, pc1)
            _, kidx1 = torch.topk(dist11, 10, dim=-1, largest=False, sorted=False)  # (N, 10)
            pc1_laplacian = torch.sum(pc1[kidx1] - pc1.unsqueeze(1), dim=1) / 9.0
            dist00 = torch.cdist(pc0, pc0)
            _, kidx0 = torch.topk(dist00, 10, dim=-1, largest=False, sorted=False)
            pc0_laplacian = torch.sum(pc0_warpped[kidx0] - pc0_warpped.unsqueeze(1), dim=1) / 9.0
            # Interpolate pc1_laplacian to pc0's positions (inverse-distance weights).
            dist, knn01 = torch.topk(dist01, 5, dim=-1, largest=False, sorted=False)
            pc1_lap_more = pc1_laplacian[knn01]
            norm = torch.sum(1.0 / (dist + 1.0e-6), dim=1, keepdim=True)
            weight = (1.0 / (dist + 1.0e-6)) / norm
            pc1_lap_0 = torch.sum(weight.unsqueeze(-1) * pc1_lap_more, dim=1)
            loss_dict['laplacian'] = loss_config.laplacian_weight * \
                torch.sum((pc1_lap_0 - pc0_laplacian) ** 2, dim=-1).mean()
        if loss_config.smooth_weight > 0.0:
            # Penalize flow variation within each point's 10-NN neighborhood.
            # NOTE(review): `kidx0` is only defined inside the laplacian branch;
            # smooth_weight > 0 with laplacian_weight == 0 would raise NameError.
            grouped_flow = pd_flow01[kidx0]  # (N, K, 3)
            loss_dict['smooth'] = loss_config.smooth_weight * \
                (((grouped_flow - pd_flow01.unsqueeze(1)) ** 2).sum(-1).sum(-1) / 9.0).mean()
        return loss_dict

    def compute_loss(self, batch, desc_output, all_sels, compute_metric=False):
        # Derive soft-correspondence flows from descriptor distances and score
        # them with the self-supervised losses in both directions.
        num_batches = len(batch[DS.PC][0])
        loss_dict = defaultdict(list)
        loss_config = self.hparams.self_supervised_loss
        for batch_idx in range(num_batches):
            cur_pc0, cur_pc1 = batch[DS.PC][0][batch_idx], batch[DS.PC][1][batch_idx]
            # Views are interleaved: two sparse clouds per batch element.
            cur_sel0, cur_sel1 = all_sels[batch_idx * 2 + 0], all_sels[batch_idx * 2 + 1]
            cur_feat0 = desc_output.features_at(batch_idx * 2 + 0)
            cur_feat1 = desc_output.features_at(batch_idx * 2 + 1)
            # Temperature-scaled descriptor distances; self.td clamped from below by td_min.
            dist_mat = torch.cdist(cur_feat0, cur_feat1) / torch.maximum(
                torch.tensor(np.float32(self.hparams.td_min), device=self.device), self.td)
            cur_sub_pc0, cur_sub_pc1 = cur_pc0[cur_sel0], cur_pc1[cur_sel1]
            # Soft-argmin correspondences yield flows in both directions.
            cur_pd0 = torch.softmax(-dist_mat, dim=1) @ cur_sub_pc1 - cur_sub_pc0
            cur_pd1 = torch.softmax(-dist_mat, dim=0).transpose(-1, -2) @ cur_sub_pc0 - cur_sub_pc1
            cur_loss_unsup01 = self.compute_self_sup_loss(cur_sub_pc0, cur_sub_pc1, cur_pd0, loss_config)
            cur_loss_unsup10 = self.compute_self_sup_loss(cur_sub_pc1, cur_sub_pc0, cur_pd1, loss_config)
            for ss_lkey in cur_loss_unsup01.keys():
                loss_dict[f'self-{ss_lkey}'].append(cur_loss_unsup01[ss_lkey])
                loss_dict[f'self-{ss_lkey}'].append(cur_loss_unsup10[ss_lkey])
        # Average each term over both directions and the batch.
        loss_dict = {k: sum(v) / len(v) for k, v in loss_dict.items()}
        return loss_dict, {}

    def get_dataset_spec(self):
        # Self-supervised training needs no ground-truth flow/mask fields.
        return [DS.FILENAME, DS.QUANTIZED_COORDS, DS.PC]

    def train_dataloader(self):
        train_set = FlowDataset(**self.hparams.train_kwargs, spec=self.get_dataset_spec(),
                                hparams=self.hparams, augmentor=None)
        torch.manual_seed(0)  # Keep the shuffle order reproducible.
        return DataLoader(train_set, batch_size=self.hparams.batch_size, shuffle=True,
                          num_workers=4, collate_fn=list_collate)
| 4,150 | 48.416667 | 105 | py |
synorim-merged | synorim-merged/pytorch/models/basis_net.py | import random
from collections import defaultdict
import MinkowskiEngine as ME
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from torch.utils.data import DataLoader
from dataset.base import DatasetSpec as DS, list_collate
from dataset.flow_dataset import FlowDataset, DataAugmentor
from metric import PairwiseFlowMetric
from models.spconv import ResUNet
from models.base_model import BaseModel
import numpy as np
from utils.point import propagate_features
class HuberKernel:
    """
    Huber loss influence function: Equ.(S.9), with the square root being pre-applied.

    Produces per-row IRLS weights for the residuals ``b - a @ c``: rows whose
    residual norm falls below ``robust_k`` keep weight 1, larger residuals are
    down-weighted by ``robust_k / ||r||``.
    """

    def __init__(self, robust_k):
        # Threshold separating the quadratic and linear regimes of the Huber loss.
        self.robust_k = robust_k

    def apply(self, b_mat, a_mat, c_cur):
        """Return the square root of the Huber weights, one per row of ``a_mat``."""
        if c_cur is None:
            # No current estimate yet: start IRLS from uniform weights.
            return torch.ones((a_mat.size(0),), device=a_mat.device)
        res_norm = torch.norm(b_mat - a_mat @ c_cur, dim=-1)
        weights = torch.where(res_norm < self.robust_k,
                              torch.ones_like(res_norm),
                              self.robust_k / res_norm)
        return torch.sqrt(weights)
class Model(BaseModel):
    """
    This model trains the basis&refine network. This is the 2nd stage of training.
    """

    def __init__(self, hparams):
        super().__init__(hparams)
        # Sparse U-Net predicting n_basis scalar basis functions per point,
        # from raw xyz input features (3 channels).
        self.backbone = ResUNet(self.hparams.backbone_args,
                                in_channels=3,
                                out_channels=self.hparams.n_basis,
                                normalize_feature=False,
                                conv1_kernel_size=3)
        # Refinement U-Net: consumes (xyz, basis flow, soft-corr flow) = 9
        # channels and predicts a 3-channel flow correction.
        self.refine = ResUNet(self.hparams.refine_args,
                              in_channels=9,
                              out_channels=3,
                              normalize_feature=False,
                              conv1_kernel_size=3)
        # Learnable softmax temperature used when turning basis distances into
        # soft correspondences (clamped from below by t_min at the use sites).
        self.t = Parameter(torch.tensor(np.float32(self.hparams.t_init)), requires_grad=True)
        # Frozen descriptor network from stage 1, restored from checkpoint.
        self.desc_net = self.load_module(self.hparams.desc_checkpoint)
        assert self.desc_net.hparams.voxel_size == self.hparams.voxel_size, "Voxel size of two models must match!"
    def forward(self, batch):
        """
        Run the basis backbone and the frozen descriptor backbone on every view
        of every batch element, collated into one sparse tensor.

        :param batch: collated batch dict keyed by DatasetSpec entries.
        :return: ([basis sparse output, descriptor sparse output],
                  per-cloud selection indices into the dense point clouds).
        """
        num_batches = len(batch[DS.QUANTIZED_COORDS][0][0])
        num_views = len(batch[DS.QUANTIZED_COORDS])
        all_coords, all_feats, all_sels = [], [], []
        # Clouds are appended view-major within each batch element, so the
        # sparse batch index of (batch_idx, view_idx) is batch_idx * num_views + view_idx.
        for batch_idx in range(num_batches):
            for view_idx in range(num_views):
                all_coords.append(batch[DS.QUANTIZED_COORDS][view_idx][0][batch_idx])
                cur_sel = batch[DS.QUANTIZED_COORDS][view_idx][1][batch_idx]
                all_sels.append(cur_sel)
                # Raw xyz of the selected (voxel-representative) points as input features.
                all_feats.append(batch[DS.PC][view_idx][batch_idx][cur_sel])
        coords_batch, feats_batch = ME.utils.sparse_collate(all_coords, all_feats, device=self.device)
        sinput = ME.SparseTensor(feats_batch, coordinates=coords_batch)
        soutput = self.backbone(sinput)
        # Descriptors come from the frozen stage-1 network; no gradients needed.
        self.desc_net.eval()
        with torch.no_grad():
            s_desc_output = self.desc_net.backbone(sinput)
        return [soutput, s_desc_output], all_sels
@staticmethod
def align_basis_via_gt(basis0, basis1, pc0, pc1, flow01, mask0):
"""
Align basis0 and basis1 via ground-truth mapping
Gt flow is always consistent, so we only need one direction.
"""
warpped_pc0 = pc0 + flow01
dist_mat = torch.cdist(warpped_pc0, pc1) # (B, M, M)
min_idx = torch.argmin(dist_mat, dim=-1) # (B, M), (B, M)
id0_mask = mask0
id1_mask = min_idx[id0_mask]
return basis0[id0_mask], basis1[id1_mask]
def align_basis_via_pd_test(self, basis0, basis1, desc0, desc1, thres=0.3):
"""
Align basis0 (N_k, M) and basis1 (N_l, M) using descriptor search.
This function is used for test-time purpose with multiple inputs, not during training.
"""
# Cross-check + distance prune.
dist_mat = torch.cdist(desc0, desc1)
min_dist, min_idx = torch.min(dist_mat, dim=-1)
_, min_idx2 = torch.min(dist_mat, dim=-2)
mutual_mask = min_idx2[min_idx] == torch.arange(min_idx.size(0), device=self.device)
full_mask = torch.logical_and(mutual_mask, min_dist < thres)
source_inds = torch.where(full_mask)[0]
target_inds = min_idx[source_inds]
return basis0[source_inds], basis1[target_inds]
def align_basis_via_pd_train(self, basis0, basis1, desc0, desc1, force_output=False):
"""
Align basis0 (N_k, M) and basis1 (N_l, M) using descriptor search.
This function is used in training time.
"""
dist_mat = torch.cdist(desc0, desc1)
min_dist, min_idx = torch.min(dist_mat, dim=-1)
_, min_idx2 = torch.min(dist_mat, dim=-2)
mutual_mask = min_idx2[min_idx] == torch.arange(min_idx.size(0), device=self.device)
# Relaxed for better stability.
full_mask = torch.logical_and(mutual_mask, min_dist < 0.5)
source_inds = torch.where(full_mask)[0]
target_inds = min_idx[source_inds]
if source_inds.size(0) < self.hparams.n_match_th:
if force_output:
# Even more relaxed if 0.5 still not works.
source_inds = torch.where(min_dist < 0.8)[0]
target_inds = min_idx[source_inds]
return basis0[source_inds], basis1[target_inds]
# Indicates matching failure...
return None, None
return basis0[source_inds], basis1[target_inds]
def align_basis(self, basis0, basis1, desc0, desc1, pc0, pc1, flow01, mask0):
"""
Random choose different strategies during training.
"""
if random.random() < self.hparams.gt_align_prob and self.hparams.is_training and flow01 is not None:
return self.align_basis_via_gt(basis0, basis1, pc0, pc1, flow01, mask0)
else:
# For validation we never use gt.
phi_0, phi_1 = self.align_basis_via_pd_train(
basis0, basis1, desc0, desc1, force_output=flow01 is None)
# ... but if prediction fails, we have no other choices.
if phi_0 is None:
return self.align_basis_via_gt(basis0, basis1, pc0, pc1, flow01, mask0)
return phi_0, phi_1
def get_robust_kernel(self):
if self.hparams.robust_kernel is not None:
if self.hparams.robust_kernel.type == "huber":
return HuberKernel(self.hparams.robust_kernel.robust_k), \
self.hparams.robust_kernel.robust_iter
else:
raise NotImplementedError
return None, 1
    @staticmethod
    def solve_optimal_maps(phi_i, phi_j, robust_kernel=None, k_i=None, k_j=None, sqrt_mu=1.0,
                           c_init=None, num_iter=1):
        """
        This solves for the optimal FMap using the algorithm described in Alg.S1
        :param phi_i: phi_k^(kl), aligned basis rows of the source cloud
        :param phi_j: phi_l^(kl), aligned basis rows of the target cloud
        :param robust_kernel: robust kernel class (e.g. HuberKernel) or None
        :param k_i: H_k
        :param k_j: H_l
        :param sqrt_mu: relative weights of cycle term and data term. We use 1.
        :param c_init: initial maps. If not provided, it is assumed to be identity.
        :param num_iter: controls the IRLS iterations, if this is put under the synchronization, then 1 iter is enough.
        :return: the (M, M) functional map matrix C.
        """
        assert num_iter == 1 or robust_kernel is not None, "You do not have to iterate if there's no robust norm."
        if phi_i.size(0) < 5:
            # A well-posed solution is not feasible.
            # Leave initialization unchanged.
            if c_init is None:
                return torch.eye(phi_i.size(1), device=phi_i.device)
            return c_init
        c_current = c_init
        for iter_i in range(num_iter):
            # If there is robust kernel, then compute weight for this iteration.
            # (the sqrt weights multiply both sides of the least-squares system)
            if robust_kernel is not None:
                sw = robust_kernel.apply(phi_j, phi_i, c_current)
                s_phi, s_ita = phi_j * sw.unsqueeze(-1), phi_i * sw.unsqueeze(-1)
            else:
                s_phi, s_ita = phi_j, phi_i
            if k_i is None or k_j is None or sqrt_mu == 0.0:
                # No kron or vec needed because without mu the expression can be simplified.
                # On GPU, inverse(ATA) is way faster -- however, it is not accurate which makes the algorithm diverge
                c_current = torch.pinverse(s_ita, rcond=1e-4) @ s_phi
            else:
                s_A = k_i * sqrt_mu
                s_B = k_j * sqrt_mu
                mI = torch.eye(s_phi.size(1), device=phi_i.device)
                nI = torch.eye(s_ita.size(1), device=phi_j.device)
                # Gamma_kl
                left_mat = torch.kron(torch.matmul(s_ita.transpose(-1, -2), s_ita), nI) + \
                    torch.kron(mI, torch.matmul(s_B, s_B.transpose(-1, -2)))
                # b_kl
                right_mat = (torch.matmul(s_ita.transpose(-1, -2), s_phi) +
                             torch.matmul(s_A, s_B.transpose(-1, -2))).view(-1)
                # --- cpu version for solve takes 80% less time.
                # c_current = torch.linalg.solve(left_mat, right_mat).view(s_phi.size(1), s_ita.size(1))
                c_current = torch.linalg.solve(left_mat.cpu(), right_mat.cpu()).view(
                    s_phi.size(1), s_ita.size(1)).to(left_mat.device)
        return c_current
    def compute_flow_from_maps(self, basis_i, basis_j, c_ij, sub_pc_i, sub_pc_j,
                               pcond_multiplier=None, sparse_coords=None):
        """
        Given the functional mapping, compute scene flow.
        :param basis_i: Bases of source point cloud (N_k, M)
        :param basis_j: Bases of target point cloud (N_l, M)
        :param c_ij: Functional maps (M, M)
        :param sub_pc_i: source point cloud (N_k, 3)
        :param sub_pc_j: target point cloud (N_l, 3)
        :param pcond_multiplier: \\Sigma V^T matrix used in preconditioning
        :param sparse_coords: Spconv coordinates for forwarding refinement network.
        :return: dict of (N_k, 3) flow vectors keyed 'f' (basis flow) and 'final' (refined flow)
        """
        # Transport the source bases into the target's basis frame.
        basis_i_aligned = basis_i @ c_ij
        output_flow = {}
        # Compute F^f: reconstruct target coordinates through the aligned basis
        # (target centered for numerical stability) and subtract source positions.
        pc_j_center = torch.mean(sub_pc_j, dim=0, keepdim=True)
        inv_basis_j = torch.pinverse(basis_j, rcond=1e-4)
        basis_flow = basis_i_aligned @ inv_basis_j @ (sub_pc_j - pc_j_center) + pc_j_center - sub_pc_i
        output_flow['f'] = basis_flow
        # Compute F^n: soft nearest-neighbor flow from basis distances.
        if pcond_multiplier is not None:
            basis_dist = torch.cdist(basis_i_aligned @ pcond_multiplier, basis_j @ pcond_multiplier)
        else:
            basis_dist = torch.cdist(basis_i_aligned, basis_j)
        # Learnable softmax temperature self.t, clamped from below by t_min.
        soft_corr_mat = F.softmax(-basis_dist / torch.maximum(
            torch.tensor(np.float32(self.hparams.t_min), device=self.device), self.t), dim=-1)
        ot_flow = torch.matmul(soft_corr_mat, sub_pc_j) - sub_pc_i
        # Compute final F: the refine network predicts a residual on top of
        # ot_flow from the concatenated (xyz, basis flow, soft-corr flow) features.
        all_feats = [sub_pc_i, basis_flow, ot_flow]
        all_feats = [torch.cat(all_feats, dim=1)]
        coords_batch, feats_batch = ME.utils.sparse_collate([sparse_coords], all_feats, device=self.device)
        sinput = ME.SparseTensor(feats_batch, coordinates=coords_batch)
        soutput = self.refine(sinput)
        output_flow['final'] = soutput.features_at(0) + ot_flow
        return output_flow
    def compute_loss(self, batch, basis_output, all_sels, compute_metric=False):
        """
        Compute training losses (cycle consistency, refined-flow supervision,
        basis-flow supervision, smoothness) and optionally EPE3D metrics for a
        two-view batch.

        :param batch: collated batch dict keyed by DatasetSpec entries.
        :param basis_output: [basis sparse tensor, descriptor sparse tensor] from forward().
        :param all_sels: per-cloud voxel selection indices from forward().
        :param compute_metric: also evaluate EPE3D at full resolution (no grad).
        :return: (loss dict of scalars, metric dict of floats).
        """
        num_batches = len(batch[DS.PC][0])
        loss_dict = defaultdict(list)
        metric_dict = defaultdict(list)
        metric = PairwiseFlowMetric(compute_epe3d=True, compute_acc3d_outlier=False)
        robust_kernel, robust_iter = self.get_robust_kernel()
        for batch_idx in range(num_batches):
            cur_pc0, cur_pc1 = batch[DS.PC][0][batch_idx], batch[DS.PC][1][batch_idx]
            # Views are interleaved: two sparse clouds per batch element.
            cur_sel0, cur_sel1 = all_sels[batch_idx * 2 + 0], all_sels[batch_idx * 2 + 1]
            cur_gt0, cur_gt1 = batch[DS.FULL_FLOW][(0, 1)][batch_idx], batch[DS.FULL_FLOW][(1, 0)][batch_idx]
            cur_mask0, cur_mask1 = batch[DS.FULL_MASK][(0, 1)][batch_idx], batch[DS.FULL_MASK][(1, 0)][batch_idx]
            cur_sub_pc0, cur_sub_pc1 = cur_pc0[cur_sel0], cur_pc1[cur_sel1]
            # Restrict gt flow/mask to the sub-sampled (voxel-selected) points.
            if cur_gt0 is not None:
                cur_sub_gt0, cur_sub_mask0 = cur_gt0[cur_sel0], cur_mask0[cur_sel0]
            else:
                cur_sub_gt0, cur_sub_mask0 = None, None
            if cur_gt1 is not None:
                cur_sub_gt1, cur_sub_mask1 = cur_gt1[cur_sel1], cur_mask1[cur_sel1]
            else:
                cur_sub_gt1, cur_sub_mask1 = None, None
            cur_basis0 = basis_output[0].features_at(batch_idx * 2 + 0)
            cur_basis1 = basis_output[0].features_at(batch_idx * 2 + 1)
            cur_desc0 = basis_output[1].features_at(batch_idx * 2 + 0)
            cur_desc1 = basis_output[1].features_at(batch_idx * 2 + 1)
            aligned_basis0, aligned_basis1 = self.align_basis(
                cur_basis0, cur_basis1, cur_desc0, cur_desc1, cur_sub_pc0, cur_sub_pc1, cur_sub_gt0, cur_sub_mask0)
            # Solve functional maps in both directions.
            c01 = self.solve_optimal_maps(aligned_basis0, aligned_basis1,
                                          robust_kernel=robust_kernel, num_iter=robust_iter)
            c10 = self.solve_optimal_maps(aligned_basis1, aligned_basis0,
                                          robust_kernel=robust_kernel, num_iter=robust_iter)
            if self.hparams.ctc_weight:
                # Cycle consistency: C01 @ C10 should be the identity.
                ctc_err = c01 @ c10 - torch.eye(c01.size(0), device=self.device)
                loss_dict['ctc'].append(self.hparams.ctc_weight * torch.sum(ctc_err ** 2))
            coords0 = batch[DS.QUANTIZED_COORDS][0][0][batch_idx]
            coords1 = batch[DS.QUANTIZED_COORDS][1][0][batch_idx]
            flow01 = self.compute_flow_from_maps(cur_basis0, cur_basis1, c01,
                                                 cur_sub_pc0, cur_sub_pc1, sparse_coords=coords0)
            flow10 = self.compute_flow_from_maps(cur_basis1, cur_basis0, c10,
                                                 cur_sub_pc1, cur_sub_pc0, sparse_coords=coords1)
            # End-point-error supervision on the refined ('final') flow.
            if cur_sub_gt0 is not None:
                loss_dict['flow'].append(torch.linalg.norm(flow01['final'] - cur_sub_gt0, dim=-1).mean())
            if cur_sub_gt1 is not None:
                loss_dict['flow'].append(torch.linalg.norm(flow10['final'] - cur_sub_gt1, dim=-1).mean())
            if self.hparams.flow_f_weight > 0.0:
                # Also supervise the raw basis flow, dropping outliers above
                # flow_f_max_bound so bad alignments do not dominate the loss.
                if cur_sub_gt0 is not None:
                    basis_err01 = torch.linalg.norm(flow01['f'] - cur_sub_gt0, dim=-1)
                    basis_err01 = basis_err01[basis_err01 < self.hparams.flow_f_max_bound]
                    if basis_err01.size(0) > 0:
                        loss_dict['flow_f'].append(self.hparams.flow_f_weight * basis_err01.mean())
                if cur_sub_gt1 is not None:
                    basis_err10 = torch.linalg.norm(flow10['f'] - cur_sub_gt1, dim=-1)
                    basis_err10 = basis_err10[basis_err10 < self.hparams.flow_f_max_bound]
                    if basis_err10.size(0) > 0:
                        loss_dict['flow_f'].append(self.hparams.flow_f_weight * basis_err10.mean())
            if self.hparams.smoothness_weight > 0.0:
                # Penalize flow variation within each point's 10-NN neighborhood.
                dist00 = torch.cdist(cur_sub_pc0, cur_sub_pc0)
                _, kidx0 = torch.topk(dist00, 10, dim=-1, largest=False, sorted=False)
                grouped_flow0 = flow01['final'][kidx0]  # (N, K, 3)
                smooth_loss0 = self.hparams.smoothness_weight * \
                    (((grouped_flow0 - flow01['final'].unsqueeze(1)) ** 2).sum(-1).sum(-1) / 9.0).mean()
                dist11 = torch.cdist(cur_sub_pc1, cur_sub_pc1)
                _, kidx1 = torch.topk(dist11, 10, dim=-1, largest=False, sorted=False)
                grouped_flow1 = flow10['final'][kidx1]  # (N, K, 3)
                smooth_loss1 = self.hparams.smoothness_weight * \
                    (((grouped_flow1 - flow10['final'].unsqueeze(1)) ** 2).sum(-1).sum(-1) / 9.0).mean()
                loss_dict['smooth'].append(smooth_loss0)
                loss_dict['smooth'].append(smooth_loss1)
            if compute_metric:
                with torch.no_grad():
                    # Up-sample predictions to full resolution before evaluating.
                    if cur_gt0 is not None:
                        pd_full_flow01 = propagate_features(cur_pc0[cur_sel0], cur_pc0, flow01['final'], batched=False)
                        epe3d01 = metric.evaluate(cur_gt0, pd_full_flow01, cur_mask0)['epe3d']
                        epe3d01_full = metric.evaluate(cur_gt0, pd_full_flow01)['epe3d']
                        metric_dict[f'epe3d'].append(epe3d01.item())
                        metric_dict[f'epe3d-full'].append(epe3d01_full.item())
                    if cur_gt1 is not None:
                        pd_full_flow10 = propagate_features(cur_pc1[cur_sel1], cur_pc1, flow10['final'], batched=False)
                        epe3d10 = metric.evaluate(cur_gt1, pd_full_flow10, cur_mask1)['epe3d']
                        epe3d10_full = metric.evaluate(cur_gt1, pd_full_flow10)['epe3d']
                        metric_dict[f'epe3d'].append(epe3d10.item())
                        metric_dict[f'epe3d-full'].append(epe3d10_full.item())
        # Average all accumulated terms.
        loss_dict = {k: sum(v) / len(v) for k, v in loss_dict.items()}
        if compute_metric:
            metric_dict = {k: np.mean(v) for k, v in metric_dict.items()}
        else:
            metric_dict = {}
        return loss_dict, metric_dict
def training_step(self, batch, batch_idx):
desc_output, all_sels = self(batch)
loss_dict, metric_dict = self.compute_loss(batch, desc_output, all_sels, compute_metric=False)
for metric_name, metric_val in metric_dict.items():
self.log(f'train_loss/{metric_name}', metric_val)
for loss_name, loss_val in loss_dict.items():
self.log(f'train_loss/{loss_name}', loss_val)
loss_sum = sum([t for t in loss_dict.values()])
self.log('train_loss/sum', loss_sum)
return loss_sum
def validation_step(self, batch, batch_idx):
desc_output, all_sels = self(batch)
loss_dict, metric_dict = self.compute_loss(batch, desc_output, all_sels, compute_metric=True)
for metric_name, metric_val in metric_dict.items():
self.log(f'val_loss/{metric_name}', metric_val)
for loss_name, loss_val in loss_dict.items():
self.log(f'val_loss/{loss_name}', loss_val)
loss_sum = sum([t for t in loss_dict.values()])
self.log('val_loss', loss_sum)
return loss_sum
    def get_dataset_spec(self):
        # Stage-2 training consumes voxel coords, raw points and gt flow/mask.
        return [DS.QUANTIZED_COORDS, DS.PC, DS.FULL_FLOW, DS.FULL_MASK]
def train_dataloader(self):
train_set = FlowDataset(**self.hparams.train_kwargs, spec=self.get_dataset_spec(),
hparams=self.hparams, augmentor=DataAugmentor(self.hparams.train_augmentation))
torch.manual_seed(0) # Ensure shuffle is consistent.
return DataLoader(train_set, batch_size=self.hparams.batch_size, shuffle=True,
num_workers=4, collate_fn=list_collate)
def val_dataloader(self):
val_set = FlowDataset(**self.hparams.val_kwargs, spec=self.get_dataset_spec(), hparams=self.hparams)
return DataLoader(val_set, batch_size=self.hparams.batch_size, shuffle=False,
num_workers=4, collate_fn=list_collate)
| 19,450 | 48.874359 | 119 | py |
synorim-merged | synorim-merged/pytorch/models/spconv.py | import torch
import torch.nn as nn
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MEF
class BasicBlockBase(nn.Module):
    """
    A double-conv ResBlock with relu activation, with residual connection.

    Layout: conv3 -> InstanceNorm -> ReLU -> conv3 -> InstanceNorm, then the
    (optionally downsampled) input is added back before the final ReLU.
    """

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, D=3):
        super(BasicBlockBase, self).__init__()
        self.conv1 = ME.MinkowskiConvolution(
            inplanes, planes, kernel_size=3, stride=stride, dimension=D)
        self.norm1 = ME.MinkowskiInstanceNorm(planes)
        self.conv2 = ME.MinkowskiConvolution(
            planes, planes, kernel_size=3, stride=1, dilation=dilation, bias=False, dimension=D)
        self.norm2 = ME.MinkowskiInstanceNorm(planes)
        # Optional projection applied to the identity branch when the main
        # branch changes resolution or channel count.
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = MEF.relu(out)
        out = self.conv2(out)
        out = self.norm2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = MEF.relu(out)
        return out
class ResUNet(ME.MinkowskiNetwork):
    """
    Our main network structure - a U-Net with residual double-conv blocks.
    Please refer to the appendix of our paper for illustration of the model.
    """

    def __init__(self, network_config,
                 in_channels=3, out_channels=32, normalize_feature=None, conv1_kernel_size=None, D=3):
        """
        :param network_config: config exposing `channels` (encoder widths) and
            `tr_channels` (decoder widths); the two lists must have equal length.
        :param in_channels: number of input feature channels.
        :param out_channels: number of output feature channels.
        :param normalize_feature: if truthy, L2-normalize the output features.
        :param conv1_kernel_size: kernel size of the very first convolution.
        :param D: spatial dimension of the sparse tensors.
        """
        super().__init__(D)
        # Bug fix: copy `channels` before mutating it. Previously the bare
        # assignment aliased `network_config.channels`, so `channels[0] =
        # in_channels` below wrote through into the shared config object
        # (note `tr_channels` was already defensively copied).
        channels = list(network_config.channels)
        tr_channels = list(network_config.tr_channels)
        assert len(channels) == len(tr_channels)
        channels[0] = in_channels
        tr_channels.append(0)  # sentinel: the deepest level concatenates nothing
        self.normalize_feature = normalize_feature
        self.in_convs, self.in_norms, self.in_blocks = nn.ModuleList(), nn.ModuleList(), nn.ModuleList()
        self.out_convs, self.out_norms, self.out_blocks = nn.ModuleList(), nn.ModuleList(), nn.ModuleList()
        for layer_id in range(len(channels) - 1):
            # Encoder: the first layer keeps resolution, deeper layers use
            # stride-2 downsampling convolutions.
            self.in_convs.append(ME.MinkowskiConvolution(
                in_channels=channels[layer_id],
                out_channels=channels[layer_id + 1],
                kernel_size=conv1_kernel_size if layer_id == 0 else 3,
                stride=1 if layer_id == 0 else 2,
                dilation=1, bias=False, dimension=D))
            self.in_norms.append(ME.MinkowskiInstanceNorm(channels[layer_id + 1]))
            self.in_blocks.append(BasicBlockBase(
                channels[layer_id + 1], channels[layer_id + 1], D=D))
            # Decoder: transposed conv consuming the skip concat from below.
            self.out_convs.append(ME.MinkowskiConvolutionTranspose(
                in_channels=channels[layer_id + 1] + tr_channels[layer_id + 2],
                out_channels=tr_channels[layer_id + 1],
                kernel_size=1 if layer_id == 0 else 3,
                stride=1 if layer_id == 0 else 2,
                dilation=1,
                bias=False,
                dimension=D))
            # No norm/block for decoder layer 0: out_norms/out_blocks are
            # therefore offset by one relative to out_convs (see forward()).
            if layer_id > 0:
                self.out_norms.append(ME.MinkowskiInstanceNorm(tr_channels[layer_id + 1]))
                self.out_blocks.append(BasicBlockBase(
                    tr_channels[layer_id + 1], tr_channels[layer_id + 1], D=D))
        self.final = ME.MinkowskiConvolution(
            in_channels=tr_channels[1], out_channels=out_channels,
            kernel_size=1, stride=1, dilation=1, bias=True, dimension=D)

    def forward(self, x):
        """Encoder-decoder pass with skip connections; returns a SparseTensor."""
        skip_outputs = []
        for layer_id in range(len(self.in_convs)):
            out_skip = self.in_convs[layer_id](x)
            out_skip = self.in_norms[layer_id](out_skip)
            out_skip = self.in_blocks[layer_id](out_skip)
            x = MEF.relu(out_skip)
            # Pre-relu activations are kept as skip connections.
            skip_outputs.append(out_skip)
        for layer_id in range(len(self.in_convs) - 1, -1, -1):
            x = self.out_convs[layer_id](x)
            if layer_id > 0:
                # Offset-by-one indexing: no norm/block exists for layer 0.
                x = self.out_norms[layer_id - 1](x)
                x = self.out_blocks[layer_id - 1](x)
            x_tr = MEF.relu(x)
            if layer_id > 0:
                x = ME.cat(x_tr, skip_outputs[layer_id - 1])
        # NOTE(review): at decoder layer 0, x_tr is computed but unused -- the
        # final conv sees the pre-relu tensor. Preserved as-is for checkpoint
        # compatibility; confirm whether this is intentional.
        out = self.final(x)
        if self.normalize_feature:
            # L2-normalize per-point output features.
            return ME.SparseTensor(
                out.F / torch.norm(out.F, p=2, dim=1, keepdim=True),
                coordinate_map_key=out.coordinate_map_key,
                coordinate_manager=out.coordinate_manager)
        else:
            return out
| 4,471 | 38.22807 | 107 | py |
synorim-merged | synorim-merged/pytorch/utils/exp.py | import pickle
import random
from collections import OrderedDict
import sys
import numpy as np
import torch
import functools
from pathlib import Path
from omegaconf import OmegaConf
def seed_everything(seed: int):
    """
    Seed every RNG we depend on (python, numpy, torch CPU and all CUDA
    devices) so that runs are reproducible.

    :param seed: integer value
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def parse_config_yaml(yaml_path: Path, args: OmegaConf = None, override: bool = True) -> OmegaConf:
    """
    Load yaml file, and optionally merge it with existing ones.
    This supports a light-weight (recursive) inclusion scheme.
    :param yaml_path: path to the yaml file
    :param args: previous config
    :param override: if option clashes, whether or not to overwrite previous ones.
    :return: new config.
    """
    current = OmegaConf.load(yaml_path)
    # Recursively merge included base configs (updated top to down); include
    # paths are resolved relative to the including file.
    if "include_configs" in current:
        bases = current["include_configs"]
        del current["include_configs"]
        if isinstance(bases, str):
            bases = [bases]
        for base in bases:
            current = parse_config_yaml(yaml_path.parent / Path(base), current, override=False)
    # "assign" entries override nested values via dot-list notation.
    if "assign" in current:
        assignments = current["assign"]
        del current["assign"]
        dotlist = OmegaConf.from_dotlist([f"{key}={val}" for key, val in assignments.items()])
        current = OmegaConf.merge(current, dotlist)
    if args is None:
        args = OmegaConf.create()
    # Merge order decides which side wins on clashing keys.
    return OmegaConf.merge(args, current) if override else OmegaConf.merge(current, args)
def to_target_device(obj, device):
    """
    Recursively move every tensor / module inside nested containers (tuple,
    list, dict) onto ``device``; non-torch leaves are returned unchanged.
    """
    recurse = functools.partial(to_target_device, device=device)
    if isinstance(obj, tuple):
        return tuple(map(recurse, obj))
    if isinstance(obj, list):
        return list(map(recurse, obj))
    if isinstance(obj, dict):
        # Each item is a (key, value) tuple, handled by the tuple branch above.
        return dict(map(recurse, obj.items()))
    if isinstance(obj, (torch.Tensor, torch.nn.Module)):
        return obj.to(device)
    return obj
class AverageMeter:
    """
    Maintain named lists of numbers. Compute their average to evaluate dataset statistics.
    This can not only used for loss, but also for progressive training logging, supporting import/export data.
    """

    def __init__(self):
        # Ordered mapping: loss name -> list of recorded values.
        self.loss_dict = OrderedDict()

    def clear(self):
        """Drop all recorded values."""
        self.loss_dict.clear()

    def export(self, f):
        """Pickle the recorded values to ``f`` (a path or a writable binary file)."""
        if isinstance(f, str):
            # Fix: open via context manager so the handle is always closed
            # (previously the file object was opened and never closed).
            with open(f, 'wb') as fh:
                pickle.dump(self.loss_dict, fh)
        else:
            pickle.dump(self.loss_dict, f)

    def load(self, f):
        """Restore values pickled by :meth:`export`; returns ``self`` for chaining."""
        if isinstance(f, str):
            # Fix: context-managed open (the original leaked the handle).
            with open(f, 'rb') as fh:
                self.loss_dict = pickle.load(fh)
        else:
            self.loss_dict = pickle.load(f)
        return self

    def append_loss(self, losses):
        """Record one value per named loss from ``losses``; ``None`` values are skipped."""
        for loss_name, loss_val in losses.items():
            if loss_val is None:
                continue
            # setdefault keeps insertion order identical to the old update/append pair.
            self.loss_dict.setdefault(loss_name, []).append(loss_val)

    def get_mean_loss_dict(self):
        """Per-name arithmetic mean of all recorded values."""
        return {name: sum(vals) / len(vals) for name, vals in self.loss_dict.items()}

    def get_mean_loss(self):
        """Mean of the per-name means; 0.0 when nothing has been recorded."""
        mean_dict = self.get_mean_loss_dict()
        if not mean_dict:
            return 0.0
        return sum(mean_dict.values()) / len(mean_dict)

    def get_printable_mean(self):
        """Human-readable summary: '(name:mean) ...  sum = total'."""
        text = ""
        all_loss_sum = 0.0
        for loss_name, loss_mean in self.get_mean_loss_dict().items():
            all_loss_sum += loss_mean
            text += "(%s:%.4f) " % (loss_name, loss_mean)
        text += " sum = %.4f" % all_loss_sum
        return text

    def get_newest_loss_dict(self, return_count=False):
        """Latest recorded value per name, optionally with the number of values recorded."""
        loss_dict = {}
        loss_count_dict = {}
        for loss_name, loss_arr in self.loss_dict.items():
            if len(loss_arr) > 0:
                loss_dict[loss_name] = loss_arr[-1]
                loss_count_dict[loss_name] = len(loss_arr)
        if return_count:
            return loss_dict, loss_count_dict
        return loss_dict

    def get_printable_newest(self):
        """One-line summary of the most recent value of every loss."""
        nloss_val, nloss_count = self.get_newest_loss_dict(return_count=True)
        return ", ".join([f"{loss_name}[{nloss_count[loss_name] - 1}]: {nloss_val[loss_name]}"
                          for loss_name in nloss_val.keys()])

    def print_format_loss(self, color=None):
        """Print the mean summary, optionally ANSI-colored ('y'/'g'/'b')."""
        if hasattr(sys.stdout, "terminal"):
            # Some logging wrappers expose the real terminal via `.terminal`.
            color_device = sys.stdout.terminal
        else:
            color_device = sys.stdout
        # ANSI escape codes: 93 = yellow, 92 = green, 94 = blue.
        if color == "y":
            color_device.write('\033[93m')
        elif color == "g":
            color_device.write('\033[92m')
        elif color == "b":
            color_device.write('\033[94m')
        print(self.get_printable_mean(), flush=True)
        if color is not None:
            color_device.write('\033[0m')
| 5,232 | 32.120253 | 110 | py |
synorim-merged | synorim-merged/pytorch/utils/point.py | import torch
def index_points_group(points, knn_idx, t=False):
    """
    Gather neighbor features for every query point.

    Input:
        points: input points data, [B, N', C], or [B, C, N'] (transposed)
        knn_idx: sample index data, [B, N, K]
    Return:
        new_points: indexed points data, [B, N, K, C] or [B, C, N, K] (transposed)
    """
    B, dim1, dim2 = points.size()
    n_channels = dim1 if t else dim2
    _, N, K = knn_idx.size()
    flat_idx = knn_idx.reshape(B, -1)  # (B, N*K)
    if t:
        # Channel-first layout: gather along the last (point) dimension.
        idx = flat_idx.unsqueeze(1).expand(-1, points.size(1), -1)
        gathered = torch.gather(points, dim=-1, index=idx)
        return gathered.reshape(B, n_channels, N, K)
    # Channel-last layout: gather whole feature rows along the point dimension.
    idx = flat_idx.unsqueeze(-1).expand(-1, -1, points.size(-1))
    gathered = torch.gather(points, dim=1, index=idx)
    return gathered.reshape(B, N, K, n_channels)
def propagate_features(source_pc: torch.Tensor, target_pc: torch.Tensor,
                       source_feat: torch.Tensor, nk: int = 3, batched: bool = True):
    """
    Propagate features from the domain of source to the domain of target.
    :param source_pc: (B, N, 3) point coordinates
    :param target_pc: (B, M, 3) point coordinates
    :param source_feat: (B, N, F) source features
    :param nk: number of nearest source points blended per target point
    :param batched: whether dimension B is present or not.
    :return: (B, M, F) target feature
    """
    squeeze_output = not batched
    if squeeze_output:
        source_pc = source_pc.unsqueeze(0)
        target_pc = target_pc.unsqueeze(0)
        source_feat = source_feat.unsqueeze(0)
    # nk nearest source points for every target point.
    nn_dist, nn_idx = torch.topk(torch.cdist(target_pc, source_pc), nk,
                                 dim=-1, largest=False, sorted=False)  # (B, M, nk)
    # Shifted-reciprocal inverse-distance weights, normalized per target point.
    inv_dist = 1 / (nn_dist + 1.0e-6)
    weight = (inv_dist / torch.sum(inv_dist, dim=-1, keepdim=True)).unsqueeze(-1)  # (B, M, nk, 1)
    # Gather neighbor features (inlined equivalent of index_points_group).
    n_batch, n_target, _ = nn_idx.size()
    flat_idx = nn_idx.reshape(n_batch, -1)
    gathered = torch.gather(source_feat, 1,
                            flat_idx.unsqueeze(-1).expand(-1, -1, source_feat.size(-1)))
    neighbor_feat = gathered.reshape(n_batch, n_target, nk, source_feat.size(-1))
    target_feat = (neighbor_feat * weight).sum(-2)  # (B, M, F)
    return target_feat[0] if squeeze_output else target_feat
| 2,010 | 34.280702 | 109 | py |
stylegan3 | stylegan3-main/legacy.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Converting legacy network pickle into the new format."""
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a network pickle, converting legacy TensorFlow pickles on the fly.

    Args:
        f: open binary file-like object containing the pickle.
        force_fp16: if True, rebuild G/D/G_ema with FP16 layers enabled.

    Returns:
        dict with keys 'G', 'D', 'G_ema', 'training_set_kwargs', 'augment_pipe'.
    """
    data = _LegacyUnpickler(f).load()

    # Legacy TensorFlow pickle => convert.
    # (a legacy pickle is a 3-tuple of TF Network stubs: G, D, Gs)
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        G = convert_tf_generator(tf_G)
        D = convert_tf_discriminator(tf_D)
        G_ema = convert_tf_generator(tf_Gs)
        data = dict(G=G, D=D, G_ema=G_ema)

    # Add missing fields.
    if 'training_set_kwargs' not in data:
        data['training_set_kwargs'] = None
    if 'augment_pipe' not in data:
        data['augment_pipe'] = None

    # Validate contents.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))

    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            # FP16 options live under 'synthesis_kwargs' when present,
            # otherwise at the top level of the init kwargs.
            fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
            fp16_kwargs.num_fp16_res = 4
            fp16_kwargs.conv_clamp = 256
            # Rebuild the module only when something actually changed.
            if kwargs != old.init_kwargs:
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    # Placeholder standing in for the legacy `dnnlib.tflib.network.Network`
    # class during unpickling; carries its attributes as a plain EasyDict.
    pass
class _LegacyUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'dnnlib.tflib.network' and name == 'Network':
return _TFNetworkStub
return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into the named parameters/buffers of `module`.

    `patterns` is a flat sequence of (regex, value_fn) pairs: for each
    param/buffer name, the first fully-matching regex wins and
    `value_fn(*match.groups())` supplies the value. A pair whose value_fn is
    None marks the name as intentionally left untouched (e.g. constant
    resampling filters). Raises AssertionError if a name matches no pattern.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            # Intentionally broad: report which tensor failed before re-raising.
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a legacy TensorFlow generator stub (_TFNetworkStub) into a
    PyTorch networks_stylegan2.Generator with equivalent weights.

    Raises ValueError if the pickle is too old (version < 4) or contains an
    unrecognized static kwarg.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        # Record tf_name as recognized; translate an explicit None to `none`.
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none

    # Convert kwargs.
    from training import networks_stylegan2
    network_class = networks_stylegan2.Generator
    kwargs = dnnlib.EasyDict(
        z_dim = kwarg('latent_size', 512),
        c_dim = kwarg('label_size', 0),
        w_dim = kwarg('dlatent_size', 512),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        architecture = kwarg('architecture', 'skip'),
        resample_filter = kwarg('resample_kernel', [1,3,3,1]),
        use_noise = kwarg('use_noise', True),
        activation = kwarg('nonlinearity', 'lrelu'),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 8),
            embed_features = kwarg('label_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.01),
            w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
        ),
    )

    # Check for unknown kwargs. These TF-only options are accepted but ignored.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    kwarg('conditioning')
    kwarg('fused_modconv')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            # Very old pickles store per-lod ToRGB layers; remap them to the
            # per-resolution naming and fall back to the 'orig' architecture.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Bugfix: `kwargs` is flat (no 'synthesis' sub-dict), so the old
            # `kwargs.synthesis.kwargs.architecture = 'orig'` raised
            # AttributeError on EasyDict. Set the top-level key instead,
            # mirroring convert_tf_discriminator.
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.
    G = network_class(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(G,
        r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter', None,
        r'.*\.act_filter', None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a legacy TensorFlow discriminator stub (_TFNetworkStub) into a
    PyTorch networks_stylegan2.Discriminator with equivalent weights.

    Raises ValueError if the pickle is too old (version < 4) or contains an
    unrecognized static kwarg.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Record tf_name as recognized and fetch it with a default.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)

    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )

    # Check for unknown kwargs. These TF-only options are accepted but ignored.
    kwarg('structure')
    kwarg('conditioning')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            # Very old pickles store per-lod FromRGB layers; remap them to the
            # per-resolution naming and fall back to the 'orig' architecture.
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params. TF stores conv weights as HWIO; PyTorch expects OIHW,
    # hence the transpose(3, 2, 0, 1) calls below.
    from training import networks_stylegan2
    D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,
    )
    return D
#----------------------------------------------------------------------------
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    print(f'Loading "{source}"...')
    # open_url accepts both local paths and URLs.
    with dnnlib.util.open_url(source) as f:
        # load_network_pkl transparently converts legacy TF pickles.
        data = load_network_pkl(f, force_fp16=force_fp16)
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as f:
        pickle.dump(data, f)
    print('Done.')
#----------------------------------------------------------------------------
if __name__ == "__main__":
convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,561 | 50.117284 | 154 | py |
stylegan3 | stylegan3-main/gen_video.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate lerp videos using pretrained network pickle."""
import copy
import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import imageio
import numpy as np
import scipy.interpolate
import torch
from tqdm import tqdm
import legacy
#----------------------------------------------------------------------------
def layout_grid(img, grid_w=None, grid_h=1, float_to_uint8=True, chw_to_hwc=True, to_numpy=True):
    """Tile a batch of NCHW images into a single (grid_h x grid_w) image.

    If `grid_w` is omitted it is derived from the batch size. Optionally
    converts float pixels in [-1, 1] to uint8, reorders channels to HWC, and
    returns a numpy array instead of a torch tensor.
    """
    n, c, h, w = img.shape
    if grid_w is None:
        grid_w = n // grid_h
    assert n == grid_w * grid_h
    out = img
    if float_to_uint8:
        # Map [-1, 1] floats to [0, 255] uint8.
        out = (out * 127.5 + 128).clamp(0, 255).to(torch.uint8)
    # Interleave the batch dimension into rows/columns of one big image.
    out = out.reshape(grid_h, grid_w, c, h, w).permute(2, 0, 3, 1, 4).reshape(c, grid_h * h, grid_w * w)
    if chw_to_hwc:
        out = out.permute(1, 2, 0)
    return out.cpu().numpy() if to_numpy else out
#----------------------------------------------------------------------------
def gen_interp_video(G, mp4: str, seeds, shuffle_seed=None, w_frames=60*4, kind='cubic', grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, device=torch.device('cuda'), **video_kwargs):
    """Render a latent-space interpolation video of generator `G` to `mp4`.

    Each grid cell interpolates through `num_keyframes` W-space latents
    (derived from `seeds`) with `w_frames` video frames between consecutive
    keyframes. `wraps` extra copies of the keyframes are tiled on each side so
    the spline loops smoothly. Extra `video_kwargs` are forwarded to imageio's
    writer (e.g. bitrate).
    """
    grid_w = grid_dims[0]
    grid_h = grid_dims[1]
    if num_keyframes is None:
        # Infer the keyframe count from the number of seeds.
        if len(seeds) % (grid_w*grid_h) != 0:
            raise ValueError('Number of input seeds must be divisible by grid W*H')
        num_keyframes = len(seeds) // (grid_w*grid_h)

    # Repeat seeds cyclically to fill every keyframe of every grid cell.
    all_seeds = np.zeros(num_keyframes*grid_h*grid_w, dtype=np.int64)
    for idx in range(num_keyframes*grid_h*grid_w):
        all_seeds[idx] = seeds[idx % len(seeds)]

    if shuffle_seed is not None:
        rng = np.random.RandomState(seed=shuffle_seed)
        rng.shuffle(all_seeds)

    # Sample z latents per seed and map to W space, then reshape to
    # (grid_h, grid_w, num_keyframes, ...).
    zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device)
    ws = G.mapping(z=zs, c=None, truncation_psi=psi)
    _ = G.synthesis(ws[:1]) # warm up
    ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:])

    # Interpolation.
    grid = []
    for yi in range(grid_h):
        row = []
        for xi in range(grid_w):
            # Tile the keyframes (2*wraps+1 copies) so the spline wraps around
            # the ends without boundary artifacts.
            x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1))
            y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1])
            interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0)
            row.append(interp)
        grid.append(row)

    # Render video.
    video_out = imageio.get_writer(mp4, mode='I', fps=60, codec='libx264', **video_kwargs)
    for frame_idx in tqdm(range(num_keyframes * w_frames)):
        imgs = []
        for yi in range(grid_h):
            for xi in range(grid_w):
                interp = grid[yi][xi]
                # Fractional keyframe index => interpolated W vector.
                w = torch.from_numpy(interp(frame_idx / w_frames)).to(device)
                img = G.synthesis(ws=w.unsqueeze(0), noise_mode='const')[0]
                imgs.append(img)
        video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h))
    video_out.close()
#----------------------------------------------------------------------------
def parse_range(s: Union[str, List[int]]) -> List[int]:
    '''Parse a comma separated list of numbers or ranges and return a list of ints.

    Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
    '''
    if isinstance(s, list):
        return s
    nums: List[int] = []
    for token in s.split(','):
        m = re.fullmatch(r'(\d+)-(\d+)', token)
        if m is None:
            nums.append(int(token))
        else:
            # Inclusive range 'a-b'.
            nums.extend(range(int(m.group(1)), int(m.group(2)) + 1))
    return nums
#----------------------------------------------------------------------------
def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]:
    '''Parse a 'M,N' or 'MxN' integer tuple.

    Example:
        '4x2' returns (4,2)
        '0,1' returns (0,1)
    '''
    if isinstance(s, tuple):
        return s
    m = re.fullmatch(r'(\d+)[x,](\d+)', s)
    if m is None:
        raise ValueError(f'cannot parse tuple {s}')
    return (int(m.group(1)), int(m.group(2)))
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=parse_range, help='List of random seeds', required=True)
@click.option('--shuffle-seed', type=int, help='Random seed to use for shuffling seed order', default=None)
@click.option('--grid', type=parse_tuple, help='Grid width/height, e.g. \'4x3\' (default: 1x1)', default=(1,1))
@click.option('--num-keyframes', type=int, help='Number of seeds to interpolate through.  If not specified, determine based on the length of the seeds array given by --seeds.', default=None)
@click.option('--w-frames', type=int, help='Number of frames to interpolate between latents', default=120)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--output', help='Output .mp4 filename', type=str, required=True, metavar='FILE')
def generate_images(
    network_pkl: str,
    seeds: List[int],
    shuffle_seed: Optional[int],
    truncation_psi: float,
    grid: Tuple[int,int],
    num_keyframes: Optional[int],
    w_frames: int,
    output: str
):
    """Render a latent vector interpolation video.
    Examples:
    \b
    # Render a 4x2 grid of interpolations for seeds 0 through 31.
    python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\
        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
    Animation length and seed keyframes:
    The animation length is either determined based on the --seeds value or explicitly
    specified using the --num-keyframes option.
    When num keyframes is specified with --num-keyframes, the output video length
    will be 'num_keyframes*w_frames' frames.
    If --num-keyframes is not specified, the number of seeds given with
    --seeds must be divisible by grid size W*H (--grid). In this case the
    output video length will be '# seeds/(w*h)*w_frames' frames.
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')  # NOTE: requires a CUDA device.
    with dnnlib.util.open_url(network_pkl) as f:
        # Only the EMA copy of the generator is needed for inference.
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    # Delegate all rendering; 'bitrate' is forwarded to imageio's writer.
    gen_interp_video(G=G, mp4=output, bitrate='12M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, seeds=seeds, shuffle_seed=shuffle_seed, psi=truncation_psi)
#----------------------------------------------------------------------------
if __name__ == "__main__":
generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 7,283 | 39.243094 | 190 | py |
stylegan3 | stylegan3-main/avg_spectra.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Compare average power spectra between real and generated images,
or between multiple generators."""
import os
import numpy as np
import torch
import torch.fft
import scipy.ndimage
import matplotlib.pyplot as plt
import click
import tqdm
import dnnlib
import legacy
from training import dataset
#----------------------------------------------------------------------------
# Setup an iterator for streaming images, in uint8 NCHW format, based on the
# respective command line options.
def stream_source_images(source, num, seed, device, data_loader_kwargs=None): # => num_images, image_size, image_iter
    """Build a lazy iterator of uint8 NCHW images from `source`.

    `source` may be a network pickle (images are generated using consecutive
    random seeds starting at `seed`), a dataset zip, or an image directory.
    Returns (num_images, image_size, image_iter).
    """
    ext = source.split('.')[-1].lower()
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)

    if ext == 'pkl':
        # Generator pickle: sample `num` images.
        if num is None:
            raise click.ClickException('--num is required when --source points to network pickle')
        with dnnlib.util.open_url(source) as f:
            G = legacy.load_network_pkl(f)['G_ema'].to(device)
        def generate_image(seed):
            rnd = np.random.RandomState(seed)
            z = torch.from_numpy(rnd.randn(1, G.z_dim)).to(device)
            c = torch.zeros([1, G.c_dim], device=device)
            if G.c_dim > 0:
                # Conditional model: pick a random class label per image.
                c[:, rnd.randint(G.c_dim)] = 1
            return (G(z=z, c=c) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        _ = generate_image(seed) # warm up
        image_iter = (generate_image(seed + idx) for idx in range(num))
        return num, G.img_resolution, image_iter

    elif ext == 'zip' or os.path.isdir(source):
        # Dataset zip or image directory.
        dataset_obj = dataset.ImageFolderDataset(path=source, max_size=num, random_seed=seed)
        if num is not None and num != len(dataset_obj):
            raise click.ClickException(f'--source contains fewer than {num} images')
        data_loader = torch.utils.data.DataLoader(dataset_obj, batch_size=1, **data_loader_kwargs)
        image_iter = (image.to(device) for image, _label in data_loader)
        return len(dataset_obj), dataset_obj.resolution, image_iter

    else:
        raise click.ClickException('--source must point to network pickle, dataset zip, or directory')
#----------------------------------------------------------------------------
# Load average power spectrum from the specified .npz file and construct
# the corresponding heatmap for visualization.
def construct_heatmap(npz_file, smooth):
    """Load an average power spectrum from `npz_file` and return
    (heatmap_in_dB, image_size) ready for visualization."""
    npz_data = np.load(npz_file)
    spectrum = npz_data['spectrum']
    image_size = npz_data['image_size']
    # Convert power to decibels and center DC in the middle of the map.
    hmap = np.fft.fftshift(np.log10(spectrum) * 10)
    # Wrap the first row/column around so the map covers the full Nyquist range.
    hmap = np.concatenate([hmap, hmap[:1, :]], axis=0)
    hmap = np.concatenate([hmap, hmap[:, :1]], axis=1)
    if smooth > 0:
        hmap = scipy.ndimage.gaussian_filter(hmap, sigma=spectrum.shape[0] / image_size * smooth, mode='nearest')
    return hmap, image_size
#----------------------------------------------------------------------------
@click.group()
def main():
    """Compare average power spectra between real and generated images,
    or between multiple generators.
    Example:
    \b
    # Calculate dataset mean and std, needed in subsequent steps.
    python avg_spectra.py stats --source=~/datasets/ffhq-1024x1024.zip
    \b
    # Calculate average spectrum for the training data.
    python avg_spectra.py calc --source=~/datasets/ffhq-1024x1024.zip \\
        --dest=tmp/training-data.npz --mean=112.684 --std=69.509
    \b
    # Calculate average spectrum for a pre-trained generator.
    python avg_spectra.py calc \\
        --source=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhq-1024x1024.pkl \\
        --dest=tmp/stylegan3-r.npz --mean=112.684 --std=69.509 --num=70000
    \b
    # Display results.
    python avg_spectra.py heatmap tmp/training-data.npz
    python avg_spectra.py heatmap tmp/stylegan3-r.npz
    python avg_spectra.py slices tmp/training-data.npz tmp/stylegan3-r.npz
    \b
    # Save as PNG.
    python avg_spectra.py heatmap tmp/training-data.npz --save=tmp/training-data.png --dpi=300
    python avg_spectra.py heatmap tmp/stylegan3-r.npz --save=tmp/stylegan3-r.png --dpi=300
    python avg_spectra.py slices tmp/training-data.npz tmp/stylegan3-r.npz --save=tmp/slices.png --dpi=300
    """
    # No body: this is only the click group container; subcommands do the work.
#----------------------------------------------------------------------------
@main.command()
@click.option('--source', help='Network pkl, dataset zip, or directory', metavar='[PKL|ZIP|DIR]', required=True)
@click.option('--num', help='Number of images to process  [default: all]', metavar='INT', type=click.IntRange(min=1))
@click.option('--seed', help='Random seed for selecting the images', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
def stats(source, num, seed, device=torch.device('cuda')):
    """Calculate dataset mean and standard deviation needed by 'calc'."""
    # NOTE(review): default `device` is evaluated at import time and assumes CUDA.
    torch.multiprocessing.set_start_method('spawn')
    num_images, _image_size, image_iter = stream_source_images(source=source, num=num, seed=seed, device=device)

    # Accumulate moments: [pixel count, sum(x), sum(x^2)] in float64.
    moments = torch.zeros([3], dtype=torch.float64, device=device)
    for image in tqdm.tqdm(image_iter, total=num_images):
        image = image.to(torch.float64)
        moments += torch.stack([torch.ones_like(image).sum(), image.sum(), image.square().sum()])
    moments = moments / moments[0]  # normalize by total pixel count

    # Compute mean and standard deviation.
    mean = moments[1]
    std = (moments[2] - moments[1].square()).sqrt()  # Var = E[x^2] - E[x]^2
    print(f'--mean={mean:g} --std={std:g}')
#----------------------------------------------------------------------------
@main.command()
@click.option('--source', help='Network pkl, dataset zip, or directory', metavar='[PKL|ZIP|DIR]', required=True)
@click.option('--dest', help='Where to store the result', metavar='NPZ', required=True)
@click.option('--mean', help='Dataset mean for whitening', metavar='FLOAT', type=float, required=True)
@click.option('--std', help='Dataset standard deviation for whitening', metavar='FLOAT', type=click.FloatRange(min=0), required=True)
@click.option('--num', help='Number of images to process  [default: all]', metavar='INT', type=click.IntRange(min=1))
@click.option('--seed', help='Random seed for selecting the images', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
@click.option('--beta', help='Shape parameter for the Kaiser window', metavar='FLOAT', type=click.FloatRange(min=0), default=8, show_default=True)
@click.option('--interp', help='Frequency-domain interpolation factor', metavar='INT', type=click.IntRange(min=1), default=4, show_default=True)
def calc(source, dest, mean, std, num, seed, beta, interp, device=torch.device('cuda')):
    """Calculate average power spectrum and store it in .npz file."""
    torch.multiprocessing.set_start_method('spawn')
    num_images, image_size, image_iter = stream_source_images(source=source, num=num, seed=seed, device=device)
    # Zero-pad each image by factor `interp` for frequency-domain interpolation.
    spectrum_size = image_size * interp
    padding = spectrum_size - image_size

    # Setup window function: 2D separable Kaiser window, normalized to unit
    # energy, broadcast over the NCHW image batch.
    window = torch.kaiser_window(image_size, periodic=False, beta=beta, device=device)
    window *= window.square().sum().rsqrt()
    window = window.ger(window).unsqueeze(0).unsqueeze(1)

    # Accumulate power spectrum, averaged over batch and channel dims.
    spectrum = torch.zeros([spectrum_size, spectrum_size], dtype=torch.float64, device=device)
    for image in tqdm.tqdm(image_iter, total=num_images):
        image = (image.to(torch.float64) - mean) / std  # whiten with dataset stats
        image = torch.nn.functional.pad(image * window, [0, padding, 0, padding])
        spectrum += torch.fft.fftn(image, dim=[2,3]).abs().square().mean(dim=[0,1])
    spectrum /= num_images

    # Save result.
    if os.path.dirname(dest):
        os.makedirs(os.path.dirname(dest), exist_ok=True)
    np.savez(dest, spectrum=spectrum.cpu().numpy(), image_size=image_size)
#----------------------------------------------------------------------------
@main.command()
@click.argument('npz-file', nargs=1)
@click.option('--save', help='Save the plot and exit', metavar='[PNG|PDF|...]')
@click.option('--dpi', help='Figure resolution', metavar='FLOAT', type=click.FloatRange(min=1), default=100, show_default=True)
@click.option('--smooth', help='Amount of smoothing', metavar='FLOAT', type=click.FloatRange(min=0), default=1.25, show_default=True)
def heatmap(npz_file, save, smooth, dpi):
    """Visualize 2D heatmap based on the given .npz file."""
    hmap, image_size = construct_heatmap(npz_file=npz_file, smooth=smooth)

    # Setup plot. Frequency axes are in cycles per image, spanning +-Nyquist.
    plt.figure(figsize=[6, 4.8], dpi=dpi, tight_layout=True)
    freqs = np.linspace(-0.5, 0.5, num=hmap.shape[0], endpoint=True) * image_size
    ticks = np.linspace(freqs[0], freqs[-1], num=5, endpoint=True)
    levels = np.linspace(-40, 20, num=13, endpoint=True)  # contour levels in dB

    # Draw heatmap: filled contours plus faint contour lines on top.
    plt.xlim(ticks[0], ticks[-1])
    plt.ylim(ticks[0], ticks[-1])
    plt.xticks(ticks)
    plt.yticks(ticks)
    plt.contourf(freqs, freqs, hmap, levels=levels, extend='both', cmap='Blues')
    plt.gca().set_aspect('equal')
    plt.colorbar(ticks=levels)
    plt.contour(freqs, freqs, hmap, levels=levels, extend='both', linestyles='solid', linewidths=1, colors='midnightblue', alpha=0.2)

    # Display or save.
    if save is None:
        plt.show()
    else:
        if os.path.dirname(save):
            os.makedirs(os.path.dirname(save), exist_ok=True)
        plt.savefig(save)
#----------------------------------------------------------------------------
@main.command()
@click.argument('npz-files', nargs=-1, required=True)
@click.option('--save', help='Save the plot and exit', metavar='[PNG|PDF|...]')
@click.option('--dpi', help='Figure resolution', metavar='FLOAT', type=click.FloatRange(min=1), default=100, show_default=True)
@click.option('--smooth', help='Amount of smoothing', metavar='FLOAT', type=click.FloatRange(min=0), default=0, show_default=True)
def slices(npz_files, save, dpi, smooth):
    """Visualize 1D slices based on the given .npz files."""
    # One curve per input file; the legend label is the file's base name.
    cases = [dnnlib.EasyDict(npz_file=npz_file) for npz_file in npz_files]
    for c in cases:
        c.hmap, c.image_size = construct_heatmap(npz_file=c.npz_file, smooth=smooth)
        c.label = os.path.splitext(os.path.basename(c.npz_file))[0]

    # Check consistency: slices are only comparable at the same resolution.
    image_size = cases[0].image_size
    hmap_size = cases[0].hmap.shape[0]
    if any(c.image_size != image_size or c.hmap.shape[0] != hmap_size for c in cases):
        raise click.ClickException('All .npz must have the same resolution')

    # Setup plot. The 45-degree diagonal reaches a higher maximum frequency
    # (image_size / sqrt(2)) than the horizontal slice (image_size / 2).
    plt.figure(figsize=[12, 4.6], dpi=dpi, tight_layout=True)
    hmap_center = hmap_size // 2
    hmap_range = np.arange(hmap_center, hmap_size)
    freqs0 = np.linspace(0, image_size / 2, num=(hmap_size // 2 + 1), endpoint=True)
    freqs45 = np.linspace(0, image_size / np.sqrt(2), num=(hmap_size // 2 + 1), endpoint=True)
    xticks0 = np.linspace(freqs0[0], freqs0[-1], num=9, endpoint=True)
    xticks45 = np.round(np.linspace(freqs45[0], freqs45[-1], num=9, endpoint=True))
    yticks = np.linspace(-50, 30, num=9, endpoint=True)

    # Draw 0 degree slice (horizontal, from DC outward).
    plt.subplot(1, 2, 1)
    plt.title('0\u00b0 slice')
    plt.xlim(xticks0[0], xticks0[-1])
    plt.ylim(yticks[0], yticks[-1])
    plt.xticks(xticks0)
    plt.yticks(yticks)
    for c in cases:
        plt.plot(freqs0, c.hmap[hmap_center, hmap_range], label=c.label)
    plt.grid()
    plt.legend(loc='upper right')

    # Draw 45 degree slice (diagonal, from DC outward).
    plt.subplot(1, 2, 2)
    plt.title('45\u00b0 slice')
    plt.xlim(xticks45[0], xticks45[-1])
    plt.ylim(yticks[0], yticks[-1])
    plt.xticks(xticks45)
    plt.yticks(yticks)
    for c in cases:
        plt.plot(freqs45, c.hmap[hmap_range, hmap_range], label=c.label)
    plt.grid()
    plt.legend(loc='upper right')

    # Display or save.
    if save is None:
        plt.show()
    else:
        if os.path.dirname(save):
            os.makedirs(os.path.dirname(save), exist_ok=True)
        plt.savefig(save)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 12,693 | 44.826715 | 146 | py |
stylegan3 | stylegan3-main/gen_images.py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Generate images using pretrained network pickle."""
import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
import legacy
#----------------------------------------------------------------------------
def parse_range(s: Union[str, List]) -> List[int]:
    '''Parse a comma separated list of numbers or ranges and return a list of ints.

    Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
    '''
    if isinstance(s, list):
        return s
    out: List[int] = []
    pattern = re.compile(r'^(\d+)-(\d+)$')
    for piece in s.split(','):
        span = pattern.match(piece)
        if span is None:
            out.append(int(piece))
        else:
            # Inclusive range 'a-b'.
            out += range(int(span.group(1)), int(span.group(2)) + 1)
    return out
#----------------------------------------------------------------------------
def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
    '''Parse a floating point 2-vector of syntax 'a,b'.

    Example:
        '0,1' returns (0,1)
    '''
    if isinstance(s, tuple):
        return s
    first, sep, second = s.partition(',')
    if sep and ',' not in second:
        return (float(first), float(second))
    raise ValueError(f'cannot parse 2-vector {s}')
#----------------------------------------------------------------------------
def make_transform(translate: Tuple[float,float], angle: float):
    """Build a 3x3 homogeneous 2D transform: rotation by `angle` degrees
    combined with translation by `translate` (x, y)."""
    radians = angle / 360.0 * np.pi * 2
    c = np.cos(radians)
    s = np.sin(radians)
    m = np.eye(3)
    m[0, 0], m[0, 1], m[0, 2] = c, s, translate[0]
    m[1, 0], m[1, 1], m[1, 2] = -s, c, translate[1]
    return m
#----------------------------------------------------------------------------
@click.command()
@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
@click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
@click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
    network_pkl: str,
    seeds: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str,
    translate: Tuple[float,float],
    rotate: float,
    class_idx: Optional[int]
):
    """Generate images using pretrained network pickle.
    Examples:
    \b
    # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
    python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
    \b
    # Generate uncurated images with truncation using the MetFaces-U dataset
    python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
        --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
    """
    print('Loading networks from "%s"...' % network_pkl)
    # NOTE(review): hard-codes CUDA; this will raise if no GPU is available — confirm intended.
    device = torch.device('cuda')
    # open_url handles both local paths and URLs; the pickle carries the EMA generator.
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
    os.makedirs(outdir, exist_ok=True)
    # Labels.
    # A conditional network (c_dim != 0) requires a one-hot class label; an
    # unconditional one ignores --class (with a warning) and uses a zero vector.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            raise click.ClickException('Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    else:
        if class_idx is not None:
            print ('warn: --class=lbl ignored when running on an unconditional network')
    # Generate images.
    for seed_idx, seed in enumerate(seeds):
        # seed_idx is 0-based, so progress reads "(0/N)" for the first image.
        print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        # Deterministic latent: each seed gets its own RandomState so outputs are reproducible.
        z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
        # Construct an inverse rotation/translation matrix and pass to the generator. The
        # generator expects this matrix as an inverse to avoid potentially failing numerical
        # operations in the network.
        # Only StyleGAN3-style generators expose `synthesis.input`; on other
        # architectures --translate/--rotate are silently ignored.
        if hasattr(G.synthesis, 'input'):
            m = make_transform(translate, rotate)
            m = np.linalg.inv(m)
            G.synthesis.input.transform.copy_(torch.from_numpy(m))
        img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
        # NCHW -> NHWC, then map to uint8 pixels; presumably the generator emits
        # values in roughly [-1, 1] (scaled by 127.5, offset 128) — TODO confirm.
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # Click fills in the parameters from sys.argv, hence the pylint suppression.
    generate_images() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 5,735 | 38.287671 | 144 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.