code stringlengths 101 5.91M |
|---|
def input_fn(is_training, data_dir, batch_size):
    """Build the tf.data input pipeline over the TFRecord shards.

    Args:
        is_training: whether to build the (shuffled) training pipeline.
        data_dir: directory containing the TFRecord files.
        batch_size: per-step batch size passed through to batching.

    Returns:
        The dataset produced by `process_record_dataset`.
    """
    file_list = get_filenames(is_training, data_dir)
    ds = tf.data.Dataset.from_tensor_slices(file_list)
    # Shuffle at the file level so different shards are read each epoch.
    if is_training:
        ds = ds.shuffle(buffer_size=_NUM_TRAIN_FILES)
    # Read several TFRecord files in parallel, interleaving their records.
    ds = ds.interleave(
        tf.data.TFRecordDataset,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return process_record_dataset(
        dataset=ds,
        is_training=is_training,
        batch_size=batch_size,
        shuffle_buffer=_SHUFFLE_BUFFER,
        parse_record_fn=parse_record,
    )
def model_scaling(layer_setting, arch_setting):
    """Scale a stage-wise block configuration by width/depth multipliers.

    ``arch_setting[0]`` multiplies each block's channel count (rounded to a
    multiple of 8 via ``make_divisible``); ``arch_setting[1]`` multiplies the
    number of blocks per stage (rounded up). The first and last stages are
    kept as-is; middle stages are first split wherever the output channel
    count changes, then resized, then re-merged when a group starts with a
    stride-1 block.
    """
    width_mult, depth_mult = arch_setting[0], arch_setting[1]

    # --- width scaling: round every block's channel count ---
    scaled = copy.deepcopy(layer_setting)
    for stage in scaled:
        for block in stage:
            block[1] = make_divisible(block[1] * width_mult, 8)

    # --- split middle stages at every output-channel change ---
    split_setting = [scaled[0]]
    for stage in scaled[1:-1]:
        boundaries = [0]
        boundaries.extend(
            i + 1
            for i in range(len(stage) - 1)
            if stage[i + 1][1] != stage[i][1]
        )
        boundaries.append(len(stage))
        for lo, hi in zip(boundaries[:-1], boundaries[1:]):
            split_setting.append(stage[lo:hi])
    split_setting.append(scaled[-1])

    # --- depth scaling: ceil-scale each middle group's block count ---
    old_depths = [len(stage) for stage in split_setting[1:-1]]
    new_depths = [int(math.ceil(depth_mult * depth)) for depth in old_depths]

    # --- merge: groups whose first block has stride 1 (index 3) fuse into
    # the previous group, except for the very first middle group ---
    merged = [split_setting[0]]
    for i, stage in enumerate(split_setting[1:-1]):
        if new_depths[i] <= old_depths[i]:
            candidate = stage[:new_depths[i]]
        else:
            # Grow by repeating the (shared) last block config.
            candidate = copy.deepcopy(stage) + [stage[-1]] * (new_depths[i] - old_depths[i])
        if candidate[0][3] == 1 and i != 0:
            merged[-1] += candidate.copy()
        else:
            merged.append(candidate.copy())
    merged.append(split_setting[-1])
    return merged
class TFSegformerMLP(tf.keras.layers.Layer):
    """Linear embedding layer: flattens the spatial grid of a feature map
    into a token axis and projects it to the decoder hidden size."""

    def __init__(self, config: SegformerConfig, **kwargs):
        super().__init__(**kwargs)
        self.proj = tf.keras.layers.Dense(config.decoder_hidden_size, name='proj')

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        shape = shape_list(hidden_states)
        height, width, hidden_dim = shape[1], shape[2], shape[-1]
        # Collapse (height, width) into a single sequence dimension.
        tokens = tf.reshape(hidden_states, (-1, height * width, hidden_dim))
        return self.proj(tokens)
# NOTE(review): the bare call below appears to be a class decorator that
# lost its '@' (and possibly part of its name) during extraction — likely
# something like `@bind_schema(QasmQobjInstructionSchema)`. Confirm against
# the original source; as written this line is invalid at module level.
_schema(QasmQobjInstructionSchema)
class QasmQobjInstruction(QobjInstruction):
    """A single instruction of a QASM Qobj experiment, validated by
    ``QasmQobjInstructionSchema``."""

    def __init__(self, name, **kwargs):
        # Forward everything to the QobjInstruction base class unchanged.
        super().__init__(name=name, **kwargs)
class ResnetCompleteNetworkTest(tf.test.TestCase):
    """Tests for the complete resnet_v2 network, built from a tiny model."""

    def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v2_small'):
        """A shallow and thin ResNet v2 for faster tests."""
        block = resnet_v2.resnet_v2_block
        # Four tiny blocks; with the root block, strides 2/2/2/1 yield a
        # nominal output stride of 32 (see testAtrousFullyConvolutionalValues).
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)

    def testClassificationEndPoints(self):
        """Logits and predictions endpoints exist with [N, 1, 1, C] shapes."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(), [2, 1, 1, num_classes])

    def testClassificationShapes(self):
        """Per-block endpoint shapes for a 224x224 classification input."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testFullyConvolutionalEndpointShapes(self):
        """Endpoint shapes without global pooling (fully convolutional)."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testRootlessFullyConvolutionalEndpointShapes(self):
        """Endpoint shapes when the root block is omitted."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 128, 128, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, include_root_block=False, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 64, 64, 4], 'resnet/block2': [2, 32, 32, 8], 'resnet/block3': [2, 16, 16, 16], 'resnet/block4': [2, 16, 16, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalEndpointShapes(self):
        """With output_stride=8, later blocks keep the 41x41 resolution."""
        global_pool = False
        num_classes = 10
        output_stride = 8
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, output_stride=output_stride, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 41, 41, 8], 'resnet/block3': [2, 41, 41, 16], 'resnet/block4': [2, 41, 41, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

    def testAtrousFullyConvolutionalValues(self):
        """Atrous output, subsampled to the nominal stride, matches the
        values of the plain (non-atrous) network."""
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        (output, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False, output_stride=output_stride)
                        if (output_stride is None):
                            factor = 1
                        else:
                            factor = (nominal_stride // output_stride)
                        output = resnet_utils.subsample(output, factor)
                        # Reuse the same variables for the comparison network.
                        tf.get_variable_scope().reuse_variables()
                        (expected, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False)
                        sess.run(tf.global_variables_initializer())
                        self.assertAllClose(output.eval(), expected.eval(), atol=0.0001, rtol=0.0001)

    def testUnknownBatchSize(self):
        """The graph builds with a dynamic batch dimension."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = True
        num_classes = 10
        inputs = create_test_input(None, height, width, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, _) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 1, 1, num_classes))

    def testFullyConvolutionalUnknownHeightWidth(self):
        """The graph builds with dynamic spatial dimensions."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 3, 3, 32))

    def testAtrousFullyConvolutionalUnknownHeightWidth(self):
        """Dynamic spatial dimensions combined with output_stride=8."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        output_stride = 8
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool, output_stride=output_stride)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 9, 9, 32))
# NOTE(review): the bare `(scope='session')` below is a decorator that lost
# its '@' and function name during extraction — almost certainly
# `@pytest.fixture(scope='session')`. Confirm against the original source;
# as written the line is not valid Python.
(scope='session')
def saliency_gpt2_model_tiny():
    # Session-scoped fixture: load a tiny random GPT-2 with the saliency
    # attribution method once per test session.
    return inseq.load_model('hf-internal-testing/tiny-random-GPT2LMHeadModel', 'saliency')
class Task(NamedTuple):
    """Immutable description of one video-processing job."""

    video_name: str  # name identifying the video (presumably for output naming)
    video_path: str  # path to the input video file
    out_path: str    # path where the result is written
    min_frame: int   # first frame of the range to process (inclusive, presumably)
    max_frame: int   # last frame of the range to process
    target_fps: float  # frame rate to resample to
    max_height: int  # cap on output frame height (aspect presumably preserved)
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 Inception-ResNet residual block (Block17).

    The two branches are concatenated, projected back to the input depth,
    scaled by `scale`, and added to the input as a residual connection.
    """
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 160, [1, 7], scope='Conv2d_0b_1x7')
            branch_1 = slim.conv2d(branch_1, 192, [7, 1], scope='Conv2d_0c_7x1')
        mixed = tf.concat(axis=3, values=[branch_0, branch_1])
        # Linear 1x1 projection back to the input depth (no BN, no activation).
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
        return net
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block: optional pointwise
    expansion, depthwise conv, then a linear pointwise projection."""

    def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1, norm_layer=nn.BatchNorm2d):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        # The skip connection is only valid when spatial size and channel
        # count are preserved.
        self.use_res_connect = stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))
        modules = []
        if expand_ratio != 1:
            # Pointwise expansion to the widened hidden dimension.
            modules.append(_ConvBNReLU(in_channels, hidden_dim, 1, relu6=True, norm_layer=norm_layer))
        modules += [
            # Depthwise 3x3 conv (groups == channels), optionally dilated.
            _ConvBNReLU(hidden_dim, hidden_dim, 3, stride, dilation, dilation, groups=hidden_dim, relu6=True, norm_layer=norm_layer),
            # Linear projection: no activation after this conv.
            nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
            norm_layer(out_channels),
        ]
        self.conv = nn.Sequential(*modules)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
def process(args):
    """Generate per-split audio manifest TSVs for one language.

    Reads the top-N samples for `args.lang`, optionally converts clips to
    WAV, assigns each sample to its split, and writes one
    `<split>.audio.tsv` manifest per split.
    """
    data_root = Path(args.data_root).absolute() / args.lang
    print('Generating manifest...')
    df_top_n = get_top_n(data_root)
    id_to_split, speakers = get_splits(df_top_n)
    if args.convert_to_wav:
        convert_to_wav(data_root, df_top_n['path'].tolist())
    manifest_by_split = {split: defaultdict(list) for split in SPLITS}
    # Audio location depends on whether clips were converted to WAV.
    audio_dir = data_root / ('wav' if args.convert_to_wav else 'clips')
    suffix = '.wav' if args.convert_to_wav else '.mp3'
    for sample in tqdm(df_top_n.to_dict(orient='index').values()):
        sample_id = sample['id']
        entry = manifest_by_split[id_to_split[sample_id]]
        entry['id'].append(sample_id)
        entry['audio'].append((audio_dir / f'{sample_id}{suffix}').as_posix())
        entry['n_frames'].append(sample['n_frames'])
        entry['tgt_text'].append(sample['sentence'])
        entry['speaker'].append(sample['client_id'])
        # Source and target text are identical (ASR-style manifest).
        entry['src_text'].append(sample['sentence'])
    output_root = Path(args.output_manifest_root).absolute()
    output_root.mkdir(parents=True, exist_ok=True)
    for split in SPLITS:
        save_df_to_tsv(pd.DataFrame.from_dict(manifest_by_split[split]),
                       output_root / f'{split}.audio.tsv')
class LinearClassifierEvaluation(pl.LightningModule):
    """Linear-probe evaluation: trains a classifier on top of a frozen,
    pretrained trunk.

    The trunk weights are extracted from a Lightning checkpoint (keys
    matching `trunk_pattern`), loaded, and frozen; only the classifier is
    optimized. Optional (test-time-augmentation-aware) transforms can be
    applied per stage.
    """

    def __init__(self, trunk: DictConfig, classifier: DictConfig, optimizer: DictConfig, pretrained_trunk_path: str, trunk_pattern: str='^(trunk\\.)', train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, val_time_augmentation: Optional[DictConfig]=None, test_time_augmentation: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.optimizer_cfg = optimizer
        # Pull the trunk sub-state-dict out of the checkpoint and strip the
        # "trunk." key prefix so it loads into a freshly instantiated trunk.
        trunk_state_dict = get_sub_state_dict_from_pl_ckpt(checkpoint_path=pretrained_trunk_path, pattern=trunk_pattern)
        trunk_state_dict = remove_pattern_in_keys_from_dict(d=trunk_state_dict, pattern=trunk_pattern)
        self.trunk = hydra.utils.instantiate(trunk)
        self.trunk.load_state_dict(trunk_state_dict)
        # Freeze the trunk: linear probing trains only the classifier.
        for param in self.trunk.parameters():
            param.requires_grad = False
        self.classifier = hydra.utils.instantiate(classifier)
        self.train_transform = hydra.utils.instantiate(train_transform) if train_transform is not None else None
        self.val_transform = hydra.utils.instantiate(val_transform) if val_transform is not None else None
        self.test_transform = hydra.utils.instantiate(test_transform) if test_transform is not None else None
        self.val_time_augmentation = get_test_time_augmentation_fn(**val_time_augmentation) if val_time_augmentation else None
        self.test_time_augmentation = get_test_time_augmentation_fn(**test_time_augmentation) if test_time_augmentation else None

    # NOTE(review): `learnable_params` and `num_layers` read like stripped
    # @property accessors; confirm against callers before restoring.
    def learnable_params(self) -> List[Parameter]:
        """Parameters to optimize: only the classifier's (trunk is frozen)."""
        params = list(self.classifier.parameters())
        return params

    def num_layers(self) -> int:
        """Number of layers of the classifier (used for layer-wise schemes)."""
        return self.classifier.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Map a parameter name (prefixed with 'classifier.') to its layer id.

        Fix: the previous version computed the id but never returned it.
        """
        return self.classifier.get_param_layer_id(name[len('classifier.'):])

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        """Optimizer steps per epoch, or None when no datamodule is attached.

        Declared as a property because `configure_optimizers` reads it as an
        attribute (`self.training_steps_per_epoch`), not as a call.
        """
        if self.trainer.datamodule is not None:
            return self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size
        return None

    def on_fit_start(self) -> None:
        # Metrics are created here because num_classes comes from the datamodule.
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.train_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.train_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)
        self.val_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.val_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_test_start(self) -> None:
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.test_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.test_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def forward(self, x: Tensor) -> Dict[str, Any]:
        """Return classifier logits and the (frozen) trunk representation."""
        with torch.no_grad():
            h = self.trunk(x)
        preds = self.classifier(h)
        return {'preds': preds, 'h': h}

    def configure_optimizers(self) -> Dict[Any, Any]:
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.training_steps_per_epoch, model=self)
        if scheduler is None:
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def shared_step(self, x: Tensor):
        """Trunk forward without gradients, then classifier logits."""
        with torch.no_grad():
            h = self.trunk(x)
        preds = self.classifier(h)
        return preds

    def on_train_epoch_start(self) -> None:
        # Keep the frozen trunk in eval mode (e.g. BatchNorm running stats).
        self.trunk.eval()

    def training_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        (x, targets) = (batch['input'], batch['label'])
        if self.train_transform is not None:
            # Transforms run without autograd and in full precision.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.train_transform(x)
        preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        acc_1 = self.train_acc_1(preds, targets)
        acc_5 = self.train_acc_5(preds, targets)
        self.log('train/loss', loss, on_epoch=True)
        self.log('train/acc_1', acc_1, on_epoch=True, prog_bar=True)
        self.log('train/acc_5', acc_5, on_epoch=True)
        return loss

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.val_time_augmentation is not None:
            # TTA path: aggregate softmaxed predictions per sample id.
            (x, targets, ids) = (batch['input'], batch['label'], batch['idx'])
            if self.val_transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self.shared_step(x)
            preds = preds.softmax(-1)
            (preds, targets, ids) = self.val_time_augmentation(preds, targets, ids)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if self.val_transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.val_acc_1(preds, targets)
        self.val_acc_5(preds, targets)
        self.log('val/loss', loss)
        self.log('val/acc_1', self.val_acc_1, prog_bar=True)
        self.log('val/acc_5', self.val_acc_5)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.test_time_augmentation is not None:
            (x, targets, ids) = (batch['input'], batch['label'], batch['idx'])
            if self.test_transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self.shared_step(x)
            preds = preds.softmax(-1)
            (preds, targets, ids) = self.test_time_augmentation(preds, targets, ids)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if self.test_transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.test_acc_1(preds, targets)
        self.test_acc_5(preds, targets)
        self.log('test/loss', loss)
        self.log('test/acc_1', self.test_acc_1, prog_bar=True)
        self.log('test/acc_5', self.test_acc_5)
        return loss
class NeuronCoverage():
    """Tracks neuron coverage of a model across inputs: the fraction of
    neurons whose (per-input min-max scaled) activation exceeded each
    threshold at least once."""

    def __init__(self, thresholds=(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
        self._thresholds = thresholds
        # Maps (layer_id, layer_neuron_id) -> flat global neuron index.
        self._layer_neuron_id_to_global_neuron_id = {}
        # Per-threshold array of covered flags, one slot per global neuron.
        self._results = {}
        self._num_layer = 0
        self._num_neuron = 0
        self._num_input = 0
        self._report_layers_and_neurons = True

    def update(self, intermediate_layer_outputs, features_index):
        """Accumulate coverage from a batch of per-layer activations.

        `features_index` selects the axis holding the neuron/feature
        dimension (e.g. -1 for channels-last, 0 for channels-first).
        """
        intermediate_layer_outputs_new = []
        for intermediate_layer_output in intermediate_layer_outputs:
            intermediate_layer_output = common.to_numpy(intermediate_layer_output)
            intermediate_layer_outputs_new.append(intermediate_layer_output)
        intermediate_layer_outputs = intermediate_layer_outputs_new
        # Lazy initialization on the first batch: build the neuron-id
        # mapping and the per-threshold coverage arrays.
        if (len(self._results.keys()) == 0):
            current_global_neuron_id = 0
            for (layer_id, intermediate_layer_output) in enumerate(intermediate_layer_outputs):
                intermediate_layer_output_single_input = intermediate_layer_output[0]
                num_layer_neuron = intermediate_layer_output_single_input.shape[features_index]
                for layer_neuron_id in range(num_layer_neuron):
                    self._layer_neuron_id_to_global_neuron_id[(layer_id, layer_neuron_id)] = current_global_neuron_id
                    current_global_neuron_id += 1
                self._num_layer += 1
                self._num_neuron += num_layer_neuron
            for threshold in self._thresholds:
                self._results[threshold] = np.zeros(shape=self._num_neuron)
        num_input = len(intermediate_layer_outputs[0])
        self._num_input += num_input
        # Min-max scale each layer's activations per input to [0, 1].
        for layer_id in range(len(intermediate_layer_outputs)):
            intermediate_layer_outputs[layer_id] = self._scale(intermediate_layer_outputs[layer_id])
        for (layer_id, intermediate_layer_output) in enumerate(intermediate_layer_outputs):
            # Rank > 2 means spatial dims remain (conv-style output) -> use
            # the spatial-mean variant; otherwise the direct-value variant.
            if (len(intermediate_layer_output.shape) > 2):
                result = self._calc_1(intermediate_layer_output, features_index)
            else:
                result = self._calc_2(intermediate_layer_output, features_index)
            num_layer_neuron = intermediate_layer_outputs[layer_id][0].shape[features_index]
            for layer_neuron_id in range(num_layer_neuron):
                global_neuron_id = self._layer_neuron_id_to_global_neuron_id[(layer_id, layer_neuron_id)]
                for threshold in self._thresholds:
                    # Strictly greater-than: a neuron is covered once its
                    # scaled activation exceeds the threshold.
                    if (result[layer_neuron_id] > threshold):
                        self._results[threshold][global_neuron_id] = True

    def report(self, *args):
        """Print layer/neuron totals (once) and per-threshold coverage."""
        if self._report_layers_and_neurons:
            self._report_layers_and_neurons = False
            print('[NeuronCoverage] Time:{:s}, Layers: {:d}, Neurons: {:d}'.format(common.readable_time_str(), self._num_layer, self._num_neuron))
        for threshold in self._thresholds:
            print('[NeuronCoverage] Time:{:s}, Num: {:d}, Threshold: {:.6f}, Neuron Coverage: {:.6f}({:d}/{:d})'.format(common.readable_time_str(), self._num_input, threshold, self.get(threshold), len([v for v in self._results[threshold] if v]), self._num_neuron))

    def get(self, threshold):
        """Coverage ratio for `threshold`; 0 before any update."""
        return ((len([v for v in self._results[threshold] if v]) / self._num_neuron) if (self._num_neuron != 0) else 0)

    # NOTE(review): the bare `(parallel=True)` lines below look like
    # decorators stripped of their '@' and name during extraction — most
    # likely `@staticmethod` + `@numba.njit(parallel=True)` (the methods take
    # no `self` and use `prange`). Confirm against the original source.
    (parallel=True)
    def _scale(intermediate_layer_output):
        # Per-input min-max normalization to [0, 1], in place.
        for input_id in prange(intermediate_layer_output.shape[0]):
            intermediate_layer_output[input_id] = ((intermediate_layer_output[input_id] - intermediate_layer_output[input_id].min()) / (intermediate_layer_output[input_id].max() - intermediate_layer_output[input_id].min()))
        return intermediate_layer_output

    (parallel=True)
    def _calc_1(intermediate_layer_output, features_index):
        # Conv-style outputs: per neuron, the max over inputs of the mean
        # activation over the spatial dimensions.
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        result = np.zeros(shape=num_layer_neuron, dtype=np.float32)
        for input_id in prange(intermediate_layer_output.shape[0]):
            for layer_neuron_id in prange(num_layer_neuron):
                if (features_index == (- 1)):
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                mean = np.mean(neuron_output)
                if (mean > result[layer_neuron_id]):
                    result[layer_neuron_id] = mean
        return result

    (parallel=True)
    def _calc_2(intermediate_layer_output, features_index):
        # Dense-style outputs: per neuron, the max activation over inputs.
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        result = np.zeros(shape=num_layer_neuron, dtype=np.float32)
        for input_id in prange(intermediate_layer_output.shape[0]):
            for layer_neuron_id in prange(num_layer_neuron):
                if (features_index == (- 1)):
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                if (neuron_output > result[layer_neuron_id]):
                    result[layer_neuron_id] = neuron_output
        return result
def _NeedToReturnNothingDiagnoser(msg):
    """Diagnoses the NRN (Need to Return Nothing) disease from compiler
    output in `msg`: a gMock action returns a value where a void-returning
    action is required (e.g. a non-final action in DoAll)."""
    gcc_regex = (_GCC_FILE_LINE_RE + "instantiated from here\\n.*gmock-actions\\.h.*error: instantiation of \\'testing::internal::ReturnAction<R>::Impl<F>::value_\\' as type \\'void\\'")
    clang_regex1 = (("error: field has incomplete type \\'Result\\' \\(aka \\'void\\'\\)(\\r)?\\n(.*\\n)*?" + _CLANG_NON_GMOCK_FILE_LINE_RE) + "note: in instantiation of function template specialization \\'testing::internal::ReturnAction<(?P<return_type>.*)>::operator Action<void \\(.*\\)>\\' requested here")
    clang_regex2 = (("error: field has incomplete type \\'Result\\' \\(aka \\'void\\'\\)(\\r)?\\n(.*\\n)*?" + _CLANG_NON_GMOCK_FILE_LINE_RE) + "note: in instantiation of function template specialization \\'testing::internal::DoBothAction<.*>::operator Action<(?P<return_type>.*) \\(.*\\)>\\' requested here")
    diagnosis = '\nYou are using an action that returns %(return_type)s, but it needs to return\nvoid. Please use a void-returning action instead.\n\nAll actions but the last in DoAll(...) must return void. Perhaps you need\nto re-arrange the order of actions in a DoAll(), if you are using one?'
    # GCC's message does not capture the offending return type, so a
    # placeholder is substituted up front; the Clang regexes capture it as
    # the `return_type` group for _GenericDiagnoser to fill in.
    return _GenericDiagnoser('NRN', 'Need to Return Nothing', [(gcc_regex, (diagnosis % {'return_type': '*something*'})), (clang_regex1, diagnosis), (clang_regex2, diagnosis)], msg)
def training(sess, neuralnet, saver, dataset, epochs, batch_size):
    """Train the SRCNN for `epochs` epochs, saving progress images,
    summaries, per-epoch checkpoints, and loss/PSNR curves.

    Fix: corrected the 'Final Epcoh' typo in the final summary print.
    """
    start_time = time.time()
    loss_tr = 0
    list_loss = []
    list_psnr = []
    list_psnr_static = []
    makedir((PACK_PATH + '/training'))
    makedir((PACK_PATH + '/static'))
    makedir((PACK_PATH + '/static/reconstruction'))
    print(('\nTraining SRCNN to %d epochs' % epochs))
    train_writer = tf.compat.v1.summary.FileWriter((PACK_PATH + '/Checkpoint'))
    # One fixed ("static") sample is held out to visualize reconstruction
    # quality on the same input across epochs.
    (X_static, Y_static, _) = dataset.next_train(batch_size=1)
    img_input = np.squeeze(X_static, axis=0)
    img_ground = np.squeeze(Y_static, axis=0)
    plt.imsave(('%s/static/bicubic.png' % PACK_PATH), img_input)
    plt.imsave(('%s/static/high-resolution.png' % PACK_PATH), img_ground)
    iteration = 0
    for epoch in range(epochs):
        # Iterate the dataset until it signals the end of the epoch.
        while True:
            (X_tr, Y_tr, terminator) = dataset.next_train(batch_size=batch_size)
            (summaries, _) = sess.run([neuralnet.summaries, neuralnet.optimizer], feed_dict={neuralnet.inputs: X_tr, neuralnet.outputs: Y_tr})
            (loss_tr, psnr_tr) = sess.run([neuralnet.loss, neuralnet.psnr], feed_dict={neuralnet.inputs: X_tr, neuralnet.outputs: Y_tr})
            list_loss.append(loss_tr)
            list_psnr.append(psnr_tr)
            train_writer.add_summary(summaries, iteration)
            iteration += 1
            if terminator:
                break
        # Visualize the first sample of the last training batch of the epoch.
        (X_tmp, Y_tmp) = (np.expand_dims(X_tr[0], axis=0), np.expand_dims(Y_tr[0], axis=0))
        (img_recon, tmp_psnr) = sess.run([neuralnet.recon, neuralnet.psnr], feed_dict={neuralnet.inputs: X_tmp, neuralnet.outputs: Y_tmp})
        (img_input, img_recon, img_ground) = (np.squeeze(X_tmp, axis=0), np.squeeze(img_recon, axis=0), np.squeeze(Y_tmp, axis=0))
        plt.clf()
        plt.rcParams['font.size'] = 100
        plt.figure(figsize=(100, 40))
        plt.subplot(131)
        plt.title('Low-Resolution')
        plt.imshow(img_input)
        plt.subplot(132)
        plt.title('Reconstruction')
        plt.imshow(img_recon)
        plt.subplot(133)
        plt.title('High-Resolution')
        plt.imshow(img_ground)
        plt.tight_layout(pad=1, w_pad=1, h_pad=1)
        plt.savefig(('%s/training/%09d_psnr_%d.png' % (PACK_PATH, epoch, int(tmp_psnr))))
        plt.close()
        # Reconstruct the fixed static sample for an epoch-over-epoch view.
        (img_recon, tmp_psnr) = sess.run([neuralnet.recon, neuralnet.psnr], feed_dict={neuralnet.inputs: X_static, neuralnet.outputs: Y_static})
        list_psnr_static.append(tmp_psnr)
        img_recon = np.squeeze(img_recon, axis=0)
        plt.imsave(('%s/static/reconstruction/%09d_psnr_%d.png' % (PACK_PATH, epoch, int(tmp_psnr))), img_recon)
        print(('Epoch [%d / %d] | Loss: %f PSNR: %f' % (epoch, epochs, loss_tr, psnr_tr)))
        # Checkpoint after every epoch.
        saver.save(sess, (PACK_PATH + '/Checkpoint/model_checker'))
    print(('Final Epoch | Loss: %f PSNR: %f' % (loss_tr, psnr_tr)))
    elapsed_time = (time.time() - start_time)
    print(('Elapsed: ' + str(elapsed_time)))
    save_graph(contents=list_loss, xlabel='Iteration', ylabel='L2 loss', savename='loss')
    save_graph(contents=list_psnr, xlabel='Iteration', ylabel='PSNR (dB)', savename='psnr')
    # NOTE(review): list_psnr_static holds one value per epoch, so 'Iteration'
    # is arguably mislabeled here — confirm before changing the axis label.
    save_graph(contents=list_psnr_static, xlabel='Iteration', ylabel='PSNR (dB)', savename='psnr_static')
class ResidualAttentionModel(nn.Module):
    """Residual attention network producing a single sigmoid output
    (binary prediction) from a 1-channel input.

    Fix: replaced deprecated `F.sigmoid` with the tensor method
    `Tensor.sigmoid()` in `forward` (identical result).
    """

    def __init__(self):
        super(ResidualAttentionModel, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
        self.rb1 = ResidualBlock(32, 1)
        self.mpool1 = nn.MaxPool2d(kernel_size=2)
        self.features = nn.Sequential(AttentionModule_stg0(32, 32))
        self.classifier = nn.Sequential(CRResidualBlock(32, 8, (4, 16)), CRResidualBlock(8, 4, (8, 32)), CRResidualBlock(4, 2, (16, 64)), CRResidualBlock(2, 1, (32, 128)))
        self.mpool2 = nn.Sequential(nn.BatchNorm2d(1), nn.ReLU(inplace=True), nn.AvgPool2d(kernel_size=(3, 20), stride=2))
        self.fc = nn.Linear(189, 1)

        # Initialize conv/linear weights with Xavier-normal, BatchNorm with
        # weight=1 / bias=0 (applied recursively to all submodules).
        def _weights_init(m):
            classname = m.__class__.__name__
            if (classname.find('Conv') != (- 1)):
                xavier_normal_(m.weight)
            elif (classname.find('Linear') != (- 1)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif (classname.find('BatchNorm') != (- 1)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.apply(_weights_init)

    def forward(self, x):
        x = self.conv1(x)
        x = self.rb1(x)
        x = self.mpool1(x)
        x = self.features(x)
        x = self.classifier(x)
        x = self.mpool2(x)
        # Flatten all non-batch dims before the final linear layer.
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        # F.sigmoid is deprecated; the tensor method is equivalent.
        return x.sigmoid()
class LightGCN(nn.Module):
    """LightGCN: propagates user/item embeddings over the user-item
    bipartite graph and averages the embeddings of all layers."""

    def __init__(self, num_users: int, num_items: int, emb_dim: int, num_layers: int=3, drop_rate: float=0.0) -> None:
        super().__init__()
        self.num_users, self.num_items = num_users, num_items
        self.num_layers = num_layers
        self.drop_rate = drop_rate
        self.u_embedding = nn.Embedding(num_users, emb_dim)
        self.i_embedding = nn.Embedding(num_items, emb_dim)
        self.reset_parameters()

    def reset_parameters(self):
        # N(0, 0.1) initialization for both embedding tables.
        for table in (self.u_embedding, self.i_embedding):
            nn.init.normal_(table.weight, 0, 0.1)

    def forward(self, ui_bigraph: BiGraph) -> Tuple[torch.Tensor, torch.Tensor]:
        # Dropout is only active in training mode.
        active_drop = self.drop_rate if self.training else 0.0
        # Layer 0: raw user embeddings stacked on top of item embeddings.
        layer_embs = [torch.cat([self.u_embedding.weight, self.i_embedding.weight], dim=0)]
        for _ in range(self.num_layers):
            layer_embs.append(ui_bigraph.smoothing_with_GCN(layer_embs[-1], drop_rate=active_drop))
        # Final embedding = mean over all propagation layers (incl. layer 0).
        averaged = torch.stack(layer_embs, dim=1).mean(dim=1)
        u_out, i_out = torch.split(averaged, [self.num_users, self.num_items], dim=0)
        return (u_out, i_out)
def test_init_pose(index):
init_pose_list = [[25.0, 0.0, np.pi], [24.8, 3.13, ((np.pi * 26) / 25)], [24.21, 6.22, ((np.pi * 27) / 25)], [23.24, 9.2, ((np.pi * 28) / 25)], [21.91, 12.04, ((np.pi * 29) / 25)], [20.23, 14.69, ((np.pi * 30) / 25)], [18.22, 17.11, ((np.pi * 31) / 25)], [15.94, 19.26, ((np.pi * 32) / 25)], [13.4, 21.11, ((np.pi * 33) / 25)], [10.64, 22.62, ((np.pi * 34) / 25)], [7.73, 23.78, ((np.pi * 35) / 25)], [4.68, 24.56, ((np.pi * 36) / 25)], [1.57, 24.95, ((np.pi * 37) / 25)], [(- 1.57), 24.95, ((np.pi * 38) / 25)], [(- 4.68), 24.56, ((np.pi * 39) / 25)], [(- 7.73), 23.78, ((np.pi * 40) / 25)], [(- 10.64), 22.62, ((np.pi * 41) / 25)], [(- 13.4), 21.11, ((np.pi * 42) / 25)], [(- 15.94), 19.26, ((np.pi * 43) / 25)], [(- 18.22), 17.11, ((np.pi * 44) / 25)], [(- 20.23), 14.69, ((np.pi * 45) / 25)], [(- 21.91), 12.04, ((np.pi * 46) / 25)], [(- 23.24), 9.2, ((np.pi * 47) / 25)], [(- 24.21), 6.22, ((np.pi * 48) / 25)], [(- 24.8), 3.13, ((np.pi * 49) / 25)], [(- 25.0), (- 0.0), ((np.pi * 50) / 25)], [(- 24.8), (- 3.13), ((np.pi * 51) / 25)], [(- 24.21), (- 6.22), ((np.pi * 52) / 25)], [(- 23.24), (- 9.2), ((np.pi * 53) / 25)], [(- 21.91), (- 12.04), ((np.pi * 54) / 25)], [(- 20.23), (- 14.69), ((np.pi * 55) / 25)], [(- 18.22), (- 17.11), ((np.pi * 56) / 25)], [(- 15.94), (- 19.26), ((np.pi * 57) / 25)], [(- 13.4), (- 21.11), ((np.pi * 58) / 25)], [(- 10.64), (- 22.62), ((np.pi * 59) / 25)], [(- 7.73), (- 23.78), ((np.pi * 60) / 25)], [(- 4.68), (- 24.56), ((np.pi * 61) / 25)], [(- 1.57), (- 24.95), ((np.pi * 62) / 25)], [1.57, (- 24.95), ((np.pi * 63) / 25)], [4.68, (- 24.56), ((np.pi * 64) / 25)], [7.73, (- 23.78), ((np.pi * 65) / 25)], [10.64, (- 22.62), ((np.pi * 66) / 25)], [13.4, (- 21.11), ((np.pi * 67) / 25)], [15.94, (- 19.26), ((np.pi * 68) / 25)], [18.22, (- 17.11), ((np.pi * 69) / 25)], [20.23, (- 14.69), ((np.pi * 70) / 25)], [21.91, (- 12.04), ((np.pi * 71) / 25)], [23.24, (- 9.2), ((np.pi * 72) / 25)], [24.21, (- 6.22), ((np.pi * 73) / 25)], [24.8, (- 3.13), ((np.pi * 
74) / 25)]]
return init_pose_list[index] |
def longest_common_subsequence(a, b):
    """Return a longest common subsequence of strings `a` and `b`.

    Plain recursive formulation (exponential in the worst case; fine for
    short inputs).

    Fix: when the first characters match, the recursion must consume one
    character from *both* strings. The previous version recursed on
    ``(a[1:], b)``, which could match ``b[0]`` again and return a string
    that is not actually a subsequence of `b` (e.g. lcs("aa", "a") -> "aa").
    """
    if not a or not b:
        return ''
    if a[0] == b[0]:
        # Matching heads always belong to some LCS (standard LCS lemma).
        return a[0] + longest_common_subsequence(a[1:], b[1:])
    return max(
        longest_common_subsequence(a, b[1:]),
        longest_common_subsequence(a[1:], b),
        key=len,
    )
class SpeakerDiarizationConfig(base.PipelineConfig):
    """Configuration for an online speaker diarization pipeline: models,
    window/latency geometry, and clustering hyper-parameters."""

    def __init__(self, segmentation: (m.SegmentationModel | None)=None, embedding: (m.EmbeddingModel | None)=None, duration: float=5, step: float=0.5, latency: ((float | Literal[('max', 'min')]) | None)=None, tau_active: float=0.6, rho_update: float=0.3, delta_new: float=1, gamma: float=3, beta: float=10, max_speakers: int=20, normalize_embedding_weights: bool=False, device: (torch.device | None)=None, sample_rate: int=16000, **kwargs):
        # Default to the pyannote segmentation/embedding models when not given.
        self.segmentation = (segmentation or m.SegmentationModel.from_pyannote('pyannote/segmentation'))
        self.embedding = (embedding or m.EmbeddingModel.from_pyannote('pyannote/embedding'))
        self._duration = duration
        self._sample_rate = sample_rate
        self._step = step
        self._latency = latency
        # Normalize latency: None/'min' -> one step (lowest possible),
        # 'max' -> the full window duration (highest possible).
        if ((self._latency is None) or (self._latency == 'min')):
            self._latency = self._step
        elif (self._latency == 'max'):
            self._latency = self._duration
        self.tau_active = tau_active
        self.rho_update = rho_update
        self.delta_new = delta_new
        self.gamma = gamma
        self.beta = beta
        self.max_speakers = max_speakers
        self.normalize_embedding_weights = normalize_embedding_weights
        # Prefer CUDA when available unless a device is given explicitly.
        self.device = (device or torch.device(('cuda' if torch.cuda.is_available() else 'cpu')))

    # NOTE(review): the four accessors below read like @property methods whose
    # decorators were stripped during extraction (base.PipelineConfig likely
    # declares them as properties). Confirm against the original source.
    def duration(self) -> float:
        # Sliding-window duration in seconds.
        return self._duration

    def step(self) -> float:
        # Hop between consecutive windows in seconds.
        return self._step

    def latency(self) -> float:
        # Effective output latency in seconds (normalized in __init__).
        return self._latency

    def sample_rate(self) -> int:
        # Expected audio sample rate in Hz.
        return self._sample_rate
def split(table_path, train_path, val_path, test_path):
    """Split a molecule/linker table into train/val/test CSVs.

    Reads `table_path`, annotates each row with linker/fragment atom counts
    and component counts, keeps rows with more than two fragments, then does a
    molecule-level stratified split (by max linker size + linker count per
    molecule): 200 molecules for test, 200 for val, the rest for train.
    Writes the three CSVs and prints the split sizes.
    """
    table = pd.read_csv(table_path)
    table = table.drop_duplicates(['molecule', 'linker'])
    linker_sizes = []
    fragment_sizes = []
    number_of_linkers = []
    number_of_fragments = []
    for (linker_smi, fragments_smi) in tqdm(table[['linker', 'fragments']].values):
        # RDKit parse; GetNumAtoms counts atoms over all components of a dotted SMILES.
        linker = Chem.MolFromSmiles(linker_smi)
        fragments = Chem.MolFromSmiles(fragments_smi)
        linker_sizes.append(linker.GetNumAtoms())
        fragment_sizes.append(fragments.GetNumAtoms())
        # '.'-separated SMILES components give the number of disconnected pieces.
        number_of_linkers.append(len(linker_smi.split('.')))
        number_of_fragments.append(len(fragments_smi.split('.')))
    table['linker_size'] = linker_sizes
    table['fragment_size'] = fragment_sizes
    table['num_linkers'] = number_of_linkers
    table['num_fragments'] = number_of_fragments
    # Keep only rows with strictly more than two fragments.
    table = table[(table.num_fragments > 2)]
    # One stratification label per molecule: "<max linker_size>_<max num_linkers>".
    grouped = table[['molecule', 'linker_size', 'num_linkers']].groupby('molecule').max().reset_index()
    grouped['stratify'] = ((grouped.linker_size.astype(str) + '_') + grouped.num_linkers.astype(str))
    counts = grouped['stratify'].value_counts()
    # Collapse labels with < 10 molecules into a single 'rare' bucket so
    # stratified splitting does not fail on tiny classes.
    rare = set(counts[(counts < 10)].index.values)
    grouped['stratify'] = grouped['stratify'].apply((lambda g: ('rare' if (g in rare) else g)))
    smiles = grouped.molecule.values
    stratify = grouped.stratify.values
    (train_smi, test_smi, train_strat, test_strat) = train_test_split(smiles, stratify, test_size=200, stratify=stratify, random_state=42)
    (train_smi, val_smi, train_strat, val_strat) = train_test_split(train_smi, train_strat, test_size=200, stratify=train_strat, random_state=42)
    # Leakage check: the three molecule sets must be disjoint.
    assert (len((set(val_smi) & set(test_smi))) == 0)
    assert (len((set(val_smi) & set(train_smi))) == 0)
    assert (len((set(test_smi) & set(train_smi))) == 0)
    train_data = table[table.molecule.isin(train_smi)]
    val_data = table[table.molecule.isin(val_smi)]
    test_data = table[table.molecule.isin(test_smi)]
    print(f'Train size: {len(train_smi)} molecules, {len(train_data)} examples')
    print(f'Val size: {len(val_smi)} molecules, {len(val_data)} examples')
    print(f'Test size: {len(test_smi)} molecules, {len(test_data)} examples')
    train_data.to_csv(train_path)
    val_data.to_csv(val_path)
    test_data.to_csv(test_path)
def H(a, x):
    """Approximate line-profile function H(a, x) for small damping `a`.

    `x` may be a scalar or a numpy array (must be nonzero — the correction
    term divides by x**2). Returns the Gaussian core exp(-x**2) minus a
    first-order damping correction scaled by a / sqrt(pi).
    """
    x2 = x ** 2
    gauss = np.exp(-x2)                       # zeroth-order term H0
    q = 1.5 / x2
    poly = (4.0 * x2 * x2 + 7.0 * x2 + 4.0) + q
    correction = ((a / np.sqrt(np.pi)) / x2) * ((gauss * gauss * poly - q) - 1)
    return gauss - correction
def medium_oshi_zumo_nfsp_avg_policy_params(env: MultiAgentEnv) -> Dict[(str, Any)]:
    """RLlib trainer config for the NFSP average policy on medium Oshi-Zumo.

    GPU counts come from the WORKER_GPU_NUM env var (default 0); the model is
    a 128x128 ReLU MLP with a valid-action-masking custom model for `env`.
    """
    gpu_count = float(os.getenv('WORKER_GPU_NUM', 0.0))
    model_config = merge_dicts(MODEL_DEFAULTS, {
        'fcnet_activation': 'relu',
        'fcnet_hiddens': [128, 128],
        'custom_model': get_valid_action_fcn_class_for_env(env=env),
    })
    return {
        'framework': 'torch',
        'num_gpus': gpu_count,
        'num_workers': 0,
        'num_gpus_per_worker': gpu_count,
        'num_envs_per_worker': 1,
        'learning_starts': 16000,
        'train_batch_size': 2048,
        'lr': 0.1,
        'model': model_config,
    }
def svm_predict(arg1, arg2=None, arg3=None, arg4=None, arg5=None):
    """Dispatch a prediction call into the thundersvm C library via ctypes.

    Two calling modes (argument names are opaque in this source):
    - `arg2` truthy: `arg1` (floats) and `arg2` (byte strings) are marshalled
      into C arrays and loaded with load_from_python_interface, then
      thundersvm_predict_after_parse is called with `arg3`/`arg4` (paths or
      names, encoded to UTF-8 — presumably model/output files; TODO confirm)
      and the optional whitespace-separated extra options in `arg5`.
    - `arg2` falsy: `arg1` is treated as a full thundersvm-predict command
      line and forwarded to thundersvm_predict argv-style.
    """
    if arg2:
        # Marshal the Python sequences into C arrays for the FFI call.
        arg1_array = (c_float * len(arg1))()
        arg1_array[:] = arg1
        arg2_array = (c_char_p * len(arg2))()
        arg2_array[:] = arg2
        thundersvm.load_from_python_interface(arg1_array, arg2_array, len(arg1_array))
        if arg5:
            # Extra options: split an encoded string into a C char* array.
            arg5_list = arg5.encode('utf-8').split()
            arg5_array = (c_char_p * len(arg5_list))()
            arg5_array[:] = arg5_list
            thundersvm.thundersvm_predict_after_parse(arg3.encode('utf-8'), arg4.encode('utf-8'), arg5_array, len(arg5_array))
        else:
            arg5_array = None
            thundersvm.thundersvm_predict_after_parse(arg3.encode('utf-8'), arg4.encode('utf-8'), arg5_array, 0)
    else:
        # Command-line mode: build argv with the program name at index 0.
        param_list = arg1.split()
        param_list.insert(0, 'thundersvm-predict')
        param_array = (c_char_p * len(param_list))()
        param_array[:] = param_list
        thundersvm.thundersvm_predict(len(param_list), param_array)
class Div255Input(Module):
def __init__(self, inplace: bool=True, dtype: dtype=torch.get_default_dtype()) -> None:
super().__init__()
self.inplace = inplace
self.dtype = dtype
def forward(self, x: (Tensor | List[Tensor])) -> Tensor:
if (type(x) is Tensor):
return div_255(x, inplace=self.inplace, dtype=self.dtype)
return [div_255(el, inplace=self.inplace, dtype=self.dtype) for el in x]
def __repr__(self):
return f'{__class__.__name__}(inplace={self.inplace}, dtype={self.dtype})' |
class ImageNet21KParser():
    """Find ImageNet-21k class names mentioned in free-form sentences.

    Builds a lemmatized-name -> class-index lookup from
    datasets/class_names/imagenet-21k.txt and matches whole lemmatized
    phrases inside a lemmatized sentence. Optionally (add_adj) also collects
    adjectives from the sentence via NLTK POS tagging.
    """
    def __init__(self, add_adj=False):
        self.nlp = spacy.load('en_core_web_sm')
        self.look_up = {}  # lemmatized class name -> class index
        with open('datasets/class_names/imagenet-21k.txt') as f:
            class_names = f.read()
        # NOTE: split() is whitespace-based — multi-word names must use '_'
        # in the file (converted to spaces below).
        class_names = class_names.split()
        self.class_names = ([''] * len(class_names))
        self.add_adj = add_adj
        for (i, synonym) in enumerate(class_names):
            synonym = synonym.lower()
            synonym = synonym.replace('_', ' ')
            self.class_names[i] = synonym
            doc = self.nlp(synonym)
            lemma_s = []
            for token in doc:
                word = token.lemma_
                # Drop everything from an opening parenthesis onward —
                # presumably a qualifier suffix in the name; TODO confirm.
                if word.startswith('('):
                    break
                lemma_s.append(word)
            lemma_s = ' '.join(lemma_s)
            # Re-join hyphens that tokenization split into ' - '.
            lemma_s = lemma_s.replace(' - ', '-')
            self.look_up[lemma_s] = i
    def parse(self, sentence):
        """Return (matched names/adjectives, matched class ids) for `sentence`."""
        sentence = sentence.lower()
        doc = self.nlp(sentence)
        lemma_sentence = []
        for token in doc:
            lemma_sentence.append(token.lemma_)
        lemma_sentence = ' '.join(lemma_sentence)
        nns = []
        category_ids = []
        # Whole-phrase match: interior (space-padded), prefix, suffix or exact.
        for s in self.look_up:
            if ((' {} '.format(s) in lemma_sentence) or lemma_sentence.startswith((s + ' ')) or lemma_sentence.endswith((' ' + s)) or (lemma_sentence == s)):
                nns.append(s)
                category_ids.append(self.look_up[s])
        if self.add_adj:
            # Collect adjectives (JJ/JJR/JJS) not already matched as class names.
            words = nltk.word_tokenize(sentence)
            words = [word for word in words if (word not in set(stopwords.words('english')))]
            tagged = nltk.pos_tag(words)
            for (word, tag) in tagged:
                if (tag in ['JJ', 'JJR', 'JJS']):
                    if (word not in nns):
                        nns.append(word)
        return (nns, category_ids)
class GlobalContext(nn.Module):
    """Global Context (GC) attention block (GCNet, arXiv:1904.11492).

    Pools a global context vector — via softmax spatial attention when
    `use_attn`, otherwise global average pooling — and fuses it back into the
    input with a channel-wise scale (`fuse_scale`) and/or add (`fuse_add`) MLP.
    """

    def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=(1.0 / 8), rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
        super(GlobalContext, self).__init__()
        act_layer = get_act_layer(act_layer)
        # 1x1 conv producing one attention logit per spatial position.
        self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None
        if rd_channels is None:
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.0)
        self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) if fuse_add else None
        self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) if fuse_scale else None
        self.gate = create_act_layer(gate_layer)
        self.init_last_zero = init_last_zero
        self.reset_parameters()

    def reset_parameters(self):
        if self.conv_attn is not None:
            nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
        if self.mlp_add is not None:
            # Zero-init the additive branch so the block starts as identity.
            nn.init.zeros_(self.mlp_add.fc2.weight)

    def forward(self, x):
        B, C, H, W = x.shape
        if self.conv_attn is not None:
            attn = self.conv_attn(x).reshape(B, 1, H * W)           # (B, 1, HW) logits
            attn = F.softmax(attn, dim=-1).unsqueeze(3)             # (B, 1, HW, 1)
            # FIX: restored the matmul operator that was missing between the
            # reshaped features and the attention weights.
            context = x.reshape(B, C, H * W).unsqueeze(1) @ attn    # (B, 1, C, 1) via (B,C,HW)x(B,1,HW,1)? -> broadcasted matmul
            context = context.view(B, C, 1, 1)
        else:
            context = x.mean(dim=(2, 3), keepdim=True)
        if self.mlp_scale is not None:
            mlp_x = self.mlp_scale(context)
            x = x * self.gate(mlp_x)
        if self.mlp_add is not None:
            mlp_x = self.mlp_add(context)
            x = x + mlp_x
        return x
def val(epoch):
    """Run one validation epoch and checkpoint the model on RMSE improvement.

    Relies on module-level globals: net, valloader, criterion, optimizer,
    scheduler, args, use_cuda, best_rmse and utils. Accumulates depth-error
    metrics per batch, logs results, saves best_model.pth when the average
    RMSE improves, and steps the LR scheduler on the epoch's average MAE.
    """
    global best_rmse
    is_best_model = False
    net.eval()
    total_step_val = 0
    eval_loss = 0.0
    # Running sums for each depth-estimation error metric.
    error_sum_val = {'MSE': 0, 'RMSE': 0, 'ABS_REL': 0, 'LG10': 0, 'MAE': 0, 'DELTA1.02': 0, 'DELTA1.05': 0, 'DELTA1.10': 0, 'DELTA1.25': 0, 'DELTA1.25^2': 0, 'DELTA1.25^3': 0}
    tbar = tqdm(valloader)
    for (batch_idx, sample) in enumerate(tbar):
        [inputs, targets] = [sample['rgbd'], sample['depth']]
        with torch.no_grad():
            if use_cuda:
                (inputs, targets) = (inputs.cuda(), targets.cuda())
            # NOTE(review): Variable(..., volatile=True) is a pre-0.4 PyTorch idiom;
            # under torch.no_grad() it is redundant on modern versions — confirm
            # the pinned torch version before removing.
            (inputs, targets) = (Variable(inputs, volatile=True), Variable(targets))
            outputs = net(inputs)
            loss = criterion(outputs, targets)
        targets = targets.data.cpu()
        outputs = outputs.data.cpu()
        loss = loss.data.cpu()
        eval_loss += loss.item()
        error_str = ('Epoch: %d, loss=%.4f' % (epoch, (eval_loss / (batch_idx + 1))))
        tbar.set_description(error_str)
        error_result = utils.evaluate_error(gt_depth=targets, pred_depth=outputs)
        total_step_val += args.batch_size_eval
        # Fold this batch's errors into the running epoch average.
        error_avg = utils.avg_error(error_sum_val, error_result, total_step_val, args.batch_size_eval)
        utils.print_error('eval_result: step(average)', epoch, batch_idx, loss, error_result, error_avg, print_out=True)
    # After the loop, error_avg holds the final epoch averages.
    if utils.updata_best_model(error_avg, best_rmse):
        is_best_model = True
        best_rmse = error_avg['RMSE']
    for param_group in optimizer.param_groups:
        old_lr = float(param_group['lr'])
    utils.log_result_lr(args.save_dir, error_avg, epoch, old_lr, is_best_model, 'eval')
    if is_best_model:
        print(('==> saving best model at epoch %d' % epoch))
        best_model_pytorch = os.path.join(args.save_dir, 'best_model.pth')
        torch.save(net.state_dict(), best_model_pytorch)
    # ReduceLROnPlateau-style step keyed on the epoch's average MAE.
    scheduler.step(error_avg['MAE'], epoch)
def main(source_root):
    """Resize/pad every image under `source_root` into a mirrored tree.

    Removes .DS_Store litter, mirrors the per-identity subfolder layout under
    `dest_root`, deletes unreadable images, pads/resizes images whose longer
    side exceeds 512 via process_image, and writes everything out as .jpg.
    """
    dest_root = '/media/pc/6T/jasonjzhao/data/MS-Celeb-1M_Resized'
    mkdir(dest_root)
    # Delete macOS .DS_Store files inside source_root, then restore the cwd.
    cwd = os.getcwd()
    os.chdir(source_root)
    os.system("find . -name '*.DS_Store' -type f -delete")
    os.chdir(cwd)
    if not os.path.isdir(dest_root):
        os.mkdir(dest_root)
    for subfolder in tqdm(os.listdir(source_root)):
        if not os.path.isdir(os.path.join(dest_root, subfolder)):
            os.mkdir(os.path.join(dest_root, subfolder))
        for image_name in os.listdir(os.path.join(source_root, subfolder)):
            image_path = os.path.join(source_root, subfolder, image_name)
            print('Processing\t{}'.format(image_path))
            img = cv2.imread(image_path)
            if img is None:
                # FIX: delete (and report) the file *path*; the original passed
                # the None image object to both the message and os.remove().
                print('damaged image %s, del it' % image_path)
                os.remove(image_path)
                continue
            size = img.shape
            h, w = size[0], size[1]
            # Only images with a side longer than 512 get the pad/resize pass.
            img_pad = process_image(img) if max(w, h) > 512 else img
            cv2.imwrite(os.path.join(dest_root, subfolder, (image_name.split('.')[0] + '.jpg')), img_pad)
def _pad_kv_cache_view(t: torch.Tensor, len: int, device: torch.device, pos: int=2) -> torch.Tensor:
cur_size = list(t.size())
if (cur_size[pos] < len):
zeros = get_zero_tensor(len, cur_size, device, pos)
padded_view = torch.cat((zeros, t), dim=pos)
return padded_view
elif (cur_size[pos] > len):
padded_view = t.narrow(pos, (cur_size[pos] - len), len)
return padded_view
else:
return t |
class PyramidPooling(nn.Module):
    """PSPNet-style pyramid pooling head.

    Runs the input through five parallel branches: an identity branch plus
    four pooling branches at 1x1, 2x2, 3x3 and 6x6 grids, each reducing to
    in_channels // 4 channels and upsampling back to `upscale_out_size`.
    """

    def __init__(self, in_channels, upscale_out_size):
        super(PyramidPooling, self).__init__()
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert ((in_channels % 4) == 0)
        mid_channels = (in_channels // 4)
        self.branches = Concurrent()
        # Branch 1 passes the input through untouched.
        self.branches.add_module('branch1', Identity())
        # Branches 2-5 pool at increasingly fine grids.
        for idx, grid in enumerate(pool_out_sizes, start=2):
            branch = PyramidPoolingBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                pool_out_size=grid,
                upscale_out_size=upscale_out_size)
            self.branches.add_module('branch{}'.format(idx), branch)

    def forward(self, x):
        return self.branches(x)
class Logger():
    """Thin convenience wrapper around a TensorBoard summary writer."""

    def __init__(self, log_dir, n_logged_samples=10, summary_writer=SummaryWriter):
        self._log_dir = log_dir
        print('')
        print('logging outputs to ', log_dir)
        print('')
        self._n_logged_samples = n_logged_samples
        # Flush aggressively so logs show up promptly.
        self._summ_writer = summary_writer(log_dir, flush_secs=1, max_queue=1)

    def log_scalar(self, scalar, name, step_):
        self._summ_writer.add_scalar(f'{name}', scalar, step_)

    def log_scalars(self, scalar_dict, group_name, step, phase):
        self._summ_writer.add_scalars(f'{group_name}_{phase}', scalar_dict, step)

    def log_image(self, image, name, step):
        assert (len(image.shape) == 3)
        self._summ_writer.add_image(f'{name}', image, step)

    def log_video(self, video_frames, name, step, fps=10):
        assert (len(video_frames.shape) == 5), 'Need [N, T, C, H, W] input tensor for video logging!'
        self._summ_writer.add_video(f'{name}', video_frames, step, fps=fps)

    def log_paths_as_videos(self, paths, step, max_videos_to_save=2, fps=10, video_title='video'):
        """Log rollout image observations as one stacked video tensor."""
        # Reorder each path's frames from [T, H, W, C] to [T, C, H, W].
        videos = [np.transpose(p['image_obs'], [0, 3, 1, 2]) for p in paths]
        max_videos_to_save = np.min([max_videos_to_save, len(videos)])
        # Longest clip among those being saved determines the padded length.
        max_length = videos[0].shape[0]
        for i in range(max_videos_to_save):
            if videos[i].shape[0] > max_length:
                max_length = videos[i].shape[0]
        # Pad shorter clips by repeating their final frame.
        for i in range(max_videos_to_save):
            short_by = max_length - videos[i].shape[0]
            if short_by > 0:
                padding = np.tile(videos[i][-1:], (short_by, 1, 1, 1))
                videos[i] = np.concatenate([videos[i], padding], 0)
        stacked = np.stack(videos[:max_videos_to_save], 0)
        self.log_video(stacked, video_title, step, fps=fps)

    def log_figures(self, figure, name, step, phase):
        assert (figure.shape[0] > 0), 'Figure logging requires input shape [batch x figures]!'
        self._summ_writer.add_figure(f'{name}_{phase}', figure, step)

    def log_figure(self, figure, name, step, phase):
        self._summ_writer.add_figure(f'{name}_{phase}', figure, step)

    def log_graph(self, array, name, step, phase):
        # plot_graph renders the array to an image before logging.
        im = plot_graph(array)
        self._summ_writer.add_image(f'{name}_{phase}', im, step)

    def dump_scalars(self, log_path=None):
        if log_path is None:
            log_path = os.path.join(self._log_dir, 'scalar_data.json')
        self._summ_writer.export_scalars_to_json(log_path)

    def flush(self):
        self._summ_writer.flush()
_metaclass(ABCMeta)
# NOTE(review): the bare call above looks like a decompiled "@_metaclass(ABCMeta)"
# (six-style metaclass injection) decorator that lost its "@" — confirm against
# the original source.
class Discretizer(object):
    """Abstract interface for discretizing continuous values into bins."""
    def get_nr_bin(self):
        # Number of discrete bins; implemented by subclasses.
        pass
    def get_bin(self, v):
        # Bin index for value `v`; implemented by subclasses.
        pass
def get_reporting_integration_callbacks(report_to):
    """Map reporting integration names to their callback classes.

    Raises ValueError on the first name missing from INTEGRATION_TO_CALLBACK.
    """
    callbacks = []
    for name in report_to:
        if name not in INTEGRATION_TO_CALLBACK:
            supported = ', '.join(INTEGRATION_TO_CALLBACK.keys())
            raise ValueError(f"{name} is not supported, only {supported} are supported.")
        callbacks.append(INTEGRATION_TO_CALLBACK[name])
    return callbacks
def sampling(blm, w_mu=None, N=1, alpha=1.0):
    """Draw `N` weight samples around `w_mu` for a Bayesian linear model.

    Uses the upper-triangular factor stored in blm.stats[0] to shape standard
    normal noise (scaled by `alpha`) and offsets it by the posterior mean.
    """
    if w_mu is None:
        w_mu = get_post_params_mean(blm)
    # One vector when N == 1, otherwise an (nbasis, N) matrix of draws.
    shape = (blm.nbasis,) if N == 1 else (blm.nbasis, N)
    noise = np.random.randn(*shape) * alpha
    U = blm.stats[0]
    shaped = scipy.linalg.solve_triangular(U, noise, lower=False, overwrite_b=False, check_finite=False)
    return (shaped.transpose() + w_mu).transpose()
class BaseVideoDataset(torch.utils.data.Dataset):
    """Common interface for video tracking datasets.

    Subclasses populate `sequence_list` / `class_list` and implement the
    methods that raise NotImplementedError; the defaults describe a real
    (non-synthetic) video dataset without class, occlusion or segmentation
    annotations.
    """

    def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe):
        self.name = name                # dataset identifier
        self.root = root                # dataset root directory
        self.image_loader = image_loader
        self.sequence_list = []         # filled in by subclasses
        self.class_list = []            # filled in by subclasses

    def __len__(self):
        return self.get_num_sequences()

    def __getitem__(self, index):
        # Direct indexing is not meaningful for the base class.
        return None

    def is_video_sequence(self):
        """True: entries are video sequences, not still images."""
        return True

    def is_synthetic_video_dataset(self):
        """False: sequences are real footage by default."""
        return False

    def get_name(self):
        raise NotImplementedError

    def get_num_sequences(self):
        return len(self.sequence_list)

    def has_class_info(self):
        return False

    def has_occlusion_info(self):
        return False

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def get_sequences_in_class(self, class_name):
        raise NotImplementedError

    def has_segmentation_info(self):
        return False

    def get_sequence_info(self, seq_id):
        raise NotImplementedError

    def get_frames(self, seq_id, frame_ids, anno=None):
        raise NotImplementedError
def find_and_remove_errors(mode, out_root, ref_bin_xy, ref_data, s):
    """Drop reference samples whose binned position is far from ground truth.

    Compares each sample's true (easting, northing) against the centre of its
    bin (looked up by floor of the 'l' coordinate), writes an error histogram
    once per (s, mode), removes samples with error >= 5.0 from `ref_data`
    in place, saves the cleaned CSV plus raw/clean error statistics, and
    returns (number of remaining samples, cleaned ref_data).
    """
    true_ref_xy = np.array([[e, n] for (e, n) in zip(ref_data['easting'], ref_data['northing'])])
    binned_ref_xy = np.array([ref_bin_xy[math.floor(l)] for l in ref_data['l']])
    # Per-sample Euclidean distance between true and binned positions.
    ref_errors = np.linalg.norm((true_ref_xy - binned_ref_xy), axis=1)
    ref_hist_path = os.path.join(out_root, '{}_{}_bin_errors.png'.format(s, mode))
    if (not os.path.exists(ref_hist_path)):
        # Histogram is only written the first time (matplotlib global figure state).
        plt.clf()
        plt.hist(ref_errors, bins=1000, histtype='step')
        plt.savefig(ref_hist_path)
    # Keep only samples whose error is below 5.0 (units presumably metres — TODO confirm).
    for key in ref_data.keys():
        ref_data[key] = [el for (el, er) in zip(ref_data[key], ref_errors) if (er < 5.0)]
    save_csv(ref_data, os.path.join(out_root, '{}_{}.csv'.format(s, mode)))
    stats = dict()
    # "raw" statistics cover all samples; "clean" only the retained subset.
    stats['raw_mean_error'] = np.mean(ref_errors)
    stats['raw_median_error'] = np.median(ref_errors)
    stats['raw_max_error'] = np.max(ref_errors)
    stats['raw_min_error'] = np.min(ref_errors)
    stats['raw_error_std'] = np.std(ref_errors)
    clean_errors = [er for er in ref_errors if (er < 5.0)]
    stats['clean_mean_error'] = np.mean(clean_errors)
    stats['clean_median_error'] = np.median(clean_errors)
    stats['clean_max_error'] = np.max(clean_errors)
    stats['clean_min_error'] = np.min(clean_errors)
    stats['clean_error_std'] = np.std(clean_errors)
    save_csv(stats, os.path.join(out_root, '{}_{}_errors.csv'.format(s, mode)))
    return (len(ref_data['t']), ref_data)
class BrokenRecordableEnv(object):
    """Minimal env stub whose metadata advertises render modes but whose
    render method produces nothing — used to exercise recorder error paths."""

    metadata = {'render.modes': [None, 'rgb_array']}

    def render(self, mode=None):
        """No-op render; returns None regardless of the requested mode."""
        return None
def main(config):
    """Entry point: prepare output directories, build the loader, and train."""
    # Speed up fixed-size convolution workloads.
    cudnn.benchmark = True
    for directory in (config.log_dir, config.model_save_dir, config.sample_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)
    vcc_loader = get_loader(hparams)
    solver = Solver(vcc_loader, config, hparams)
    solver.train()
def _dreg(model, x, K):
    """Compute DReG-style importance weights for `K` posterior samples of `x`.

    Posterior parameters are detached so gradients flow only through the
    samples, then lw = log p(z) + log p(x|z) - log q(z|x) per sample.
    """
    _, px_z, zs = model(x, K)
    prior_lp = model.pz(*model.pz_params).log_prob(zs).sum(-1)
    lik_lp = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
    # Stop gradients through the variational parameters (the DReG trick).
    stopped_params = [p.detach() for p in model.qz_x_params]
    post_lp = model.qz_x(*stopped_params).log_prob(zs).sum(-1)
    lw = prior_lp + lik_lp.sum(-1) - post_lp
    return (lw, zs)
_module()
# NOTE(review): the bare call above looks like a decompiled registration
# decorator (e.g. "@SOMETHING.register_module()") that lost its "@" and
# receiver — confirm against the original source.
class Loader():
    """Annotation-file loader base: loads data infos once and serves parsed items.

    `repeat` virtually enlarges the dataset length; `parser` is a config dict
    built into a parser object that extracts items by index. Subclasses must
    implement _load.
    """
    def __init__(self, ann_file, parser, repeat=1):
        # Validate eagerly so a bad config fails at construction time.
        assert isinstance(ann_file, str)
        assert isinstance(repeat, int)
        assert isinstance(parser, dict)
        assert (repeat > 0)
        assert osp.exists(ann_file), f'{ann_file} is not exist'
        self.ori_data_infos = self._load(ann_file)
        self.parser = build_parser(parser)
        self.repeat = repeat
    def __len__(self):
        # Virtual length: the loaded infos repeated `repeat` times.
        return (len(self.ori_data_infos) * self.repeat)
    def _load(self, ann_file):
        # Subclass hook: parse `ann_file` into the data-info structure.
        raise NotImplementedError
    def __getitem__(self, index):
        # `index` may exceed len(ori_data_infos) when repeat > 1; the parser
        # is presumably responsible for any wrap-around — TODO confirm.
        return self.parser.get_item(self.ori_data_infos, index)
    def __iter__(self):
        self._n = 0
        return self
    def __next__(self):
        if (self._n < len(self)):
            data = self[self._n]
            self._n += 1
            return data
        raise StopIteration
def calculate_fid_given_paths(paths, inception_path, low_profile=False):
    """Compute the FID between the two image sets at paths[0] and paths[1].

    Builds a TF1 session (with GPU memory growth enabled), extracts activation
    statistics (mean, covariance) for each path via _handle_path, and returns
    their Frechet distance. `inception_path` is unused here — presumably
    consumed by a caller or an earlier model-loading step; TODO confirm.
    Raises RuntimeError when either path does not exist.
    """
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        m1, s1 = _handle_path(paths[0], sess, low_profile=low_profile)
        m2, s2 = _handle_path(paths[1], sess, low_profile=low_profile)
        # Pure numpy computation — no session needed past this point.
        fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    # FIX: dropped the redundant sess.close(); the `with` block already
    # closes the session on exit.
    return fid_value
class custom_build_ext(build_ext):
    """build_ext variant that lets TensorflowExtension objects compile themselves
    and copies the resulting .so into the build tree."""

    def build_extension(self, ext):
        # Non-TF extensions fall back to the stock build path.
        if not isinstance(ext, TensorflowExtension):
            super(custom_build_ext, self).build_extension(ext)
            return
        ext.compile()
        filename = self.get_ext_filename(ext.name)
        if self.dry_run:
            return
        os.makedirs(path.join(self.build_lib, path.dirname(filename)), exist_ok=True)
        copy_file(filename, path.join(self.build_lib, filename), verbose=self.verbose, dry_run=self.dry_run)

    def get_ext_filename(self, fullname):
        # "pkg.mod" -> "pkg/mod.so" (platform path separator).
        return path.sep.join(fullname.split('.')) + '.so'
def _cifar100_to_cifar20(target):
_dict = {0: 4, 1: 1, 2: 14, 3: 8, 4: 0, 5: 6, 6: 7, 7: 7, 8: 18, 9: 3, 10: 3, 11: 14, 12: 9, 13: 18, 14: 7, 15: 11, 16: 3, 17: 9, 18: 7, 19: 11, 20: 6, 21: 11, 22: 5, 23: 10, 24: 7, 25: 6, 26: 13, 27: 15, 28: 3, 29: 15, 30: 0, 31: 11, 32: 1, 33: 10, 34: 12, 35: 14, 36: 16, 37: 9, 38: 11, 39: 5, 40: 5, 41: 19, 42: 8, 43: 8, 44: 15, 45: 13, 46: 14, 47: 17, 48: 18, 49: 10, 50: 16, 51: 4, 52: 17, 53: 4, 54: 2, 55: 0, 56: 17, 57: 4, 58: 18, 59: 17, 60: 10, 61: 3, 62: 2, 63: 12, 64: 12, 65: 16, 66: 12, 67: 1, 68: 9, 69: 19, 70: 2, 71: 10, 72: 0, 73: 1, 74: 16, 75: 12, 76: 9, 77: 13, 78: 15, 79: 13, 80: 16, 81: 19, 82: 2, 83: 4, 84: 6, 85: 19, 86: 5, 87: 5, 88: 8, 89: 19, 90: 18, 91: 1, 92: 2, 93: 15, 94: 6, 95: 0, 96: 17, 97: 8, 98: 14, 99: 13}
return _dict[target] |
def validation(data_iter, net):
    """Evaluate `net` on batches of (query, response, label) numpy arrays.

    Puts the network in eval mode, scores each pair with BCE loss and a 0.5
    decision threshold, and returns (mean loss, accuracy), each rounded to 4
    decimal places.

    FIX: the evaluation loop now runs under torch.no_grad() — the original
    built autograd graphs for every validation batch, wasting memory for
    gradients that are never used.
    """
    net.eval()
    losses, batch_num, acc, acc_num = 0, 0, 0, 0
    criterion = nn.BCELoss()
    with torch.no_grad():
        for batch_idx, batch in enumerate(data_iter):
            qbatch, rbatch, label = batch
            qbatch = torch.from_numpy(qbatch)
            rbatch = torch.from_numpy(rbatch)
            label = torch.from_numpy(label).float()
            batch_size = qbatch.shape[0]
            if torch.cuda.is_available():
                qbatch, rbatch = qbatch.cuda(), rbatch.cuda()
                label = label.cuda()
            scores = net(qbatch, rbatch)
            loss = criterion(scores, label)
            # Threshold at 0.5 and count exact agreements with the labels.
            preds = scores >= 0.5
            acc += torch.sum(preds.float() == label).item()
            acc_num += batch_size
            batch_num += 1
            losses += loss.item()
    return (round(losses / batch_num, 4), round(acc / acc_num, 4))
def gen_downsample(inchannel, outchannel, layer_num):
    """Yield the 1x1 conv + BatchNorm modules of a ResNet downsample path.

    The first stage (layer_num == 1) keeps spatial resolution (stride 1);
    later stages halve it (stride 2). The conv has no bias — BN follows.
    """
    stride = 1 if layer_num == 1 else 2
    yield nn.Conv2d(inchannel, outchannel, 1, stride=stride, bias=False)
    yield nn.BatchNorm2d(outchannel)
def _extract_reid_features(loader, model, use_gpu, batch_time):
    """Run `model` over a loader and collect per-image features + identity metadata."""
    feats, pid_list, cloth_list, cam_list = [], [], [], []
    for batch_idx, (imgs, pids, cloth_ids, camids, _) in enumerate(loader):
        if use_gpu:
            imgs = imgs.cuda()
        end = time.time()
        features = model(imgs)
        batch_time.update(time.time() - end)
        feats.append(features.data.cpu())
        pid_list.extend(pids)
        cloth_list.extend(cloth_ids)
        cam_list.extend(camids)
    return (torch.cat(feats, 0), np.asarray(pid_list), np.asarray(cloth_list), np.asarray(cam_list))

def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
    """Evaluate a re-ID model: extract query/gallery features, compute the
    squared-Euclidean distance matrix, and print mAP plus the CMC curve.

    Returns the distance matrix when `return_distmat` is set, otherwise the
    rank-1 CMC score. `ranks` is read-only (safe despite the mutable default).
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_cloth_ids, q_camids = _extract_reid_features(queryloader, model, use_gpu, batch_time)
        print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
        gf, g_pids, g_cloth_ids, g_camids = _extract_reid_features(galleryloader, model, use_gpu, batch_time)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
    print('==> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
    m, n = qf.size(0), gf.size(0)
    # ||q||^2 + ||g||^2 - 2 q.g, assembled in place.
    distmat = (torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) +
               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t())
    # FIX: the positional addmm_(beta, alpha, mat1, mat2) form was deprecated
    # and removed in modern PyTorch; use keyword beta/alpha instead.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()
    print('Computing CMC and mAP')
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_cloth_ids, g_cloth_ids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
    print('Results ')
    print('mAP: {:.1%}'.format(mAP))
    print('CMC curve')
    for r in ranks:
        print('Rank-{:<3}: {:.1%}'.format(r, cmc[(r - 1)]))
    print('')
    if return_distmat:
        return distmat
    return cmc[0]
class LabelEncoderTransformer(AutotabularPreprocessingAlgorithm):
    """Preprocessing step that label-encodes the target in fit and applies the
    fitted encoding in transform."""

    def __init__(self, random_state: Optional[np.random.RandomState]=None):
        self.random_state = random_state

    def fit(self, X: Optional[PIPELINE_DATA_DTYPE]=None, y: PIPELINE_DATA_DTYPE=None) -> 'LabelEncoderTransformer':
        # NOTE: the encoder is fitted on the target `y`, not on X.
        self.preprocessor = LabelEncoder(try_to_fit_numeric=False)
        self.preprocessor.fit(y)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    # FIX: these two take no `self` but were declared as plain methods, so an
    # instance call would bind the instance to `dataset_properties`. Marked
    # static (presumably a stripped decorator in this decompiled source).
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        return {'shortname': 'LabelEncoderTransformer', 'name': 'LabelEncoder Transformer', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        return ConfigurationSpace()
def gradient_update(scores, grads):
    """Return the score-weighted mean of the gradient tensors in `grads`.

    Computes sum_i(scores[i] * grads[i]) / len(grads); `scores` must have at
    least as many entries as `grads`.
    """
    count = len(grads)
    total = torch.zeros_like(grads[0])
    for idx in range(count):
        total = total + scores[idx] * grads[idx]
    return total / count
def parse_args():
    """Parse CLI options for VQA test-result generation and fold overrides
    into the module-level `config` (mutated in place).

    Returns a (args, config) tuple.
    """
    cli = argparse.ArgumentParser('Get Test Result of VQA Network')
    cli.add_argument('--cfg', type=str, help='path to answer net config yaml')
    cli.add_argument('--ckpt', type=str, help='path to checkpoint of answer net')
    cli.add_argument('--bs', type=int)
    cli.add_argument('--gpus', type=int, nargs='+')
    cli.add_argument('--model-dir', type=str, help='root path to store checkpoint')
    cli.add_argument('--result-path', type=str, help='path to store test result file.')
    cli.add_argument('--result-name', type=str)
    cli.add_argument('--split', default='test2015')
    args = cli.parse_args()
    # Apply each override only when the corresponding flag was supplied.
    if args.cfg is not None:
        update_config(args.cfg)
    if args.bs is not None:
        config.TEST.BATCH_IMAGES = args.bs
    if args.gpus is not None:
        config.GPUS = ','.join(str(gpu_id) for gpu_id in args.gpus)
    if args.split is not None:
        config.DATASET.TEST_IMAGE_SET = args.split
    if args.model_dir is not None:
        config.OUTPUT_PATH = os.path.join(args.model_dir, config.OUTPUT_PATH)
    return (args, config)
def export_fbx(pkl_path):
    """Convert a motion .pkl file to .fbx by running the Blender export script.

    The output path is derived from `pkl_path` by swapping the extension.

    FIX: replaced the interpolated os.system shell string with a
    subprocess.run argument list — paths containing spaces or shell
    metacharacters no longer break or get interpreted by the shell. Also
    renamed the local that shadowed the `input` builtin.
    """
    import subprocess
    input_path = pkl_path
    output_path = pkl_path.replace('.pkl', '.fbx')
    execute_python = '/apdcephfs/share_1227775/shingxchen/libs/blender_bpy/blender-2.93.2-linux-x64/blender'
    export_scripts = './scripts/fbx_output.py'
    # check=False mirrors os.system, which ignored the exit status.
    subprocess.run(
        [execute_python, '-noaudio', '--background', '--python', export_scripts,
         '--input', input_path, '--output', output_path],
        check=False)
class GolbalContextBlock(tf.keras.layers.Layer):
    """Global Context Block (GCNet-style) for Keras, NHWC layout.

    Pools a per-image global context vector — multi-head attention pooling
    ('att') or average pooling ('avg') — and fuses it back into the input via
    channel-wise add, concat or multiply. (The "Golbal" spelling is part of
    the public name and kept as-is.)
    """
    def __init__(self, inplanes, ratio, headers, pooling_type='att', att_scale=False, fusion_type='channel_add', **kwargs):
        super().__init__(name='GCB', **kwargs)
        assert (pooling_type in ['att', 'avg'])
        assert (fusion_type in ['channel_add', 'channel_concat', 'channel_mul'])
        # Channels must split evenly across attention heads.
        assert (((inplanes % headers) == 0) and (inplanes >= 8))
        self.headers = headers
        self.inplanes = inplanes
        self.ratio = ratio
        # Bottleneck width of the fusion MLP.
        self.planes = int((inplanes * ratio))
        self.pooling_type = pooling_type
        self.fusion_type = fusion_type
        self.att_scale = att_scale
        # Channels handled by each attention head.
        self.single_header_inplanes = int((inplanes / headers))
        if (self.pooling_type == 'att'):
            # 1x1 conv producing one attention logit per spatial position.
            self.conv_mask = tf.keras.layers.Conv2D(1, kernel_size=1, kernel_initializer=tf.initializers.he_normal())
        else:
            self.avg_pool = tf.keras.layers.AveragePooling2D(pool_size=1)
        # Fusion MLP: 1x1 conv -> LayerNorm -> ReLU -> 1x1 conv back to inplanes.
        if (self.fusion_type == 'channel_add'):
            self.channel_add_conv = tf.keras.Sequential([tf.keras.layers.Conv2D(self.planes, kernel_size=1, kernel_initializer=tf.initializers.he_normal()), tf.keras.layers.LayerNormalization([1, 2, 3]), tf.keras.layers.ReLU(), tf.keras.layers.Conv2D(self.inplanes, kernel_size=1, kernel_initializer=tf.initializers.he_normal())], name='channel_add_conv')
        elif (self.fusion_type == 'channel_concat'):
            self.channel_concat_conv = tf.keras.Sequential([tf.keras.layers.Conv2D(self.planes, kernel_size=1, kernel_initializer=tf.initializers.he_normal()), tf.keras.layers.LayerNormalization([1, 2, 3]), tf.keras.layers.ReLU(), tf.keras.layers.Conv2D(self.inplanes, kernel_size=1, kernel_initializer=tf.initializers.he_normal())], name='channel_concat_conv')
            self.cat_conv = tf.keras.layers.Conv2D(self.inplanes, kernel_size=1, kernel_initializer=tf.initializers.he_normal())
            self.layer_norm = tf.keras.layers.LayerNormalization(axis=[1, 2, 3])
        else:
            self.channel_mul_conv = tf.keras.Sequential([tf.keras.layers.Conv2D(self.planes, kernel_size=1, kernel_initializer=tf.initializers.he_normal()), tf.keras.layers.LayerNormalization([1, 2, 3]), tf.keras.layers.ReLU(), tf.keras.layers.Conv2D(self.inplanes, kernel_size=1, kernel_initializer=tf.initializers.he_normal())], name='channel_mul_conv')
    def spatial_pool(self, inputs: tf.Tensor):
        """Reduce (B, H, W, C) inputs to a (B, 1, 1, C) global context tensor."""
        B = tf.shape(inputs)[0]
        H = tf.shape(inputs)[1]
        W = tf.shape(inputs)[2]
        C = tf.shape(inputs)[3]
        if (self.pooling_type == 'att'):
            # Split channels into heads: (B*headers, H, W, C/headers).
            x = tf.reshape(inputs, shape=(B, H, W, self.headers, self.single_header_inplanes))
            x = tf.transpose(x, perm=(0, 3, 1, 2, 4))
            x = tf.reshape(x, shape=((B * self.headers), H, W, self.single_header_inplanes))
            input_x = x
            # Flatten spatial dims and move channels ahead of positions for the matmul.
            input_x = tf.reshape(input_x, shape=((B * self.headers), 1, (H * W), self.single_header_inplanes))
            input_x = tf.transpose(input_x, perm=[0, 1, 3, 2])
            context_mask = self.conv_mask(x)
            context_mask = tf.reshape(context_mask, shape=((B * self.headers), 1, (H * W), 1))
            if (self.att_scale and (self.headers > 1)):
                # NOTE(review): tf.sqrt on the plain Python int
                # single_header_inplanes likely needs a float cast (tf.sqrt
                # requires a float dtype) — confirm this branch is exercised.
                context_mask = (context_mask / tf.sqrt(self.single_header_inplanes))
            # Softmax over the H*W positions.
            context_mask = tf.keras.activations.softmax(context_mask, axis=2)
            # Attention-weighted sum of features, reassembled to (B, 1, 1, C).
            context = tf.matmul(input_x, context_mask)
            context = tf.reshape(context, shape=(B, 1, C, 1))
            context = tf.transpose(context, perm=(0, 1, 3, 2))
        else:
            context = self.avg_pool(inputs)
        return context
    def call(self, inputs, **kwargs):
        """Fuse the pooled context back into `inputs` per `fusion_type`."""
        context = self.spatial_pool(inputs)
        out = inputs
        if (self.fusion_type == 'channel_mul'):
            # Sigmoid-gated channel-wise rescaling.
            channel_mul_term = tf.sigmoid(self.channel_mul_conv(context))
            out = (channel_mul_term * out)
        elif (self.fusion_type == 'channel_add'):
            channel_add_term = self.channel_add_conv(context)
            out = (out + channel_add_term)
        else:
            # channel_concat: broadcast the context over H x W, concat on
            # channels, then project back and re-normalize.
            channel_concat_term = self.channel_concat_conv(context)
            B = tf.shape(out)[0]
            H = tf.shape(out)[1]
            W = tf.shape(out)[2]
            C = tf.shape(out)[3]
            out = tf.concat([out, tf.broadcast_to(channel_concat_term, shape=(B, H, W, C))], axis=(- 1))
            out = self.cat_conv(out)
            out = self.layer_norm(out)
            out = tf.keras.activations.relu(out)
        return out
class BloomForCausalLM(metaclass=DummyObject):
    # Auto-generated placeholder: instantiating this class without the 'torch'
    # backend installed raises a helpful error instead of a bare ImportError.
    # The real implementation lives in the torch-backed model module.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises immediately when torch is unavailable.
        requires_backends(self, ['torch'])
def parse_args():
    """Parse CLI arguments for pretraining llama2 with atorch FSDP.

    Covers model/dataset paths, sequence blocking, step budget, precision,
    batch size, optional PEFT/LoRA settings and gradient checkpointing.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Pretrain llama2 with atorch fsdp.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument('--dataset_path', type=str, default=None, help='A dir containing dataset with .arrow format.')
    parser.add_argument('--block_size', type=int, default=None, help='Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training. Default to the model max input length for single sentence inputs (take into account special tokens).')
    parser.add_argument('--max_steps', type=int, default=100, help='Max steps for training.')
    # NOTE(review): '--init_emtpy_offload' is misspelled ("emtpy") but is the
    # published CLI surface — renaming it would break existing launch scripts.
    parser.add_argument('--init_emtpy_offload', action='store_true', help='If passed, use init_empty_weights_with_disk_offload.')
    parser.add_argument('--precision', type=str, choices=['fp32', 'bf16_amp', 'fp16_amp', 'bf16'], default='bf16_amp')
    parser.add_argument('--per_device_train_batch_size', type=int, default=0, help='Batch size (per device) for the training dataloader.')
    # PEFT/LoRA options (used only when --peft_type is set).
    parser.add_argument('--peft_type', type=str, default=None, help='Whether use peft and use what type of peft.')
    parser.add_argument('--lora_r', type=int, default=8, help='Lora attention dimension.')
    parser.add_argument('--lora_alpha', type=int, default=16, help='The alpha parameter for Lora scaling.')
    parser.add_argument('--lora_dropout', type=float, default=0.05, help='The dropout probability for Lora layers.')
    parser.add_argument('--lora_target_modules', nargs='*', default=['q_proj', 'v_proj'], help='The names of the modules to apply Lora to.')
    parser.add_argument('--peft_task_type', type=str, default=TaskType.CAUSAL_LM, choices=[TaskType.SEQ_CLS, TaskType.SEQ_2_SEQ_LM, TaskType.CAUSAL_LM, TaskType.TOKEN_CLS], help='Peft task type.')
    parser.add_argument('--gradient_checkpointing', action='store_true', help='Use gradient checkpointing or not.')
    args = parser.parse_args()
    return args
class UniGCN(nn.Module):
    """Two-layer UniGCN for hypergraph node classification.

    A hidden UniGCNConv (with optional BN and dropout) feeds an output
    UniGCNConv that maps to `num_classes` logits.
    """

    def __init__(self, in_channels: int, hid_channels: int, num_classes: int, use_bn: bool=False, drop_rate: float=0.5) -> None:
        super().__init__()
        hidden = UniGCNConv(in_channels, hid_channels, use_bn=use_bn, drop_rate=drop_rate)
        # The final conv skips dropout/activation via is_last.
        output = UniGCNConv(hid_channels, num_classes, use_bn=use_bn, is_last=True)
        self.layers = nn.ModuleList([hidden, output])

    def forward(self, X: torch.Tensor, hg: 'dhg.Hypergraph') -> torch.Tensor:
        out = X
        for conv in self.layers:
            out = conv(out, hg)
        return out
class Dataset(ABC):
    """Abstract chunked-dataset base with JSON-config alternate constructors.

    Subclasses implement get_chunks/process_chunk/postprocess_metadata.

    FIX: the three from_* alternate constructors took `cls` but were not
    marked @classmethod (presumably stripped decorators in this decompiled
    source), so calling Dataset.from_default_config('x') bound 'x' to `cls`
    and crashed. They are now proper classmethods.
    """

    def __init__(self, name):
        self.name = name
        self.output_file = None   # set by callers/subclasses when writing output
        self.max_chunks = None    # optional cap on processed chunks

    @classmethod
    def from_default_config(cls, name):
        """Build from the packaged default_{name}.json config resource."""
        config = json.loads(importlib.resources.read_text(mapping, 'default_{name}.json'.format(name=name)))
        return cls(name, **config)

    @classmethod
    def from_config_file(cls, name, config_file):
        """Build from a JSON config file on disk."""
        with open(config_file, 'r') as cf:
            config = json.load(cf)
        return cls(name, **config)

    @classmethod
    def from_config_string(cls, name, config_string):
        """Build from a JSON config passed as a string."""
        config = json.loads(config_string)
        return cls(name, **config)

    def get_chunks(self, num_chunks):
        # Subclass hook: yield/return the dataset split into chunks.
        pass

    def process_chunk(self, chunk, ks, chunk_id):
        # Subclass hook: process one chunk.
        pass

    def postprocess_metadata(self, metadata):
        # Subclass hook: finalize collected metadata.
        pass
def get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params, border_val_seg=(- 1), pin_memory=True, seeds_train=None, seeds_val=None, regions=None):
    """Build the default train/val augmentation pipelines and wrap both loaders
    in MultiThreadedAugmenter instances.

    Args:
        dataloader_train / dataloader_val: batch generators to augment.
        patch_size: target spatial patch size for the spatial transform.
        params: augmentation parameter dict (nnU-Net style keys).
        border_val_seg: fill value for segmentation outside the patch.
        pin_memory: forwarded to the augmenters.
        seeds_train / seeds_val: per-thread RNG seeds.
        regions: if given, segmentation labels are converted to region maps.

    Returns:
        (batchgenerator_train, batchgenerator_val)
    """
    assert (params.get('mirror') is None), 'old version of params, use new keyword do_mirror'
    tr_transforms = []
    # Optional channel selection for data / seg.
    if (params.get('selected_data_channels') is not None):
        tr_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels')))
    if (params.get('selected_seg_channels') is not None):
        tr_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels')))
    # Pseudo-2D mode: fold z into the batch dim around the spatial transform.
    if ((params.get('dummy_2D') is not None) and params.get('dummy_2D')):
        tr_transforms.append(Convert3DTo2DTransform())
    tr_transforms.append(SpatialTransform(patch_size, patch_center_dist_from_border=None, do_elastic_deform=params.get('do_elastic'), alpha=params.get('elastic_deform_alpha'), sigma=params.get('elastic_deform_sigma'), do_rotation=params.get('do_rotation'), angle_x=params.get('rotation_x'), angle_y=params.get('rotation_y'), angle_z=params.get('rotation_z'), do_scale=params.get('do_scaling'), scale=params.get('scale_range'), border_mode_data=params.get('border_mode_data'), border_cval_data=0, order_data=3, border_mode_seg='constant', border_cval_seg=border_val_seg, order_seg=1, random_crop=params.get('random_crop'), p_el_per_sample=params.get('p_eldef'), p_scale_per_sample=params.get('p_scale'), p_rot_per_sample=params.get('p_rot'), independent_scale_for_each_axis=params.get('independent_scale_factor_for_each_axis')))
    if ((params.get('dummy_2D') is not None) and params.get('dummy_2D')):
        tr_transforms.append(Convert2DTo3DTransform())
    if params.get('do_gamma'):
        tr_transforms.append(GammaTransform(params.get('gamma_range'), False, True, retain_stats=params.get('gamma_retain_stats'), p_per_sample=params['p_gamma']))
    if params.get('do_mirror'):
        tr_transforms.append(MirrorTransform(params.get('mirror_axes')))
    if (params.get('mask_was_used_for_normalization') is not None):
        mask_was_used_for_normalization = params.get('mask_was_used_for_normalization')
        tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))
    tr_transforms.append(RemoveLabelTransform((- 1), 0))
    if ((params.get('move_last_seg_chanel_to_data') is not None) and params.get('move_last_seg_chanel_to_data')):
        tr_transforms.append(MoveSegAsOneHotToData(1, params.get('all_segmentation_labels'), 'seg', 'data'))
    # BUG FIX: the original condition was
    #   params.get(...) and (not None) and params.get(...)
    # where `(not None)` is always True — a garbled `is not None` check.
    if ((params.get('cascade_do_cascade_augmentations') is not None) and params.get('cascade_do_cascade_augmentations')):
        tr_transforms.append(ApplyRandomBinaryOperatorTransform(channel_idx=list(range((- len(params.get('all_segmentation_labels'))), 0)), p_per_sample=params.get('cascade_random_binary_transform_p'), key='data', strel_size=params.get('cascade_random_binary_transform_size')))
        # NOTE(review): `fill_with_other_class_p` is fed the *max_size_percent_threshold*
        # param and `dont_do_if_covers_more_than_X_percent` the *fill_with_other_class_p*
        # param — looks swapped; confirm against upstream before changing.
        tr_transforms.append(RemoveRandomConnectedComponentFromOneHotEncodingTransform(channel_idx=list(range((- len(params.get('all_segmentation_labels'))), 0)), key='data', p_per_sample=params.get('cascade_remove_conn_comp_p'), fill_with_other_class_p=params.get('cascade_remove_conn_comp_max_size_percent_threshold'), dont_do_if_covers_more_than_X_percent=params.get('cascade_remove_conn_comp_fill_with_other_class_p')))
    tr_transforms.append(RenameTransform('seg', 'target', True))
    if (regions is not None):
        tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    tr_transforms = Compose(tr_transforms)
    batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'), params.get('num_cached_per_thread'), seeds=seeds_train, pin_memory=pin_memory)
    # Validation: no spatial/intensity augmentation, only the formatting steps.
    val_transforms = []
    val_transforms.append(RemoveLabelTransform((- 1), 0))
    if (params.get('selected_data_channels') is not None):
        val_transforms.append(DataChannelSelectionTransform(params.get('selected_data_channels')))
    if (params.get('selected_seg_channels') is not None):
        val_transforms.append(SegChannelSelectionTransform(params.get('selected_seg_channels')))
    if ((params.get('move_last_seg_chanel_to_data') is not None) and params.get('move_last_seg_chanel_to_data')):
        val_transforms.append(MoveSegAsOneHotToData(1, params.get('all_segmentation_labels'), 'seg', 'data'))
    val_transforms.append(RenameTransform('seg', 'target', True))
    if (regions is not None):
        val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))
    val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
    val_transforms = Compose(val_transforms)
    # Validation uses half the worker threads (at least one).
    batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms, max((params.get('num_threads') // 2), 1), params.get('num_cached_per_thread'), seeds=seeds_val, pin_memory=pin_memory)
    return (batchgenerator_train, batchgenerator_val)
def test_perfect_dice_score():
    """Dice must be exactly 1 when there are no false positives or negatives."""
    assert metrics.dice(tp=75, fp=0, fn=0) == 1
def list_dtypes():
    """Return every Open3D core dtype, grouped float / signed int / unsigned int / bool."""
    float_dtypes = [o3c.float32, o3c.float64]
    int_dtypes = [o3c.int8, o3c.int16, o3c.int32, o3c.int64]
    uint_dtypes = [o3c.uint8, o3c.uint16, o3c.uint32, o3c.uint64]
    return float_dtypes + int_dtypes + uint_dtypes + [o3c.bool]
class Resnet18(nn.Module):
    """ResNet-18 trunk that returns the 1/8, 1/16 and 1/32 resolution feature maps.

    Weights are initialized from a torch checkpoint at ``path`` (the fully
    connected head of the checkpoint is skipped).
    """

    def __init__(self, path):
        super(Resnet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
        self.init_weight(path)

    def forward(self, x):
        """Return (feat8, feat16, feat32): outputs of layer2, layer3, layer4."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        out = self.layer1(out)
        feat8 = self.layer2(out)
        feat16 = self.layer3(feat8)
        feat32 = self.layer4(feat16)
        return (feat8, feat16, feat32)

    def init_weight(self, path):
        """Load pretrained weights from ``path``, skipping any 'fc' entries."""
        pretrained = torch.load(path)
        merged = self.state_dict()
        merged.update({k: v for (k, v) in pretrained.items() if 'fc' not in k})
        self.load_state_dict(merged)

    def get_params(self):
        """Split parameters into (weight-decayed, non-weight-decayed) groups."""
        wd_params, nowd_params = [], []
        for (name, module) in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                # Conv/linear weights get weight decay; their biases do not.
                wd_params.append(module.weight)
                if module.bias is not None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.modules.batchnorm._BatchNorm):
                # Norm-layer affine params are exempt from weight decay.
                nowd_params.extend(module.parameters())
        return (wd_params, nowd_params)
class PytorchGELUTanh(nn.Module):
    """GELU activation using PyTorch's native tanh approximation.

    Delegates to ``nn.functional.gelu(..., approximate='tanh')``, which is only
    available from torch 1.12.0 onward — construction fails on older versions.
    """

    def __init__(self):
        super().__init__()
        torch_version = version.parse(torch.__version__)
        if torch_version < version.parse('1.12.0'):
            raise ImportError(f'You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use PytorchGELUTanh. Please upgrade torch.')

    def forward(self, input: Tensor) -> Tensor:
        """Apply the tanh-approximated GELU elementwise."""
        return nn.functional.gelu(input, approximate='tanh')
class RandomVisionLabeledDataset(VisionDataset):
    """Synthetic labeled vision dataset: random normal inputs, random integer labels.

    ``size`` is the full data tensor shape; ``size[0]`` is the number of samples.
    """

    def __init__(self, size: Iterable[int], num_classes: int=10, transform: Optional[Module]=None):
        super().__init__('data/', transform=transform)
        self.length = size[0]
        self.data = torch.randn(size)
        # One integer label per sample, shape (N, 1), values in [0, num_classes).
        self.labels = torch.randint(num_classes, size=(size[0], 1))

    def __getitem__(self, index):
        """Return a dict with the (optionally transformed) input and its label."""
        sample = self.data[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return {'input': sample, 'label': self.labels[index]}

    def __len__(self):
        return self.length
class ResNetBackboneGN(ResNetBackbone):
    """ResNet backbone that uses GroupNorm instead of batch norm.

    ``init_backbone`` loads weights from a Detectron/caffe2-style pickle (a
    ``'blobs'`` dict of numpy arrays), transcribing each PyTorch state-dict key
    into its caffe2 counterpart before loading.
    """

    def __init__(self, layers, num_groups=32, in_channels=3):
        # norm_layer is a factory: given a channel count, build a GroupNorm
        # with a fixed number of groups.
        super().__init__(layers, norm_layer=(lambda x: nn.GroupNorm(num_groups, x)), in_channels=in_channels)

    def init_backbone(self, path):
        """Initialize from a caffe2/Detectron pickle at ``path`` (not a torch checkpoint)."""
        with open(path, 'rb') as f:
            # latin1 encoding is required to unpickle py2-era Detectron files.
            state_dict = pickle.load(f, encoding='latin1')
            state_dict = state_dict['blobs']
        our_state_dict_keys = list(self.state_dict().keys())
        new_state_dict = {}
        # GroupNorm weight/bias -> caffe2 'gn_s'/'gn_b' suffixes.
        gn_trans = (lambda x: ('gn_s' if (x == 'weight') else 'gn_b'))
        # layers.0 -> res2, layers.1 -> res3, ... (caffe2 stage names start at res2).
        layeridx2res = (lambda x: ('res' + str((int(x) + 2))))
        # conv1/conv2/conv3 inside a block -> branch2a/branch2b/branch2c.
        block2branch = (lambda x: ('branch2' + ('a', 'b', 'c')[(int(x[(- 1):]) - 1)]))
        for key in our_state_dict_keys:
            parts = key.split('.')
            transcribed_key = ''
            if (parts[0] == 'conv1'):
                transcribed_key = 'conv1_w'
            elif (parts[0] == 'bn1'):
                # The stem "bn1" is actually a GroupNorm in this backbone.
                transcribed_key = ('conv1_' + gn_trans(parts[1]))
            elif (parts[0] == 'layers'):
                # Only the base layers have pretrained blobs; skip the rest.
                if (int(parts[1]) >= self.num_base_layers):
                    continue
                transcribed_key = layeridx2res(parts[1])
                transcribed_key += (('_' + parts[2]) + '_')
                if (parts[3] == 'downsample'):
                    # Projection shortcut: 'branch1'; index 0 is the conv, the
                    # following module is its GroupNorm.
                    transcribed_key += 'branch1_'
                    if (parts[4] == '0'):
                        transcribed_key += 'w'
                    else:
                        transcribed_key += gn_trans(parts[5])
                else:
                    transcribed_key += (block2branch(parts[3]) + '_')
                    if ('conv' in parts[3]):
                        transcribed_key += 'w'
                    else:
                        transcribed_key += gn_trans(parts[4])
            new_state_dict[key] = torch.Tensor(state_dict[transcribed_key])
        # strict=False: keys skipped above keep their current (initialized) values.
        self.load_state_dict(new_state_dict, strict=False)
class Memory():
    """Simple rollout storage: parallel lists of per-step quantities."""

    def __init__(self):
        # One list per recorded quantity; entries across lists are aligned.
        self.actions = []
        self.states = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []

    def clear_memory(self):
        """Empty every buffer in place, so external references stay valid."""
        for buffer in (self.actions, self.states, self.logprobs, self.rewards, self.is_terminals):
            del buffer[:]
class UNet3D_CCT(nn.Module):
    """3-D U-Net for Cross-Consistency Training (CCT).

    A shared encoder feeds one main decoder plus three auxiliary decoders whose
    inputs are perturbed (feature noise / dropout / feature dropout); training
    enforces consistency between the four segmentation outputs.
    """

    def __init__(self, in_channels=1, out_channels=3, init_features=64):
        super(UNet3D_CCT, self).__init__()
        features = init_features
        # Encoder: channel count doubles at every level, spatial size halves.
        self.encoder1 = UNet3D_CCT._block(in_channels, features, name='enc1')
        self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder2 = UNet3D_CCT._block(features, (features * 2), name='enc2')
        self.pool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder3 = UNet3D_CCT._block((features * 2), (features * 4), name='enc3')
        self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder4 = UNet3D_CCT._block((features * 4), (features * 8), name='enc4')
        self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.bottleneck = UNet3D_CCT._block((features * 8), (features * 16), name='bottleneck')
        self.main_decoder = Decoder(features, out_channels)
        self.aux_decoder1 = Decoder(features, out_channels)
        self.aux_decoder2 = Decoder(features, out_channels)
        self.aux_decoder3 = Decoder(features, out_channels)

    def forward(self, x):
        """Return (main_seg, aux_seg1, aux_seg2, aux_seg3)."""
        enc1 = self.encoder1(x)
        enc2 = self.encoder2(self.pool1(enc1))
        enc3 = self.encoder3(self.pool2(enc2))
        enc4 = self.encoder4(self.pool3(enc3))
        bottleneck = self.bottleneck(self.pool4(enc4))
        main_seg = self.main_decoder(bottleneck, enc4, enc3, enc2, enc1)
        # BUG FIX: the original routed all three perturbed branches through
        # self.main_decoder, leaving aux_decoder1-3 constructed but unused.
        # CCT requires each perturbation to pass through its own aux decoder.
        aux_seg1 = self.aux_decoder1(FeatureNoise()(bottleneck), FeatureNoise()(enc4), FeatureNoise()(enc3), FeatureNoise()(enc2), FeatureNoise()(enc1))
        aux_seg2 = self.aux_decoder2(Dropout(bottleneck), Dropout(enc4), Dropout(enc3), Dropout(enc2), Dropout(enc1))
        aux_seg3 = self.aux_decoder3(FeatureDropout(bottleneck), FeatureDropout(enc4), FeatureDropout(enc3), FeatureDropout(enc2), FeatureDropout(enc1))
        return (main_seg, aux_seg1, aux_seg2, aux_seg3)

    @staticmethod
    def _block(in_channels, features, name):
        """Two (Conv3d 3x3x3 -> BatchNorm3d -> ReLU) stages; spatial size preserved."""
        return nn.Sequential(OrderedDict([((name + 'conv1'), nn.Conv3d(in_channels=in_channels, out_channels=features, kernel_size=3, padding=1, bias=True)), ((name + 'norm1'), nn.BatchNorm3d(num_features=features)), ((name + 'relu1'), nn.ReLU(inplace=True)), ((name + 'conv2'), nn.Conv3d(in_channels=features, out_channels=features, kernel_size=3, padding=1, bias=True)), ((name + 'norm2'), nn.BatchNorm3d(num_features=features)), ((name + 'relu2'), nn.ReLU(inplace=True))]))
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """Compute the loss for one batch: MLP on one-hot atom types plus the
    S2CNN on (geometry, atom types), compared against the batch targets.
    """
    features = data['features']
    raw_atom_types = features['atom_types'][(batch_idxs, ...)]
    geometry = Variable(features['geometry'][(batch_idxs, ...)])
    atom_types = Variable(raw_atom_types)
    # One-hot encoding is built from the raw tensor, before Variable wrapping.
    atom_types_one_hot = Variable(to_one_hot(raw_atom_types, NUM_ATOM_TYPES))
    targets = Variable(data['targets'][(batch_idxs, ...)])
    if torch.cuda.is_available():
        atom_types_one_hot = atom_types_one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        targets = targets.cuda(device_id)
    # Sum of both branch outputs forms the prediction.
    outputs = mlp(atom_types_one_hot)
    outputs += s2cnn(geometry, atom_types)
    return criterion(outputs, targets)
def test_solarmach_pfss():
    """Smoke test: build a SolarMACH plot backed by a PFSS solution.

    Fetches a GONG synoptic map for the given date, computes the PFSS solution,
    and checks that plot_pfss returns a matplotlib Figure.
    """
    date = '2021-4-1 1:00:00'
    body_list = ['Earth', 'STEREO-A']
    # Solar-wind speeds (one entry per body).
    vsw_list = [400, 400]
    sm = SolarMACH(date, body_list, vsw_list, reference_long=100, reference_lat=10)
    gong_map = get_gong_map(time=date, filepath=None)
    # Accept either pfsspy's or sunpy's GONG map class — which one is returned
    # depends on the installed library versions.
    assert (isinstance(gong_map, pfsspy.map.GongSynopticMap) or isinstance(gong_map, sunpy.map.sources.gong.GONGSynopticMap))
    pfss_solution = calculate_pfss_solution(gong_map=gong_map, rss=2.5, coord_sys='Carrington')
    assert isinstance(pfss_solution, pfsspy.output.Output)
    (fig, ax) = sm.plot_pfss(rss=2.5, pfss_solution=pfss_solution, vary=True, return_plot_object=True, markers='numbers', long_sector=[290, 328], long_sector_vsw=[400, 600], long_sector_color='red', reference_vsw=400.0)
    assert isinstance(fig, matplotlib.figure.Figure)
class Cmns(NERBase, MentionDetectionBase):
    """N-gram based mention detection (CMNS).

    Proposes every n-gram of a sentence (up to ``n`` words) that exists in the
    Wikipedia frequency table, preferring longer n-grams and discarding any
    n-gram that overlaps an already-accepted longer one.
    """

    def __init__(self, base_url, wiki_version, n=5):
        # Maximum n-gram length in words.
        self.__n = n
        super().__init__(base_url, wiki_version)

    def predict(self, sentence, sentences_doc):
        """Detect mentions in *sentence* (*sentences_doc* is unused here)."""
        self.__ngrams_overlap = []
        self.mentions = []
        self.rank_ens(sentence)
        return self.mentions

    def rank_ens(self, sentence):
        """Generate all n-grams and rank them from longest to shortest."""
        self.__get_ngrams(sentence)
        self.__recursive_rank_ens(self.__n)

    def __get_ngrams(self, sentence):
        # Bucket n-grams by word count so ranking can walk lengths n..1.
        # (A leftover debug print of the sentence was removed here.)
        self.__ngrams = defaultdict(list)
        for ngram in self.__gen_ngrams(sentence):
            self.__ngrams[len(ngram[0].split())].append(ngram)

    def __recursive_rank_ens(self, n):
        """Accept non-overlapping n-grams of length *n*, then recurse on n-1."""
        if (n == 0):
            return
        for (ngram, pos, end) in self.__ngrams[n]:
            if (not self.__is_overlapping(ngram, pos)):
                mention = self.preprocess_mention(ngram)
                freq = self.wiki_db.wiki(mention, 'wiki', 'freq')
                if freq:
                    self.mentions.append(Span(ngram, pos, end, freq, '#NGRAM#'))
                    self.__ngrams_overlap.append([ngram, pos])
        self.__recursive_rank_ens((n - 1))

    def __is_overlapping(self, ngram, pos_prop):
        """True if *ngram* at char offset *pos_prop* overlaps an accepted mention."""
        for (exist_ngram, exist_pos) in self.__ngrams_overlap:
            if (ngram in exist_ngram):
                range_exist = set(range(exist_pos, (exist_pos + len(exist_ngram))))
                range_new = set(range(pos_prop, (pos_prop + len(ngram))))
                if (len(range_exist.intersection(range_new)) > 0):
                    return True
        return False

    def __find_end_pos(self, ngram, sent, start_pos):
        """Char offset just past the last word of *ngram* inside *sent*."""
        end = start_pos
        for word in ngram.split():
            end = sent.find(word, end)
            end += len(word)
        return end

    def __find_start_pos(self, query, start):
        """Char offset of the (start+1)-th whitespace-separated word of *query*."""
        word_cnt = 0
        space_found = True
        pos = 0
        for char in query:
            if char.isspace():
                space_found = True
            elif space_found:
                # First char of a new word.
                space_found = False
                word_cnt += 1
                if (word_cnt == (start + 1)):
                    break
            pos += 1
        return pos

    def __build_ngram(self, ngram, terms, start, i):
        """Extend *ngram* with the following terms up to length min(i, n).

        Returns (ngram, aborted); aborted is True when a purely non-word term
        (punctuation/underscores) is encountered, invalidating the n-gram.
        """
        # Renamed from `quit`, which shadowed the builtin.
        aborted = False
        for j in range(1, min(i, self.__n)):
            lookup = terms[(start + j)]
            if (not re.match('^[_\\W]+$', lookup)):
                ngram += ' {}'.format(lookup)
            else:
                aborted = True
                break
        return (ngram, aborted)

    def __gen_ngrams(self, query):
        """Return all [ngram, start_char, end_char] triples for *query*."""
        terms = query.split()
        ngrams = []
        for i in range(1, (len(terms) + 1)):
            for start in range(0, ((len(terms) - i) + 1)):
                ngram = terms[start]
                # Skip n-grams that start with pure punctuation/underscores.
                if re.match('^[_\\W]+$', terms[start]):
                    continue
                (ngram, aborted) = self.__build_ngram(ngram, terms, start, i)
                if aborted:
                    continue
                pos = self.__find_start_pos(query, start)
                end = self.__find_end_pos(ngram, query, pos)
                ngrams.append([ngram, pos, end])
        return ngrams
class _TFVolume(tf.Module, Registrable):
    """Abstract base for box-volume computations.

    Subclasses implement ``__call__`` to map a box tensor to a volume tensor;
    ``log_scale`` indicates whether volumes are computed in log space.
    """

    def __init__(self, log_scale: bool=True, **kwargs: Any) -> None:
        super().__init__()
        # Whether the volume is reported in log space.
        self.log_scale = log_scale

    def __call__(self, box_tensor: TFBoxTensor) -> tf.Tensor:
        """Compute the volume of ``box_tensor``; must be overridden."""
        raise NotImplementedError
class BasicBlock(nn.Module):
    """ResNet basic block using Filter Response Normalization instead of BatchNorm.

    Two 3x3 convolutions with a residual shortcut; the shortcut is a projection
    (1x1 conv + FRN) whenever the stride or channel count changes.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = FilterResponseNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = FilterResponseNorm2d(planes)
        out_planes = self.expansion * planes
        if stride != 1 or in_planes != out_planes:
            # Projection shortcut to match shape of the residual branch.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),
                FilterResponseNorm2d(out_planes),
            )
        else:
            # Identity shortcut.
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += residual
        return F.relu(out)
def missing_whitespace(logical_line):
    """Yield (offset, "E231 ...") for each ',', ';' or ':' not followed by whitespace.

    Slices (a ':' inside an open '[') and one-element tuples like ``(3,)``
    are deliberately not flagged.
    """
    line = logical_line
    for index, char in enumerate(line[:-1]):
        next_char = line[index + 1]
        if char in ',;:' and next_char not in WHITESPACE:
            before = line[:index]
            if (char == ':' and before.count('[') > before.count(']') and
                    before.rfind('{') < before.rfind('[')):
                # ':' belongs to a slice, not a dict/annotation — allowed.
                continue
            if char == ',' and next_char == ')':
                # Trailing comma in a one-element tuple — allowed.
                continue
            yield index, ("E231 missing whitespace after '%s'" % char)
# BUG FIX: the skip conditions below existed only as bare tuple expressions
# (dead no-op statements) — clearly stripped @unittest.skipIf decorators;
# restored so the skips actually apply.
@unittest.skipIf(os.environ.get('CIRCLECI'), 'Require COCO data and model zoo.')
class TestCaffe2Export(unittest.TestCase):
    """Export model-zoo configs to Caffe2, round-trip through protobuf, and run inference."""

    def setUp(self):
        setup_logger()

    def _test_model(self, config_path, device='cpu'):
        # Imported lazily so the module can load without the export extras.
        from detectron2.export import Caffe2Model, add_export_config, export_caffe2_model
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(config_path))
        cfg = add_export_config(cfg)
        cfg.MODEL.DEVICE = device
        inputs = [{'image': self._get_test_image()}]
        model = build_model(cfg)
        DetectionCheckpointer(model).load(model_zoo.get_checkpoint_url(config_path))
        c2_model = export_caffe2_model(cfg, model, copy.deepcopy(inputs))
        with tempfile.TemporaryDirectory(prefix='detectron2_unittest') as d:
            # Round-trip: save protobufs + graph visualization, then reload.
            c2_model.save_protobuf(d)
            c2_model.save_graph(os.path.join(d, 'test.svg'), inputs=copy.deepcopy(inputs))
            c2_model = Caffe2Model.load_protobuf(d)
        # Run the reloaded model; just access the output to ensure inference works.
        c2_model(inputs)[0]['instances']

    def _get_test_image(self):
        try:
            file_name = DatasetCatalog.get('coco_2017_train')[0]['file_name']
            assert PathManager.exists(file_name)
        except Exception:
            self.skipTest('COCO dataset not available.')
        with PathManager.open(file_name, 'rb') as f:
            buf = f.read()
        img = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_COLOR)
        assert (img is not None), file_name
        # HWC (BGR) uint8 -> CHW tensor.
        return torch.from_numpy(img.transpose(2, 0, 1))

    def testMaskRCNN(self):
        self._test_model('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
    def testMaskRCNNGPU(self):
        self._test_model('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml', device='cuda')

    def testRetinaNet(self):
        self._test_model('COCO-Detection/retinanet_R_50_FPN_3x.yaml')

    def testPanopticFPN(self):
        self._test_model('COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml')
def training_loss_3rd_item_task_fastgcnnew(batch_index, model, sess, train_data, is_training):
    """Average the third-order item-task loss over the given batches.

    Args:
        batch_index: iterable of batch indices passed to gfn.split_batch_item.
        model: graph/model object exposing the fed placeholders and loss_3rd_item_pos.
        sess: active TensorFlow session.
        train_data: 7-tuple of aligned arrays (unpacked below).
        is_training: fed to both user- and item-task training-phase placeholders.

    Returns:
        Mean of model.loss_3rd_item_pos across all batches in batch_index.
    """
    train_loss = 0.0
    (train_target_item, train_k_shot_user, train_second_order_items, train_third_order_users, train_oracle_item_ebd, train_mask_num_second_order_item, train_mask_num_third_order_user) = train_data
    for index in batch_index:
        # Slice every aligned array for this batch index.
        (batch_target_item, batch_kshot_user, batch_2nd_item, batch_3rd_user, batch_oracle_item_ebd, batch_mask_num_2nd_item, batch_mask_num_3rd_user) = gfn.split_batch_item(train_target_item, train_k_shot_user, train_second_order_items, train_third_order_users, train_oracle_item_ebd, train_mask_num_second_order_item, train_mask_num_third_order_user, index)
        # NOTE(review): model.target_item is fed the *oracle embedding*
        # (batch_oracle_item_ebd), not batch_target_item — confirm this matches
        # the placeholder's intent.
        feed_dict = {model.target_item: batch_oracle_item_ebd, model.support_user_1st_pos_: batch_kshot_user, model.training_phrase_user_task: is_training, model.support_item_2nd_pos_: batch_2nd_item, model.training_phrase_item_task: is_training, model.support_user_3rd_pos: batch_3rd_user}
        train_loss += sess.run(model.loss_3rd_item_pos, feed_dict)
    return (train_loss / len(batch_index))
def _open_url(url):
try:
from webbrowser import open as wbopen
wbopen(url)
except:
pass |
class ImageLogger(Callback):
    """Logs grids of original vs. reconstructed images to the experiment logger
    on the first batch of each of the first 5 train/validation epochs.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: a bare `_zero_only` expression statement stood here — almost
        # certainly a mangled decorator (e.g. @rank_zero_only). It either raised
        # NameError at construction or did nothing, so it was removed.

    def log_img(self, pl_module, batch, current_epoch, split='train'):
        """Render originals and stage-1 reconstructions as image grids and log them."""
        with torch.no_grad():
            (images, labels) = batch
            recons = pl_module.stage1(images)
            images = images.cpu()
            recons = recons.cpu()
            # Maps from [-1, 1] to [0, 1] for display — assumes inputs are
            # normalized to [-1, 1]; TODO confirm against the data pipeline.
            grid_org = ((torchvision.utils.make_grid(images, nrow=8) + 1.0) / 2.0)
            grid_rec = ((torchvision.utils.make_grid(recons, nrow=8) + 1.0) / 2.0)
            grid_rec = torch.clip(grid_rec, min=0, max=1)
            pl_module.logger.experiment.add_image(f'images_org/{split}', grid_org, global_step=current_epoch)
            pl_module.logger.experiment.add_image(f'images_rec/{split}', grid_rec, global_step=current_epoch)

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        # Only the first batch of each of the first 5 epochs.
        if ((batch_idx == 0) and (trainer.current_epoch < 5)):
            self.log_img(pl_module, batch, current_epoch=trainer.current_epoch, split='train')

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if ((batch_idx == 0) and (trainer.current_epoch < 5)):
            self.log_img(pl_module, batch, current_epoch=trainer.current_epoch, split='test')
def test_clpr_model(model, input_doc):
    """Evaluate *model* on the CLPR features/labels stored on *input_doc*.

    Returns the (accuracy, precision, recall, f1) tuple produced by
    utilities.print_metrics.
    """
    features = np.asarray(input_doc._.CLPR_Features)
    labels = input_doc._.CLPR_Labels
    predictions = model.predict(features)
    return utilities.print_metrics(labels, predictions)
class ResNet(nn.Module):
    """ResNet with sharable (piggyback-style) convolutions and one classifier
    head per registered dataset, with all channel widths scaled by
    ``network_width_multiplier`` (continual-learning setup).
    """

    def __init__(self, block, layers, dataset_history, dataset2num_classes, network_width_multiplier, shared_layer_info, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        # NOTE(review): `num_classes` is unused — head sizes come from
        # dataset2num_classes instead.
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.network_width_multiplier = network_width_multiplier
        self.shared_layer_info = shared_layer_info
        # Every channel count below is scaled by the width multiplier.
        self.inplanes = int((64 * network_width_multiplier))
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each element says whether to replace the 2x2 stride with dilation.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nl.SharableConv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, (network_width_multiplier * 64), layers[0])
        self.layer2 = self._make_layer(block, (network_width_multiplier * 128), layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, (network_width_multiplier * 256), layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, (network_width_multiplier * 512), layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # One classifier per previously-seen dataset.
        (self.datasets, self.classifiers) = (dataset_history, nn.ModuleList())
        self.dataset2num_classes = dataset2num_classes
        if self.datasets:
            self._reconstruct_classifiers()
        for m in self.modules():
            if isinstance(m, nl.SharableConv2d):
                nn.init.normal_(m.weight, 0, 0.001)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last norm layer of each residual branch so every block
        # starts as (near) identity.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _reconstruct_classifiers(self):
        # Rebuild one head per known dataset, sized by that dataset's recorded
        # width multiplier.
        for (dataset, num_classes) in self.dataset2num_classes.items():
            self.classifiers.append(nn.Linear(int((self.shared_layer_info[dataset]['network_width_multiplier'] * 2048)), num_classes))

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one ResNet stage of `blocks` blocks; first block may downsample."""
        norm_layer = self._norm_layer
        downsample = None
        result_planes = int((planes * block.expansion))
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != result_planes)):
            # Projection shortcut to match the residual branch's shape.
            downsample = nn.Sequential(conv1x1(self.inplanes, result_planes, stride), norm_layer(result_planes))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = result_planes
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def add_dataset(self, dataset, num_classes):
        """Register a new dataset and create (and init) its classifier head."""
        if (dataset not in self.datasets):
            self.datasets.append(dataset)
            self.dataset2num_classes[dataset] = num_classes
            self.classifiers.append(nn.Linear(int((2048 * self.network_width_multiplier)), num_classes))
            nn.init.normal_(self.classifiers[self.datasets.index(dataset)].weight, 0, 0.01)
            nn.init.constant_(self.classifiers[self.datasets.index(dataset)].bias, 0)

    def set_dataset(self, dataset):
        """Select which dataset's classifier head `forward` should use."""
        assert (dataset in self.datasets)
        self.classifier = self.classifiers[self.datasets.index(dataset)]

    def forward(self, x):
        # Standard ResNet trunk followed by the currently selected head
        # (set_dataset must have been called first).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.classifier(x)
        return x
_sentencepiece
_tokenizers
class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = FNetTokenizer
rust_tokenizer_class = FNetTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
test_sentencepiece_ignore_case = True
test_seq2seq = False
def setUp(self):
super().setUp()
tokenizer = FNetTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_text = 'this is a test'
output_text = 'this is a test'
return (input_text, output_text)
def test_convert_token_and_id(self):
token = '<pad>'
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[(- 1)], 'eloquent')
self.assertEqual(len(vocab_keys), 30000)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 30000)
def test_rust_and_python_full_tokenizers(self):
if (not self.test_rust_tokenizer):
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = 'I was born in 92000, and this is false.'
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_full_tokenizer(self):
tokenizer = FNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize('This is a test')
self.assertListEqual(tokens, ['', 'T', 'his', 'is', 'a', 'test'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [13, 1, 4398, 25, 21, 1289])
tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
self.assertListEqual(tokens, ['', 'I', 'was', 'born', 'in', '9', '2000', ',', 'and', 'this', 'is', 'fal', 's', 'e', '.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, ['', '<unk>', 'was', 'born', 'in', '9', '2000', ',', 'and', 'this', 'is', 'fal', 's', '<unk>', '.'])
def test_sequence_builders(self):
tokenizer = FNetTokenizer(SAMPLE_VOCAB)
text = tokenizer.encode('sequence builders')
text_2 = tokenizer.encode('multi-sequence build')
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert (encoded_sentence == (([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]))
assert (encoded_pair == (((([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]) + text_2) + [tokenizer.sep_token_id]))
def test_special_tokens_initialization(self):
for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
added_tokens = [AddedToken('<special>', lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
r_output = tokenizer_r.encode('Hey this is a <special> token')
special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
self.assertTrue((special_token_id in r_output))
if self.test_slow_tokenizer:
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
p_output = tokenizer_p.encode('Hey this is a <special> token')
cr_output = tokenizer_r.encode('Hey this is a <special> token')
self.assertEqual(p_output, r_output)
self.assertEqual(cr_output, r_output)
self.assertTrue((special_token_id in p_output))
self.assertTrue((special_token_id in cr_output))
def test_special_tokens_initialization_from_slow(self):
for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
added_tokens = [AddedToken('<special>', lstrip=True)]
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True)
special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
p_output = tokenizer_p.encode('Hey this is a <special> token')
cr_output = tokenizer_r.encode('Hey this is a <special> token')
self.assertEqual(p_output, cr_output)
self.assertTrue((special_token_id in p_output))
self.assertTrue((special_token_id in cr_output))
def test_padding(self, max_length=50):
    """Check that the slow and fast tokenizers pad identically.

    Covers ``encode`` / ``encode_plus`` / ``batch_encode_plus`` / ``pad`` for
    single sequences, sequence pairs and batches, under the 'max_length',
    'longest'/True and legacy ``pad_to_max_length=True`` padding strategies.
    """
    if (not self.test_slow_tokenizer):
        # No slow tokenizer to compare against: nothing to do.
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Both tokenizers must agree on the pad token before any comparison.
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            # --- encode, single sequence: legacy pad_to_max_length flag ---
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # --- encode, single sequence: padding='max_length' ---
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # --- encode, single sequence: 'longest' vs the equivalent True ---
            input_r = tokenizer_r.encode('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode('This is a simple input', padding=True)
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode, sequence pair: same three strategies ---
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', padding=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', padding='longest')
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode_plus, single sequence ---
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            # --- encode_plus, sequence pair ---
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            # --- batch_encode_plus, batch of single sequences ---
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # 'longest' ignores max_length here: the batch pads to its longest member.
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- batch_encode_plus, batch of pairs ---
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding=True)
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding='longest')
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- pad() applied after tokenization ---
            # NOTE(review): from here on BOTH sides use tokenizer_r (the fast
            # tokenizer); upstream variants use tokenizer_p for input_p.
            # Possibly intentional for this model — confirm.
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.encode_plus('This is a input 1')
            input_p = tokenizer_r.pad(input_p)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.encode_plus('This is a input 1')
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_r.pad(input_p)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def assert_batch_padded_input_match(self, input_r: dict, input_p: dict, max_length: int, pad_token_id: int, model_main_input_name: str='input_ids'):
    """Assert two batch encodings are padded identically.

    Every tensor list in ``input_r`` must hold exactly 2 sequences, each of
    length ``max_length``; the main input sequences of ``input_r`` and
    ``input_p`` are then compared element-wise via
    ``assert_padded_input_match``.

    Args:
        input_r: batch encoding from the fast (rust) tokenizer.
        input_p: batch encoding from the slow (python) tokenizer.
        max_length: expected padded length of every sequence.
        pad_token_id: id padding positions must contain.
        model_main_input_name: key of the main input tensor ('input_ids').
    """
    # The original packed the three assertions into a tuple expression and
    # repeated that line verbatim; one plain assertion per statement instead.
    for i_r in input_r.values():
        self.assertEqual(len(i_r), 2)
        self.assertEqual(len(i_r[0]), max_length)
        self.assertEqual(len(i_r[1]), max_length)
    for (i_r, i_p) in zip(input_r[model_main_input_name], input_p[model_main_input_name]):
        self.assert_padded_input_match(i_r, i_p, max_length, pad_token_id)
def test_tokenizer_integration(self):
    """Integration check against a pinned checkpoint/revision.

    The expected encoding was generated once from 'google/fnet-base' at the
    pinned revision; the helper re-tokenizes the reference sentences and
    compares against this frozen snapshot. Do not edit the literal by hand.
    """
    expected_encoding = {'input_ids': [[4, 4616, 107, 163, 328, 14, 63, 1726, 106, 11954, 16659, 23, 83, 16688, 11427, 328, 107, 36, 11954, 16659, 23, 83, 16688, 6153, 82, 961, 16688, 3474, 16710, 1696, 2306, 16688, 10854, 2524, 3827, 561, 163, 3474, 16680, 62, 226, 2092, 16680, 379, 3474, 16660, 16680, 2436, 16667, 16671, 16680, 999, 87, 3474, 16680, 2436, 16667, 5208, 800, 16710, 68, 2018, 2959, 3037, 163, 16663, 11617, 16710, 36, 2018, 2959, 4737, 163, 16663, 16667, 16674, 16710, 91, 372, 5087, 16745, 2205, 82, 961, 3608, 38, 1770, 16745, 7984, 36, 2565, 751, 9017, 1204, 864, 218, 1244, 16680, 11954, 16659, 23, 83, 36, 14686, 23, 7619, 16678, 5], [4, 28, 532, 65, 1929, 33, 391, 16688, 3979, 9, 2565, 7849, 299, 225, 34, 2040, 305, 167, 289, 16667, 16078, 32, 1966, 181, 4626, 63, 10575, 71, 851, 1491, 36, 624, 4757, 38, 208, 8038, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], [4, 13, 1467, 5187, 26, 2521, 4567, 16664, 372, 13, 16209, 3314, 16678, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
    self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='google/fnet-base', revision='34219a71ca20e280cc6000b89673a169c65d605c')
def search_replace_conv_linear(model, name_model='', arr=None):
    """Replace every direct Conv/Linear child of ``model`` in place.

    Walks the immediate children of ``model`` and, for each convolutional or
    linear layer, calls ``replace_layer`` with an access string of the form
    ``model._modules['<name>']`` so the replacement can be assigned back.

    Args:
        model: module whose direct children are inspected.
        name_model: dotted prefix used to build each child's qualified name.
        arr: unused; kept only for backward compatibility with existing
            callers (the old mutable default ``[]`` was an anti-pattern).

    Returns:
        The (possibly mutated) ``model``.
    """
    # Child names do not change within the loop: fetch them once instead of
    # rebuilding the list on every iteration. The dead `prev` tracking from
    # the original (assigned, never read) is removed.
    modules_names = list(model._modules.keys())
    for i, m in enumerate(model.children()):
        layer_name = (name_model + '.' + modules_names[i]) if name_model != '' else (name_model + modules_names[i])
        # NOTE(review): with a non-empty prefix, layer_name contains dots, so
        # this key only works if replace_layer resolves nested names — confirm.
        exec_str_m = "model._modules['%s']" % layer_name
        if is_Conv(m) or is_Linear(m):
            model = replace_layer(model, m, exec_str_m)
    return model
class Annealer():
    """Anneal a value from ``initial_value`` to ``final_value`` over
    ``n_steps_anneal`` update calls.

    Modes:
        'linear'    — add a constant increment per step.
        'geometric' — multiply by a constant factor per step
                      (requires non-zero ``initial_value``).
        'constant'  — always return ``final_value`` once past ``start_step``.

    A negative ``n_steps_anneal`` reverses the direction: the sign is flipped
    and initial/final values are swapped. Before ``start_step`` update calls
    have happened, ``default`` (or ``initial_value``) is returned.
    """

    def __init__(self, initial_value, final_value, n_steps_anneal, start_step=0, default=None, mode='geometric'):
        if n_steps_anneal < 0:
            # Negative step count means "anneal in reverse".
            n_steps_anneal *= -1
            initial_value, final_value = final_value, initial_value
        self.initial_value = initial_value
        self.final_value = final_value
        self.n_steps_anneal = n_steps_anneal
        self.start_step = start_step
        self.default = default if default is not None else self.initial_value
        self.mode = mode.lower()
        if self.mode == 'linear':
            delta = self.final_value - self.initial_value
            # Per-step additive increment.
            self.factor = delta / self.n_steps_anneal
        elif self.mode == 'constant':
            pass
        elif self.mode == 'geometric':
            delta = self.final_value / self.initial_value
            # Per-step multiplicative factor (n-th root of the total ratio).
            self.factor = delta ** (1 / self.n_steps_anneal)
        else:
            # Fixed typo in the original message ('Unkown' -> 'Unknown').
            raise ValueError(f'Unknown mode : {mode}.')
        self.reset_parameters()

    def reset_parameters(self):
        """Reset the internal training-call counter."""
        self.n_training_calls = 0

    def is_annealing(self, n_update_calls):
        """Return True while the value is still being annealed."""
        not_const = self.mode != 'constant'
        is_not_finised = n_update_calls < (self.n_steps_anneal + self.start_step)
        return not_const and is_not_finised

    def __call__(self, is_update=False, n_update_calls=None):
        """Return the annealed value at the given (or internal) step count.

        Args:
            is_update: when True, increment the internal counter first.
            n_update_calls: explicit step count; defaults to the internal one.
        """
        if is_update:
            self.n_training_calls += 1
        if n_update_calls is None:
            n_update_calls = self.n_training_calls
        if self.start_step > n_update_calls:
            # Annealing has not started yet.
            return self.default
        n_actual_training_calls = n_update_calls - self.start_step
        if self.is_annealing(n_update_calls):
            current = self.initial_value
            if self.mode == 'geometric':
                current *= self.factor ** n_actual_training_calls
            elif self.mode == 'linear':
                current += self.factor * n_actual_training_calls
            else:
                raise ValueError(f'Unknown mode : {self.mode}.')
        else:
            current = self.final_value
        return current
def attention_layer(x, a, x_mask, a_mask, sim_func, scope='', output_alignment=False):
    """Attend from ``x`` over ``a`` using ``sim_func`` as the scorer.

    Masked positions are pushed to VERY_NEGATIVE_NUMBER before the softmax so
    they receive ~zero probability. Returns the attended vectors, plus the
    attention probabilities when ``output_alignment`` is set. ``scope`` is
    accepted for interface compatibility.
    """
    x_len = tf.shape(x)[1]
    a_len = tf.shape(a)[1]
    scores = sim_func(x, a)
    mask = compute_attention_mask(x_mask, a_mask, x_len, a_len)
    if mask is not None:
        scores = scores + VERY_NEGATIVE_NUMBER * (1 - tf.cast(mask, scores.dtype))
    weights = tf.nn.softmax(scores)
    attended = tf.matmul(weights, a)
    return (attended, weights) if output_alignment else attended
class VNet(nn.Module):
    """V-Net style 3D encoder/decoder.

    Four down-sampling stages (channels x2 each), four up-sampling stages with
    skip connections, and a final transition to ``classes`` output channels.
    """

    def __init__(self, non_linearity='elu', in_channels=1, classes=4, init_features_maps=16, kernel_size=5, padding=2):
        super(VNet, self).__init__()
        self.classes = classes
        self.in_channels = in_channels
        f = init_features_maps
        # Keyword arguments shared by every transition block.
        common = dict(non_linearity=non_linearity, kernel_size=kernel_size, padding=padding)
        self.in_tr = InputTransition(in_channels, f, **common)
        self.down_tr32 = DownTransition(f, nConvs=1, dropout=True, **common)
        self.down_tr64 = DownTransition(f * 2, nConvs=2, dropout=True, **common)
        self.down_tr128 = DownTransition(f * 4, nConvs=3, dropout=True, **common)
        self.down_tr256 = DownTransition(f * 8, nConvs=2, dropout=False, larger=True, **common)
        self.up_tr256 = UpTransition(f * 16, f * 16, nConvs=2, dropout=True, **common)
        self.up_tr128 = UpTransition(f * 16, f * 8, nConvs=2, dropout=True, **common)
        self.up_tr64 = UpTransition(f * 8, f * 4, nConvs=1, dropout=True, **common)
        self.up_tr32 = UpTransition(f * 4, f * 2, nConvs=1, dropout=True, **common)
        self.out_tr = OutputTransition(f * 2, classes, non_linearity, kernel_size, padding=padding)

    def forward(self, x):
        """Encode, then decode with skip connections from each encoder stage."""
        skip16 = self.in_tr(x)
        skip32 = self.down_tr32(skip16)
        skip64 = self.down_tr64(skip32)
        skip128 = self.down_tr128(skip64)
        bottom = self.down_tr256(skip128)
        y = self.up_tr256(bottom, skip128)
        y = self.up_tr128(y, skip64)
        y = self.up_tr64(y, skip32)
        y = self.up_tr32(y, skip16)
        return self.out_tr(y)

    def test(self, device='cpu'):
        """Smoke-test the network on a random 32^3 volume and print a summary."""
        side = 32
        sample = torch.rand(1, self.in_channels, side, side, side)
        expected = torch.rand(1, self.classes, side, side, side)
        prediction = self.forward(sample)
        assert expected.shape == prediction.shape
        summary(self.to(torch.device(device)), (self.in_channels, side, side, side), device=device)
        print('Vnet test is complete')
class Synface(Dataset):
    """Image dataset resolved from a glob pattern; yields (tensor, 0) pairs.

    NOTE(review): the label is a constant 0 — presumably a placeholder for an
    unlabeled/self-supervised setting; confirm against the training code.
    """

    def __init__(self, dataset_path, img_size, **kwargs):
        super().__init__()
        self.data = glob.glob(dataset_path)
        assert len(self.data) > 0, "Can't find data; make sure you specify the path to your dataset"
        # Pipeline order is preserved from the original (Normalize runs before
        # the flip/resize, and Resize uses interpolation=0, i.e. nearest).
        steps = [
            transforms.CenterCrop(170),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.Resize((img_size, img_size), interpolation=0),
        ]
        self.transform = transforms.Compose(steps)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image = PIL.Image.open(self.data[index])
        return (self.transform(image), 0)
def _test_tinshift_assert(dtype):
    """Check that ``tin_shift`` rejects invalid input/shift shape combinations
    with a ValueError (skipped when the op is not compiled)."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    cases = [
        (torch.rand(2, 3, 4, 2), torch.rand(2, 3)),
        (torch.rand(2, 3, 4, 2), torch.rand(2, 5)),
    ]
    for x, shift in cases:
        with pytest.raises(ValueError):
            tin_shift(x.cuda(), shift.cuda())
def test_get(fbdict):
    """Exercise ``get`` semantics on the fallback-dict fixture.

    NOTE(review): 'fall1' and 'fall3' are presumably keys served by the
    fixture's fallback layer (yielding 7 and True) — confirm against the
    ``fbdict`` fixture definition.
    """
    fbdict['a'] = 'b'
    # A directly-set key wins over the supplied default.
    assert (fbdict.get('a', 18) == 'b')
    # Fallback-backed keys also ignore the default...
    assert (fbdict.get('fall1', 18) == 7)
    # ...while a truly missing key returns the default argument.
    assert (fbdict.get('notexisting', 18) == 18)
    assert (fbdict.get('fall3', 18) is True)
def simplify_padding(padding_shapes):
    """Report whether every padding shape in the sequence is identical.

    Args:
        padding_shapes: non-empty sequence of padding specs.

    Returns:
        Tuple ``(all_same, first_padding)`` where ``all_same`` is True iff
        every entry equals the first one, returned as the representative.

    Raises:
        IndexError: if ``padding_shapes`` is empty (as in the original).
    """
    padding_init = padding_shapes[0]
    # all() short-circuits on the first mismatch; the original scanned the
    # whole list even after the flag was already False.
    all_same = all(pad == padding_init for pad in padding_shapes[1:])
    return (all_same, padding_init)
def sample_points(N, C, D):
assert (D == 3), 'D must be 3 to sample 3d points'
assert (C == 3), 'C must be 3 to sample 3d points'
p1 = np.array([1, (- 1), 3])
p2 = np.array([2, 3, 4])
p3 = np.array([(- 5), 6, 7])
np.random.seed(1)
x = np.random.uniform(size=(1, N))
np.random.seed(42)
y = np.random.uniform(size=(1, N))
R = np.array([[0., 0., 0], [(- 0.), 0., 0], [0, 0, 1]])
R2 = np.array([[0., 0, 0.], [0, 1, 0], [(- 0.), 0, 0.]])
normal = np.cross((p1 - p2), (p1 - p3))
d = (((p1[0] * normal[0]) + (p1[1] * normal[1])) + (p1[2] * normal[2]))
d = (- d)
z = ((((- d) - (normal[0] * x)) - (normal[1] * y)) / normal[2])
X1 = np.concatenate((x, y, z), axis=0)
X1 = (X1 - np.mean(X1, axis=1, keepdims=True))
normal = np.cross((p1 - p2), (p1 - p3)).dot(R)
d = (((p1[0] * normal[0]) + (p1[1] * normal[1])) + (p1[2] * normal[2]))
d = (- d)
z = ((((- d) - (normal[0] * x)) - (normal[1] * y)) / normal[2])
X2 = np.concatenate((x, y, z), axis=0)
X2 = (X2 - np.mean(X2, axis=1, keepdims=True))
normal = np.cross((p1 - p2), (p1 - p3)).dot(R2)
d = (((p1[0] * normal[0]) + (p1[1] * normal[1])) + (p1[2] * normal[2]))
d = (- d)
z = ((((- d) - (normal[0] * x)) - (normal[1] * y)) / normal[2])
X3 = np.concatenate((x, y, z), axis=0)
X3 = (X3 - np.mean(X3, axis=1, keepdims=True))
return np.concatenate((X1, X2, X3), axis=1).T |
_tf
_retrieval
_sentencepiece
_tokenizers
class TFRagModelIntegrationTests(unittest.TestCase):
    """Integration tests for the TF RAG models (token and sequence variants)
    against real pretrained checkpoints, using the dummy wiki_dpr index.

    The bare ``_property`` markers in the original were corrupted ``@property``
    decorators; they are restored here. ``token_model`` / ``sequence_model``
    and ``test_data_questions`` are all accessed as attributes below, which
    only works with ``@property``.
    """

    @property
    def token_model(self):
        # Fresh model per access: DPR question encoder + BART generator.
        return TFRagTokenForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn')

    @property
    def sequence_model(self):
        return TFRagSequenceForGeneration.from_pretrained_question_encoder_generator('facebook/dpr-question_encoder-single-nq-base', 'facebook/bart-large-cnn')

    def token_model_nq_checkpoint(self, retriever):
        return TFRagTokenForGeneration.from_pretrained('facebook/rag-token-nq', retriever=retriever)

    def get_rag_config(self):
        """Build a RagConfig wired to the dummy wiki_dpr retrieval dataset."""
        question_encoder_config = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        generator_config = AutoConfig.from_pretrained('facebook/bart-large-cnn')
        return RagConfig.from_question_encoder_generator_configs(question_encoder_config, generator_config, bos_token_id=0, decoder_start_token_id=2, eos_token_id=2, is_encoder_decoder=True, pad_token_id=1, vocab_size=50264, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, dataset='wiki_dpr', dataset_split='train', index_name='exact', index_path=None, use_dummy_dataset=True, retrieval_vector_size=768, retrieval_batch_size=8)

    def test_rag_sequence_inference(self):
        """Forward pass of the sequence model: check logits shape, loss and doc scores."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        rag_sequence = self.sequence_model
        rag_sequence.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        output = rag_sequence(input_ids, labels=decoder_input_ids)
        expected_shape = tf.TensorShape([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
        expected_loss = tf.convert_to_tensor([36.7368])
        tf.debugging.assert_near(output.loss, expected_loss, atol=0.001)
        tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=0.001)

    def test_rag_token_inference(self):
        """Forward pass of the token model: check logits shape, loss and doc scores."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        output = rag_token(input_ids, labels=decoder_input_ids)
        expected_shape = tf.TensorShape([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
        expected_loss = tf.convert_to_tensor([36.3557])
        tf.debugging.assert_near(output.loss, expected_loss, atol=0.001)
        tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=0.001)

    def test_rag_token_inference_nq_checkpoint(self):
        """Load the NQ checkpoint, round-trip through save_pretrained, then infer."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        rag_token = self.token_model_nq_checkpoint(retriever=rag_retriever)
        # Check that outputs after save/load are identical to the original model.
        with tempfile.TemporaryDirectory() as tmpdirname:
            rag_token.save_pretrained(tmpdirname)
            rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        output = rag_token(input_ids, labels=decoder_input_ids)
        expected_shape = tf.TensorShape([5, 5, 50265])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = tf.convert_to_tensor([[62.9402, 62.7107, 62.2382, 62.1194, 61.8578]])
        expected_loss = tf.convert_to_tensor([32.521812])
        tf.debugging.assert_near(output.loss, expected_loss, atol=0.001)
        tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=0.001)

    def test_rag_token_inference_save_pretrained(self):
        """Save/reload the composed token model and re-check the reference outputs."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        rag_token = self.token_model
        rag_token.set_retriever(rag_retriever)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        # Build the model's variables with an initial forward call before saving.
        rag_token(input_ids, labels=decoder_input_ids)
        with tempfile.TemporaryDirectory() as tmpdirname:
            rag_token.save_pretrained(tmpdirname)
            rag_token = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)
        output = rag_token(input_ids, labels=decoder_input_ids)
        expected_shape = tf.TensorShape([5, 5, 50264])
        self.assertEqual(output.logits.shape, expected_shape)
        expected_doc_scores = tf.convert_to_tensor([[75.0286, 74.4998, 74.0804, 74.0306, 73.9504]])
        expected_loss = tf.convert_to_tensor([36.3557])
        tf.debugging.assert_near(output.loss, expected_loss, atol=0.001)
        tf.debugging.assert_near(output.doc_scores, expected_doc_scores, atol=0.001)

    def test_init_and_from_pretrained(self):
        """Construct from config, run, save and reload without error."""
        rag_config = self.get_rag_config()
        rag_decoder_tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
        rag_question_encoder_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
        rag_retriever = RagRetriever(rag_config, question_encoder_tokenizer=rag_question_encoder_tokenizer, generator_tokenizer=rag_decoder_tokenizer)
        rag_config = RagConfig.from_pretrained('facebook/rag-sequence-base')
        rag = TFRagTokenForGeneration(rag_config, retriever=rag_retriever)
        input_ids = rag_question_encoder_tokenizer('who sings does he love me with reba', return_tensors='tf').input_ids
        decoder_input_ids = rag_decoder_tokenizer('Linda Davis', return_tensors='tf').input_ids
        rag(input_ids, decoder_input_ids=decoder_input_ids)
        # This should not give any warnings.
        with tempfile.TemporaryDirectory() as tmpdirname:
            rag.save_pretrained(tmpdirname)
            rag = TFRagTokenForGeneration.from_pretrained(tmpdirname, retriever=rag_retriever)

    @property
    def test_data_questions(self):
        # Data, not a test, despite the historical name: @property keeps
        # unittest from collecting it (the class attribute is not callable)
        # and matches the attribute-style access below (self.test_data_questions[:2]).
        return ['who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z']

    def test_rag_token_greedy_search(self):
        """Greedy decoding (num_beams=1) on two questions against pinned answers."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        retriever = RagRetriever.from_pretrained('facebook/rag-token-nq', index_name='exact', use_dummy_dataset=True)
        rag_token = TFRagTokenForGeneration.from_pretrained('facebook/rag-token-nq', retriever=retriever)
        input_dict = tokenizer(self.test_data_questions[:2], return_tensors='tf', padding=True, truncation=True)
        input_ids = input_dict.input_ids
        attention_mask = input_dict.attention_mask
        rag_token.config.num_beams = 1
        output_ids = rag_token.generate(input_ids, attention_mask=attention_mask)
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        EXPECTED_OUTPUTS = [' albert einstein', ' september 22, 2017']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    def test_rag_token_generate_batch(self):
        """Beam-search generation for the full question set, in two half-batches."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
        retriever = RagRetriever.from_pretrained('facebook/rag-token-nq', index_name='exact', use_dummy_dataset=True)
        rag_token = TFRagTokenForGeneration.from_pretrained('facebook/rag-token-nq', retriever=retriever)
        input_dict = tokenizer(self.test_data_questions, return_tensors='tf', padding=True, truncation=True)
        input_ids = input_dict.input_ids
        attention_mask = input_dict.attention_mask
        EXPECTED_OUTPUTS = [' albert einstein', ' september 22, 2017', ' amplitude modulation', ' stefan persson', ' april 20, 2018', ' the 1970s', ' 7.1. 2', ' 13']
        output_ids = rag_token.generate(input_ids[:4], attention_mask=attention_mask[:4])
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        self.assertListEqual(outputs, EXPECTED_OUTPUTS[:4])
        output_ids = rag_token.generate(input_ids[4:], attention_mask=attention_mask[4:])
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        self.assertListEqual(outputs, EXPECTED_OUTPUTS[4:])

    def test_rag_sequence_generate_batch(self):
        """Sequence-model generation for the full question set against pinned answers."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        retriever = RagRetriever.from_pretrained('facebook/rag-sequence-nq', index_name='exact', use_dummy_dataset=True)
        rag_sequence = TFRagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
        input_dict = tokenizer(self.test_data_questions, return_tensors='tf', padding=True, truncation=True)
        input_ids = input_dict.input_ids
        attention_mask = input_dict.attention_mask
        output_ids = rag_sequence.generate(input_ids, attention_mask=attention_mask)
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        EXPECTED_OUTPUTS = [' albert einstein', ' june 22, 2018', ' amplitude modulation', ' tim besley ( chairman )', ' june 20, 2018', ' 1980', ' 7.0', ' 8']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)

    def test_rag_sequence_generate_batch_from_context_input_ids(self):
        """Generation driven by precomputed context ids and doc scores (manual retrieval)."""
        tokenizer = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
        retriever = RagRetriever.from_pretrained('facebook/rag-sequence-nq', index_name='exact', use_dummy_dataset=True)
        rag_sequence = TFRagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
        input_dict = tokenizer(self.test_data_questions, return_tensors='tf', padding=True, truncation=True)
        input_ids = input_dict.input_ids
        question_hidden_states = rag_sequence.question_encoder(input_ids)[0]
        docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors='tf')
        doc_scores = tf.squeeze(tf.matmul(tf.expand_dims(question_hidden_states, axis=[1]), docs_dict['retrieved_doc_embeds'], transpose_b=True), axis=[1])
        output_ids = rag_sequence.generate(context_input_ids=docs_dict['context_input_ids'], context_attention_mask=docs_dict['context_attention_mask'], doc_scores=doc_scores, do_deduplication=True)
        outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        EXPECTED_OUTPUTS = [' albert einstein', ' june 22, 2018', ' amplitude modulation', ' tim besley ( chairman )', ' june 20, 2018', ' 1980', ' 7.0', ' 8']
        self.assertListEqual(outputs, EXPECTED_OUTPUTS)
def main():
    """Robustness benchmark entry point: test a detector on corrupted data.

    For every requested corruption type / severity pair this builds a test
    pipeline with the corruption transform inserted, runs (optionally
    distributed) inference, evaluates with VOC mAP or COCO metrics, and
    incrementally dumps the aggregated results to ``<out>_results.<ext>``.
    """
    args = parse_args()
    assert (args.out or args.show), 'Please specify at least one operation (save or show the results) with the argument "--out" or "--show"'
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        # Speeds up inference for fixed-size inputs.
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu
    # Distributed environment setup.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    if args.seed is not None:
        set_random_seed(args.seed)
    # Expand corruption-group aliases into concrete corruption lists.
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate']
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression']
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = ['defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur']
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = ['contrast', 'elastic_transform', 'pixelate', 'jpeg_compression']
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions
    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for sev_i, corruption_severity in enumerate(args.severities):
            # Severity 0 means "no corruption": identical for every corruption
            # type, so reuse the first run instead of testing again.
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = aggregated_results[corruptions[0]][0]
                continue
            test_data_cfg = copy.deepcopy(cfg.data.test)
            if corruption_severity > 0:
                # Insert the corruption transform right after image loading.
                corruption_trans = dict(type='Corrupt', corruption=corruption, severity=corruption_severity)
                test_data_cfg['pipeline'].insert(1, corruption_trans)
            print('\nTesting {} at severity {}'.format(corruption, corruption_severity))
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=args.workers, dist=distributed, shuffle=False)
            # Rebuild and reload the model for every run so no state leaks
            # between corruption settings.
            model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES
            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)
            if args.out and rank == 0:
                eval_results_filename = osp.splitext(args.out)[0] + '_results' + osp.splitext(args.out)[1]
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = voc_eval_with_return(args.out, test_dataset, args.iou_thr, logger)
                                aggregated_results[corruption][corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation is supported for pascal voc')
                elif eval_types:
                    print('Starting evaluate {}'.format(' and '.join(eval_types)))
                    if eval_types == ['proposal_fast']:
                        # NOTE(review): this branch binds result_file but
                        # coco_eval_with_return below reads result_files --
                        # latent NameError inherited from upstream; confirm.
                        result_file = args.out
                    elif not isinstance(outputs[0], dict):
                        result_files = dataset.results2json(outputs, args.out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            # Bug fix: the per-task suffix used to sit on its
                            # own statement ("(+ '.{}'.format(name))"), which
                            # raised TypeError and left result_file == args.out
                            # so every task overwrote the same json.
                            result_file = args.out + '.{}'.format(name)
                            result_files = dataset.results2json(outputs_, result_file)
                    eval_results = coco_eval_with_return(result_files, eval_types, dataset.coco)
                    aggregated_results[corruption][corruption_severity] = eval_results
                else:
                    print('\nNo task was selected for evaluation;\nUse --eval to select a task')
                # Persist partial results after every finished run.
                mmcv.dump(aggregated_results, eval_results_filename)
    if rank == 0:
        # Print the final aggregated numbers across all corruptions.
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate
        if cfg.dataset_type == 'VOCDataset':
            get_results(eval_results_filename, dataset='voc', prints=prints, aggregate=aggregate)
        else:
            get_results(eval_results_filename, dataset='coco', prints=prints, aggregate=aggregate)
class PerturbLayer(nn.Module):
    # Perturbative layer: either an ordinary conv->BN->activation stack
    # (filter_size > 0) or a bank of additive noise masks followed by a
    # 1x1 convolution that mixes the perturbed feature maps.
    def __init__(self, in_channels=None, out_channels=None, nmasks=None, level=None, filter_size=None, debug=False, use_act=False, stride=1, act=None, unique_masks=False, mix_maps=None, train_masks=False, noise_type='uniform', input_size=None):
        """Build either a conv block or a noise-mask block.

        Args:
            in_channels / out_channels: channel counts of input / output maps.
            nmasks: number of noise masks applied per input channel.
            level: scale applied to the noise before adding it to the input.
            filter_size: > 0 selects the plain conv path; otherwise noise path.
            debug: print intermediate values in forward (noise path only).
            use_act: apply the activation right after the noise addition.
            stride: conv stride (overridden to 2 when filter_size == 7).
            act: activation name resolved through act_fn().
            unique_masks: one independent mask set per input channel.
            mix_maps: if truthy, add an extra 1x1 map-mixing conv stage.
            train_masks: make the noise a trainable parameter.
            noise_type: 'uniform' or 'normal' initialization of the noise.
            input_size: spatial size (H == W assumed) of the input maps.
        """
        super(PerturbLayer, self).__init__()
        self.nmasks = nmasks
        self.unique_masks = unique_masks
        self.train_masks = train_masks
        self.level = level
        self.filter_size = filter_size
        self.use_act = use_act
        self.act = act_fn(act)
        self.debug = debug
        self.noise_type = noise_type
        self.in_channels = in_channels
        self.input_size = input_size
        self.mix_maps = mix_maps
        # Conv hyper-parameters per supported kernel size.
        # NOTE(review): a filter_size > 0 outside {1, 3, 5, 7} leaves
        # padding/bias unbound and would raise NameError below -- confirm
        # callers only pass these sizes.
        if (filter_size == 1):
            padding = 0
            bias = True
        elif ((filter_size == 3) or (filter_size == 5)):
            padding = 1
            bias = False
        elif (filter_size == 7):
            stride = 2
            padding = 3
            bias = False
        if (self.filter_size > 0):
            # Plain convolution path: no noise used at all.
            self.noise = None
            self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=filter_size, padding=padding, stride=stride, bias=bias), nn.BatchNorm2d(out_channels), self.act)
        else:
            # Noise path: either one shared mask set or one per input channel.
            noise_channels = (in_channels if self.unique_masks else 1)
            shape = (1, noise_channels, self.nmasks, input_size, input_size)
            self.noise = nn.Parameter(torch.Tensor(*shape), requires_grad=self.train_masks)
            if (noise_type == 'uniform'):
                self.noise.data.uniform_((- 1), 1)
            elif (self.noise_type == 'normal'):
                self.noise.data.normal_()
            else:
                print('\n\nNoise type {} is not supported / understood\n\n'.format(self.noise_type))
            if (nmasks != 1):
                # Grouped 1x1 conv when each input channel owns several masks.
                if ((out_channels % in_channels) != 0):
                    print('\n\n\nnfilters must be divisible by 3 if using multiple noise masks per input channel\n\n\n')
                groups = in_channels
            else:
                groups = 1
            self.layers = nn.Sequential(nn.Conv2d((in_channels * self.nmasks), out_channels, kernel_size=1, stride=1, groups=groups), nn.BatchNorm2d(out_channels), self.act)
            if self.mix_maps:
                # Optional extra 1x1 conv mixing the output maps.
                self.mix_layers = nn.Sequential(nn.Conv2d(out_channels, out_channels, kernel_size=1, stride=1, groups=1), nn.BatchNorm2d(out_channels), self.act)

    def forward(self, x):
        """Apply the conv block, or add scaled noise then mix with a 1x1 conv."""
        if (self.filter_size > 0):
            return self.layers(x)
        else:
            # Broadcast-add noise over a new mask axis:
            # (B, C, 1, H, W) + (1, C or 1, nmasks, H, W).
            y = torch.add(x.unsqueeze(2), (self.noise * self.level))
            if self.debug:
                print_values(x, self.noise, y, self.unique_masks)
            if self.use_act:
                y = self.act(y)
            # Collapse the (channel, mask) axes into the conv input channels.
            y = y.view((- 1), (self.in_channels * self.nmasks), self.input_size, self.input_size)
            y = self.layers(y)
            if self.mix_maps:
                y = self.mix_layers(y)
            return y
class TransformerModel(nn.Module):
    """Label-conditioned Transformer encoder.

    A learned label embedding is prepended as an extra time step to the
    positionally-embedded source sequence; the concatenation runs through a
    ``TransformerEncoder`` and is linearly projected to ``out_dim``.
    """

    def __init__(self, seq_len: int, d_model: int, nhead: int, d_hid: int, nlayers: int, dropout: float = 0.5, out_dim=91, num_labels=15):
        """Args mirror nn.TransformerEncoderLayer plus the conditioning setup:
        ``num_labels`` distinct condition codes, each embedded into d_model."""
        super().__init__()
        self.model_type = 'Transformer'
        self.seq_len = seq_len
        self.d_model = d_model
        self.nhead = nhead
        self.d_hid = d_hid
        self.nlayers = nlayers
        # One embedding vector per condition label.
        self.cond_emb = nn.Embedding(num_labels, d_model)
        self.pos_embedding = PositionalEmbedding(seq_len=seq_len, d_model=d_model)
        encoder_layers = TransformerEncoderLayer(d_model, nhead, d_hid, dropout, activation='gelu')
        self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
        self.decoder = nn.Linear(d_model, out_dim)
        self.init_weights()

    def init_weights(self) -> None:
        """Initialize the output projection: zero bias, small uniform weights."""
        initrange = 0.1
        self.decoder.bias.data.zero_()
        self.decoder.weight.data.uniform_(-initrange, initrange)

    def forward(self, src: Tensor, src_mask: Tensor, cond_code: Tensor) -> "tuple[Tensor, Tensor]":
        """Run the conditioned encoder.

        Args:
            src: source sequence; fed to the sequence-first encoder after the
                condition token is concatenated on dim 0 -- presumably
                (seq_len, batch, d_model), TODO confirm with callers.
            src_mask: attention mask forwarded to the encoder.
            cond_code: integer condition codes, embedded and prepended.

        Returns:
            Tuple of (decoded encoder output, the conditioning embedding).
            Bug fix: the annotation previously claimed a single ``Tensor``
            although the method has always returned a 2-tuple.
        """
        cond_embedding = self.cond_emb(cond_code).permute(1, 0, 2)
        output = self.pos_embedding(src)
        output = torch.cat([cond_embedding, output], dim=0)
        output = self.transformer_encoder(output, src_mask)
        output = self.decoder(output)
        return (output, cond_embedding)
def get_primes(num_primes, log_flag=False, log_scaler=1.0, prime_scaler=1.0):
    """Return the first ``num_primes`` primes plus an aggregate over them.

    Args:
        num_primes: how many primes to collect (starting at 2).
        log_flag: if True, store ``log2(p * prime_scaler) * log_scaler`` per
            prime and return the SUM of those logs (i.e. log of the product);
            otherwise store the primes themselves and return their product.
        log_scaler / prime_scaler: scaling knobs applied in log mode.

    Returns:
        (primes, aggregate) where aggregate is the log-sum or the product.
    """
    primes = []
    # Log mode accumulates a sum (identity 0); plain mode accumulates a
    # product, so it must start at 1 -- starting at 0 made the returned
    # product always 0 (bug fix). A dead `elif False:` natural-log branch
    # was also removed.
    prod_prime = 0.0 if log_flag else 1
    num = 2
    while len(primes) < num_primes:
        if is_prime(num):
            if log_flag:
                value = np.log2(num * prime_scaler) * log_scaler
                primes.append(value)
                prod_prime += value
            else:
                primes.append(num)
                prod_prime *= num
        num += 1
    return (primes, prod_prime)
def linear_discriminant_analysis(name, solver=None, shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=1e-05):
    """Build a hyperopt search-space node for sklearn's
    LinearDiscriminantAnalysis.

    Any argument left at None is replaced by a hyperopt search expression;
    explicitly passed values are used as-is.
    """
    def _name(msg):
        return '%s.%s_%s' % (name, 'lda', msg)

    # Sample (solver, shrinkage) jointly so only valid combinations occur:
    # 'svd' never takes shrinkage, while 'lsqr'/'eigen' may use 'auto'.
    solver_shrinkage = hp.choice(
        _name('solver_shrinkage_dual'),
        [('svd', None), ('lsqr', None), ('lsqr', 'auto'), ('eigen', None), ('eigen', 'auto')],
    )
    chosen_solver = solver_shrinkage[0] if solver is None else solver
    chosen_shrinkage = solver_shrinkage[1] if shrinkage is None else shrinkage
    if n_components is None:
        # Log-uniform over roughly 1..30, quantized, then scaled by 4.
        n_components = 4 * scope.int(hp.qloguniform(_name('n_components'), low=np.log(0.51), high=np.log(30.5), q=1.0))
    return scope.sklearn_LinearDiscriminantAnalysis(
        solver=chosen_solver,
        shrinkage=chosen_shrinkage,
        priors=priors,
        n_components=n_components,
        store_covariance=store_covariance,
        tol=tol,
    )
def main():
    """End-to-end export: read the metadata files, group images per
    celebrity, attach per-celebrity targets, and write everything out
    as a single JSON file."""
    identities, attributes = get_metadata()
    celebrities = get_celebrities_and_images(identities)
    targets = get_celebrities_and_target(celebrities, attributes)
    write_json(build_json_format(celebrities, targets))
class CheckpointSaverTest(unittest.TestCase):
    """Unit tests for the async, shared-memory based checkpoint saver."""

    def setUp(self) -> None:
        # Reset the singleton so every test spins up a fresh background saver.
        AsyncCheckpointSaver._saver_instance = None
        AsyncCheckpointSaver.start_async_saving_ckpt()

    def tearDown(self) -> None:
        if AsyncCheckpointSaver._saver_instance:
            AsyncCheckpointSaver._saver_instance.close()

    def test_create_checkpoint_saver(self):
        # Push a saver class description through the factory queue, then poll
        # (up to ~5 s) until the background thread instantiates the singleton.
        sq = SharedQueue(name='factory', create=False)
        class_meta = SaverClassMeta(module_path=DdpCheckpointSaver.__module__, class_name=DdpCheckpointSaver.__name__, init_args={'checkpoint_dir': 'test_ckpt'})
        sq.put(class_meta)
        for _ in range(10):
            if (AsyncCheckpointSaver._saver_instance is None):
                time.sleep(0.5)
            else:
                break
        self.assertIsNotNone(AsyncCheckpointSaver._saver_instance)

    def test_close_saver(self):
        saver = DdpCheckpointSaver('test_ckpt')
        # Clear any stale segment left over from a previous run (best effort).
        try:
            SharedMemory(name='test').unlink()
        except Exception:
            pass
        saver._shm_handlers[0].shared_memory = SharedMemory(name='test', create=True, size=1024)
        # Closing twice must be idempotent.
        saver.close()
        saver.close()

    def test_traverse_state_dict(self):
        # An identity visitor must leave the (nested) state dict unchanged.
        def visitor(value):
            return value
        model = SimpleNet()
        step = 100
        state_dict = dict(model=model.state_dict(), step=step)
        new_dict = _traverse_state_dict(state_dict, visitor)
        self.assertEqual(new_dict, state_dict)

    def test_create_shared_memory(self):
        # create=False on a non-existent segment yields None.
        shm = _create_shared_memory('test', False)
        self.assertIsNone(shm)
        shm = _create_shared_memory('test-repeat', True, size=10240)
        self.assertEqual(shm.size, 10240)
        # Re-creating with a larger size must grow/replace the segment.
        shm = _create_shared_memory('test-repeat', True, size=102400)
        self.assertEqual(shm.size, 102400)
        shm.unlink()

    def test_save_to_storage(self):
        model = SimpleNet()
        step = 100
        state_dict = dict(model=model.state_dict(), step=step)
        with tempfile.TemporaryDirectory() as tmpdir:
            saver = DdpCheckpointSaver(tmpdir)
            path = (Path(tmpdir) / 'checkpoint.pt')
            ckpt_config = SingleFileCheckpointConfig(step=100, path=path)
            # Write the state dict into shard 0's shared memory and check the
            # metadata the writer left behind.
            saver._shm_handlers[0].save_state_dict(state_dict, ckpt_config)
            meta_dict = saver._shm_handlers[0].metadata.get()
            ckpt_config: CheckpointShardConfig = meta_dict[DLROVER_CKPT_CONFIG_KEY]
            self.assertFalse(ckpt_config.writing_shm)
            self.assertEqual(ckpt_config.step, step)
            saver._shm_handlers[0].shared_memory = SharedMemory(name=saver._shm_handlers[0]._shm_name)
            AsyncCheckpointSaver._saver_instance = saver
            AsyncCheckpointSaver.register_signal_handler()
            # The SIGTERM handler should persist the checkpoint and return...
            handler = signal.getsignal(signal.SIGTERM)
            handler(None, None)
            # ...while the SIGINT handler re-raises KeyboardInterrupt.
            with self.assertRaises(KeyboardInterrupt):
                handler = signal.getsignal(signal.SIGINT)
                handler(None, None)
            ckpt_files = os.listdir(tmpdir)
            # NOTE(review): expects exactly 3 entries after persisting --
            # presumably checkpoint plus tracker/metadata files; confirm
            # against the saver implementation.
            self.assertEqual(len(ckpt_files), 3)
            saver.close()
            # With a non-zero node rank, persist_to_storage must be a no-op.
            saver._node_rank = 1
            saver.persist_to_storage(0, None)

    def test_shard_num_changes(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            saver = DdpCheckpointSaver(tmpdir)
            saver.global_shard_num = 1
            threading.Thread(target=saver._sync_shm_to_storage, daemon=True).start()
            sq = SharedQueue(name='factory', create=True)
            saver._shm_handlers[0].init_shared_memory(create=True, size=1024)
            saver._shm_handlers[0].metadata.set({'step': 100})
            # An UPDATE_SHARD event must drop cached shm / checkpoint state.
            event = CheckpointEvent(type=CheckpointEventType.UPDATE_SHARD, global_shard_num=2)
            saver._event_queue.put(event)
            sq.unlink()
            time.sleep(0.3)
            self.assertTrue(saver._shm_handlers[0].no_checkpint_state())
            self.assertIsNone(saver._shm_handlers[0].shared_memory)
            saver.close()

    def test_commit_checkpoint(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            step_done_dir = os.path.join(tmpdir, '.done/10/')
            os.makedirs(step_done_dir, exist_ok=True)
            saver = DdpCheckpointSaver(tmpdir)
            saver.global_shard_num = 1
            # Commit with fewer done-files than shards; the 2 s timeout path
            # must return without raising.
            saver.commit_checkpoint(100, step_done_dir, 2)
def get_pool_component(pool_name, spatial_size: Tuple[(int, int)]):
    """Look up a pooling module by name.

    'adaptive_avg' / 'adaptive_max' map to the corresponding adaptive pooling
    over ``spatial_size``; None, 'none' and 'identical' all yield a pass-through
    Identical() module. Unknown names raise KeyError.
    """
    pool_registry = {
        'adaptive_avg': nn.AdaptiveAvgPool2d(spatial_size),
        'adaptive_max': nn.AdaptiveMaxPool2d(spatial_size),
        None: Identical(),
        'none': Identical(),
        'identical': Identical(),
    }
    return pool_registry[pool_name]
def to_leaf_format(some_json, start_idx=0):
    """Convert ``{author: [comment, ...]}`` into the LEAF dataset layout.

    Each author gets a fresh numeric user id (as a string, counting up from
    ``start_idx + 1``); their comments become parallel x (body text) and
    y (subreddit / timestamp / score) lists.

    Returns:
        (leaf_json, last_assigned_idx)
    """
    leaf_json = {'users': [], 'num_samples': [], 'user_data': {}}
    new_idx = start_idx
    for user, comments in some_json.items():
        new_idx += 1
        uid = str(new_idx)
        leaf_json['users'].append(uid)
        leaf_json['num_samples'].append(len(comments))
        xs = []
        ys = []
        for comment in comments:
            # Sanity check: every comment must belong to its bucket's author.
            assert (comment.author == user)
            xs.append(comment.body)
            ys.append({'subreddit': comment.subreddit, 'created_utc': comment.created_utc, 'score': comment.score})
        leaf_json['user_data'][uid] = {'x': xs, 'y': ys}
    return (leaf_json, new_idx)
def write_int(f, x, name, *args):
    """Emit a C integer assignment to file-like ``f``.

    With no (truthy) ``args`` a fresh declaration ``c_int name = x;`` is
    written; otherwise each arg becomes a ``->`` dereference prefix, e.g.
    ``write_int(f, 3, 'n', 'cfg')`` emits ``cfg->n = 3;``.
    """
    if not any(args):
        # No access path given: declare a new c_int variable.
        f.write('c_int %s = %i;\n' % (name, x))
        return
    for prefix in args:
        f.write('%s->' % prefix)
    f.write('%s = %i;\n' % (name, x))
class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline):
    """Combined Kandinsky 2.2 inpainting pipeline.

    Wraps the prior pipeline (text -> image embeddings) and the decoder
    inpainting pipeline (embeddings + image + mask -> image) behind a single
    ``__call__``.
    """
    model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->unet->movq'
    _load_connected_pipes = True

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor)
        # Build the two sub-pipelines from the registered modules.
        self.prior_pipe = KandinskyV22PriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor)
        self.decoder_pipe = KandinskyV22InpaintPipeline(unet=unet, scheduler=scheduler, movq=movq)

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
        # Only the decoder runs attention over image latents.
        self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodels to CPU, moving each to GPU only while it runs."""
        self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)

    def progress_bar(self, iterable=None, total=None):
        """Forward progress-bar setup to both sub-pipelines."""
        self.prior_pipe.progress_bar(iterable=iterable, total=total)
        self.decoder_pipe.progress_bar(iterable=iterable, total=total)
        # Bug fix: this method previously also called
        # self.decoder_pipe.enable_model_cpu_offload(), silently changing the
        # offload configuration every time a progress bar was requested.

    def set_progress_bar_config(self, **kwargs):
        self.prior_pipe.set_progress_bar_config(**kwargs)
        self.decoder_pipe.set_progress_bar_config(**kwargs)

    # NOTE(review): these two statements look like mangled decorators for
    # __call__ (upstream diffusers uses @torch.no_grad() and
    # @replace_example_docstring(...)); confirm against the original source.
    _grad()
    _example_docstring(INPAINT_EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[(str, List[str])],
        image: Union[(torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image])],
        mask_image: Union[(torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image])],
        negative_prompt: Optional[Union[(str, List[str])]] = None,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        height: int = 512,
        width: int = 512,
        prior_guidance_scale: float = 4.0,
        prior_num_inference_steps: int = 25,
        generator: Optional[Union[(torch.Generator, List[torch.Generator])]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        prior_callback_on_step_end: Optional[Callable[([int, int, Dict], None)]] = None,
        prior_callback_on_step_end_tensor_inputs: List[str] = ['latents'],
        callback_on_step_end: Optional[Callable[([int, int, Dict], None)]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ['latents'],
        **kwargs,
    ):
        """Run the prior then the decoder inpainting pipeline.

        The prior turns ``prompt``/``negative_prompt`` into image embeddings;
        the decoder then inpaints ``image`` under ``mask_image`` conditioned on
        those embeddings. Returns the decoder pipeline's output.
        """
        # Route deprecated prior_callback / prior_callback_steps kwargs to the
        # prior pipeline, emitting deprecation warnings.
        prior_kwargs = {}
        if kwargs.get('prior_callback', None) is not None:
            prior_kwargs['callback'] = kwargs.pop('prior_callback')
            deprecate('prior_callback', '1.0.0', 'Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`')
        if kwargs.get('prior_callback_steps', None) is not None:
            deprecate('prior_callback_steps', '1.0.0', 'Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`')
            prior_kwargs['callback_steps'] = kwargs.pop('prior_callback_steps')
        prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, **prior_kwargs)
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]
        # Normalize singleton inputs to lists.
        prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt
        # Bug fix: this previously tested `isinstance(prompt, PIL.Image.Image)`,
        # so a single PIL image was never wrapped in a list.
        image = [image] if isinstance(image, PIL.Image.Image) else image
        mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image
        # Tile inputs so their lengths match the number of image embeddings
        # produced by the prior (num_images_per_prompt expansion).
        if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0:
            prompt = (image_embeds.shape[0] // len(prompt)) * prompt
        if isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and image_embeds.shape[0] % len(image) == 0:
            image = (image_embeds.shape[0] // len(image)) * image
        if isinstance(mask_image, (list, tuple)) and len(mask_image) < image_embeds.shape[0] and image_embeds.shape[0] % len(mask_image) == 0:
            mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image
        outputs = self.decoder_pipe(image=image, mask_image=mask_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs)
        # Release any model-offload hooks installed during the run.
        self.maybe_free_model_hooks()
        return outputs
# NOTE(review): looks like a mangled class decorator (e.g.
# @DATASETS.register_module()) from the original source -- confirm.
_module()
class ZeroCOCOStuffDataset(CustomDataset):
    """COCO-Stuff dataset for zero-shot segmentation: metrics are reported
    three ways -- over all classes, over the seen split, and over the
    unseen (zero-shot) split."""
    # 171 COCO-Stuff class names (80 "thing" + 91 "stuff" categories).
    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble', 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', 'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops', 'window-blind', 'window-other', 'wood')
    # One RGB color per class, used for visualization.
    PALETTE = [[240, 128, 128], [0, 192, 64], [0, 64, 96], [128, 192, 192], [0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64], [0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224], [0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192], [0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192], [128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128], [64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0], [0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0], [192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32], [0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128], [128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64], [192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32], [64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0], [0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64], [128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32], [64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128], [128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0], [128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96], [64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0], [0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0], [192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96], [64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128], [128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64], [192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96], [0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0], [64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64], [128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96], [0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128], [192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0], [128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32], [0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64], [64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0], [192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32], [0, 224, 192], [192, 0,
    0], [192, 64, 160], [0, 96, 192], [192, 128, 128], [64, 64, 160], [128, 224, 192], [192, 128, 64], [192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32], [238, 209, 156], [64, 0, 64], [128, 192, 160], [64, 96, 64], [64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64], [128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32], [64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192], [192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0], [128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96], [64, 160, 64], [64, 64, 0]]

    def __init__(self, **kwargs):
        # COCO-Stuff images are .jpg with *_labelTrainIds.png label maps.
        super(ZeroCOCOStuffDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='_labelTrainIds.png', **kwargs)

    def evaluate(self, seen_idx, unseen_idx, results, metric='mIoU', logger=None, gt_seg_maps=None, **kwargs):
        """Evaluate predictions, printing all/seen/unseen metric tables.

        Args:
            seen_idx: class indices observed during training.
            unseen_idx: class indices held out for zero-shot evaluation.
            results: raw per-image predictions (list of ndarray/str) or
                pre-eval results (anything else).
            metric: subset of {'mIoU', 'mDice', 'mFscore'}.
            logger: forwarded to print_log.
            gt_seg_maps: optional pre-loaded ground-truth segmentation maps.

        Returns:
            dict mapping summary and per-class metric names to floats in [0, 1].

        Raises:
            KeyError: if an unsupported metric name is requested.
        """
        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if (not set(metric).issubset(set(allowed_metrics))):
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        # Raw predictions -> compute metrics against ground truth here;
        # otherwise treat `results` as pre-eval results and just aggregate.
        if (mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of(results, str)):
            if (gt_seg_maps is None):
                gt_seg_maps = self.get_gt_seg_maps()
            num_classes = len(self.CLASSES)
            ret_metrics = eval_metrics(results, gt_seg_maps, num_classes, self.ignore_index, metric, label_map=dict(), reduce_zero_label=self.reduce_zero_label)
        else:
            ret_metrics = pre_eval_to_metrics(results, metric)
        if (self.CLASSES is None):
            # NOTE(review): num_classes is only bound in the raw-results branch
            # above; this fallback would raise NameError on the pre-eval path.
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES
        # Resolve index lists into class-name tuples for the split tables.
        seen_class_names = []
        for i in range(len(seen_idx)):
            seen_class_names.append(class_names[seen_idx[i]])
        seen_class_names = tuple(seen_class_names)
        unseen_class_names = []
        for i in range(len(unseen_idx)):
            unseen_class_names.append(class_names[unseen_idx[i]])
        unseen_class_names = tuple(unseen_class_names)
        # Slice per-class IoU/Acc down to the seen and unseen splits.
        seen_ret_metrics = ret_metrics.copy()
        seen_ret_metrics['IoU'] = seen_ret_metrics['IoU'][seen_idx]
        seen_ret_metrics['Acc'] = seen_ret_metrics['Acc'][seen_idx]
        unseen_ret_metrics = ret_metrics.copy()
        unseen_ret_metrics['IoU'] = unseen_ret_metrics['IoU'][unseen_idx]
        unseen_ret_metrics['Acc'] = unseen_ret_metrics['Acc'][unseen_idx]
        # Per-split summaries: nan-mean over classes, scaled to percent.
        ret_metrics_summary = OrderedDict({ret_metric: np.round((np.nanmean(ret_metric_value) * 100), 2) for (ret_metric, ret_metric_value) in ret_metrics.items()})
        seen_ret_metrics_summary = OrderedDict({seen_ret_metric: np.round((np.nanmean(seen_ret_metric_value) * 100), 2) for (seen_ret_metric, seen_ret_metric_value) in seen_ret_metrics.items()})
        unseen_ret_metrics_summary = OrderedDict({unseen_ret_metric: np.round((np.nanmean(unseen_ret_metric_value) * 100), 2) for (unseen_ret_metric, unseen_ret_metric_value) in unseen_ret_metrics.items()})
        # Per-class tables: drop the scalar aAcc, scale to percent, and put
        # the class-name column first.
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({ret_metric: np.round((ret_metric_value * 100), 2) for (ret_metric, ret_metric_value) in ret_metrics.items()})
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)
        seen_ret_metrics.pop('aAcc', None)
        seen_ret_metrics_class = OrderedDict({seen_ret_metric: np.round((seen_ret_metric_value * 100), 2) for (seen_ret_metric, seen_ret_metric_value) in seen_ret_metrics.items()})
        seen_ret_metrics_class.update({'Class': seen_class_names})
        seen_ret_metrics_class.move_to_end('Class', last=False)
        unseen_ret_metrics.pop('aAcc', None)
        unseen_ret_metrics_class = OrderedDict({unseen_ret_metric: np.round((unseen_ret_metric_value * 100), 2) for (unseen_ret_metric, unseen_ret_metric_value) in unseen_ret_metrics.items()})
        unseen_ret_metrics_class.update({'Class': unseen_class_names})
        unseen_ret_metrics_class.move_to_end('Class', last=False)
        # --- All classes: pretty-print per-class and summary tables. ---
        print(('\n' + ' Total classes '))
        class_table_data = PrettyTable()
        for (key, val) in ret_metrics_class.items():
            class_table_data.add_column(key, val)
        summary_table_data = PrettyTable()
        for (key, val) in ret_metrics_summary.items():
            if (key == 'aAcc'):
                summary_table_data.add_column(key, [val])
            else:
                summary_table_data.add_column(('m' + key), [val])
        print_log('per class results:', logger)
        print_log(class_table_data.get_string(), logger=logger)
        print_log('Summary:', logger)
        print_log(summary_table_data.get_string(), logger=logger)
        # --- Seen classes. ---
        print(('\n' + ' Seen classes '))
        seen_class_table_data = PrettyTable()
        for (key, val) in seen_ret_metrics_class.items():
            seen_class_table_data.add_column(key, val)
        seen_summary_table_data = PrettyTable()
        for (key, val) in seen_ret_metrics_summary.items():
            if (key == 'aAcc'):
                seen_summary_table_data.add_column(key, [val])
            else:
                seen_summary_table_data.add_column(('m' + key), [val])
        print_log('seen per class results:', logger)
        print_log(seen_class_table_data.get_string(), logger=logger)
        print_log('Seen Summary:', logger)
        print_log(seen_summary_table_data.get_string(), logger=logger)
        # --- Unseen classes. ---
        print(('\n' + ' Unseen classes '))
        unseen_class_table_data = PrettyTable()
        for (key, val) in unseen_ret_metrics_class.items():
            unseen_class_table_data.add_column(key, val)
        unseen_summary_table_data = PrettyTable()
        for (key, val) in unseen_ret_metrics_summary.items():
            if (key == 'aAcc'):
                unseen_summary_table_data.add_column(key, [val])
            else:
                unseen_summary_table_data.add_column(('m' + key), [val])
        print_log('unseen per class results:', logger)
        print_log(unseen_class_table_data.get_string(), logger=logger)
        print_log('Unseen Summary:', logger)
        print_log(unseen_summary_table_data.get_string(), logger=logger)
        # Fill the returned dict with fractions (0..1) over ALL classes only.
        for (key, value) in ret_metrics_summary.items():
            if (key == 'aAcc'):
                eval_results[key] = (value / 100.0)
            else:
                eval_results[('m' + key)] = (value / 100.0)
        ret_metrics_class.pop('Class', None)
        for (key, value) in ret_metrics_class.items():
            eval_results.update({((key + '.') + str(name)): (value[idx] / 100.0) for (idx, name) in enumerate(class_names)})
        return eval_results
def best_eer(val_scores, utt2len, utt2label, key_list):
    """Search the decision threshold minimizing utterance-level EER.

    Runs scipy basin hopping (L-BFGS-B local steps, thresholds boxed to
    [0, 1]) over utt_eer and returns (best_eer_value, best_threshold_array).
    """
    def objective(threshold):
        # Utterance-level EER at the candidate threshold.
        return utt_eer(val_scores, utt2len, utt2label, key_list, threshold)

    initial_thr = [0.2]
    box = [(0.0, 1.0)]

    def within_bounds(**kwargs):
        # Accept a basin-hopping step only if it stays inside [0, 1].
        candidate = kwargs['x_new']
        return bool(np.all(candidate <= 1)) and bool(np.all(candidate >= 0))

    local_opts = {'method': 'L-BFGS-B', 'bounds': box, 'options': {'eps': 0.05}}
    logger.info('===> Searching optimal threshold for each label')
    start_time = timer()
    result = basinhopping(
        objective,
        initial_thr,
        stepsize=0.1,
        minimizer_kwargs=local_opts,
        niter=10,
        accept_test=within_bounds,
    )
    end_time = timer()
    logger.info('===> Optimal threshold for each label:\n{}'.format(result.x))
    logger.info('Threshold found in: %s seconds' % (end_time - start_time))
    return (result.fun, result.x)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.