code stringlengths 101 5.91M |
|---|
def rcEvaluator(rules: Iterable[Rule], labelSettings: LabelSettings=_lsString) -> RCEvaluator:
    """Create an :class:`RCEvaluator` over the given rules.

    The rules are first packed into the native vector type expected by the
    C++ extension module, then handed to the underlying evaluator factory.
    """
    nativeRules = _wrap(libpymod._VecRule, rules)
    return libpymod._rcEvaluator(nativeRules, labelSettings)
def get_grad(params):
    """Return all gradients of ``params`` flattened into one 1-D CPU tensor.

    Args:
        params: a single tensor or an iterable of tensors. Entries whose
            ``.grad`` is ``None`` are skipped.

    Returns:
        torch.Tensor: the concatenation of every present gradient, each
        detached, moved to CPU and flattened.

    Raises:
        RuntimeError: if no parameter has a gradient (``torch.cat`` on an
        empty list), matching the original behavior.
    """
    if isinstance(params, torch.Tensor):
        params = [params]
    # Comprehension replaces the filter(lambda)+map pipeline; .detach() is the
    # modern equivalent of the legacy .data access (read-only use here).
    grads = [p.grad.detach().cpu().view(-1) for p in params if p.grad is not None]
    return torch.cat(grads)
class NLIReader(object):
    """Base reader for NLI (SNLI/MultiNLI-style) JSONL corpora.

    Subclasses implement :meth:`read_sentences`; :meth:`read_line` parses a
    single JSON line into a dict holding both binary-parsed sentences, their
    transition sequences and the integer gold label.
    """

    # Gold-label string -> integer class id.
    LABEL_MAP = {'entailment': 0, 'neutral': 1, 'contradiction': 2}

    def __init__(self, lowercase=True, filter_length=0):
        self.lowercase = lowercase
        # filter_length=None is normalised to 0 (i.e. no length filtering).
        self.filter_length = filter_length if filter_length is not None else 0

    @staticmethod
    def build(lowercase=True, filter_length=0):
        """Factory for the concrete sentence reader.

        Bug fix: the arguments are now forwarded to ``NLISentenceReader``
        instead of being silently replaced by the hard-coded defaults; also
        made an explicit ``@staticmethod`` (the original took no ``self``).
        """
        return NLISentenceReader(lowercase=lowercase, filter_length=filter_length)

    def read(self, filename):
        return self.read_sentences(filename)

    def read_sentences(self, filename):
        raise NotImplementedError

    def read_line(self, line):
        """Parse one JSONL record.

        Returns ``None`` when the record has no usable gold label (missing
        'gold_label' key or a value outside LABEL_MAP, e.g. '-'), otherwise a
        dict with keys s1, s2, t1, t2, label and example_id.
        """
        example = json.loads(line)
        try:
            label = self.read_label(example['gold_label'])
        except KeyError:
            # Narrowed from a bare `except:` — only a missing key or an
            # unknown label should turn into a skipped example.
            return None
        (s1, t1) = convert_binary_bracketing(example['sentence1_binary_parse'], lowercase=self.lowercase)
        (s2, t2) = convert_binary_bracketing(example['sentence2_binary_parse'], lowercase=self.lowercase)
        example_id = example['pairID']
        return dict(s1=s1, label=label, s2=s2, t1=t1, t2=t2, example_id=example_id)

    def read_label(self, label):
        return self.LABEL_MAP[label]
def build_dataset(dataset_list, transforms, dataset_catalog, is_train=True):
    """Instantiate every dataset named in ``dataset_list``.

    At training time all datasets are merged into a single ConcatDataset and
    returned as a one-element list; at test time the individual datasets are
    returned as-is.
    """
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError('dataset_list should be a list of strings, got {}'.format(dataset_list))

    datasets = []
    for dataset_name in dataset_list:
        info = dataset_catalog.get(dataset_name)
        factory = getattr(D, info['factory'])
        kwargs = info['args']
        # Per-factory argument tweaks (a factory matches at most one branch).
        if info['factory'] in ('COCODataset', 'WordDataset'):
            kwargs['remove_images_without_annotations'] = is_train
        elif info['factory'] == 'PascalVOCDataset':
            kwargs['use_difficult'] = not is_train
        kwargs['transforms'] = transforms
        datasets.append(factory(**kwargs))

    if not is_train:
        # Evaluation keeps datasets separate so they can be scored per-set.
        return datasets

    merged = datasets[0] if len(datasets) == 1 else D.ConcatDataset(datasets)
    return [merged]
# NOTE(review): this bare `_sz(2)` call is almost certainly a decorator that
# lost its '@' during extraction (i.e. `@_sz(2)` applied to `linear`) — as
# written it runs at import time and discards its result; confirm upstream.
_sz(2)
def linear(x):
    # Piecewise-linear "tent" kernel: (x + 1) on [-1, 0), (1 - x) on [0, 1],
    # and 0 elsewhere. `to_dtype` casts the boolean masks to x's dtype;
    # `fw` and `eps` are unpacked but unused here.
    (fw, to_dtype, eps) = set_framework_dependencies(x)
    return (((x + 1) * to_dtype((((- 1) <= x) & (x < 0)))) + ((1 - x) * to_dtype(((0 <= x) & (x <= 1)))))
class Pip_resnet18(nn.Module):
    """PIP-style facial-landmark head on a ResNet-18 trunk.

    Five 1x1 conv heads predict, per landmark: a score map (`cls_layer`),
    x/y offsets (`x_layer`/`y_layer`) and x/y offsets of the ``num_nb``
    neighbouring landmarks (`nb_x_layer`/`nb_y_layer`).

    Args:
        resnet: a ResNet-18-like module providing conv1/bn1/maxpool/layer1-4.
        num_nb: number of neighbour landmarks predicted per landmark.
        num_lms: number of landmarks.
        input_size: nominal input resolution (stored, not used in forward).
        net_stride: output stride of the feature map; one of 16/32/64/128.

    Raises:
        ValueError: for an unsupported ``net_stride`` (the original code
        printed a message and called ``exit(0)``, terminating the whole
        process with a *success* code on a configuration error).
    """

    def __init__(self, resnet, num_nb, num_lms=68, input_size=256, net_stride=32):
        super(Pip_resnet18, self).__init__()
        self.num_nb = num_nb
        self.num_lms = num_lms
        self.input_size = input_size
        self.net_stride = net_stride
        # Reuse the trunk's stem and residual stages.
        self.conv1 = resnet.conv1
        self.bn1 = resnet.bn1
        self.maxpool = resnet.maxpool
        self.sigmoid = nn.Sigmoid()
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4
        # Extra layers adapt the trunk's native stride-32 output to the
        # requested stride (downsample for 64/128, upsample for 16).
        if self.net_stride == 128:
            self.layer5 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
            self.bn5 = nn.BatchNorm2d(512)
            self.layer6 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
            self.bn6 = nn.BatchNorm2d(512)
            self._init_conv(self.layer5)
            self._init_bn(self.bn5)
            self._init_conv(self.layer6)
            self._init_bn(self.bn6)
        elif self.net_stride == 64:
            self.layer5 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)
            self.bn5 = nn.BatchNorm2d(512)
            self._init_conv(self.layer5)
            self._init_bn(self.bn5)
        elif self.net_stride == 32:
            pass  # trunk output already has the right stride
        elif self.net_stride == 16:
            self.deconv1 = nn.ConvTranspose2d(512, 512, kernel_size=4, stride=2, padding=1, bias=False)
            self.bn_deconv1 = nn.BatchNorm2d(512)
            self._init_conv(self.deconv1)
            self._init_bn(self.bn_deconv1)
        else:
            raise ValueError('No such net_stride: {}'.format(net_stride))
        # Prediction heads: one output channel per landmark (or per
        # landmark-neighbour pair for the nb_* heads).
        self.cls_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
        self.x_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
        self.y_layer = nn.Conv2d(512, num_lms, kernel_size=1, stride=1, padding=0)
        self.nb_x_layer = nn.Conv2d(512, (num_nb * num_lms), kernel_size=1, stride=1, padding=0)
        self.nb_y_layer = nn.Conv2d(512, (num_nb * num_lms), kernel_size=1, stride=1, padding=0)
        for head in (self.cls_layer, self.x_layer, self.y_layer, self.nb_x_layer, self.nb_y_layer):
            self._init_conv(head)

    @staticmethod
    def _init_conv(conv):
        """Init a conv layer: Normal(0, 0.001) weights, zero bias when present."""
        nn.init.normal_(conv.weight, std=0.001)
        if conv.bias is not None:
            nn.init.constant_(conv.bias, 0)

    @staticmethod
    def _init_bn(bn):
        """Init a batch-norm layer to the identity transform (weight 1, bias 0)."""
        nn.init.constant_(bn.weight, 1)
        nn.init.constant_(bn.bias, 0)

    def forward(self, x):
        """Return the five head outputs (cls, x, y, nb_x, nb_y) as a tuple."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Apply the stride-adaptation layers built in __init__ (none for 32).
        if self.net_stride == 128:
            x = F.relu(self.bn5(self.layer5(x)))
            x = F.relu(self.bn6(self.layer6(x)))
        elif self.net_stride == 64:
            x = F.relu(self.bn5(self.layer5(x)))
        elif self.net_stride == 16:
            x = F.relu(self.bn_deconv1(self.deconv1(x)))
        else:
            pass
        x1 = self.cls_layer(x)
        x2 = self.x_layer(x)
        x3 = self.y_layer(x)
        x4 = self.nb_x_layer(x)
        x5 = self.nb_y_layer(x)
        return (x1, x2, x3, x4, x5)
def test_operator_new_delete(capture):
    """Check that class-specific operator new/delete overloads are invoked
    with the expected sizes, including for the alias (trampoline) class.
    """

    class SubAliased(m.AliasedHasOpNewDelSize):
        pass

    # Allocation: each bound class logs through its own operator new.
    with capture:
        a = m.HasOpNewDel()
        b = m.HasOpNewDelSize()
        d = m.HasOpNewDelBoth()
    assert (capture == '\n A new 8\n B new 4\n D new 32\n ')
    sz_alias = str(m.AliasedHasOpNewDelSize.size_alias)
    sz_noalias = str(m.AliasedHasOpNewDelSize.size_noalias)
    # The Python subclass forces the alias type, so a different size is logged.
    with capture:
        c = m.AliasedHasOpNewDelSize()
        c2 = SubAliased()
    assert (capture == ((((('C new ' + sz_noalias) + '\n') + 'C new ') + sz_alias) + '\n'))
    # Deallocation: gc_collect after each del so destructor output is ordered.
    with capture:
        del a
        pytest.gc_collect()
        del b
        pytest.gc_collect()
        del d
        pytest.gc_collect()
    assert (capture == '\n A delete\n B delete 4\n D delete\n ')
    with capture:
        del c
        pytest.gc_collect()
        del c2
        pytest.gc_collect()
    assert (capture == ((((('C delete ' + sz_noalias) + '\n') + 'C delete ') + sz_alias) + '\n'))
class MobileNetV3(object):
    """MobileNetV3 backbone (PaddlePaddle fluid static-graph implementation).

    Builds the 'large' or 'small' variant at width multiplier ``scale`` and,
    while being called, records intermediate tensors whose running
    block-stride counter is listed in ``feature_maps``; ``__call__`` returns
    them as an OrderedDict of named feature maps.
    """

    # Attribute names shared with other config-driven modules.
    __shared__ = ['norm_type']

    def __init__(self, scale=1.0, model_name='small', feature_maps=[5, 6, 7, 8, 9, 10], conv_decay=0.0, norm_type='bn', norm_decay=0.0, extra_block_filters=[[256, 512], [128, 256], [128, 256], [64, 128]], lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], freeze_norm=False):
        # NOTE(review): the list defaults are mutable and shared across
        # instances — safe only as long as they are never mutated in place.
        if isinstance(feature_maps, Integral):
            feature_maps = [feature_maps]
        self.scale = scale
        self.model_name = model_name
        self.feature_maps = feature_maps
        self.extra_block_filters = extra_block_filters
        self.conv_decay = conv_decay
        self.norm_decay = norm_decay
        self.inplanes = 16
        self.end_points = []
        # Incremented at every stride-2 block; compared against feature_maps.
        self.block_stride = 0
        self.lr_mult_list = lr_mult_list
        self.freeze_norm = freeze_norm
        self.norm_type = norm_type
        # Index of the current residual unit; selects the LR multiplier.
        self.curr_stage = 0
        # cfg rows: [kernel_size, expansion_ch, output_ch, use_se, act, stride]
        if (model_name == 'large'):
            self.cfg = [[3, 16, 16, False, 'relu', 1], [3, 64, 24, False, 'relu', 2], [3, 72, 24, False, 'relu', 1], [5, 72, 40, True, 'relu', 2], [5, 120, 40, True, 'relu', 1], [5, 120, 40, True, 'relu', 1], [3, 240, 80, False, 'hard_swish', 2], [3, 200, 80, False, 'hard_swish', 1], [3, 184, 80, False, 'hard_swish', 1], [3, 184, 80, False, 'hard_swish', 1], [3, 480, 112, True, 'hard_swish', 1], [3, 672, 112, True, 'hard_swish', 1], [5, 672, 160, True, 'hard_swish', 2], [5, 960, 160, True, 'hard_swish', 1], [5, 960, 160, True, 'hard_swish', 1]]
            self.cls_ch_squeeze = 960
            self.cls_ch_expand = 1280
        elif (model_name == 'small'):
            self.cfg = [[3, 16, 16, True, 'relu', 2], [3, 72, 24, False, 'relu', 2], [3, 88, 24, False, 'relu', 1], [5, 96, 40, True, 'hard_swish', 2], [5, 240, 40, True, 'hard_swish', 1], [5, 240, 40, True, 'hard_swish', 1], [5, 120, 48, True, 'hard_swish', 1], [5, 144, 48, True, 'hard_swish', 1], [5, 288, 96, True, 'hard_swish', 2], [5, 576, 96, True, 'hard_swish', 1], [5, 576, 96, True, 'hard_swish', 1]]
            self.cls_ch_squeeze = 576
            self.cls_ch_expand = 1280
        else:
            raise NotImplementedError

    def _conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True, act=None, name=None, use_cudnn=True):
        """Conv2d (bias-free) + batch norm + optional activation.

        The learning-rate multiplier is chosen per stage from lr_mult_list
        (one entry per group of 3 residual units).
        """
        lr_idx = (self.curr_stage // 3)
        lr_idx = min(lr_idx, (len(self.lr_mult_list) - 1))
        lr_mult = self.lr_mult_list[lr_idx]
        conv = fluid.layers.conv2d(input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=padding, groups=num_groups, act=None, use_cudnn=use_cudnn, param_attr=ParamAttr(name=(name + '_weights'), learning_rate=lr_mult, regularizer=L2Decay(self.conv_decay)), bias_attr=False)
        bn_name = (name + '_bn')
        bn = self._bn(conv, bn_name=bn_name)
        if if_act:
            if (act == 'relu'):
                bn = fluid.layers.relu(bn)
            elif (act == 'hard_swish'):
                bn = self._hard_swish(bn)
            elif (act == 'relu6'):
                bn = fluid.layers.relu6(bn)
        return bn

    def _bn(self, input, act=None, bn_name=None):
        """Batch norm (or affine_channel) with per-stage LR and optional freezing."""
        lr_idx = (self.curr_stage // 3)
        lr_idx = min(lr_idx, (len(self.lr_mult_list) - 1))
        lr_mult = self.lr_mult_list[lr_idx]
        # Frozen norm layers get a zero learning rate on scale/offset.
        norm_lr = (0.0 if self.freeze_norm else lr_mult)
        norm_decay = self.norm_decay
        pattr = ParamAttr(name=(bn_name + '_scale'), learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
        battr = ParamAttr(name=(bn_name + '_offset'), learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
        conv = input
        if (self.norm_type in ['bn', 'sync_bn']):
            global_stats = (True if self.freeze_norm else False)
            out = fluid.layers.batch_norm(input=conv, act=act, name=(bn_name + '.output.1'), param_attr=pattr, bias_attr=battr, moving_mean_name=(bn_name + '_mean'), moving_variance_name=(bn_name + '_variance'), use_global_stats=global_stats)
            scale = fluid.framework._get_var(pattr.name)
            bias = fluid.framework._get_var(battr.name)
        elif (self.norm_type == 'affine_channel'):
            scale = fluid.layers.create_parameter(shape=[conv.shape[1]], dtype=conv.dtype, attr=pattr, default_initializer=fluid.initializer.Constant(1.0))
            bias = fluid.layers.create_parameter(shape=[conv.shape[1]], dtype=conv.dtype, attr=battr, default_initializer=fluid.initializer.Constant(0.0))
            out = fluid.layers.affine_channel(x=conv, scale=scale, bias=bias, act=act)
        if self.freeze_norm:
            # Stop gradients so frozen norm parameters are never updated.
            scale.stop_gradient = True
            bias.stop_gradient = True
        return out

    def _hard_swish(self, x):
        # hard_swish(x) = x * relu6(x + 3) / 6
        return ((x * fluid.layers.relu6((x + 3))) / 6.0)

    def _se_block(self, input, num_out_filter, ratio=4, name=None):
        """Squeeze-and-excitation: global pool -> 1x1 reduce -> 1x1 expand -> rescale."""
        lr_idx = (self.curr_stage // 3)
        lr_idx = min(lr_idx, (len(self.lr_mult_list) - 1))
        lr_mult = self.lr_mult_list[lr_idx]
        num_mid_filter = int((num_out_filter // ratio))
        pool = fluid.layers.pool2d(input=input, pool_type='avg', global_pooling=True, use_cudnn=False)
        conv1 = fluid.layers.conv2d(input=pool, filter_size=1, num_filters=num_mid_filter, act='relu', param_attr=ParamAttr(name=(name + '_1_weights'), learning_rate=lr_mult, regularizer=L2Decay(self.conv_decay)), bias_attr=ParamAttr(name=(name + '_1_offset'), learning_rate=lr_mult, regularizer=L2Decay(self.conv_decay)))
        conv2 = fluid.layers.conv2d(input=conv1, filter_size=1, num_filters=num_out_filter, act='hard_sigmoid', param_attr=ParamAttr(name=(name + '_2_weights'), learning_rate=lr_mult, regularizer=L2Decay(self.conv_decay)), bias_attr=ParamAttr(name=(name + '_2_offset'), learning_rate=lr_mult, regularizer=L2Decay(self.conv_decay)))
        scale = fluid.layers.elementwise_mul(x=input, y=conv2, axis=0)
        return scale

    def _residual_unit(self, input, num_in_filter, num_mid_filter, num_out_filter, stride, filter_size, act=None, use_se=False, name=None):
        """Inverted residual: 1x1 expand -> depthwise -> (SE) -> 1x1 linear.

        Adds the identity shortcut only when input/output channels and stride
        allow it. Also records the expanded tensor as an end point when this
        unit performs the stride-2 step out of block-stride 4.
        """
        input_data = input
        conv0 = self._conv_bn_layer(input=input, filter_size=1, num_filters=num_mid_filter, stride=1, padding=0, if_act=True, act=act, name=(name + '_expand'))
        if ((self.block_stride == 4) and (stride == 2)):
            self.block_stride += 1
            if (self.block_stride in self.feature_maps):
                self.end_points.append(conv0)
        conv1 = self._conv_bn_layer(input=conv0, filter_size=filter_size, num_filters=num_mid_filter, stride=stride, padding=int(((filter_size - 1) // 2)), if_act=True, act=act, num_groups=num_mid_filter, use_cudnn=False, name=(name + '_depthwise'))
        if use_se:
            conv1 = self._se_block(input=conv1, num_out_filter=num_mid_filter, name=(name + '_se'))
        conv2 = self._conv_bn_layer(input=conv1, filter_size=1, num_filters=num_out_filter, stride=1, padding=0, if_act=False, name=(name + '_linear'))
        if ((num_in_filter != num_out_filter) or (stride != 1)):
            return conv2
        else:
            return fluid.layers.elementwise_add(x=input_data, y=conv2, act=None)

    def _extra_block_dw(self, input, num_filters1, num_filters2, stride, name=None):
        """Extra SSD-style block: 1x1 pointwise -> 3x3 depthwise -> 1x1 separable."""
        pointwise_conv = self._conv_bn_layer(input=input, filter_size=1, num_filters=int(num_filters1), stride=1, padding='SAME', act='relu6', name=(name + '_extra1'))
        depthwise_conv = self._conv_bn_layer(input=pointwise_conv, filter_size=3, num_filters=int(num_filters2), stride=stride, padding='SAME', num_groups=int(num_filters1), act='relu6', use_cudnn=False, name=(name + '_extra2_dw'))
        normal_conv = self._conv_bn_layer(input=depthwise_conv, filter_size=1, num_filters=int(num_filters2), stride=1, padding='SAME', act='relu6', name=(name + '_extra2_sep'))
        return normal_conv

    def _make_divisible(self, v, divisor=8, min_value=None):
        """Round channel count to a multiple of `divisor`, never below 90% of `v`."""
        if (min_value is None):
            min_value = divisor
        new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
        if (new_v < (0.9 * v)):
            new_v += divisor
        return new_v

    def __call__(self, input):
        """Build the network on `input`; return OrderedDict of collected feature maps."""
        scale = self.scale
        inplanes = self.inplanes
        cfg = self.cfg
        blocks = []
        # Stem: 3x3 stride-2 conv.
        conv = self._conv_bn_layer(input, filter_size=3, num_filters=self._make_divisible((inplanes * scale)), stride=2, padding=1, num_groups=1, if_act=True, act='hard_swish', name='conv1')
        i = 0
        inplanes = self._make_divisible((inplanes * scale))
        for layer_cfg in cfg:
            # A stride-2 unit starts a new block: bump the counter and
            # possibly record the tensor feeding this unit.
            if (layer_cfg[5] == 2):
                self.block_stride += 1
                if (self.block_stride in self.feature_maps):
                    self.end_points.append(conv)
            conv = self._residual_unit(input=conv, num_in_filter=inplanes, num_mid_filter=self._make_divisible((scale * layer_cfg[1])), num_out_filter=self._make_divisible((scale * layer_cfg[2])), act=layer_cfg[4], stride=layer_cfg[5], filter_size=layer_cfg[0], use_se=layer_cfg[3], name=('conv' + str((i + 2))))
            inplanes = self._make_divisible((scale * layer_cfg[2]))
            i += 1
            self.curr_stage += 1
        self.block_stride += 1
        if (self.block_stride in self.feature_maps):
            self.end_points.append(conv)
        # Final 1x1 expansion after the residual stack.
        conv_extra = self._conv_bn_layer(conv, filter_size=1, num_filters=self._make_divisible((scale * cfg[(- 1)][1])), stride=1, padding='SAME', num_groups=1, if_act=True, act='hard_swish', name=('conv' + str((i + 2))))
        self.block_stride += 1
        if (self.block_stride in self.feature_maps):
            self.end_points.append(conv_extra)
        i += 1
        # Extra stride-2 blocks (detector necks such as SSD use these).
        for block_filter in self.extra_block_filters:
            conv_extra = self._extra_block_dw(conv_extra, block_filter[0], block_filter[1], 2, ('conv' + str((i + 2))))
            self.block_stride += 1
            if (self.block_stride in self.feature_maps):
                self.end_points.append(conv_extra)
            i += 1
        return OrderedDict([('mbv3_{}'.format(idx), feat) for (idx, feat) in enumerate(self.end_points)])
class ROIBoxHead(torch.nn.Module):
    """Box head: pooled feature extraction, box prediction and post-processing.

    When ``BBAM`` is set, intermediate tensors (proposals, class logits, box
    regression) are stashed on ``self.return_for_BBAM`` and also returned
    from :meth:`forward` for attribution purposes.
    """

    def __init__(self, cfg, in_channels, BBAM=False):
        super(ROIBoxHead, self).__init__()
        self.BBAM = BBAM
        self.feature_extractor = make_roi_box_feature_extractor(cfg, in_channels)
        self.predictor = make_roi_box_predictor(cfg, self.feature_extractor.out_channels)
        self.post_processor = make_roi_box_post_processor(cfg, BBAM=BBAM)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)

    def forward(self, features, proposals, targets=None, return_index=False):
        """Run the box head.

        Returns (x, proposals, loss_dict) in training; in evaluation returns
        (x, detections, {}) plus the BBAM extras (and selected indices when
        ``return_index``) if ``BBAM`` is enabled.
        """
        if self.BBAM:
            self.return_for_BBAM = {}
        if self.training:
            # Subsample proposals (pos/neg balancing) without tracking grads.
            with torch.no_grad():
                proposals = self.loss_evaluator.subsample(proposals, targets)
        x = self.feature_extractor(features, proposals)
        (class_logits, box_regression) = self.predictor(x)
        if self.BBAM:
            self.return_for_BBAM['proposals'] = proposals
            self.return_for_BBAM['class_logits'] = class_logits
            self.return_for_BBAM['box_regression'] = box_regression
        if (not self.training):
            # Inference path: decode boxes; BBAM variants also return extras.
            if self.BBAM:
                if return_index:
                    (result, self.return_for_BBAM, selected_idxs) = self.post_processor((class_logits, box_regression), proposals, self.return_for_BBAM, return_index=return_index)
                else:
                    (result, self.return_for_BBAM) = self.post_processor((class_logits, box_regression), proposals, self.return_for_BBAM, return_index=return_index)
            else:
                result = self.post_processor((class_logits, box_regression), proposals)
            if self.BBAM:
                if return_index:
                    return (x, result, {}, self.return_for_BBAM, selected_idxs)
                else:
                    return (x, result, {}, self.return_for_BBAM)
            else:
                return (x, result, {})
        (loss_classifier, loss_box_reg) = self.loss_evaluator([class_logits], [box_regression])
        return (x, proposals, dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg))
# NOTE(review): this bare call is very likely a class decorator that lost its
# '@' during extraction (cf. transformers' `@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)`
# applied to PreTrainedTokenizerBase); as written it runs at import time and
# discards its result — confirm against the original source.
_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
    """Base class for tokenizers: shared configuration plus loading/saving helpers."""

    # file-id -> required vocabulary file name for this tokenizer class.
    vocab_files_names: Dict[(str, str)] = {}
    # Presumably model-id -> (file-id -> location) maps for hosted vocabularies
    # (unused in this excerpt — confirm against the full class).
    pretrained_vocab_files_map: Dict[(str, Dict[(str, str)])] = {}
    pretrained_init_configuration: Dict[(str, Dict[(str, Any)])] = {}
    # model-id -> maximum input length; consulted in _from_pretrained.
    max_model_input_sizes: Dict[(str, Optional[int])] = {}
    _auto_class: Optional[str] = None
    # Names of the tensors the model forward expects; overridable via kwargs.
    model_input_names: List[str] = ['input_ids', 'token_type_ids', 'attention_mask']
    # Class-level defaults; instances may override both in __init__.
    padding_side: str = 'right'
    truncation_side: str = 'right'
    # Slow counterpart class used as a conversion source by fast tokenizers.
    slow_tokenizer_class = None
def __init__(self, **kwargs):
self.init_inputs = ()
self.init_kwargs = copy.deepcopy(kwargs)
self.name_or_path = kwargs.pop('name_or_path', '')
self._processor_class = kwargs.pop('processor_class', None)
model_max_length = kwargs.pop('model_max_length', kwargs.pop('max_len', None))
self.model_max_length = (model_max_length if (model_max_length is not None) else VERY_LARGE_INTEGER)
self.padding_side = kwargs.pop('padding_side', self.padding_side)
if (self.padding_side not in ['right', 'left']):
raise ValueError(f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}")
self.truncation_side = kwargs.pop('truncation_side', self.truncation_side)
if (self.truncation_side not in ['right', 'left']):
raise ValueError(f"Padding side should be selected between 'right' and 'left', current value: {self.truncation_side}")
self.model_input_names = kwargs.pop('model_input_names', self.model_input_names)
self.clean_up_tokenization_spaces = kwargs.pop('clean_up_tokenization_spaces', True)
self.deprecation_warnings = {}
self._in_target_context_manager = False
super().__init__(**kwargs)
def max_len_single_sentence(self) -> int:
return (self.model_max_length - self.num_special_tokens_to_add(pair=False))
def max_len_sentences_pair(self) -> int:
return (self.model_max_length - self.num_special_tokens_to_add(pair=True))
_len_single_sentence.setter
def max_len_single_sentence(self, value) -> int:
if ((value == (self.model_max_length - self.num_special_tokens_to_add(pair=False))) and self.verbose):
if (not self.deprecation_warnings.get('max_len_single_sentence', False)):
logger.warning("Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up.")
self.deprecation_warnings['max_len_single_sentence'] = True
else:
raise ValueError("Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up.")
_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> int:
if ((value == (self.model_max_length - self.num_special_tokens_to_add(pair=True))) and self.verbose):
if (not self.deprecation_warnings.get('max_len_sentences_pair', False)):
logger.warning("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
self.deprecation_warnings['max_len_sentences_pair'] = True
else:
raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
    def _set_processor_class(self, processor_class: str):
        """Record the name of the processor class this tokenizer belongs to."""
        self._processor_class = processor_class
    def __repr__(self) -> str:
        """Summarise the tokenizer's identity and key configuration."""
        return f"{self.__class__.__name__}(name_or_path='{self.name_or_path}', vocab_size={self.vocab_size}, model_max_length={self.model_max_length}, is_fast={self.is_fast}, padding_side='{self.padding_side}', truncation_side='{self.truncation_side}', special_tokens={self.special_tokens_map_extended}, clean_up_tokenization_spaces={self.clean_up_tokenization_spaces})"
    def __len__(self) -> int:
        """Vocabulary size including added tokens; implemented by subclasses."""
        raise NotImplementedError()
    def get_vocab(self) -> Dict[(str, int)]:
        """Return the token -> index vocabulary; implemented by subclasses."""
        raise NotImplementedError()
def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], *init_inputs, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', False)
proxies = kwargs.pop('proxies', None)
local_files_only = kwargs.pop('local_files_only', False)
use_auth_token = kwargs.pop('use_auth_token', None)
revision = kwargs.pop('revision', None)
subfolder = kwargs.pop('subfolder', None)
from_pipeline = kwargs.pop('_from_pipeline', None)
from_auto_class = kwargs.pop('_from_auto', False)
commit_hash = kwargs.pop('_commit_hash', None)
user_agent = {'file_type': 'tokenizer', 'from_auto_class': from_auto_class, 'is_fast': ('Fast' in cls.__name__)}
if (from_pipeline is not None):
user_agent['using_pipeline'] = from_pipeline
if (is_offline_mode() and (not local_files_only)):
logger.info('Offline mode: forcing local_files_only=True')
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
vocab_files = {}
init_configuration = {}
is_local = os.path.isdir(pretrained_model_name_or_path)
single_file_id = None
if (os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path)):
if (len(cls.vocab_files_names) > 1):
raise ValueError(f'Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not supported for this tokenizer. Use a model identifier or the path to a directory instead.')
warnings.warn(f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is deprecated and won't be possible anymore in v5. Use a model identifier or the path to a directory instead.", FutureWarning)
file_id = list(cls.vocab_files_names.keys())[0]
vocab_files[file_id] = pretrained_model_name_or_path
single_file_id = file_id
else:
additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE}
vocab_files = {**cls.vocab_files_names, **additional_files_names}
if ('tokenizer_file' in vocab_files):
fast_tokenizer_file = FULL_TOKENIZER_FILE
resolved_config_file = cached_file(pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, user_agent=user_agent, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, _commit_hash=commit_hash)
commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
if (resolved_config_file is not None):
with open(resolved_config_file, encoding='utf-8') as reader:
tokenizer_config = json.load(reader)
if ('fast_tokenizer_files' in tokenizer_config):
fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config['fast_tokenizer_files'])
vocab_files['tokenizer_file'] = fast_tokenizer_file
resolved_vocab_files = {}
unresolved_files = []
for (file_id, file_path) in vocab_files.items():
if (file_path is None):
resolved_vocab_files[file_id] = None
elif (single_file_id == file_id):
if os.path.isfile(file_path):
resolved_vocab_files[file_id] = file_path
elif is_remote_url(file_path):
resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies)
else:
resolved_vocab_files[file_id] = cached_file(pretrained_model_name_or_path, file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, _commit_hash=commit_hash)
commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
if (len(unresolved_files) > 0):
logger.info(f"Can't load following files from cache: {unresolved_files} and cannot check if these files are necessary for the tokenizer to operate.")
if all(((full_file_name is None) for full_file_name in resolved_vocab_files.values())):
raise EnvironmentError(f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from ' make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing all relevant files for a {cls.__name__} tokenizer.")
for (file_id, file_path) in vocab_files.items():
if (file_id not in resolved_vocab_files):
continue
if is_local:
logger.info(f'loading file {file_path}')
else:
logger.info(f'loading file {file_path} from cache at {resolved_vocab_files[file_id]}')
return cls._from_pretrained(resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=commit_hash, **kwargs)
def _from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, *init_inputs, use_auth_token=None, cache_dir=None, local_files_only=False, _commit_hash=None, **kwargs):
from_slow = kwargs.get('from_slow', False)
has_tokenizer_file = (resolved_vocab_files.get('tokenizer_file', None) is not None)
if ((from_slow or (not has_tokenizer_file)) and (cls.slow_tokenizer_class is not None)):
slow_tokenizer = cls.slow_tokenizer_class._from_pretrained(copy.deepcopy(resolved_vocab_files), pretrained_model_name_or_path, copy.deepcopy(init_configuration), *init_inputs, use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=_commit_hash, **copy.deepcopy(kwargs))
else:
slow_tokenizer = None
tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
if (tokenizer_config_file is not None):
with open(tokenizer_config_file, encoding='utf-8') as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
config_tokenizer_class = init_kwargs.get('tokenizer_class')
init_kwargs.pop('tokenizer_class', None)
init_kwargs.pop('auto_map', None)
saved_init_inputs = init_kwargs.pop('init_inputs', ())
if (not init_inputs):
init_inputs = saved_init_inputs
else:
config_tokenizer_class = None
init_kwargs = init_configuration
if (config_tokenizer_class is None):
from .models.auto.configuration_auto import AutoConfig
try:
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, _commit_hash=_commit_hash)
config_tokenizer_class = config.tokenizer_class
except (OSError, ValueError, KeyError):
config = None
if (config_tokenizer_class is None):
from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
if hasattr(config, 'model_type'):
model_type = config.model_type
else:
model_type = None
for pattern in TOKENIZER_MAPPING_NAMES.keys():
if (pattern in str(pretrained_model_name_or_path)):
model_type = pattern
break
if (model_type is not None):
(config_tokenizer_class, config_tokenizer_class_fast) = TOKENIZER_MAPPING_NAMES.get(model_type, (None, None))
if (config_tokenizer_class is None):
config_tokenizer_class = config_tokenizer_class_fast
if (config_tokenizer_class is not None):
if (cls.__name__.replace('Fast', '') != config_tokenizer_class.replace('Fast', '')):
logger.warning(f'''The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization.
The tokenizer class you load from this checkpoint is '{config_tokenizer_class}'.
The class this function is called from is '{cls.__name__}'.''')
init_kwargs.update(kwargs)
def convert_added_tokens(obj: Union[(AddedToken, Any)]):
if (isinstance(obj, dict) and ('__type' in obj) and (obj['__type'] == 'AddedToken')):
obj.pop('__type')
return AddedToken(**obj)
elif isinstance(obj, (list, tuple)):
return [convert_added_tokens(o) for o in obj]
elif isinstance(obj, dict):
return {k: convert_added_tokens(v) for (k, v) in obj.items()}
return obj
init_kwargs = convert_added_tokens(init_kwargs)
if (pretrained_model_name_or_path in cls.max_model_input_sizes):
model_max_length = cls.max_model_input_sizes[pretrained_model_name_or_path]
if ((model_max_length is not None) and isinstance(model_max_length, (int, float))):
model_max_length = min(init_kwargs.get('model_max_length', int(1e+30)), model_max_length)
init_kwargs['model_max_length'] = cls._eventually_correct_t5_max_length(pretrained_model_name_or_path, model_max_length, init_kwargs.get('model_max_length'))
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
for (args_name, file_path) in resolved_vocab_files.items():
if (args_name not in init_kwargs):
init_kwargs[args_name] = file_path
if (slow_tokenizer is not None):
init_kwargs['__slow_tokenizer'] = slow_tokenizer
init_kwargs['name_or_path'] = pretrained_model_name_or_path
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
raise OSError('Unable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.')
special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
if (special_tokens_map_file is not None):
with open(special_tokens_map_file, encoding='utf-8') as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for (key, value) in special_tokens_map.items():
if ((key in kwargs) and kwargs[key]):
continue
if isinstance(value, dict):
value = AddedToken(**value)
elif isinstance(value, list):
value = [(AddedToken(**token) if isinstance(token, dict) else token) for token in value]
setattr(tokenizer, key, value)
special_tokens = tokenizer.all_special_tokens
if (added_tokens_file is not None):
with open(added_tokens_file, encoding='utf-8') as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
added_tok_encoder_sorted = sorted(added_tok_encoder.items(), key=(lambda x: x[1]))
is_last_special = None
tokens = []
for (token, index) in added_tok_encoder_sorted:
current_index = (len(tokenizer) + len(tokens))
if (has_tokenizer_file and (index != current_index) and (tokenizer.convert_tokens_to_ids(token) != index)):
raise ValueError(f'Wrong index found for {token}: should be {tokenizer.convert_tokens_to_ids(token)} but found {index}.')
elif ((not has_tokenizer_file) and (index != current_index)):
raise ValueError(f"Non-consecutive added token '{token}' found. Should have index {current_index} but has index {index} in saved vocabulary.")
is_special = bool((token in special_tokens))
if ((is_last_special is None) or (is_last_special == is_special)):
tokens.append(token)
else:
tokenizer.add_tokens(tokens, special_tokens=is_last_special)
tokens = [token]
is_last_special = is_special
if tokens:
tokenizer.add_tokens(tokens, special_tokens=is_last_special)
added_tokens = tokenizer.sanitize_special_tokens()
if added_tokens:
logger.warning_advice('Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.')
return tokenizer
def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
return max_model_length
def save_pretrained(self, save_directory: Union[(str, os.PathLike)], legacy_format: Optional[bool]=None, filename_prefix: Optional[str]=None, push_to_hub: bool=False, **kwargs) -> Tuple[str]:
    """Save the full tokenizer state to `save_directory`.

    Writes the tokenizer config file and the special-tokens map, then delegates to
    `_save_pretrained` for the vocabulary files. Returns the tuple of file paths
    written (or None if `save_directory` is a file). When `push_to_hub` is True,
    files modified by this call are uploaded to the Hub repo `repo_id`
    (default: last path component of `save_directory`).
    """
    if os.path.isfile(save_directory):
        logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
        return
    os.makedirs(save_directory, exist_ok=True)
    if push_to_hub:
        commit_message = kwargs.pop('commit_message', None)
        repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[(- 1)])
        repo_id = self._create_repo(repo_id, **kwargs)
        # Snapshot timestamps so only files modified below are uploaded later.
        files_timestamps = self._get_files_timestamps(save_directory)
    special_tokens_map_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + SPECIAL_TOKENS_MAP_FILE))
    tokenizer_config_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + TOKENIZER_CONFIG_FILE))
    # Start from the kwargs the tokenizer was initialized with.
    tokenizer_config = copy.deepcopy(self.init_kwargs)
    # Current attribute values take precedence over the stored init kwargs.
    target_keys = ['model_max_length', 'clean_up_tokenization_spaces']
    for k in target_keys:
        if hasattr(self, k):
            tokenizer_config[k] = getattr(self, k)
    if (len(self.init_inputs) > 0):
        tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
    # Vocabulary file paths are machine-specific; drop them from the config.
    for file_id in self.vocab_files_names.keys():
        tokenizer_config.pop(file_id, None)
    def convert_added_tokens(obj: Union[(AddedToken, Any)], add_type_field=True):
        # Recursively turn AddedToken instances into JSON-serializable dicts,
        # optionally tagging them with '__type' so they round-trip on load.
        if isinstance(obj, AddedToken):
            out = obj.__getstate__()
            if add_type_field:
                out['__type'] = 'AddedToken'
            return out
        elif isinstance(obj, (list, tuple)):
            return [convert_added_tokens(o, add_type_field=add_type_field) for o in obj]
        elif isinstance(obj, dict):
            return {k: convert_added_tokens(v, add_type_field=add_type_field) for (k, v) in obj.items()}
        return obj
    tokenizer_config = convert_added_tokens(tokenizer_config, add_type_field=True)
    tokenizer_class = self.__class__.__name__
    # Store the slow-tokenizer class name so either implementation can reload it.
    if (tokenizer_class.endswith('Fast') and (tokenizer_class != 'PreTrainedTokenizerFast')):
        tokenizer_class = tokenizer_class[:(- 4)]
    tokenizer_config['tokenizer_class'] = tokenizer_class
    if (getattr(self, '_auto_map', None) is not None):
        tokenizer_config['auto_map'] = self._auto_map
    if (getattr(self, '_processor_class', None) is not None):
        tokenizer_config['processor_class'] = self._processor_class
    # Custom (non-library) tokenizer classes also need their defining module saved.
    if (self._auto_class is not None):
        custom_object_save(self, save_directory, config=tokenizer_config)
    # `name_or_path` is instance-specific; never persist it.
    if ('name_or_path' in tokenizer_config):
        tokenizer_config.pop('name_or_path')
    with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
        out_str = (json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        f.write(out_str)
    logger.info(f'tokenizer config file saved in {tokenizer_config_file}')
    # Special tokens map is written without the '__type' tag.
    write_dict = convert_added_tokens(self.special_tokens_map_extended, add_type_field=False)
    with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
        out_str = (json.dumps(write_dict, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        f.write(out_str)
    logger.info(f'Special tokens file saved in {special_tokens_map_file}')
    file_names = (tokenizer_config_file, special_tokens_map_file)
    # Subclass hook writes the vocabulary / added-tokens files.
    save_files = self._save_pretrained(save_directory=save_directory, file_names=file_names, legacy_format=legacy_format, filename_prefix=filename_prefix)
    if push_to_hub:
        self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('use_auth_token'))
    return save_files
def _save_pretrained(self, save_directory: Union[(str, os.PathLike)], file_names: Tuple[str], legacy_format: Optional[bool]=None, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Write the legacy (slow-tokenizer) files: the added-tokens vocabulary plus the
    model vocabulary, and return `file_names` extended with everything written.
    """
    # Slow tokenizers only know the legacy layout; reject an explicit request otherwise.
    if (legacy_format is False):
        raise ValueError('Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format.')
    save_directory = str(save_directory)
    prefix = ((filename_prefix + '-') if filename_prefix else '')
    added_tokens_file = os.path.join(save_directory, (prefix + ADDED_TOKENS_FILE))
    extra_vocab = self.get_added_vocab()
    # Only materialize the added-tokens file when there is something to record.
    if extra_vocab:
        serialized = (json.dumps(extra_vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        with open(added_tokens_file, 'w', encoding='utf-8') as handle:
            handle.write(serialized)
        logger.info(f'added tokens file saved in {added_tokens_file}')
    vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
    return ((file_names + vocab_files) + (added_tokens_file,))
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
    """Save only the tokenizer vocabulary to `save_directory`; must be implemented by subclasses."""
    raise NotImplementedError
def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]:
    """Split `text` (optionally with `pair`) into a list of token strings; must be implemented by subclasses."""
    raise NotImplementedError
_end_docstrings(ENCODE_KWARGS_DOCSTRING, '\n **kwargs: Passed along to the `.tokenize()` method.\n ', '\n Returns:\n `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.\n ')
def encode(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> List[int]:
    """Convert a sequence (or pair of sequences) to token ids.

    Thin wrapper around `encode_plus` that keeps only the `input_ids` field.
    """
    full_encoding = self.encode_plus(
        text,
        text_pair=text_pair,
        add_special_tokens=add_special_tokens,
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        stride=stride,
        return_tensors=return_tensors,
        **kwargs,
    )
    return full_encoding['input_ids']
def num_special_tokens_to_add(self, pair: bool=False) -> int:
    """Return the number of special tokens added when encoding a sequence (a pair when `pair=True`); must be implemented by subclasses."""
    raise NotImplementedError
def _get_padding_truncation_strategies(self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs):
    """Resolve user-facing `padding`/`truncation` arguments — including the deprecated
    `truncation_strategy` and `pad_to_max_length` kwargs — into concrete
    `PaddingStrategy`/`TruncationStrategy` values plus an effective `max_length`.

    Returns `(padding_strategy, truncation_strategy, max_length, remaining_kwargs)`.
    Deprecation warnings are emitted at most once per message (tracked in
    `self.deprecation_warnings`) and only when `verbose` is True.
    """
    # Deprecated arguments, still honored for backward compatibility.
    old_truncation_strategy = kwargs.pop('truncation_strategy', 'do_not_truncate')
    old_pad_to_max_length = kwargs.pop('pad_to_max_length', False)
    # `max_length` alone implies truncation; default to 'longest_first'.
    if ((max_length is not None) and (padding is False) and (truncation is None)):
        if verbose:
            if (not self.deprecation_warnings.get('Truncation-not-explicitly-activated', False)):
                logger.warning("Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.")
            self.deprecation_warnings['Truncation-not-explicitly-activated'] = True
        truncation = 'longest_first'
    # Legacy `pad_to_max_length=True` path (only when `padding` itself is unset).
    if ((padding is False) and old_pad_to_max_length):
        if verbose:
            warnings.warn("The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).", FutureWarning)
        if (max_length is None):
            padding_strategy = PaddingStrategy.LONGEST
        else:
            padding_strategy = PaddingStrategy.MAX_LENGTH
    elif (padding is not False):
        # Modern `padding` argument: True, a strategy name, or a PaddingStrategy.
        if (padding is True):
            if verbose:
                if ((max_length is not None) and ((truncation is None) or (truncation is False) or (truncation == 'do_not_truncate'))):
                    warnings.warn("`max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.")
                if (old_pad_to_max_length is not False):
                    warnings.warn('Though `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.')
            padding_strategy = PaddingStrategy.LONGEST
        elif (not isinstance(padding, PaddingStrategy)):
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD
    # Legacy `truncation_strategy` path (only when `truncation` itself is unset).
    if ((truncation is None) and (old_truncation_strategy != 'do_not_truncate')):
        if verbose:
            warnings.warn("The `truncation_strategy` argument is deprecated and will be removed in a future version, use `truncation=True` to truncate examples to a max length. You can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input size of the model (e.g. 512 for Bert). If you have pairs of inputs, you can give a specific truncation strategy selected among `truncation='only_first'` (will only truncate the first sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).", FutureWarning)
        truncation_strategy = TruncationStrategy(old_truncation_strategy)
    elif ((truncation is not False) and (truncation is not None)):
        # Modern `truncation` argument: True, a strategy name, or a TruncationStrategy.
        if (truncation is True):
            truncation_strategy = TruncationStrategy.LONGEST_FIRST
        elif (not isinstance(truncation, TruncationStrategy)):
            truncation_strategy = TruncationStrategy(truncation)
        elif isinstance(truncation, TruncationStrategy):
            truncation_strategy = truncation
    else:
        truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
    # Fall back to the model's max length when no explicit `max_length` is given;
    # a model_max_length above LARGE_INTEGER means "no predefined maximum".
    if (max_length is None):
        if (padding_strategy == PaddingStrategy.MAX_LENGTH):
            if (self.model_max_length > LARGE_INTEGER):
                if verbose:
                    if (not self.deprecation_warnings.get('Asking-to-pad-to-max_length', False)):
                        logger.warning('Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.')
                    self.deprecation_warnings['Asking-to-pad-to-max_length'] = True
                padding_strategy = PaddingStrategy.DO_NOT_PAD
            else:
                max_length = self.model_max_length
        if (truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE):
            if (self.model_max_length > LARGE_INTEGER):
                if verbose:
                    if (not self.deprecation_warnings.get('Asking-to-truncate-to-max_length', False)):
                        logger.warning('Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.')
                    self.deprecation_warnings['Asking-to-truncate-to-max_length'] = True
                truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
            else:
                max_length = self.model_max_length
    # Padding requires a usable pad token.
    if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) and ((not self.pad_token) or (self.pad_token_id < 0))):
        raise ValueError("Asking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.")
    # Truncating to a length that is not a multiple of `pad_to_multiple_of` while
    # also padding to that multiple would produce inconsistent lengths.
    if ((truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE) and (padding_strategy != PaddingStrategy.DO_NOT_PAD) and (pad_to_multiple_of is not None) and (max_length is not None) and ((max_length % pad_to_multiple_of) != 0)):
        raise ValueError(f'Truncation and padding are both activated but truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of}).')
    return (padding_strategy, truncation_strategy, max_length, kwargs)
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(self, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, text_pair: Optional[Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]]=None, text_target: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, text_pair_target: Optional[Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
all_kwargs = {'add_special_tokens': add_special_tokens, 'padding': padding, 'truncation': truncation, 'max_length': max_length, 'stride': stride, 'is_split_into_words': is_split_into_words, 'pad_to_multiple_of': pad_to_multiple_of, 'return_tensors': return_tensors, 'return_token_type_ids': return_token_type_ids, 'return_attention_mask': return_attention_mask, 'return_overflowing_tokens': return_overflowing_tokens, 'return_special_tokens_mask': return_special_tokens_mask, 'return_offsets_mapping': return_offsets_mapping, 'return_length': return_length, 'verbose': verbose}
all_kwargs.update(kwargs)
if ((text is None) and (text_target is None)):
raise ValueError('You need to specify either `text` or `text_target`.')
if (text is not None):
if (not self._in_target_context_manager):
self._switch_to_input_mode()
encodings = self._call_one(text=text, text_pair=text_pair, **all_kwargs)
if (text_target is not None):
self._switch_to_target_mode()
target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **all_kwargs)
self._switch_to_input_mode()
if (text_target is None):
return encodings
elif (text is None):
return target_encodings
else:
encodings['labels'] = target_encodings['input_ids']
return encodings
def _call_one(self, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])], text_pair: Optional[Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
def _is_valid_text_input(t):
if isinstance(t, str):
return True
elif isinstance(t, (list, tuple)):
if (len(t) == 0):
return True
elif isinstance(t[0], str):
return True
elif isinstance(t[0], (list, tuple)):
return ((len(t[0]) == 0) or isinstance(t[0][0], str))
else:
return False
else:
return False
if (not _is_valid_text_input(text)):
raise ValueError('text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).')
if ((text_pair is not None) and (not _is_valid_text_input(text_pair))):
raise ValueError('text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).')
if is_split_into_words:
is_batched = (isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)))
else:
is_batched = isinstance(text, (list, tuple))
if is_batched:
if isinstance(text_pair, str):
raise TypeError('when tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`.')
if ((text_pair is not None) and (len(text) != len(text_pair))):
raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
batch_text_or_text_pairs = (list(zip(text, text_pair)) if (text_pair is not None) else text)
return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
else:
return self.encode_plus(text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Encode a single sequence (or pair) into a `BatchEncoding`.

    Resolves legacy/modern padding and truncation arguments into strategies,
    then defers to the backend `_encode_plus`.
    """
    (pad_strategy, trunc_strategy, effective_max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._encode_plus(text=text, text_pair=text_pair, add_special_tokens=add_special_tokens, padding_strategy=pad_strategy, truncation_strategy=trunc_strategy, max_length=effective_max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _encode_plus(self, text: Union[(TextInput, PreTokenizedInput, EncodedInput)], text_pair: Optional[Union[(TextInput, PreTokenizedInput, EncodedInput)]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Backend for `encode_plus` operating on already-resolved strategies; must be implemented by subclasses."""
    raise NotImplementedError
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair])], add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Encode a batch of sequences (or pairs) into a `BatchEncoding`.

    Resolves legacy/modern padding and truncation arguments into strategies,
    then defers to the backend `_batch_encode_plus`.
    """
    (pad_strategy, trunc_strategy, effective_max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, add_special_tokens=add_special_tokens, padding_strategy=pad_strategy, truncation_strategy=trunc_strategy, max_length=effective_max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair])], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Backend for `batch_encode_plus` operating on already-resolved strategies; must be implemented by subclasses."""
    raise NotImplementedError
def pad(self, encoded_inputs: Union[(BatchEncoding, List[BatchEncoding], Dict[(str, EncodedInput)], Dict[(str, List[EncodedInput])], List[Dict[(str, EncodedInput)]])], padding: Union[(bool, str, PaddingStrategy)]=True, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, verbose: bool=True) -> BatchEncoding:
    """Pad a single encoding or a batch of encodings up to a common length.

    Accepts a dict of lists, a list of dicts (converted to dict of lists), or a
    `BatchEncoding`; returns a `BatchEncoding`, optionally converted to tensors
    of type `return_tensors`.
    """
    # Fast tokenizers pad more efficiently inside `__call__`; nudge the user once.
    if self.__class__.__name__.endswith('Fast'):
        if (not self.deprecation_warnings.get('Asking-to-pad-a-fast-tokenizer', False)):
            logger.warning_advice(f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.")
            self.deprecation_warnings['Asking-to-pad-a-fast-tokenizer'] = True
    # Normalize list-of-dicts input into a dict-of-lists.
    if (isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping)):
        encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
    # The first model input name (e.g. 'input_ids') is the reference field.
    if (self.model_input_names[0] not in encoded_inputs):
        raise ValueError(f'You should supply an encoding or a list of encodings to this method that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}')
    required_input = encoded_inputs[self.model_input_names[0]]
    # Empty input: nothing to pad; still honor the attention-mask request.
    if ((required_input is None) or (isinstance(required_input, Sized) and (len(required_input) == 0))):
        if return_attention_mask:
            encoded_inputs['attention_mask'] = []
        return encoded_inputs
    # Find a representative element to sniff the input container/tensor type.
    first_element = required_input[0]
    if isinstance(first_element, (list, tuple)):
        for item in required_input:
            if (len(item) != 0):
                first_element = item[0]
                break
    # If the input is already framework tensors, default `return_tensors` to match,
    # then work on plain Python objects.
    if (not isinstance(first_element, (int, list, tuple))):
        if is_tf_tensor(first_element):
            return_tensors = ('tf' if (return_tensors is None) else return_tensors)
        elif is_torch_tensor(first_element):
            return_tensors = ('pt' if (return_tensors is None) else return_tensors)
        elif isinstance(first_element, np.ndarray):
            return_tensors = ('np' if (return_tensors is None) else return_tensors)
        else:
            raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, pytorch or tensorflow object.')
        for (key, value) in encoded_inputs.items():
            encoded_inputs[key] = to_py_obj(value)
    (padding_strategy, _, max_length, _) = self._get_padding_truncation_strategies(padding=padding, max_length=max_length, verbose=verbose)
    required_input = encoded_inputs[self.model_input_names[0]]
    # Single (non-batched) encoding: pad directly.
    if (required_input and (not isinstance(required_input[0], (list, tuple)))):
        encoded_inputs = self._pad(encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
    batch_size = len(required_input)
    assert all(((len(v) == batch_size) for v in encoded_inputs.values())), 'Some items in the output dictionary have a different batch size than others.'
    # LONGEST is resolved to MAX_LENGTH at the longest sequence in the batch.
    if (padding_strategy == PaddingStrategy.LONGEST):
        max_length = max((len(inputs) for inputs in required_input))
        padding_strategy = PaddingStrategy.MAX_LENGTH
    # Pad each example independently, then re-assemble column-wise.
    batch_outputs = {}
    for i in range(batch_size):
        inputs = {k: v[i] for (k, v) in encoded_inputs.items()}
        outputs = self._pad(inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
        for (key, value) in outputs.items():
            if (key not in batch_outputs):
                batch_outputs[key] = []
            batch_outputs[key].append(value)
    return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Return segment ids: 0 for every token of the first sequence, 1 for the
    (optional) second sequence. Base behavior — no special tokens accounted for.
    """
    segment_ids = [0] * len(token_ids_0)
    if token_ids_1 is not None:
        segment_ids += [1] * len(token_ids_1)
    return segment_ids
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Concatenate one or two id sequences; the base implementation adds no
    special tokens (and returns `token_ids_0` itself when there is no pair).
    """
    return token_ids_0 if (token_ids_1 is None) else (token_ids_0 + token_ids_1)
_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def prepare_for_model(self, ids: List[int], pair_ids: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, prepend_batch_axis: bool=False, **kwargs) -> BatchEncoding:
    """Build a model-ready `BatchEncoding` from already-converted token id
    sequence(s): truncate, add special tokens, derive token-type ids and masks,
    then pad. `ids`/`pair_ids` come from `convert_tokens_to_ids`-style output.
    """
    (padding_strategy, truncation_strategy, max_length, kwargs) = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)
    pair = bool((pair_ids is not None))
    len_ids = len(ids)
    len_pair_ids = (len(pair_ids) if pair else 0)
    if (return_token_type_ids and (not add_special_tokens)):
        raise ValueError('Asking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.')
    # `longest_first` interleaves removals between the two sequences, so a single
    # overflow list cannot be reconstructed for pairs.
    if (return_overflowing_tokens and (truncation_strategy == TruncationStrategy.LONGEST_FIRST) and (pair_ids is not None)):
        raise ValueError('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.')
    # Default the optional outputs to whatever the model expects.
    if (return_token_type_ids is None):
        return_token_type_ids = ('token_type_ids' in self.model_input_names)
    if (return_attention_mask is None):
        return_attention_mask = ('attention_mask' in self.model_input_names)
    encoded_inputs = {}
    # Total length including the special tokens that would be added.
    total_len = ((len_ids + len_pair_ids) + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0))
    overflowing_tokens = []
    if ((truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE) and max_length and (total_len > max_length)):
        (ids, pair_ids, overflowing_tokens) = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=(total_len - max_length), truncation_strategy=truncation_strategy, stride=stride)
    if return_overflowing_tokens:
        encoded_inputs['overflowing_tokens'] = overflowing_tokens
        encoded_inputs['num_truncated_tokens'] = (total_len - max_length)
    if add_special_tokens:
        sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
        token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
    else:
        sequence = ((ids + pair_ids) if pair else ids)
        token_type_ids = (([0] * len(ids)) + (([0] * len(pair_ids)) if pair else []))
    encoded_inputs['input_ids'] = sequence
    if return_token_type_ids:
        encoded_inputs['token_type_ids'] = token_type_ids
    if return_special_tokens_mask:
        if add_special_tokens:
            encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
        else:
            # No special tokens were added, so the mask is all zeros.
            encoded_inputs['special_tokens_mask'] = ([0] * len(sequence))
    self._eventual_warn_about_too_long_sequence(encoded_inputs['input_ids'], max_length, verbose)
    # Padding also produces the attention mask when requested.
    if ((padding_strategy != PaddingStrategy.DO_NOT_PAD) or return_attention_mask):
        encoded_inputs = self.pad(encoded_inputs, max_length=max_length, padding=padding_strategy.value, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
    if return_length:
        encoded_inputs['length'] = len(encoded_inputs['input_ids'])
    batch_outputs = BatchEncoding(encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis)
    return batch_outputs
def truncate_sequences(self, ids: List[int], pair_ids: Optional[List[int]]=None, num_tokens_to_remove: int=0, truncation_strategy: Union[(str, TruncationStrategy)]='longest_first', stride: int=0) -> Tuple[(List[int], List[int], List[int])]:
    """Remove `num_tokens_to_remove` tokens from `ids` and/or `pair_ids` following
    `truncation_strategy`, honoring `self.truncation_side` ('left' or 'right').

    Returns `(ids, pair_ids, overflowing_tokens)`; `stride` widens the overflow
    window so the returned overflow overlaps the kept tokens. Note overflow is
    never populated for pair inputs under 'longest_first'.
    """
    if (num_tokens_to_remove <= 0):
        return (ids, pair_ids, [])
    if (not isinstance(truncation_strategy, TruncationStrategy)):
        truncation_strategy = TruncationStrategy(truncation_strategy)
    overflowing_tokens = []
    # Single-sequence truncation: ONLY_FIRST, or LONGEST_FIRST without a pair.
    if ((truncation_strategy == TruncationStrategy.ONLY_FIRST) or ((truncation_strategy == TruncationStrategy.LONGEST_FIRST) and (pair_ids is None))):
        if (len(ids) > num_tokens_to_remove):
            # Overflow window = removed tokens plus `stride` tokens of overlap.
            window_len = min(len(ids), (stride + num_tokens_to_remove))
            if (self.truncation_side == 'left'):
                overflowing_tokens = ids[:window_len]
                ids = ids[num_tokens_to_remove:]
            elif (self.truncation_side == 'right'):
                overflowing_tokens = ids[(- window_len):]
                ids = ids[:(- num_tokens_to_remove)]
            else:
                raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
        else:
            # Cannot remove that many tokens from the first sequence alone; log only.
            error_msg = f'We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a length {len(ids)}. '
            if (truncation_strategy == TruncationStrategy.ONLY_FIRST):
                error_msg = (error_msg + f"Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_second'.")
            logger.error(error_msg)
    elif (truncation_strategy == TruncationStrategy.LONGEST_FIRST):
        # Pairwise truncation: repeatedly trim one token from the currently
        # longer sequence. Overflow cannot be reported in this mode.
        logger.warning(f"Be aware, overflowing tokens are not returned for the setting you have chosen, i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' truncation strategy. So the returned list will always be empty even if some tokens have been removed.")
        for _ in range(num_tokens_to_remove):
            if ((pair_ids is None) or (len(ids) > len(pair_ids))):
                if (self.truncation_side == 'right'):
                    ids = ids[:(- 1)]
                elif (self.truncation_side == 'left'):
                    ids = ids[1:]
                else:
                    raise ValueError(('invalid truncation strategy:' + str(self.truncation_side)))
            elif (self.truncation_side == 'right'):
                pair_ids = pair_ids[:(- 1)]
            elif (self.truncation_side == 'left'):
                pair_ids = pair_ids[1:]
            else:
                raise ValueError(('invalid truncation strategy:' + str(self.truncation_side)))
    elif ((truncation_strategy == TruncationStrategy.ONLY_SECOND) and (pair_ids is not None)):
        # Truncate only the second sequence.
        if (len(pair_ids) > num_tokens_to_remove):
            window_len = min(len(pair_ids), (stride + num_tokens_to_remove))
            if (self.truncation_side == 'right'):
                overflowing_tokens = pair_ids[(- window_len):]
                pair_ids = pair_ids[:(- num_tokens_to_remove)]
            elif (self.truncation_side == 'left'):
                overflowing_tokens = pair_ids[:window_len]
                pair_ids = pair_ids[num_tokens_to_remove:]
            else:
                raise ValueError(('invalid truncation strategy:' + str(self.truncation_side)))
        else:
            logger.error(f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, for instance 'longest_first' or 'only_first'.")
    return (ids, pair_ids, overflowing_tokens)
def _pad(self, encoded_inputs: Union[(Dict[(str, EncodedInput)], BatchEncoding)], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, return_attention_mask: Optional[bool]=None) -> dict:
    """Pad a single encoded example (dict of lists) up to ``max_length``.

    Pads the main input (and, when present, ``token_type_ids`` /
    ``special_tokens_mask``) on ``self.padding_side``, building or
    extending ``attention_mask`` accordingly. With ``pad_to_multiple_of``
    set, the target length is rounded up to the next multiple.
    """
    if return_attention_mask is None:
        return_attention_mask = 'attention_mask' in self.model_input_names
    required_input = encoded_inputs[self.model_input_names[0]]
    if padding_strategy == PaddingStrategy.LONGEST:
        # For a single example, LONGEST means "pad to its own length" (no-op).
        max_length = len(required_input)
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of) != 0:
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    needs_padding = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
    # Initialize the attention mask first so it can be extended below.
    if return_attention_mask and 'attention_mask' not in encoded_inputs:
        encoded_inputs['attention_mask'] = [1] * len(required_input)
    if needs_padding:
        difference = max_length - len(required_input)
        if self.padding_side not in ('right', 'left'):
            raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        pad_on_right = self.padding_side == 'right'

        def _padded(sequence, pad_value):
            # Append or prepend `difference` copies of `pad_value`.
            filler = [pad_value] * difference
            return sequence + filler if pad_on_right else filler + sequence

        if return_attention_mask:
            encoded_inputs['attention_mask'] = _padded(encoded_inputs['attention_mask'], 0)
        if 'token_type_ids' in encoded_inputs:
            encoded_inputs['token_type_ids'] = _padded(encoded_inputs['token_type_ids'], self.pad_token_type_id)
        if 'special_tokens_mask' in encoded_inputs:
            encoded_inputs['special_tokens_mask'] = _padded(encoded_inputs['special_tokens_mask'], 1)
        encoded_inputs[self.model_input_names[0]] = _padded(required_input, self.pad_token_id)
    return encoded_inputs
def convert_tokens_to_string(self, tokens: List[str]) -> str:
    """Join a list of tokens into a single string.

    Abstract hook: subclasses implement the merge logic (e.g. handling
    subword prefixes); the base class only defines the interface.
    """
    raise NotImplementedError
def batch_decode(self, sequences: Union[(List[int], List[List[int]], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, **kwargs) -> List[str]:
    """Decode a batch of token-id sequences by calling `self.decode` on each.

    Returns one decoded string per input sequence, forwarding all
    decoding options unchanged.
    """
    decoded = []
    for sequence in sequences:
        decoded.append(self.decode(sequence, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs))
    return decoded
def decode(self, token_ids: Union[(int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor')], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, **kwargs) -> str:
    """Decode token ids into a single string.

    Framework tensors/arrays are first normalized to plain Python
    objects, then the work is delegated to the subclass `_decode`.
    """
    python_ids = to_py_obj(token_ids)
    return self._decode(token_ids=python_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
def _decode(self, token_ids: Union[(int, List[int])], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=None, **kwargs) -> str:
    """Backend-specific decoding of token ids into a string.

    Abstract hook called by `decode`; subclasses must implement it.
    """
    raise NotImplementedError
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Return a mask with 1 at positions holding special tokens, else 0.

    Only supports inputs that already contain special tokens and no
    second sequence; the assert below enforces that restriction.
    """
    assert (already_has_special_tokens and (token_ids_1 is None)), 'You cannot use ``already_has_special_tokens=False`` with this tokenizer. Please use a slow (full python) tokenizer to activate this argument. Or set `return_special_tokens_mask=True` when calling the encoding method to get the special tokens mask in any tokenizer. '
    # Set membership gives O(1) lookups; results are identical.
    special_ids = set(self.all_special_ids)
    mask = []
    for token in token_ids_0:
        mask.append(1 if token in special_ids else 0)
    return mask
def clean_up_tokenization(out_string: str) -> str:
    """Remove artifacts of whitespace tokenization from `out_string`.

    Reattaches punctuation and common English contractions that were
    split off as separate tokens (e.g. "do n't" -> "don't").
    """
    # Applied in order; each pair mirrors one step of the original chain.
    replacements = (
        (' .', '.'),
        (' ?', '?'),
        (' !', '!'),
        (' ,', ','),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
    )
    for old, new in replacements:
        out_string = out_string.replace(old, new)
    return out_string
def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
if ((max_length is None) and (len(ids) > self.model_max_length) and verbose):
if (not self.deprecation_warnings.get('sequence-length-is-longer-than-the-specified-maximum', False)):
logger.warning(f'Token indices sequence length is longer than the specified maximum sequence length for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model will result in indexing errors')
self.deprecation_warnings['sequence-length-is-longer-than-the-specified-maximum'] = True
def _switch_to_input_mode(self):
    """Hook invoked when returning to input (source) tokenization mode; no-op by default."""
    pass
def _switch_to_target_mode(self):
    """Hook invoked when entering target (label) tokenization mode; no-op by default."""
    pass
def as_target_tokenizer(self):
    """Temporarily switch the tokenizer into target (label) mode.

    Deprecated in favor of the `text_target` argument of `__call__`.
    Yields once; the caller runs inside the target-mode window.

    NOTE(review): usage elsewhere (`with self.as_target_tokenizer():`)
    shows this generator is meant to carry a `@contextmanager` decorator
    that appears to have been stripped from this extract — confirm and
    restore it at the definition site.
    """
    warnings.warn('`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your labels by using the argument `text_target` of the regular `__call__` method (either in the same call as your input texts if you use the same keyword arguments, or in a separate call.')
    self._switch_to_target_mode()
    self._in_target_context_manager = True
    try:
        (yield)
    finally:
        # Restore input mode even if the caller's body raises, so the
        # tokenizer is never left stuck in target mode.
        self._in_target_context_manager = False
        self._switch_to_input_mode()
@classmethod
def register_for_auto_class(cls, auto_class='AutoTokenizer'):
    """Register this tokenizer class with a `transformers` auto class.

    Args:
        auto_class: auto-class name (or the class itself) to register
            under, e.g. 'AutoTokenizer'.

    Raises:
        ValueError: if `auto_class` is not a valid transformers auto class.
    """
    # NOTE(review): the `cls` signature plus the class-attribute write
    # indicate a stripped @classmethod decorator; restored here.
    if not isinstance(auto_class, str):
        auto_class = auto_class.__name__
    import transformers.models.auto as auto_module
    if not hasattr(auto_module, auto_class):
        raise ValueError(f'{auto_class} is not a valid auto class.')
    cls._auto_class = auto_class
def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, padding: str='longest', return_tensors: str=None, truncation: bool=True, **kwargs) -> BatchEncoding:
    """Deprecated helper that tokenizes source texts and optional target
    texts, attaching the target ids as `labels`.

    Prefer calling the tokenizer directly (see the emitted warning for
    the recommended pattern).
    """
    formatted_warning = '\n`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular\n`__call__` method to prepare your inputs and targets.\n\nHere is a short example:\n\nmodel_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)\n\nIf you either need to use different keyword arguments for the source and target texts, you should do two calls like\nthis:\n\nmodel_inputs = tokenizer(src_texts, ...)\nlabels = tokenizer(text_target=tgt_texts, ...)\nmodel_inputs["labels"] = labels["input_ids"]\n\nSee the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.\nFor a more complete example, see the implementation of `prepare_seq2seq_batch`.\n'
    warnings.warn(formatted_warning, FutureWarning)
    # These keys belong to multilingual subclasses; drop them silently.
    for stale_key in ('src_lang', 'tgt_lang'):
        kwargs.pop(stale_key, None)
    if max_length is None:
        max_length = self.model_max_length
    model_inputs = self(src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
    if tgt_texts is None:
        return model_inputs
    # Targets default to the source max length unless overridden.
    max_target_length = max_length if max_target_length is None else max_target_length
    with self.as_target_tokenizer():
        labels = self(tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs)
    model_inputs['labels'] = labels['input_ids']
    return model_inputs
def pose_net(image, name):
    """Stack of eight conv layers producing a 16-channel pose map.

    Returns a tuple of (final 16-channel output, the sixth conv's
    activations) for downstream use.
    """
    with tf.variable_scope(name):
        use_bn = False
        # (filters, kernel_size, relu) for pose_conv1 .. pose_conv8.
        layer_specs = [
            (512, 3, True),
            (512, 3, True),
            (256, 3, True),
            (256, 3, True),
            (256, 3, True),
            (256, 3, True),
            (512, 1, True),
            (16, 1, False),
        ]
        activations = []
        net = image
        for idx, (filters, ksize, use_relu) in enumerate(layer_specs, start=1):
            net = conv2d(net, filters, ksize, 1, relu=use_relu, bn=use_bn, name=f'pose_conv{idx}')
            activations.append(net)
        # Final logits plus the intermediate pose_conv6 feature map.
        return (activations[7], activations[5])
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
    """Sort a batch-first tensor by descending sequence length.

    Args:
        tensor: (batch, ...) tensor to reorder along dim 0.
        sequence_lengths: 1-D tensor of per-example lengths.

    Returns:
        Tuple of (sorted_tensor, sorted_sequence_lengths,
        restoration_indices, permutation_index), where
        ``sorted_tensor.index_select(0, restoration_indices)`` recovers
        the original order.
    """
    (sorted_sequence_lengths, permutation_index) = sequence_lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)
    # Build the index range on the same device as the input instead of
    # hard-coding .cuda(): the deprecated Variable wrapper is unnecessary
    # on modern PyTorch and the .cuda() call broke CPU-only execution.
    index_range = torch.arange(len(sequence_lengths), dtype=torch.long, device=sequence_lengths.device)
    (_, reverse_mapping) = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return (sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index)
class Policy():
    """Base scheduling policy: orders sequence groups by priority."""

    def get_priority(self, now: float, seq_group: SequenceGroup) -> float:
        """Return the scheduling priority of `seq_group` at time `now`.

        Must be overridden by concrete policies.
        """
        invalidInputError(False, 'base class not implemented')

    def sort_by_priority(self, now: float, seq_groups: List[SequenceGroup]) -> List[SequenceGroup]:
        """Return `seq_groups` sorted from highest to lowest priority."""
        def _priority(seq_group):
            return self.get_priority(now, seq_group)
        return sorted(seq_groups, key=_priority, reverse=True)
def get_config(_):
    """Build the environment config for a predator/prey foraging task.

    The ignored argument satisfies the config-getter calling convention.
    Returns a dict with state initializer, physics, task, action space,
    observers, and game rules.
    """
    # Agent: small circle plus a co-moving annulus used for rendering/contact.
    agent = sprite.Sprite(x=0.5, y=0.5, shape='circle', scale=0.04, c0=0.33, c1=1.0, c2=0.66)
    annulus_vertices = shapes.annulus_vertices(inner_radius=0.08, outer_radius=0.3)
    agent_annulus = sprite.Sprite(x=0.5, y=0.5, shape=annulus_vertices, scale=1.0, c0=0.6, c1=1.0, c2=1.0)
    # Predators spawn at the field boundary with random velocity and scale.
    max_predator_vel = 0.02
    predator_pos = _get_boundary_pos_distribution(_FIELD_BUFFER)
    predator_vel = _get_vel_distribution((0.5 * max_predator_vel), max_predator_vel)
    predator_factors = distribs.Product([predator_pos, predator_vel, distribs.Continuous('scale', 0.07, 0.13)], shape='circle', c0=0.0, c1=1.0, c2=0.8)
    # Prey: same spawning scheme but half the maximum speed.
    max_prey_vel = 0.01
    prey_pos = _get_boundary_pos_distribution(_FIELD_BUFFER)
    prey_vel = _get_vel_distribution((0.5 * max_prey_vel), max_prey_vel)
    prey_factors = distribs.Product([prey_pos, prey_vel, distribs.Continuous('scale', 0.07, 0.13)], shape='circle', c0=0.2, c1=1.0, c2=1.0)
    # Background grid lines for visual reference.
    grid = shapes.grid_lines(grid_x=_GRID_SIZE, grid_y=_GRID_SIZE, buffer_border=1.0, c0=0.0, c1=0.0, c2=0.5)
    def state_initializer():
        # Layer order determines render order; predators/prey start empty
        # and are spawned by the appearance rules below.
        state = collections.OrderedDict([('grid', grid), ('prey', []), ('agent', [agent]), ('predators', []), ('agent_annulus', [agent_annulus])])
        return state
    # Drag on the agent (and its annulus) so joystick impulses decay.
    agent_friction_force = physics_lib.Drag(coeff_friction=0.25)
    physics = physics_lib.Physics((agent_friction_force, ['agent', 'agent_annulus']), updates_per_env_step=10)
    def _predator_reward_fn(_, predator_sprite):
        # Penalty scales with predator size.
        return ((- 2.0) * predator_sprite.scale)
    predator_task = tasks.ContactReward(reward_fn=_predator_reward_fn, layers_0='agent', layers_1='predators', reset_steps_after_contact=0)
    def _prey_reward_fn(_, prey_sprite):
        # Reward scales with prey size.
        return prey_sprite.scale
    prey_task = tasks.ContactReward(reward_fn=_prey_reward_fn, layers_0='agent', layers_1='prey')
    task = tasks.CompositeTask(predator_task, prey_task)
    action_space = action_spaces.Joystick(scaling_factor=0.003, action_layers=('agent', 'agent_annulus'), constrained_lr=False)
    # Render from the agent's point of view.
    _polygon_modifier = observers.polygon_modifiers.FirstPersonAgent(agent_layer='agent')
    observer = observers.PILRenderer(image_size=(64, 64), anti_aliasing=1, color_to_rgb='hsv_to_rgb', polygon_modifier=_polygon_modifier)
    # Stochastic spawning: predators appear with p=0.5 per step, prey with p=0.2.
    predator_appear_generator = sprite_generators.generate_sprites(predator_factors, num_sprites=1)
    predator_appear = game_rules.ConditionalRule(condition=(lambda state: np.random.binomial(1, p=0.5)), rules=game_rules.CreateSprites('predators', predator_appear_generator))
    prey_appear_generator = sprite_generators.generate_sprites(prey_factors, num_sprites=1)
    prey_appear = game_rules.ConditionalRule(condition=(lambda state: np.random.binomial(1, p=0.2)), rules=game_rules.CreateSprites('prey', prey_appear_generator))
    vanish_range = [((- 1.0) * _VANISH_DIST), (1.0 + _VANISH_DIST)]
    def _should_vanish(s):
        # Vanish only when out of range AND still moving away from the field.
        pos_too_small = ((s.position < vanish_range[0]) * (s.velocity < 0.0))
        pos_too_large = ((s.position > vanish_range[1]) * (s.velocity > 0.0))
        return (any(pos_too_small) or any(pos_too_large))
    predator_vanish = game_rules.VanishByFilter('predators', _should_vanish)
    prey_vanish = game_rules.VanishByFilter('prey', _should_vanish)
    keep_near_center = game_rules.KeepNearCenter(agent_layer='agent', layers_to_center=['agent_annulus', 'predators', 'prey'], grid_x=_GRID_SIZE)
    prey_caught = game_rules.VanishOnContact(vanishing_layer='prey', contacting_layer='agent')
    rules = (predator_appear, prey_appear, prey_vanish, predator_vanish, keep_near_center, prey_caught)
    config = {'state_initializer': state_initializer, 'physics': physics, 'task': task, 'action_space': action_space, 'observers': {'image': observer}, 'game_rules': rules}
    return config
def main(opts):
    """Compute per-image bounding-box counts and dump them as JSON.

    Writes the counts file into `opts.img_dir`; images whose count is
    None are collected into a separate corrupted.json report.
    """
    n2bb = _compute_all_nbb(opts.img_dir, opts.conf_th, opts.max_bb, opts.min_bb, opts.nproc)
    counts_path = f'{opts.img_dir}/nbb_th{opts.conf_th}_max{opts.max_bb}_min{opts.min_bb}.json'
    with open(counts_path, 'w') as f:
        json.dump(n2bb, f)
    # A None count marks an image that could not be processed.
    corrupts = [fname for fname, nbb in n2bb.items() if nbb is None]
    if corrupts:
        with open(f'{opts.img_dir}/corrupted.json', 'w') as f:
            json.dump(corrupts, f, indent=4)
class BaseEnv(gym.Env):
    """Gym wrapper around an external `Game` server instance.

    Subclasses implement `_reset_game_config` and `_get_obs`; this base
    class handles server setup, episode lifecycle, and replay recording.
    """

    def __init__(self, config: EnvContext):
        """Create the game backend from an RLlib-style `EnvContext`.

        Each worker gets a distinct server port derived from its index.
        """
        super().__init__()
        self.record = config.get('record', False)
        self.replay_suffix = config.get('replay_suffix', '')
        self.print_log = config.get('detailed_log', False)
        self.seed(config['random_seed'])
        # One port per rollout worker to avoid collisions.
        self.server_port = (BASE_WORKER_PORT + config.worker_index)
        print(f'>>> New instance {self} on port: {self.server_port}')
        print(f'Worker Index: {config.worker_index}, VecEnv Index: {config.vector_index}')
        self.game = Game(map_dir=config['map_dir'], engine_dir=config['engine_dir'], server_port=self.server_port)
        self.game.set_map_id(config['map_id'])
        self.game.set_episode_timeout(config['timeout'])
        self.game.set_random_seed(config['random_seed'])
        self.start_location = config.get('start_location', [0, 0, 0])

    def reset(self):
        """Start a new episode and return the initial observation."""
        print('Reset for a new game ...')
        self._reset_game_config()
        if self.record:
            self.game.turn_on_record()
        else:
            self.game.turn_off_record()
        self.game.set_game_replay_suffix(self.replay_suffix)
        self.game.new_episode()
        self.state = self.game.get_state()
        self.running_steps = 0
        return self._get_obs()

    def close(self):
        """Shut down the game backend, then run gym's close."""
        self.game.close()
        return super().close()

    def render(self, mode='replay'):
        """Rendering is handled by engine replays; nothing to render here."""
        return None

    def _reset_game_config(self):
        """Subclass hook: apply per-episode game configuration."""
        raise NotImplementedError()

    def _get_obs(self):
        """Subclass hook: build an observation from `self.state`."""
        raise NotImplementedError()
# NOTE(review): the three decorator lines here were corrupted in this file
# (bare tuples plus a leading-dot `.parametrize(...)` call, which is a
# SyntaxError at module level); reconstructed as the mock patches and
# parametrize decorator they evidently came from.
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', (EvalHook, DistEvalHook))
def test_evaluation_hook(EvalHookParam):
    """Verify EvalHook/DistEvalHook evaluation cadence for various
    `interval`/`start` combinations, including resumed runs."""
    dataloader = DataLoader(torch.ones((5, 2)))
    # Constructor validation: dataloader type and positive interval.
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=(- 1))
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=(- 1))
    # interval=1: evaluate after each of the 2 epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # start=1, interval=1: still every epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # interval=2: only after epoch 2.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 1)
    # start=1, interval=2: epochs 1 and 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # start=0: evaluate before training plus after each epoch.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # Negative start warns and is clamped to pre-run evaluation.
    runner = _build_demo_runner()
    with pytest.warns(UserWarning):
        evalhook = EvalHookParam(dataloader, start=(- 2))
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # Resumed run past `start`: evaluate on the remaining epochs.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner._epoch = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # Resumed run before `start`: evaluation begins at epoch 2.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner._epoch = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
def _fixed_padding(kernel_size, dilation):
kernel_size_effective = (kernel_size + ((kernel_size - 1) * (dilation - 1)))
pad_total = (kernel_size_effective - 1)
pad_beg = (pad_total // 2)
pad_end = (pad_total - pad_beg)
return [pad_beg, pad_end, pad_beg, pad_end] |
class VanStage(nn.Module):
    """One VAN stage: overlapping patch embedding, a stack of VanLayers,
    and a LayerNorm applied over the channel dimension."""

    def __init__(self, config: VanConfig, in_channels: int, hidden_size: int, patch_size: int, stride: int, depth: int, mlp_ratio: int=4, drop_path_rate: float=0.0):
        super().__init__()
        self.embeddings = VanOverlappingPatchEmbedder(in_channels, hidden_size, patch_size, stride)
        stage_layers = [VanLayer(config, hidden_size, mlp_ratio=mlp_ratio, drop_path_rate=drop_path_rate) for _ in range(depth)]
        self.layers = nn.Sequential(*stage_layers)
        self.normalization = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        hidden_state = self.embeddings(hidden_state)
        hidden_state = self.layers(hidden_state)
        batch_size, channels, height, width = hidden_state.shape
        # (B, C, H, W) -> (B, H*W, C) so LayerNorm normalizes channels.
        tokens = hidden_state.flatten(2).transpose(1, 2)
        normalized = self.normalization(tokens)
        # Restore the (B, C, H, W) layout for the next stage.
        return normalized.view(batch_size, height, width, channels).permute(0, 3, 1, 2)
class TestNetSpec(unittest.TestCase):
    """Tests for building caffe Nets from generated NetSpec protos."""

    def load_net(self, net_proto):
        """Serialize `net_proto` to a temp prototxt and load it as a test-phase Net."""
        proto_file = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        proto_file.write(str(net_proto))
        proto_file.close()
        return caffe.Net(proto_file.name, caffe.TEST)

    def test_lenet(self):
        """Both named and anonymous LeNet specs load with 9 layers and an
        in-place ReLU (layer 6 shares bottom and top)."""
        for make_proto in (lenet, anon_lenet):
            net_proto = make_proto(50)
            self.assertEqual(net_proto.layer[6].bottom, net_proto.layer[6].top)
            net = self.load_net(net_proto)
            self.assertEqual(len(net.layers), 9)

    def test_zero_tops(self):
        """A net whose layers produce no tops yields an empty forward dict."""
        net = self.load_net(silent_net())
        self.assertEqual(len(net.forward()), 0)

    def test_type_error(self):
        """Passing an ntop=2 tuple where a single Top is expected raises a
        helpful TypeError; unpacking the tuple works."""
        data = L.DummyData(ntop=2)
        expected_msg = "^Silence input 0 is not a Top \\(type is <(type|class) 'tuple'>\\)$"
        with self.assertRaisesRegexp(TypeError, expected_msg):
            L.Silence(data, ntop=0)
        L.Silence(*data, ntop=0)
def autocrop(inputs, cropping):
    """Crop a list of (Theano) tensors to their common minimum shape.

    Args:
        inputs: list of tensors, all with the same number of dimensions.
        cropping: per-dimension crop mode; each entry is one of None
            (no crop), 'lower', 'upper', or 'center'. If shorter than
            the tensors' ndim, remaining dimensions are left uncropped.

    Returns:
        List of tensors, each sliced so that every dimension with a
        crop mode has the minimum size across inputs.

    Raises:
        ValueError: if inputs differ in dimensionality or a crop mode
            is unknown.
    """
    if (cropping is None):
        return inputs
    else:
        # All inputs must agree on rank before shapes can be compared.
        ndim = inputs[0].ndim
        if (not all(((input.ndim == ndim) for input in inputs))):
            raise ValueError('Not all inputs are of the same dimensionality. Got {0} inputs of dimensionalities {1}.'.format(len(inputs), [input.ndim for input in inputs]))
        shapes = [input.shape for input in inputs]
        # Stack the symbolic shapes and take the element-wise minimum:
        # the per-dimension target size shared by all inputs.
        shapes_tensor = T.as_tensor_variable(shapes)
        min_shape = T.min(shapes_tensor, axis=0)
        # One slice list per input tensor, built up dimension by dimension.
        slices_by_input = [[] for i in range(len(inputs))]
        cropping = list(cropping)
        # Pad the cropping spec with None (no crop) for trailing dims.
        if (ndim > len(cropping)):
            cropping = (list(cropping) + ([None] * (ndim - len(cropping))))
        for (dim, cr) in enumerate(cropping):
            if (cr is None):
                # No cropping in this dimension: take everything.
                slice_all = slice(None)
                for slices in slices_by_input:
                    slices.append(slice_all)
            else:
                sz = min_shape[dim]
                if (cr == 'lower'):
                    # Keep the first `sz` entries.
                    slc_lower = slice(None, sz)
                    for slices in slices_by_input:
                        slices.append(slc_lower)
                elif (cr == 'upper'):
                    # Keep the last `sz` entries.
                    slc_upper = slice((- sz), None)
                    for slices in slices_by_input:
                        slices.append(slc_upper)
                elif (cr == 'center'):
                    # Keep `sz` centered entries; the offset differs per
                    # input because each may have a different size here.
                    for (sh, slices) in zip(shapes, slices_by_input):
                        offset = ((sh[dim] - sz) // 2)
                        slices.append(slice(offset, (offset + sz)))
                else:
                    raise ValueError("Unknown crop mode '{0}'".format(cr))
        return [input[slices] for (input, slices) in zip(inputs, slices_by_input)]
class AdaptiveBasicBlock(nn.Module):
    """ResNet basic block whose per-conv channel counts come from a
    `bottleneck_settings` dict instead of fixed planes."""
    expansion = 1

    def __init__(self, bottleneck_settings, stride=1, downsample=None):
        super(AdaptiveBasicBlock, self).__init__()
        # Channel counts for each conv are provided externally.
        conv1_in_ch, conv1_out_ch = bottleneck_settings['conv1']
        conv2_in_ch, conv2_out_ch = bottleneck_settings['conv2']
        self.conv1 = conv3x3(conv1_in_ch, conv1_out_ch, stride)
        self.bn1 = nn.BatchNorm2d(conv1_out_ch)
        self.conv2 = conv3x3(conv2_in_ch, conv2_out_ch)
        self.bn2 = nn.BatchNorm2d(conv2_out_ch)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Shortcut branch, optionally projected to match dimensions.
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class FScoreQuantity(MonitoredQuantity):
    """Monitors a running mean F1 score over evaluation batches."""

    def __init__(self, average='macro', threshold=0.5, **kwargs):
        self.average = average
        self.threshold = threshold
        super(FScoreQuantity, self).__init__(**kwargs)

    def initialize(self):
        """Reset accumulators at the start of an evaluation pass."""
        self.total_f_score = 0.0
        self.examples_seen = 0

    def aggregate(self, y, y_hat):
        """Accumulate the F1 of thresholded predictions for one batch."""
        predictions = (y_hat > self.threshold)
        self.total_f_score += metrics.f1_score(y, predictions, average=self.average)
        self.examples_seen += 1

    def get_aggregated_value(self):
        """Mean F1 across the batches seen since `initialize()`."""
        return self.total_f_score / self.examples_seen
class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    """Import placeholder that raises unless the torch/transformers/onnx
    backends are installed."""
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    # NOTE(review): `cls`-style signatures with no decorator indicate
    # stripped @classmethod decorators; restored so the dummy mirrors the
    # real pipeline's constructors.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class Trainer(TrainerBase):
    """GQA trainer for VL-T5/VL-Bart: builds model + tokenizer, runs the
    training loop with optional fp16 (native amp or apex) and DDP, and
    handles prediction/evaluation/submission dumping."""

    def __init__(self, args, train_loader=None, val_loader=None, test_loader=None, train=True):
        """Construct model, tokenizer, optimizer/scheduler and wrap for
        mixed precision / distributed training as configured in `args`."""
        super().__init__(args, train_loader=train_loader, val_loader=val_loader, test_loader=test_loader, train=train)
        from gqa_model import VLT5GQA, VLBartGQA
        model_kwargs = {}
        # Pick the model class from the backbone name.
        if ('t5' in args.backbone):
            model_class = VLT5GQA
        elif ('bart' in args.backbone):
            model_class = VLBartGQA
        config = self.create_config()
        self.tokenizer = self.create_tokenizer()
        if ('bart' in self.args.tokenizer):
            num_added_toks = 0
            if config.use_vis_order_embedding:
                # Register textual + visual sentinel tokens (highest id first).
                additional_special_tokens = ([f'<extra_id_{i}>' for i in range((100 - 1), (- 1), (- 1))] + [f'<vis_extra_id_{i}>' for i in range((100 - 1), (- 1), (- 1))])
                special_tokens_dict = {'additional_special_tokens': additional_special_tokens}
                num_added_toks = self.tokenizer.add_special_tokens(special_tokens_dict)
                config.default_obj_order_ids = self.tokenizer.convert_tokens_to_ids([f'<vis_extra_id_{i}>' for i in range(100)])
        self.model = self.create_model(model_class, config, **model_kwargs)
        # Resize embeddings to cover any newly added tokens.
        if ('t5' in self.args.tokenizer):
            self.model.resize_token_embeddings(self.tokenizer.vocab_size)
        elif ('bart' in self.args.tokenizer):
            self.model.resize_token_embeddings((self.model.model.shared.num_embeddings + num_added_toks))
        self.model.tokenizer = self.tokenizer
        self.start_epoch = None
        if (args.load is not None):
            ckpt_path = (args.load + '.pth')
            self.load_checkpoint(ckpt_path)
        if self.args.from_scratch:
            self.init_weights()
        print(f'Model Launching at GPU {self.args.gpu}')
        if self.verbose:
            from time import time
            start = time()
        self.model = self.model.to(args.gpu)
        if train:
            (self.optim, self.lr_scheduler) = self.create_optimizer_and_scheduler()
            # fp16: prefer native amp GradScaler, fall back to apex O1.
            if (self.args.fp16 and _use_native_amp):
                self.scaler = torch.cuda.amp.GradScaler()
            elif _use_apex:
                (self.model, self.optim) = amp.initialize(self.model, self.optim, opt_level='O1', verbosity=self.verbose)
        if args.multiGPU:
            if args.distributed:
                self.model = DDP(self.model, device_ids=[args.gpu], find_unused_parameters=True)
        if self.verbose:
            print(f'It took {(time() - start):.1f}s')

    def train(self):
        """Run the full training loop, tracking the best validation score,
        logging to wandb (on the verbose/rank-0 process), and finally
        predicting on the test set with the best checkpoint."""
        if self.verbose:
            loss_meter = LossMeter()
            best_valid = 0.0
            best_epoch = 0
            # wandb project name reflects backbone and vision usage.
            if ('t5' in self.args.backbone):
                if self.args.use_vision:
                    project_name = 'VLT5_GQA'
                else:
                    project_name = 'T5_GQA'
            elif ('bart' in self.args.backbone):
                if self.args.use_vision:
                    project_name = 'VLBart_GQA'
                else:
                    project_name = 'Bart_GQA'
            wandb.init(project=project_name)
            wandb.run.name = self.args.run_name
            wandb.config.update(self.args)
            wandb.watch(self.model)
            src_dir = Path(__file__).resolve().parent
            base_path = str(src_dir.parent)
            src_dir = str(src_dir)
            wandb.save(os.path.join((src_dir + '/*.py')), base_path=base_path)
        if self.args.distributed:
            dist.barrier()
        global_step = 0
        for epoch in range(self.args.epochs):
            # Offset epoch numbering when resuming from a checkpoint.
            if (self.start_epoch is not None):
                epoch += self.start_epoch
            self.model.train()
            if self.args.distributed:
                self.train_loader.sampler.set_epoch(epoch)
            if self.verbose:
                pbar = tqdm(total=len(self.train_loader), ncols=120)
            epoch_results = {'loss': 0.0}
            quesid2ans = {}
            train_acc = 0.0
            train_acc_steps = int((len(self.train_loader) * 0.05))
            last_acc_step = 0
            for (step_i, batch) in enumerate(self.train_loader):
                # Forward pass, under autocast when native amp is active.
                if (self.args.fp16 and _use_native_amp):
                    with autocast():
                        if self.args.distributed:
                            results = self.model.module.train_step(batch)
                        else:
                            results = self.model.train_step(batch)
                elif self.args.distributed:
                    results = self.model.module.train_step(batch)
                else:
                    results = self.model.train_step(batch)
                loss = results['loss']
                # Backward pass: scaler / apex scaled loss / plain backward.
                if (self.args.fp16 and _use_native_amp):
                    self.scaler.scale(loss).backward()
                elif (self.args.fp16 and _use_apex):
                    with amp.scale_loss(loss, self.optim) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
                loss = loss.detach()
                # Gradient clipping (unscale first under native amp).
                if (self.args.clip_grad_norm > 0):
                    if (self.args.fp16 and _use_native_amp):
                        self.scaler.unscale_(self.optim)
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
                    elif (self.args.fp16 and _use_apex):
                        torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), self.args.clip_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
                if (self.args.fp16 and _use_native_amp):
                    self.scaler.step(self.optim)
                    self.scaler.update()
                else:
                    self.optim.step()
                if self.lr_scheduler:
                    self.lr_scheduler.step()
                # Zero grads by detaching (cheaper than zero_grad()).
                for param in self.model.parameters():
                    param.grad = None
                global_step += 1
                for (k, v) in results.items():
                    if (k in epoch_results):
                        epoch_results[k] += v.item()
                # Current LR for progress display (API differs by version).
                if self.lr_scheduler:
                    if (version.parse(torch.__version__) >= version.parse('1.4')):
                        lr = self.lr_scheduler.get_last_lr()[0]
                    else:
                        lr = self.lr_scheduler.get_lr()[0]
                else:
                    try:
                        lr = self.optim.get_lr()[0]
                    except AttributeError:
                        lr = self.args.lr
                if self.verbose:
                    loss_meter.update(loss.item())
                    desc_str = f'Epoch {epoch} | LR {lr:.6f}'
                    desc_str += f' | Loss {loss_meter.val:4f}'
                    pbar.set_description(desc_str)
                    pbar.update(1)
            if self.args.distributed:
                dist.barrier()
            if self.verbose:
                pbar.close()
                # Validate and checkpoint the best model so far.
                log_str = ''
                valid_score = (self.evaluate(self.val_loader) * 100.0)
                if (valid_score > best_valid):
                    best_valid = valid_score
                    best_epoch = epoch
                    self.save('BEST')
                log_str += ('\nEpoch %d: Testdev %0.2f' % (epoch, valid_score))
                log_str += ('\nEpoch %d: Best %0.2f\n' % (best_epoch, best_valid))
                wandb_log_dict = {}
                wandb_log_dict['Train/Loss'] = (epoch_results['loss'] / len(self.train_loader))
                wandb_log_dict['Testdev/score'] = valid_score
                wandb.log(wandb_log_dict, step=epoch)
                print(log_str)
            if self.args.distributed:
                dist.barrier()
        if self.verbose:
            # Reload best checkpoint and dump test predictions for submission.
            self.save('LAST')
            best_path = os.path.join(self.args.output, 'BEST')
            self.load(best_path)
            dump_path = os.path.join(self.args.output, 'submit.json')
            self.predict(self.test_loader, dump_path=dump_path)
            wandb.save(dump_path, base_path=self.args.output)
            wandb.log({'finished': True})
        if self.args.distributed:
            dist.barrier()
            exit()

    def predict(self, loader, dump_path=None):
        """Generate answers for every batch in `loader`.

        Returns a dict mapping question id -> predicted answer; when
        `dump_path` is given the results are also written via the
        loader's evaluator.
        """
        self.model.eval()
        with torch.no_grad():
            quesid2ans = {}
            gen_kwargs = {}
            if (self.args.num_beams > 1):
                gen_kwargs['num_beams'] = self.args.num_beams
            if self.verbose:
                pbar = tqdm(total=len(loader), ncols=120, desc='Prediction')
            for (i, batch) in enumerate(loader):
                if self.args.distributed:
                    results = self.model.module.test_step(batch, **gen_kwargs)
                else:
                    results = self.model.test_step(batch, **gen_kwargs)
                pred_ans = results['pred_ans']
                ques_ids = batch['question_ids']
                for (qid, ans) in zip(ques_ids, pred_ans):
                    quesid2ans[qid] = ans
                if self.verbose:
                    pbar.update(1)
            if (dump_path is not None):
                print('\nsave dump at', dump_path)
                loader.evaluator.dump_result(quesid2ans, dump_path)
            return quesid2ans

    def evaluate(self, loader, dump_path=None):
        """Predict on `loader` and score the answers with its evaluator."""
        evaluator = loader.evaluator
        quesid2ans = self.predict(loader, dump_path)
        return evaluator.evaluate(quesid2ans)
def extra_bitex(ted_data_path, lsrc_lang, ltrg_lang, target_token, output_data_path):
    """Extract a bilingual train/dev/test bitext from the TED corpus.

    Writes `{train,test,valid}.{src}-{trg}.{lang}` files under
    `output_data_path`, using short TED language codes internally.
    """
    def get_ted_lang(lang):
        """Map a long language code to the short code used by the TED corpus."""
        long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca']
        prefix5 = lang[:5]
        if prefix5 in long_langs:
            return prefix5
        if lang[:4] == 'calv':
            return prefix5
        if lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']:
            return lang.lower().replace('_', '-')
        return lang[:2]
    src_lang = get_ted_lang(lsrc_lang)
    trg_lang = get_ted_lang(ltrg_lang)
    train_lang_dict = {'source': [src_lang], 'target': [trg_lang]}
    eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]}
    obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path, lang_dict=train_lang_dict, target_token=target_token, corpus_type='file', eval_lang_dict=eval_lang_dict, zero_shot=False, bilingual=True)
    os.makedirs(output_data_path, exist_ok=True)
    # Filenames use underscores in language codes.
    lsrc_lang = lsrc_lang.replace('-', '_')
    ltrg_lang = ltrg_lang.replace('-', '_')
    # Emit source and target files per split; 'valid' files come from
    # the corpus 'dev' split.
    for prefix, split_type in (('train', 'train'), ('test', 'test'), ('valid', 'dev')):
        obj.save_file(output_data_path + f'/{prefix}.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}', split_type=split_type, data_type='source', lang=src_lang)
        obj.save_file(output_data_path + f'/{prefix}.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}', split_type=split_type, data_type='target', lang=trg_lang)
class VideoDiffFramesDataset_FullBGID(Dataset):
    """Dataset of video clips returning frames plus background/identity/
    motion variants built from second-order frame differences."""

    def __init__(self, datapath, idspath, img_size, num_frames, limit):
        """
        Args:
            datapath: root directory containing one frame folder per video id.
            idspath: JSON file listing the video ids to use.
            img_size: square side length frames are resized to.
            num_frames: number of frames returned per sample.
            limit: center of the diff-magnitude band used for the id mask;
                the band is limit +/- 0.4, clamped to [0, 1].
        """
        super().__init__()
        self.limit = limit
        self.boarden = 0.4
        self.lower_bound = max(0, (self.limit - self.boarden))
        self.upper_bound = min(1, (self.limit + self.boarden))
        self.img_size = img_size
        self.json_path = idspath
        self.frame_path = datapath
        self.num_frames = num_frames
        self.video_ids = json.load(open(self.json_path, 'r'))
        self.transform = transforms.Compose([transforms.Resize((img_size, img_size), interpolation=PIL.Image.BILINEAR), transforms.ToTensor()])
        logger = get_logger()
        logger.info(f'{len(self.video_ids)} videos from datapath {datapath}, img_size: {img_size}, num_frames: {num_frames}')

    def __len__(self):
        return len(self.video_ids)

    def skip_sample(self, ind):
        """Fallback when a video is unusable: try the next index (wraps to 0)."""
        if (ind >= (self.__len__() - 1)):
            return self.__getitem__(0)
        return self.__getitem__((ind + 1))

    def __getitem__(self, index):
        """Return [frames, background, identity, motion] tensors, each of
        shape (num_frames, C, H, W); skips videos with too few frames."""
        video_id = self.video_ids[index]
        cur_path = os.path.join(self.frame_path, video_id)
        files = sorted(os.listdir(cur_path))
        # Need num_frames plus two context frames (previous and next).
        if (len(files) < (self.num_frames + 2)):
            return self.skip_sample(index)
        elif (len(files) == (self.num_frames + 2)):
            start = 0
        else:
            # Random clip start leaving room for the two context frames.
            start = np.random.choice(range(((len(files) - self.num_frames) - 2)))
        # NOTE(review): pre_img is loaded once and never advanced inside
        # the loop, so every diff uses the clip's first frame as "previous"
        # — confirm whether a sliding pre_img was intended.
        pre_img = get_img_from_path(os.path.join(cur_path, files[start]), transform=self.transform)
        nxt_img = get_img_from_path(os.path.join(cur_path, files[(start + 1)]), transform=self.transform)
        (imgs, imgs_bg, imgs_id, imgs_mo) = ([], [], [], [])
        for file in files[(start + 2):((start + 2) + self.num_frames)]:
            cur_img = nxt_img
            nxt_img = get_img_from_path(os.path.join(cur_path, file), transform=self.transform)
            # Second-order difference: 2*cur - prev - next.
            cur_diff = (((cur_img * 2) - pre_img) - nxt_img)
            max_diff = torch.max(torch.abs(cur_diff), dim=0)[0]
            # NOTE(review): id_mask is computed but never used below —
            # img_id/img_bg are plain copies of cur_img; verify intent.
            id_mask = ((max_diff >= self.lower_bound) * (max_diff <= self.upper_bound))
            img_id = cur_img
            img_bg = cur_img
            imgs.append(cur_img.unsqueeze(0))
            imgs_bg.append(img_bg.unsqueeze(0))
            imgs_id.append(img_id.unsqueeze(0))
            imgs_mo.append(cur_diff.unsqueeze(0))
        ret_img = torch.cat(imgs, dim=0)
        ret_img_bg = torch.cat(imgs_bg, dim=0)
        ret_img_id = torch.cat(imgs_id, dim=0)
        ret_img_mo = torch.cat(imgs_mo, dim=0)
        return [ret_img, ret_img_bg, ret_img_id, ret_img_mo]
def configure_model(model, eps, momentum, reset_stats, no_stats):
    """Prepare every BatchNorm layer of *model* for test-time adaptation.

    All 1-D/2-D BatchNorm modules are put in train mode with the given
    ``eps``/``momentum``; optionally their running statistics are reset
    (``reset_stats``) or disabled entirely (``no_stats``).  Returns the
    same model, modified in place.
    """
    for module in model.modules():
        if not isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
            continue
        module.train()
        module.eps = eps
        module.momentum = momentum
        if reset_stats:
            module.reset_running_stats()
        if no_stats:
            # Force the layer to always use batch statistics.
            module.track_running_stats = False
            module.running_mean = None
            module.running_var = None
    return model
def merge_registries(a, b):
    """Merge registry ``b`` into registry ``a`` in place.

    Keys present in both registries are combined with ``merge_lists``;
    keys only in ``b`` are copied over.  Returns the (mutated) ``a``.
    """
    for key in b:
        if key in a:
            a[key] = merge_lists(a[key], b[key])
        else:
            a[key] = b[key]
    return a
class RSU7(nn.Module):
    """RSU-7 block (U^2-Net style): a 7-level residual U-shaped sub-network.

    The input is first projected to ``out_ch`` channels, run through a
    6-stage max-pooled encoder plus a dilated bottom convolution, decoded
    with skip connections, and finally added back to the input projection
    (residual connection).
    """
    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU7, self).__init__()
        # Input projection; its output is also the residual added at the end.
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder: conv + 2x downsampling at each stage.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottom of the U: dilated conv instead of further downsampling.
        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder: each stage consumes [upsampled deeper feature, skip].
        self.rebnconv6d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv5d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV((mid_ch * 2), out_ch, dirate=1)
    def forward(self, x):
        """Return the RSU-7 feature map with a residual connection to the
        projected input (same channel count as ``out_ch``)."""
        hx = x
        hxin = self.rebnconvin(hx)
        # Encoder path, keeping per-stage outputs for the skip connections.
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)
        hx = self.pool5(hx5)
        hx6 = self.rebnconv6(hx)
        hx7 = self.rebnconv7(hx6)
        # Decoder path: concat with skip, conv, then upsample to next level.
        hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
        hx6dup = _upsample_like(hx6d, hx5)
        hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the input projection.
        return (hx1d + hxin)
class LayoutLMv2TokenizerFast(metaclass=DummyObject):
    """Import-time placeholder used when the optional ``tokenizers`` backend
    is not installed; instantiation raises an informative backend error."""
    _backends = ['tokenizers']
    def __init__(self, *args, **kwargs):
        # Raises with instructions on installing the missing backend.
        requires_backends(self, ['tokenizers'])
def parse_args():
    """Parse command-line options for gathering benchmarked model metrics.

    Returns:
        argparse.Namespace with ``root``, ``txt_path``, ``out``,
        ``not_show``, ``excel`` and ``ncol`` attributes.
    """
    parser = argparse.ArgumentParser(description='Gather benchmarked models metric')
    # Required positional inputs.
    for name, help_text in (
            ('root', 'root path of benchmarked models to be gathered'),
            ('txt_path', 'txt path output by benchmark_filter')):
        parser.add_argument(name, type=str, help=help_text)
    # Optional outputs / reporting knobs.
    parser.add_argument('--out', type=str, help='output path of gathered metrics to be stored')
    parser.add_argument('--not-show', action='store_true', help='not show metrics')
    parser.add_argument('--excel', type=str, help='input path of excel to be recorded')
    parser.add_argument('--ncol', type=int, help='Number of column to be modified or appended')
    return parser.parse_args()
class Sum(_Reduce):
    """Sum reduction over the dimensions given by ``dim``.

    Thin wrapper around the shared ``_Reduce`` base configured with the
    'sum' operation.
    """
    def __init__(self, dim, keepdim=False):
        super().__init__(dim, keepdim, 'sum')
    # NOTE(review): from_onnx takes neither `self` nor `cls` and appears
    # intended to be called as ``Sum.from_onnx(...)``; a @staticmethod
    # decorator looks missing -- calling it on an *instance* would mis-bind
    # the first argument. Confirm against sibling ops in this file.
    def from_onnx(parameters=None, attributes=None):
        # Build a Sum node from ONNX attributes: required 'axes', optional
        # 'keepdims' (defaults to 1 / True).
        if (attributes is None):
            attributes = {}
        keepdim = _identify_bool_attributes_with_defaults(attributes, 'keepdims', 1)
        return Sum(attributes['axes'], keepdim)
def ignore_mkt_data_buffer_decorator(func):
    """Decorator that collapses buffered market/volume data to the latest entry.

    For every element of ``raw_state`` the ``parsed_mkt_data`` and
    ``parsed_volume_data`` buffers are replaced by their most recent item,
    the list-of-dicts is flipped into a dict-of-lists (twice, once per
    nesting level), and the result is forwarded to ``func``.

    NOTE(review): ``raw_state`` is mutated in place before flipping --
    presumably intentional; confirm callers do not reuse the buffers.
    """
    def wrapper_mkt_data_buffer_decorator(self, raw_state):
        snapshot = deepcopy(raw_state)
        for state, copied in zip(raw_state, snapshot):
            # Keep only the most recent buffered entry.
            state['parsed_mkt_data'] = copied['parsed_mkt_data'][-1]
            state['parsed_volume_data'] = copied['parsed_volume_data'][-1]
        flipped = {key: list_dict_flip(value) for key, value in list_dict_flip(raw_state).items()}
        return func(self, flipped)
    return wrapper_mkt_data_buffer_decorator
def build_model2(X_train, y_train, X_valid, y_valid, max_len, max_features, embed_size, embedding_matrix, lr=0.0, lr_d=0.0, spatial_dr=0.0, dense_units=128, conv_size=128, dr=0.2, patience=3, fold_id=1):
    """Train a Conv1D+LSTM binary classifier and return the best checkpoint.

    The model uses a frozen embedding layer initialised from
    ``embedding_matrix``, trains with early stopping on ``val_acc``, then
    reloads the best weights saved by ``ModelCheckpoint`` before returning.

    Note: ``spatial_dr``, ``dense_units``, ``conv_size`` and ``dr`` are kept
    for signature compatibility but are not used by this architecture.

    Fix: the original assigned ``model2 = model`` (an alias, not a copy) and
    then operated on both names interchangeably -- the misleading alias has
    been removed; behavior is unchanged.
    """
    file_path = f'best_model_fold_{fold_id}.hdf5'
    check_point = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_acc', mode='max', patience=patience)
    model = Sequential()
    # +1 row for the padding/OOV index; embeddings are frozen.
    model.add(Embedding((max_features + 1), (embed_size * 2), input_length=max_len, weights=[embedding_matrix], trainable=False))
    model.add(Conv1D(200, 10, activation='relu'))
    model.add(MaxPooling1D(pool_size=5))
    model.add(LSTM(100))
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(rate=0.35))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr, decay=lr_d), metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=16, epochs=10, validation_data=(X_valid, y_valid), verbose=1, callbacks=[early_stop, check_point])
    # Restore the best epoch's weights and recompile before returning.
    model.load_weights(file_path)
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr, decay=lr_d), metrics=['accuracy'])
    return model
class PRNet_PAF_Vis_Shape(nn.Module):
    """PRNet-style encoder/decoder conditioned on PAF, mask and shape maps.

    The PAF input is compressed, gated by a learned mask, concatenated with
    the 3-channel shape map, then run through a 10-stage residual encoder,
    a center block, and a 5-stage upsampling decoder to produce an
    ``out_channels`` map at the input resolution.
    """
    def __init__(self, in_channels=3, out_channels=3, kernal_size_paf=3):
        super().__init__()
        size = 16
        # Mask branch: 1 -> in_channels, used multiplicatively to gate the PAF.
        self.mask_conv = nn.Sequential(*padding_same_conv2d(256, 1, in_channels, kernel_size=3, stride=1), nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.001), nn.ReLU(inplace=True))
        # PAF compression branch (strided by kernal_size_paf).
        self.paf_conv = nn.Sequential(*padding_same_conv2d((256 * kernal_size_paf), in_channels, in_channels, kernel_size=kernal_size_paf, stride=kernal_size_paf))
        # Stem over the concatenated [gated PAF, shape map] input.
        self.input_conv = nn.Sequential(*padding_same_conv2d(256, (in_channels + 3), size, kernel_size=4, stride=1), nn.BatchNorm2d(size, eps=0.001, momentum=0.001), nn.ReLU(inplace=True))
        # Encoder: alternating stride-2 (downsample) and stride-1 residual blocks.
        self.down_conv_1 = resBlock(size, (size * 2), kernel_size=4, stride=2, input_size=256)
        self.down_conv_2 = resBlock((size * 2), (size * 2), kernel_size=4, stride=1, input_size=128)
        self.down_conv_3 = resBlock((size * 2), (size * 4), kernel_size=4, stride=2, input_size=128)
        self.down_conv_4 = resBlock((size * 4), (size * 4), kernel_size=4, stride=1, input_size=64)
        self.down_conv_5 = resBlock((size * 4), (size * 8), kernel_size=4, stride=2, input_size=64)
        self.down_conv_6 = resBlock((size * 8), (size * 8), kernel_size=4, stride=1, input_size=32)
        self.down_conv_7 = resBlock((size * 8), (size * 16), kernel_size=4, stride=2, input_size=32)
        self.down_conv_8 = resBlock((size * 16), (size * 16), kernel_size=4, stride=1, input_size=16)
        self.down_conv_9 = resBlock((size * 16), (size * 32), kernel_size=4, stride=2, input_size=16)
        self.down_conv_10 = resBlock((size * 32), (size * 32), kernel_size=4, stride=1, input_size=8)
        # Bottleneck block at the lowest resolution.
        self.center_conv = nn.Sequential(nn.ConstantPad2d((2, 1, 2, 1), 0), nn.ConvTranspose2d((size * 32), (size * 32), kernel_size=4, stride=1, padding=3, bias=False), nn.BatchNorm2d((size * 32), eps=0.001, momentum=0.001), nn.ReLU(inplace=True))
        # Decoder: successive upsampling back to the input resolution.
        self.up_conv_5 = upBlock((size * 32), (size * 16))
        self.up_conv_4 = upBlock((size * 16), (size * 8))
        self.up_conv_3 = upBlock((size * 8), (size * 4))
        self.up_conv_2 = upBlock((size * 4), (size * 2), 1)
        self.up_conv_1 = upBlock((size * 2), size, 1)
        self.output_conv = nn.Sequential(nn.Conv2d(size, out_channels, 1, stride=1, padding=0))
    def forward(self, PAF, mask, shapemap):
        """Predict the output map from PAF (gated by mask) plus shapemap.

        assumes PAF/mask/shapemap spatial sizes match the input_size values
        configured above (256 after paf_conv) -- TODO confirm with callers.
        """
        PAF = self.paf_conv(PAF)
        mask = self.mask_conv(mask)
        # Gate the compressed PAF by the learned mask.
        PAF = (PAF * mask)
        x = torch.cat([PAF, shapemap], 1)
        x = self.input_conv(x)
        x = self.down_conv_1(x)
        x = self.down_conv_2(x)
        x = self.down_conv_3(x)
        x = self.down_conv_4(x)
        x = self.down_conv_5(x)
        x = self.down_conv_6(x)
        x = self.down_conv_7(x)
        x = self.down_conv_8(x)
        x = self.down_conv_9(x)
        x = self.down_conv_10(x)
        x = self.center_conv(x)
        x = self.up_conv_5(x)
        x = self.up_conv_4(x)
        x = self.up_conv_3(x)
        x = self.up_conv_2(x)
        x = self.up_conv_1(x)
        x = self.output_conv(x)
        return x
def ldcnn(bands=60, frames=31, n_classes=10, filters=80, L=57, W=6, fully_connected=5000, dropout=0.25):
    """Build the LD-CNN model: two convolutional heads (mel + delta inputs)
    fused by addition, followed by a large regularised dense layer and a
    softmax classifier.

    Args:
        bands/frames: spectrogram input dimensions (single channel).
        n_classes: number of output classes.
        filters, L, W: parameters forwarded to ``ldcnn_head``.
        fully_connected: width of the dense fusion layer.
        dropout: dropout rate applied before and after the dense layer.

    Returns:
        An uncompiled ``keras.Model`` with inputs [mel_input, delta_input].
    """
    # Imports are local so Keras is only required when the model is built.
    from keras.models import Sequential, Model
    from keras.layers import Dense, Dropout, Activation, Input, Concatenate
    from keras.regularizers import l2
    import keras.layers
    input_shape = (bands, frames, 1)
    def head(input, name):
        # Shared head builder; see ldcnn_head for the layer stack.
        return ldcnn_head(input, name, filters, L, W)
    mel_input = Input(shape=input_shape, name='mel_input')
    delta_input = Input(shape=input_shape, name='delta_input')
    heads = [head(mel_input, 'mel'), head(delta_input, 'delta')]
    # Feature-sum fusion of the two heads.
    m = keras.layers.add(heads, name='FSL')
    m = Dropout(dropout)(m)
    m = Dense(fully_connected, activation='relu', kernel_regularizer=l2(0.001), name='FCL')(m)
    m = Dropout(dropout)(m)
    m = Dense(n_classes, activation='softmax')(m)
    model = Model([mel_input, delta_input], m)
    return model
# NOTE(review): the line below looks like the remnant of a stripped decorator
# (e.g. ``@job(before=[init], after=[post])``); as written it is a bare tuple
# expression over `init`/`post` -- confirm against the original source.
(before=[init], after=[post])
def con_train_e2e_test():
    """Launch a grid-searched E2E test run for the constrained-training job.

    Sets shared USR parameters, builds the train.py test command (loading a
    pre-trained model checkpoint per config), and dispatches one job per
    combination in ``search_list`` via ``grid_search``.
    """
    USR.set('dataset', 'data/e2e_aligned/')
    USR.set('decoder', 'crf')
    USR.set('L', '8')
    USR.set('layers', '2')
    USR.set('min_epochs', '8')
    USR.set('posterior_reg', '1')
    # Base command; %(...)s placeholders are filled from ALL(), {config} is
    # substituted later per grid-search configuration.
    command = ('%(S_python_itrptr)s %(S_python_dir)s/train.py --data %(U_dataset)s --load /n/holylfs/LABS/rush_lab/users/lisa/FSA-RNN/jobs/con_train_e2e-bert1/model/{config} --save_out %(S_output)s/{config}-test --epoch 55 --data_mode real --optim_algo 1 --L %(U_L)s --decoder %(U_decoder)s --cuda --one_rnn --sep_attn --option beam' % ALL())
    command += ' --posterior_reg 1 --layers 2 --train_q_epoch 5 --full_independence 3 --weight_decay 0 --test --begin_r 0 --end_r 1000'
    # Hyper-parameter grid; '|'-separated values expand to multiple runs.
    search_list = [('pr_reg_style', 'phrase'), ('bsz', '20'), ('pr_coef', '25|15|5|0'), ('hard_code', 'no'), ('decoder_constraint', 'no'), ('encoder_constraint', 'yes'), ('tagset_size', '11'), ('max_mbs_per_epoch', '25000'), ('use_elmo', 'no'), ('seed', '0'), ('thresh', '1000'), ('hidden_dim', '500'), ('embedding_dim', '500'), ('lr_p', '0.001'), ('lr_q', '0.001'), ('sample_size', '4'), ('dual_attn', 'yes'), ('trans_unif', 'yes'), ('task', 'bert1|bert2')]
    grid_search((lambda map: basic_func(command, map)), search_list, seed=1)
    return
class MaxTimeCriteria(metaclass=DummyObject):
    """Import-time placeholder used when the optional ``torch`` backend is
    not installed; instantiation raises an informative backend error."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises with instructions on installing the missing backend.
        requires_backends(self, ['torch'])
class TestMeanTeacherHook(TestCase):
    """End-to-end check of MeanTeacherHook across train / test / wrapped-model
    test runs, using a throwaway work directory."""
    def setUp(self):
        # Fresh temp dir per test for checkpoints and logs.
        self.temp_dir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.temp_dir.cleanup()
    def test_mean_teacher_hook(self):
        device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
        model = ToyModel2().to(device)
        # 1) Train for two epochs with the hook enabled and verify a
        #    checkpoint is written.
        runner = Runner(model=model, train_dataloader=dict(dataset=DummyDataset(), sampler=dict(type='DefaultSampler', shuffle=True), batch_size=3, num_workers=0), val_dataloader=dict(dataset=DummyDataset(), sampler=dict(type='DefaultSampler', shuffle=False), batch_size=3, num_workers=0), val_evaluator=[ToyMetric1()], work_dir=self.temp_dir.name, default_scope='mmdet', optim_wrapper=OptimWrapper(torch.optim.Adam(ToyModel().parameters())), train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1), val_cfg=dict(), default_hooks=dict(logger=None), custom_hooks=[dict(type='MeanTeacherHook')], experiment_name='test1')
        runner.train()
        self.assertTrue(osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
        # 2) Reload that checkpoint and run the test loop with the hook.
        runner = Runner(model=model, test_dataloader=dict(dataset=DummyDataset(), sampler=dict(type='DefaultSampler', shuffle=True), batch_size=3, num_workers=0), test_evaluator=[ToyMetric1()], test_cfg=dict(), work_dir=self.temp_dir.name, default_scope='mmdet', load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'), default_hooks=dict(logger=None), custom_hooks=[dict(type='MeanTeacherHook')], experiment_name='test2')
        runner.test()
        # 3) Same test run but with the model wrapped (hook must unwrap
        #    `.module` correctly).
        _WRAPPERS.register_module()
        class DummyWrapper(BaseModel):
            def __init__(self, model):
                super().__init__()
                self.module = model
            def forward(self, *args, **kwargs):
                return self.module(*args, **kwargs)
        runner = Runner(model=DummyWrapper(ToyModel2()), test_dataloader=dict(dataset=DummyDataset(), sampler=dict(type='DefaultSampler', shuffle=True), batch_size=3, num_workers=0), test_evaluator=[ToyMetric1()], test_cfg=dict(), work_dir=self.temp_dir.name, default_scope='mmdet', load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'), default_hooks=dict(logger=None), custom_hooks=[dict(type='MeanTeacherHook')], experiment_name='test3')
        runner.test()
class QDQBertForNextSentencePrediction(metaclass=DummyObject):
    """Import-time placeholder used when the optional ``pytorch_quantization``
    and ``torch`` backends are not installed; instantiation raises an
    informative backend error."""
    _backends = ['pytorch_quantization', 'torch']
    def __init__(self, *args, **kwargs):
        # Raises with instructions on installing the missing backends.
        requires_backends(self, ['pytorch_quantization', 'torch'])
def test_formants(waveform):
    """The formants() accessor of a waveform fixture should return a dict."""
    result = waveform.formants()
    assert isinstance(result, dict)
class SPAdaINResBlock(nn.Module):
    """Residual block of SPAdaIN -> ReLU -> Conv1d stages.

    Both the main path (two stages) and the skip path (one stage) are
    conditioned on the ``addition`` input through SPAdaIN; their outputs
    are summed.
    """
    def __init__(self, input_nc, planes, norm=nn.InstanceNorm1d, conv_kernel_size=1, padding=0):
        super(SPAdaINResBlock, self).__init__()
        # Main path, stage 1.
        self.spadain1 = SPAdaIN(norm=norm, input_nc=input_nc, planes=planes)
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
        # Main path, stage 2.
        self.spadain2 = SPAdaIN(norm=norm, input_nc=input_nc, planes=planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
        # Conditioned skip path.
        self.spadain_res = SPAdaIN(norm=norm, input_nc=input_nc, planes=planes)
        self.conv_res = nn.Conv1d(planes, planes, kernel_size=conv_kernel_size, stride=1, padding=padding)
    def forward(self, x, addition):
        """Return main(x) + skip(x), both conditioned on ``addition``."""
        main = self.conv1(self.relu(self.spadain1(x, addition)))
        main = self.conv2(self.relu(self.spadain2(main, addition)))
        skip = self.conv_res(self.relu(self.spadain_res(x, addition)))
        return main + skip
class SpatialZeroPadding(Layer):
    """BigDL layer that zero-pads a spatial tensor by the given number of
    elements on each side (left, right, top, bottom)."""
    def __init__(self, pad_left, pad_right, pad_top, pad_bottom, bigdl_type='float'):
        super(SpatialZeroPadding, self).__init__(None, bigdl_type, pad_left, pad_right, pad_top, pad_bottom)
class TestGetData(unittest.TestCase):
    """Integration test for ``get_data``: downloads/extracts the data bundle
    into a temporary directory and verifies the expected layout.

    NOTE(review): marked 'Too long' in the original -- this test is slow and
    presumably network-dependent; confirm before running in CI.
    """
    def test_get_data(self):
        target_dir = tempfile.mkdtemp()
        get_data(target_dir)
        # The downloaded archive should have been removed after extraction.
        self.assertFalse(os.path.isfile(os.path.join(target_dir, 'data.zip')))
        self.assertTrue(os.path.isdir(os.path.join(target_dir, 'data')))
        expectedFiles = ['collisional_absorption.pkl', 'pressures.npy', 'species_info', 'temperatures.npy', 'wavelengths.npy', 'stellar_spectra.pkl']
        expectedDirs = ['Absorption', 'abundances']
        for f in expectedFiles:
            filename = os.path.join(target_dir, 'data', f)
            self.assertTrue(os.path.isfile(filename))
        for d in expectedDirs:
            dirname = os.path.join(target_dir, 'data', d)
            self.assertTrue(os.path.isdir(dirname))
def main(n_splits=10, random_state=1):
    """Prepare K-fold train/test splits of the OnlineNewsPopularity dataset.

    Reads the CSV, drops identifier columns, declares label / numeric /
    categorical feature groups, preprocesses each fold via util.preprocess,
    and saves the resulting per-fold arrays to ``data.npy``.
    """
    logger = util.get_logger('log.txt')
    logger.info('timestamp: {}'.format(datetime.now()))
    start = time.time()
    df = pd.read_csv('OnlineNewsPopularity.csv')
    logger.info('\ntime to read in data...{:.3f}s'.format((time.time() - start)))
    columns = list(df.columns)
    # Identifier columns that carry no predictive signal.  Column names in
    # this CSV have a leading space (' timedelta', ' shares', ...).
    remove_cols = ['url', ' timedelta']
    if (len(remove_cols) > 0):
        df = df.drop(columns=remove_cols)
        columns = [x for x in columns if (x not in remove_cols)]
    features = {}
    features['label'] = [' shares']
    features['numeric'] = [' n_tokens_title', ' n_tokens_content', ' n_unique_tokens', ' n_non_stop_words', ' n_non_stop_unique_tokens', ' num_hrefs', ' num_self_hrefs', ' num_imgs', ' num_videos', ' average_token_length', ' num_keywords', ' data_channel_is_lifestyle', ' data_channel_is_entertainment', ' data_channel_is_bus', ' data_channel_is_socmed', ' data_channel_is_tech', ' data_channel_is_world', ' kw_min_min', ' kw_max_min', ' kw_avg_min', ' kw_min_max', ' kw_max_max', ' kw_avg_max', ' kw_min_avg', ' kw_max_avg', ' kw_avg_avg', ' self_reference_min_shares', ' self_reference_max_shares', ' self_reference_avg_sharess', ' weekday_is_monday', ' weekday_is_tuesday', ' weekday_is_wednesday', ' weekday_is_thursday', ' weekday_is_friday', ' weekday_is_saturday', ' weekday_is_sunday', ' is_weekend', ' LDA_00', ' LDA_01', ' LDA_02', ' LDA_03', ' LDA_04', ' global_subjectivity', ' global_sentiment_polarity', ' global_rate_positive_words', ' global_rate_negative_words', ' rate_positive_words', ' rate_negative_words', ' avg_positive_polarity', ' min_positive_polarity', ' max_positive_polarity', ' avg_negative_polarity', ' min_negative_polarity', ' max_negative_polarity', ' title_subjectivity', ' title_sentiment_polarity', ' abs_title_subjectivity', ' abs_title_sentiment_polarity']
    # Everything that is neither numeric nor the label is categorical.
    features['categorical'] = list(((set(columns) - set(features['numeric'])) - set(features['label'])))
    fold = 1
    data = {}
    rs = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)
    for (train_idxs, test_idxs) in rs.split(df):
        logger.info(f'''
fold {fold}...''')
        train_df = df.iloc[train_idxs]
        test_df = df.iloc[test_idxs]
        # Detailed preprocessing logs only for the first fold.
        (X_train, y_train, X_test, y_test, feature) = util.preprocess(train_df, test_df, features, logger=(logger if (fold == 1) else None), objective='regression')
        data[fold] = {'X_train': X_train, 'y_train': y_train, 'X_test': X_test, 'y_test': y_test, 'feature': feature}
        fold += 1
    # NOTE: the preview below reflects only the LAST fold's arrays.
    logger.info(f'''
fold {(fold - 1)} preview:''')
    logger.info(f'train (head): {X_train[:5]}, {y_train[:5]}')
    logger.info(f'test (head): {X_test[:5]}, {y_test[:5]}')
    logger.info(f'feature (head): {feature[:5]}')
    logger.info(f'X_train.shape: {X_train.shape}')
    logger.info(f'X_test.shape: {X_test.shape}')
    logger.info(f'y_train.shape: {y_train.shape}, min., max.: {y_train.min()}, {y_train.max()}')
    logger.info(f'y_test.shape: {y_test.shape}, min., max.: {y_test.min()}, {y_test.max()}')
    # Pickled dict of folds; os.path.join with a single argument is a no-op.
    np.save(os.path.join('data.npy'), data)
class BNNeck3(nn.Module):
    """BNNeck head (re-ID style): 1x1 reduction conv -> BatchNorm neck ->
    bias-free linear classifier.

    With ``return_f=True`` the forward pass returns
    (after_neck, classifier_score, before_neck) for metric + ID losses.
    """
    def __init__(self, input_dim, class_num, feat_dim, return_f=False):
        super(BNNeck3, self).__init__()
        self.return_f = return_f
        self.reduction = nn.Conv2d(input_dim, feat_dim, 1, bias=False)
        self.bn = nn.BatchNorm1d(feat_dim)
        # Standard BNNeck trick: freeze the BN shift so features stay centered.
        self.bn.bias.requires_grad_(False)
        self.classifier = nn.Linear(feat_dim, class_num, bias=False)
        self.bn.apply(self.weights_init_kaiming)
        self.classifier.apply(self.weights_init_classifier)
    def forward(self, x):
        # assumes x is (N, input_dim, 1, 1) so view() below flattens cleanly
        # -- TODO confirm with callers.
        x = self.reduction(x)
        before_neck = x.view(x.size(0), x.size(1))
        after_neck = self.bn(before_neck)
        if self.return_f:
            score = self.classifier(after_neck)
            return (after_neck, score, before_neck)
        else:
            # NOTE(review): this applies the Linear classifier to the 4-D
            # reduction output rather than `after_neck`; it looks like it
            # should be `self.classifier(after_neck)` -- confirm whether this
            # branch is ever exercised.
            x = self.classifier(x)
            return x
    def weights_init_kaiming(self, m):
        classname = m.__class__.__name__
        if (classname.find('Linear') != (- 1)):
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
            nn.init.constant_(m.bias, 0.0)
        elif (classname.find('Conv') != (- 1)):
            nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            if (m.bias is not None):
                nn.init.constant_(m.bias, 0.0)
        elif (classname.find('BatchNorm') != (- 1)):
            if m.affine:
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
    def weights_init_classifier(self, m):
        classname = m.__class__.__name__
        if (classname.find('Linear') != (- 1)):
            nn.init.normal_(m.weight, std=0.001)
            # NOTE(review): `if m.bias:` truth-tests a tensor and would raise
            # for a multi-element bias; it only works here because the
            # classifier is created with bias=False (bias is None).  The
            # sibling init above uses `is not None` -- confirm and align.
            if m.bias:
                nn.init.constant_(m.bias, 0.0)
class save_file_path(_ParseType):
    """argparse type callable that validates a printable path string and
    converts it to a ``pathlib.Path``."""
    def __call__(self, string: str) -> pathlib.Path:
        if string.isprintable():
            return pathlib.Path(string)
        msg = f"'{string}' must only contain printable characters."
        raise argparse.ArgumentTypeError(msg)
def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name='default'):
    """Combine several LoRA adapters into one weighted adapter on a pipeline.

    Always merges on ``pipe.unet``; the text encoder is merged too only when
    it is a PeftModel (i.e. has LoRA adapters attached).  Returns the same
    pipeline, modified in place.
    """
    pipe.unet.add_weighted_adapter(adapters, weights, adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name)
    return pipe
class InceptConv(nn.Module):
    """Standard Inception convolution unit:
    bias-free Conv2d -> BatchNorm2d(eps=1e-3) -> in-place ReLU."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(InceptConv, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(num_features=out_channels, eps=0.001)
        self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        """Apply convolution, batch normalisation and ReLU in sequence."""
        return self.activ(self.bn(self.conv(x)))
def test_timer_run():
    """Exercise mmcv.Timer: elapsed-time accounting and the error raised
    when querying a timer that was never started.

    Note: uses real 1-second sleeps, so this test takes ~2s wall time.
    """
    timer = mmcv.Timer()
    time.sleep(1)
    # since_start measures from construction; 10ms tolerance for scheduling.
    assert (abs((timer.since_start() - 1)) < 0.01)
    time.sleep(1)
    # since_last_check resets its reference point on each call.
    assert (abs((timer.since_last_check() - 1)) < 0.01)
    assert (abs((timer.since_start() - 2)) < 0.01)
    # A timer constructed with start=False must raise until started.
    timer = mmcv.Timer(False)
    with pytest.raises(mmcv.TimerError):
        timer.since_start()
    with pytest.raises(mmcv.TimerError):
        timer.since_last_check()
def get_test_module(test_file):
    """Resolve ``test_file`` to its dotted module path and import it."""
    module_path = get_module_path(test_file)
    return importlib.import_module(module_path)
def convert_yuv420_to_rgb(frame: Tuple[(np.ndarray, np.ndarray, np.ndarray)], device: torch.device, max_val: int) -> Tensor:
    """Convert a (Y, U, V) 4:2:0 frame to an RGB tensor on ``device``.

    The planes are converted to tensors normalised by ``max_val``, the
    chroma planes are upsampled to 4:4:4 with bicubic interpolation, and the
    result is converted from YCbCr to RGB.
    """
    out = to_tensors(frame, device=str(device), max_value=max_val)
    # Each plane gains batch and channel dims before chroma upsampling.
    out = yuv_420_to_444(tuple((c.unsqueeze(0).unsqueeze(0) for c in out)), mode='bicubic')
    return ycbcr2rgb(out)
# NOTE(review): the bare call below looks like the remnant of a stripped
# decorator (e.g. ``@HOOKS.register_module()``); as written it calls an
# undefined name -- confirm against the original source.
_module()
class CheckpointHook(Hook):
    """Runner hook that saves checkpoints periodically (by epoch or by
    iteration), optionally pruning old checkpoints and syncing BN buffers.

    Attributes mirror the constructor arguments; ``out_dir`` defaults to the
    runner's work_dir and may live on a remote file backend via FileClient.
    """
    def __init__(self, interval=(- 1), by_epoch=True, save_optimizer=True, out_dir=None, max_keep_ckpts=(- 1), save_last=True, sync_buffer=False, file_client_args=None, **kwargs):
        # interval: save every N epochs/iters; <= 0 disables periodic saving.
        self.interval = interval
        self.by_epoch = by_epoch
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        # max_keep_ckpts: keep only the most recent N checkpoints (>0).
        self.max_keep_ckpts = max_keep_ckpts
        self.save_last = save_last
        # Extra kwargs are forwarded to runner.save_checkpoint (e.g.
        # filename_tmpl, create_symlink).
        self.args = kwargs
        self.sync_buffer = sync_buffer
        self.file_client_args = file_client_args
    def before_run(self, runner):
        """Resolve the output directory and file backend before training."""
        if (not self.out_dir):
            self.out_dir = runner.work_dir
        self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir)
        # When redirecting to a different out_dir, mirror the work_dir's
        # basename under it so runs stay separated.
        if (self.out_dir != runner.work_dir):
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
        runner.logger.info(f'Checkpoints will be saved to {self.out_dir} by {self.file_client.name}.')
        # Symlinking is only honoured if the backend supports it.
        if ('create_symlink' in self.args):
            if (self.args['create_symlink'] and (not self.file_client.allow_symlink)):
                self.args['create_symlink'] = False
                warnings.warn(f'create_symlink is set as True by the user but is changedto be False because creating symbolic link is not allowed in {self.file_client.name}')
        else:
            self.args['create_symlink'] = self.file_client.allow_symlink
    def after_train_epoch(self, runner):
        """Save a checkpoint after the epoch when in by-epoch mode."""
        if (not self.by_epoch):
            return
        # Save on the interval, and always on the last epoch if save_last.
        if (self.every_n_epochs(runner, self.interval) or (self.save_last and self.is_last_epoch(runner))):
            runner.logger.info(f'Saving checkpoint at {(runner.epoch + 1)} epochs')
            if self.sync_buffer:
                # Average BN/running buffers across ranks before saving.
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
    # NOTE(review): the bare name below looks like the remnant of a stripped
    # decorator (e.g. ``@master_only``); as written it is an undefined-name
    # expression -- confirm against the original source.
    _only
    def _save_checkpoint(self, runner):
        """Save the checkpoint, record its path in runner.meta, and prune
        checkpoints beyond ``max_keep_ckpts``."""
        runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args)
        if (runner.meta is not None):
            if self.by_epoch:
                cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format((runner.epoch + 1))
            else:
                cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format((runner.iter + 1))
            runner.meta.setdefault('hook_msgs', dict())
            runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(self.out_dir, cur_ckpt_filename)
        if (self.max_keep_ckpts > 0):
            if self.by_epoch:
                name = 'epoch_{}.pth'
                current_ckpt = (runner.epoch + 1)
            else:
                name = 'iter_{}.pth'
                current_ckpt = (runner.iter + 1)
            # Walk backwards over older save points and delete until a
            # missing file indicates the rest were already pruned.
            redundant_ckpts = range((current_ckpt - (self.max_keep_ckpts * self.interval)), 0, (- self.interval))
            filename_tmpl = self.args.get('filename_tmpl', name)
            for _step in redundant_ckpts:
                ckpt_path = self.file_client.join_path(self.out_dir, filename_tmpl.format(_step))
                if self.file_client.isfile(ckpt_path):
                    self.file_client.remove(ckpt_path)
                else:
                    break
    def after_train_iter(self, runner):
        """Save a checkpoint after the iteration when in by-iter mode."""
        if self.by_epoch:
            return
        if (self.every_n_iters(runner, self.interval) or (self.save_last and self.is_last_iter(runner))):
            runner.logger.info(f'Saving checkpoint at {(runner.iter + 1)} iterations')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
def SO_Tokenizer_wrapper(tokens):
    """Run the Stack Overflow token post-processing pipeline.

    Applies, in order: end-of-sentence punctuation splitting, multiple-dot
    splitting, several parenthesis/colon/quote splitters, and finally
    whitespace normalisation of every token.

    Fix: the original computed the whitespace-normalised token list but then
    returned the pre-normalisation list (``new_tokens`` instead of
    ``new_token``); the normalised tokens are now returned.
    """
    stage = Split_End_of_Sentence_Punc(tokens)
    # Multiple-dot splitting keeps the original token when the splitter
    # yields nothing.
    dot_split_tokens = []
    for token in stage:
        parts = Split_On_Multiple_Dot(token)
        if parts:
            dot_split_tokens.extend(parts)
        else:
            dot_split_tokens.append(token)
    stage = dot_split_tokens
    # Remaining splitters all expand one token into zero-or-more tokens.
    for splitter in (Split_On_Non_function_end_parenthesis,
                     Split_On_last_letter_Colon_Mark,
                     Split_On_last_letter_Quote_Mark,
                     Split_Words_Inside_Parenthesis,
                     Split_Parenthesis_At_End_of_URL):
        next_stage = []
        for token in stage:
            next_stage.extend(splitter(token))
        stage = next_stage
    # Collapse runs of whitespace and strip each token.
    return [re.sub('\\s+', ' ', token).strip() for token in stage]
def test_str(doc):
    """pybind11 binding test: str/bytes round-trips, docstring generation,
    __str__/__repr__ dispatch and str.format from C++."""
    assert (m.str_from_string().encode().decode() == 'baz')
    assert (m.str_from_bytes().encode().decode() == 'boo')
    assert (doc(m.str_from_bytes) == 'str_from_bytes() -> str')
    # Local class distinguishing __str__ from __repr__.
    class A(object):
        def __str__(self):
            return 'this is a str'
        def __repr__(self):
            return 'this is a repr'
    assert (m.str_from_object(A()) == 'this is a str')
    assert (m.repr_from_object(A()) == 'this is a repr')
    # Both formatting paths must produce identical output.
    (s1, s2) = m.str_format()
    assert (s1 == '1 + 2 = 3')
    assert (s1 == s2)
class FixedSampler(object):
    """Callable yielding the identity permutation ``[0, ..., n-1]``.

    The length fixed at construction is used unless an explicit length is
    supplied at call time.
    """
    def __init__(self, perm_len):
        assert (perm_len > 0)
        self._perm_len = perm_len
    def __call__(self, perm_len=None):
        if perm_len is None:
            perm_len = self._perm_len
        return np.arange(perm_len)
class Generator_toy(torch.nn.Module):
    """Toy 2-D generator MLP: 2 -> hidden -> hidden -> hidden -> 2 with
    in-place ReLU activations between the linear layers."""
    def __init__(self, hidden_dim):
        super(Generator_toy, self).__init__()
        widths = [2, hidden_dim, hidden_dim, hidden_dim]
        layers = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(w_in, w_out))
            layers.append(nn.ReLU(True))
        layers.append(nn.Linear(hidden_dim, 2))
        self.all_layers = nn.Sequential(*layers)
    def forward(self, x):
        """Map a batch of 2-D points to a batch of 2-D points."""
        return self.all_layers(x)
def test_class():
    """Check annotation-string generation for a class, a (class, instance)
    tuple, and a mixed list (Type[...], Tuple[...], List[Union[...]])."""
    ann_str = _make_annotation_str_for_obj(Foo)
    assert (ann_str == 'Type[{prefix}.Foo]'.format(prefix=__name__)), ('got %s' % ann_str)
    assert (_make_annotation_str_for_obj((Foo, Foo())) == 'Tuple[Type[{prefix}.Foo], {prefix}.Foo]'.format(prefix=__name__))
    assert (_make_annotation_str_for_obj([Foo, Foo()]) == 'List[Union[Type[{prefix}.Foo], {prefix}.Foo]]'.format(prefix=__name__))
def training_step(global_iter, model, phase, device, optimizer, loss_fn):
    """Run a single train/val step on the next batch from ``global_iter``.

    Args:
        global_iter: iterator yielding (batch_dict, positives_mask, negatives_mask).
        model: network whose output dict contains a 'global' embedding entry.
        phase: 'train' (gradients + optimiser step) or 'val' (no gradients).
        device: device the batch tensors are moved to.
        optimizer: optimiser stepped during the train phase.
        loss_fn: callable(embeddings, positives_mask, negatives_mask)
            returning (loss, stats_dict).

    Returns:
        dict of per-step statistics (model stats merged with loss stats).
    """
    assert (phase in ['train', 'val'])
    (batch, positives_mask, negatives_mask) = next(global_iter)
    # assumes every value in `batch` is a tensor with .to() -- TODO confirm
    batch = {e: batch[e].to(device) for e in batch}
    if (phase == 'train'):
        model.train()
    else:
        model.eval()
    optimizer.zero_grad()
    with torch.set_grad_enabled((phase == 'train')):
        y = model(batch)
        stats = (model.stats.copy() if hasattr(model, 'stats') else {})
        embeddings = y['global']
        (loss, temp_stats) = loss_fn(embeddings, positives_mask, negatives_mask)
        temp_stats = tensors_to_numbers(temp_stats)
        stats.update(temp_stats)
        if (phase == 'train'):
            loss.backward()
            optimizer.step()
    # NOTE(review): emptying the CUDA cache every step is costly; presumably
    # done to bound fragmentation with variable batch sizes -- confirm
    # before removing.
    torch.cuda.empty_cache()
    return stats
def load_bf16_model(path, model):
    """Load the bfloat16 checkpoint at ``path`` into ``model``.

    The import is local so the BF16 machinery is only pulled in when needed.
    """
    from .bfloat16 import BF16Model
    return BF16Model._load(path, model)
def compute_errors(ground_truth, pre):
    """Compute image quality metrics between a prediction and ground truth.

    Returns:
        tuple (l1, PSNR, TV): mean absolute error, peak signal-to-noise
        ratio assuming an 8-bit (255) peak -- capped at 100 for a perfect
        match -- and the mean total-variation magnitude of the prediction.
    """
    diff = ground_truth - pre
    l1 = np.abs(diff).mean()
    mse = np.mean(diff ** 2)
    # A zero MSE would make the log blow up; report the conventional cap.
    PSNR = 100 if mse == 0 else 20 * math.log10(255.0 / math.sqrt(mse))
    # Forward differences (with wrap-around from np.roll) along x and y.
    gx = pre - np.roll(pre, -1, axis=1)
    gy = pre - np.roll(pre, -1, axis=0)
    TV = np.sqrt(gx ** 2 + gy ** 2).mean()
    return (l1, PSNR, TV)
def cds_matchback(cat, xcat, colRA='RA', colDec='DEC', selection='best', epoch=None, colpmRA='pmra', colpmDec='pmdec'):
    """Match a CDS cross-match result ``xcat`` back to rows of the original
    catalog ``cat``.

    Proper motions are rewound from ``epoch`` to J2000 before matching so
    the sky positions agree with the coordinates that were uploaded.
    Returns the indices into ``cat`` of entries matched (to ~1e-5 arcsec).
    Only selection='best' is supported.
    """
    if (selection != 'all'):
        selection = 'best'
    if (selection == 'all'):
        raise NotImplementedError("selection='all' CDS cross-match not currently implemented")
    # Default epoch: per-row 'ref_epoch' column if present, else J2000.
    if (epoch is None):
        if ('ref_epoch' in cat.dtype.fields):
            epoch = cat['ref_epoch']
        else:
            epoch = 2000.0
    _check_epoch(cat, epoch)
    depoch = (epoch - 2000.0)
    if numpy.any((depoch != 0.0)):
        # Proper-motion offsets in degrees (pm in mas/yr); RA pm is divided
        # by cos(dec) -- assumes colpmRA is pmRA*cos(dec) -- TODO confirm.
        dra = (((cat[colpmRA] / numpy.cos(((cat[colDec] / 180.0) * numpy.pi))) / 3600000.0) * depoch)
        ddec = ((cat[colpmDec] / 3600000.0) * depoch)
        # Rows without proper motions are left unmoved.
        dra[numpy.isnan(cat[colpmRA])] = 0.0
        ddec[numpy.isnan(cat[colpmDec])] = 0.0
    else:
        dra = numpy.zeros(len(cat))
        ddec = numpy.zeros(len(cat))
    mc1 = acoords.SkyCoord((cat[colRA] - dra), (cat[colDec] - ddec), unit=(u.degree, u.degree), frame='icrs')
    mc2 = acoords.SkyCoord(xcat['RA'], xcat['DEC'], unit=(u.degree, u.degree), frame='icrs')
    (idx, d2d, d3d) = mc2.match_to_catalog_sky(mc1)
    # Keep only essentially exact matches (same uploaded coordinates).
    mindx = (d2d < (1e-05 * u.arcsec))
    return idx[mindx]
def train_robosuite(args):
    """Train a Super-SAC agent on a robosuite environment.

    Builds separate train/test environments, attaches an identity encoder
    for state-based observations (pixel observations are not supported yet),
    and launches training with a 1M-transition replay buffer.
    """
    (train_env, from_pixels) = create_robosuite_env(args.env)
    (test_env, from_pixels) = create_robosuite_env(args.env)
    if (not from_pixels):
        encoder = IdentityEncoder(train_env.observation_space.shape[0])
    else:
        # Pixel observations would need a CNN encoder.
        raise NotImplementedError
    agent = super_sac.Agent(act_space_size=train_env.action_space.shape[0], encoder=encoder)
    buffer = super_sac.replay.ReplayBuffer(size=1000000)
    super_sac.super_sac(agent=agent, train_env=train_env, test_env=test_env, buffer=buffer, name=args.name, logging_method=args.logging)
class ExplicitEnum(str, Enum):
    """Enum whose lookup failures list the valid values explicitly.

    Fix: ``_missing_`` must be a classmethod -- the Enum machinery invokes
    ``cls._missing_(value)``, and without the decorator the value would be
    bound to the ``cls`` parameter, raising a confusing TypeError instead of
    the intended ValueError.
    """
    @classmethod
    def _missing_(cls, value):
        raise ValueError(f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
def customized_resnet18(pretrained: bool=False, class_num=10, progress: bool=True) -> ResNet:
    """Build a ResNet-18 whose BatchNorm layers are all replaced by
    GroupNorm with 32 groups (useful for small/heterogeneous batches).

    Note: ``pretrained`` and ``progress`` are accepted for signature
    compatibility but are not used here.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], class_num=class_num)

    def gn(channels):
        # Fresh GroupNorm module per call site -- modules must not be shared.
        return nn.GroupNorm(num_groups=32, num_channels=channels)

    net.bn1 = gn(64)
    # (layer, channel width, whether a downsampling shortcut norm exists).
    stages = (
        (net.layer1, 64, False),
        (net.layer2, 128, True),
        (net.layer3, 256, True),
        (net.layer4, 512, True),
    )
    for layer, channels, has_shortcut in stages:
        for block in layer:
            block.bn1 = gn(channels)
            block.bn2 = gn(channels)
        if has_shortcut:
            layer[0].shortcut[1] = gn(channels)
    # GroupNorm keeps all statistics as parameters, so the parameter list
    # and the state dict must now have identical key sets.
    assert (len(dict(net.named_parameters()).keys()) == len(net.state_dict().keys())), 'More BN layers are there...'
    return net
class PrepareForNet(object):
    """Final sample transform: make arrays network-ready.

    Moves the image to channel-first (C, H, W) layout and converts image,
    mask, disparity and depth entries (when present) to contiguous float32
    arrays, mutating and returning the sample dict.
    """
    def __init__(self):
        pass
    def __call__(self, sample):
        # HWC -> CHW for the network input.
        image = np.transpose(sample['image'], (2, 0, 1))
        sample['image'] = np.ascontiguousarray(image).astype(np.float32)
        # Optional auxiliary maps share the same dtype/contiguity treatment.
        for key in ('mask', 'disparity', 'depth'):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))
        return sample
def filter_tests(output_file, filters):
    """Remove filtered entries from a space-separated test list file.

    ``output_file`` holds test paths separated by single spaces.  The
    special content 'tests' expands to every file in the tests/ directory
    (minus __init__.py and the filters); otherwise each path whose second
    component matches a filter is dropped.  The file is rewritten in place.
    """
    if not os.path.isfile(output_file):
        print('No test file found.')
        return
    with open(output_file, 'r', encoding='utf-8') as f:
        test_files = f.read().split(' ')
    if (len(test_files) == 0) or (test_files == ['']):
        print('No tests to filter.')
        return
    if test_files == ['tests']:
        excluded = ['__init__.py'] + filters
        kept = [os.path.join('tests', name) for name in os.listdir('tests') if name not in excluded]
    else:
        kept = [path for path in test_files if path.split(os.path.sep)[1] not in filters]
    with open(output_file, 'w', encoding='utf-8') as f:
        f.write(' '.join(kept))
def lnprob(p):
    """Log-posterior of parameter vector `p`.

    Rejects immediately when the prior is -inf; otherwise fans `p` out to
    every worker over its pipe, collects one partial log-likelihood per
    chunk, and returns their sum plus the log-prior.
    """
    lp = prior(p)
    if lp == (- np.inf):
        return (- np.inf)
    # Broadcast the proposal to all worker processes.
    for conn in pconns.values():
        conn.send(('LNPROB', p))
    # Gather one partial log-likelihood per chunk.
    lnps = np.empty(n_chunks)
    for idx, conn in enumerate(pconns.values()):
        lnps[idx] = conn.recv()
    return np.sum(lnps) + lp
def test_model(dataset_loaders, model, stat_names, train_func, args, inference_func=None):
    """Load the checkpoint named by `args.test_model` and evaluate on the test split.

    Runs `train_func` in 'test' mode under no_grad, writing per-example
    output to <output_dir>/test.txt, and prints the aggregated stats.
    """
    checkpoint_path = args.test_model
    print('Testing model loaded from %s' % checkpoint_path)
    model.load_state_dict(torch.load(checkpoint_path))
    with torch.no_grad():
        test_stats = train_func(
            data_loader=dataset_loaders['test'],
            model=model,
            optimizer=None,
            stat_names=stat_names,
            mode='test',
            args=args,
            write_path='%s/test.txt' % args.output_dir,
        )
    print(data_utils.dict_to_pstr(test_stats, header_str='Test:'))
def data_transforms(dataset_type='train', normlize_type='-1-1'):
    """Return the composed transform pipeline for 'train' or 'val'.

    Raises KeyError for any other `dataset_type`.  (The parameter keeps its
    original, misspelled public name `normlize_type` for compatibility.)
    """
    def shared():
        # Fresh transform instances per pipeline, as in the original.
        return [ReSize(size=10.0), Reshape(), Normalize(normlize_type)]
    pipelines = {
        'train': Compose(shared() + [RandomScale(), RandomCrop(), Retype()]),
        'val': Compose(shared() + [Retype()]),
    }
    return pipelines[dataset_type]
class SmallExact(Solver):
    """Pool-based active learner: greedily labels the highest-entropy candidates."""

    def __init__(self, init_dataset, poslabels, env, budget_per_round=1, poolsize=1000, device='cpu'):
        super(SmallExact, self).__init__()
        self.cur_dataset = init_dataset
        self.used = set()  # items already labelled and added to the dataset
        self.budget_per_round = budget_per_round
        self.poslabels = poslabels
        self.env = env
        # Fixed candidate pool, sampled once from the environment.
        self.cand = [self.env.random_item() for _ in range(poolsize)]
        self.device = device

    def set_model(self, model):
        self.model = model
        self.acquisition = MaxEntAcquisition(model, self.device)

    def increment_dataset(self):
        """Score unused candidates, label the top-budget ones, return the dataset."""
        scored = [(self.acquisition(self.env.get_path(item)), item)
                  for item in self.cand if item not in self.used]
        # Highest acquisition score first; (score, item) tuples are unique.
        for score, item in sorted(scored, reverse=True)[:self.budget_per_round]:
            label = self.get_class(item)
            self.used.add(item)
            self.cur_dataset.append((self.env.get_path(item), label))
        return MDataset(self.cur_dataset)
def make_vgg_layer(in_channels, out_channels, num_blocks, conv_cfg=None, norm_cfg=None, act_cfg=dict(type='ReLU'), dilation=1, with_norm=False, ceil_mode=False):
    """Build one VGG stage: `num_blocks` 3x3 conv modules followed by 2x2 max-pool.

    Returns a plain list of modules.  Padding equals dilation so spatial size
    is preserved by the convolutions.
    """
    stage = []
    channels = in_channels
    for _ in range(num_blocks):
        stage.append(ConvModule(in_channels=channels, out_channels=out_channels,
                                kernel_size=3, dilation=dilation, padding=dilation,
                                bias=True, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
                                act_cfg=act_cfg))
        channels = out_channels  # subsequent convs keep the stage width
    stage.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return stage
def get_parser():
    """Build the RIASS argument parser (data, optimisation, model, eval options).

    Returns:
        argparse.ArgumentParser with all defaults set; no arguments are required.
    """
    parser = argparse.ArgumentParser(description='RIASS')
    # Data / loader options.
    parser.add_argument('-year', dest='year', default='2017')
    parser.add_argument('-imsize', dest='imsize', default=480, type=int)
    parser.add_argument('-batch_size', dest='batch_size', default=10, type=int)
    parser.add_argument('-num_workers', dest='num_workers', default=1, type=int)
    parser.add_argument('-length_clip', dest='length_clip', default=1, type=int)
    parser.add_argument('--single_object', dest='single_object', action='store_true')
    parser.set_defaults(single_object=False)
    parser.add_argument('--only_temporal', dest='only_temporal', action='store_true')
    parser.set_defaults(only_temporal=False)
    parser.add_argument('--only_spatial', dest='only_spatial', action='store_true')
    parser.set_defaults(only_spatial=False)
    # Training control / resuming.
    parser.add_argument('--resume', dest='resume', action='store_true', help='whether to resume training an existing model (the one with name model_name will be used)')
    parser.set_defaults(resume=False)
    parser.add_argument('-epoch_resume', dest='epoch_resume', default=0, type=int, help='set epoch_resume if you want flags --finetune_after and --update_encoder to be properly activated (eg if you stop training for whatever reason at epoch 15, set epoch_resume to 15)')
    parser.add_argument('-seed', dest='seed', default=123, type=int)
    parser.add_argument('-gpu_id', dest='gpu_id', default=0, type=int)
    # Optimiser settings (separate lr/weight decay for the CNN backbone).
    parser.add_argument('-lr', dest='lr', default=0.001, type=float)
    parser.add_argument('-lr_cnn', dest='lr_cnn', default=1e-06, type=float)
    parser.add_argument('-optim_cnn', dest='optim_cnn', default='adam', choices=['adam', 'sgd', 'rmsprop'])
    parser.add_argument('-momentum', dest='momentum', default=0.9, type=float)
    parser.add_argument('-weight_decay', dest='weight_decay', default=1e-06, type=float)
    parser.add_argument('-weight_decay_cnn', dest='weight_decay_cnn', default=1e-06, type=float)
    parser.add_argument('-optim', dest='optim', default='adam', choices=['adam', 'sgd', 'rmsprop'])
    parser.add_argument('-maxseqlen', dest='maxseqlen', default=10, type=int)
    parser.add_argument('-gt_maxseqlen', dest='gt_maxseqlen', default=10, type=int)
    parser.add_argument('-best_val_loss', dest='best_val_loss', default=1000, type=float)
    parser.add_argument('--crop', dest='crop', action='store_true')
    parser.set_defaults(crop=False)
    parser.add_argument('--smooth_curves', dest='smooth_curves', action='store_true')
    parser.set_defaults(smooth_curves=False)
    parser.add_argument('--overlay_masks', dest='overlay_masks', action='store_true')
    parser.set_defaults(overlay_masks=False)
    parser.add_argument('-finetune_after', dest='finetune_after', default=0, type=int, help='epoch number to start finetuning. set -1 to not finetune.there is a patience term that can allow starting to fine tune earlier (does not apply if value is -1)')
    parser.add_argument('--update_encoder', dest='update_encoder', action='store_true', help='used in sync with finetune_after. no need to activate.')
    parser.set_defaults(update_encoder=False)
    parser.add_argument('--transfer', dest='transfer', action='store_true')
    parser.set_defaults(transfer=False)
    parser.add_argument('-transfer_from', dest='transfer_from', default='model')
    parser.add_argument('-min_delta', dest='min_delta', default=0.0, type=float)
    parser.add_argument('-patience', dest='patience', default=15, type=int, help='patience term to activate flags such as use_class_loss, feed_prediction and update_encoder if their matching vars are not -1')
    parser.add_argument('-patience_stop', dest='patience_stop', default=60, type=int, help='patience to stop training.')
    parser.add_argument('-max_epoch', dest='max_epoch', default=100, type=int)
    parser.add_argument('-print_every', dest='print_every', default=10, type=int)
    parser.add_argument('--log_term', dest='log_term', action='store_true', help='if activated, will show logs in stdout instead of log file.')
    parser.set_defaults(log_term=False)
    # Visdom visualisation.
    parser.add_argument('--visdom', dest='visdom', action='store_true')
    parser.set_defaults(visdom=False)
    parser.add_argument('-port', dest='port', default=8097, type=int, help='visdom port')
    # FIX: this line was truncated in the original ("default=' help='visdom server')",
    # a syntax error); restore the conventional local visdom server default.
    parser.add_argument('-server', dest='server', default='http://localhost', help='visdom server')
    parser.add_argument('-iou_weight', dest='iou_weight', default=1.0, type=float)
    # Data augmentation.
    parser.add_argument('--augment', dest='augment', action='store_true')
    parser.set_defaults(augment=False)
    parser.add_argument('-rotation', dest='rotation', default=10, type=int)
    parser.add_argument('-translation', dest='translation', default=0.1, type=float)
    parser.add_argument('-shear', dest='shear', default=0.1, type=float)
    parser.add_argument('-zoom', dest='zoom', default=0.7, type=float)
    # Hardware / model architecture.
    parser.add_argument('--cpu', dest='use_gpu', action='store_false')
    parser.set_defaults(use_gpu=True)
    parser.add_argument('-ngpus', dest='ngpus', default=1, type=int)
    parser.add_argument('-base_model', dest='base_model', default='resnet101', choices=['resnet101', 'resnet50', 'resnet34', 'vgg16'])
    parser.add_argument('-skip_mode', dest='skip_mode', default='concat', choices=['sum', 'concat', 'mul', 'none'])
    parser.add_argument('-model_name', dest='model_name', default='model')
    parser.add_argument('-log_file', dest='log_file', default='train.log')
    parser.add_argument('-hidden_size', dest='hidden_size', default=128, type=int)
    parser.add_argument('-kernel_size', dest='kernel_size', default=3, type=int)
    parser.add_argument('-dropout', dest='dropout', default=0.0, type=float)
    parser.add_argument('--resize', dest='resize', action='store_true')
    parser.set_defaults(resize=False)
    parser.add_argument('-num_classes', dest='num_classes', default=21, type=int)
    # Dataset / evaluation.
    parser.add_argument('-dataset', dest='dataset', default='davis2017', choices=['davis2017', 'youtube'])
    parser.add_argument('-youtube_dir', dest='youtube_dir', default='../../databases/YouTubeVOS/')
    parser.add_argument('-eval_split', dest='eval_split', default='test')
    parser.add_argument('-mask_th', dest='mask_th', default=0.5, type=float)
    parser.add_argument('-max_dets', dest='max_dets', default=100, type=int)
    parser.add_argument('-min_size', dest='min_size', default=0.001, type=float)
    parser.add_argument('--display', dest='display', action='store_true')
    parser.add_argument('--no_display_text', dest='no_display_text', action='store_true')
    parser.set_defaults(display=False)
    parser.set_defaults(display_route=False)
    parser.set_defaults(no_display_text=False)
    parser.set_defaults(use_gt_masks=False)
    parser.add_argument('-frames_path', dest='frames_path', default='../../databases/DAVIS2017/JPEGImages/480p/tennis-vest')
    parser.add_argument('-mask_path', dest='init_mask_path', default='../../databases/DAVIS2017/Annotations/480p/tennis-vest/00000.png')
    parser.add_argument('-results_path', dest='results_path', default=None)
    parser.add_argument('--zero_shot', dest='zero_shot', action='store_true')
    return parser
def build_evaluator(task: str, metric_configs: List[Union[(str, Dict[(str, dict)])]], validate_index: int=0):
    """Instantiate the evaluator class registered for `task`.

    Raises:
        ValueError: when `task` has no registered evaluator.
    """
    dispatch = {
        'graph_vertex_classification': GraphVertexClassificationEvaluator,
        'hypergraph_vertex_classification': HypergraphVertexClassificationEvaluator,
        'user_item_recommender': UserItemRecommenderEvaluator,
    }
    evaluator_cls = dispatch.get(task)
    if evaluator_cls is None:
        raise ValueError(f"{task} is not supported yet. Please email '{AUTHOR_EMAIL}' to add it.")
    return evaluator_cls(metric_configs, validate_index)
def load_data(train_filename, valid_filename, test_filename, delimiter='\t', col_names=None):
    """Load the train/valid/test rating files as DataFrames with 0-based ids.

    Each file is a header-less delimited table of (user_id, item_id, rating)
    with 1-based ids; ids are shifted to 0-based indices.

    Args:
        train_filename, valid_filename, test_filename: paths to the splits.
        delimiter: field delimiter (default tab).
        col_names: column names; defaults to ['user_id', 'item_id', 'rating'].
            (Changed from a mutable default list to None for safety; the
            effective default is unchanged.)

    Returns:
        Tuple of (train, valid, test) DataFrames.
    """
    if col_names is None:
        col_names = ['user_id', 'item_id', 'rating']

    def _read_split(path):
        # Read one split and convert 1-based ids to 0-based indices.
        frame = pd.read_csv(path, delimiter=delimiter, header=None, names=col_names)
        frame['user_id'] = frame['user_id'] - 1
        frame['item_id'] = frame['item_id'] - 1
        return frame

    return (_read_split(train_filename), _read_split(valid_filename), _read_split(test_filename))
class Seq2SeqForecaster(BasePytorchForecaster):
    """LSTM sequence-to-sequence forecaster: stores config and defers to the base class."""

    def __init__(self, past_seq_len, future_seq_len, input_feature_num, output_feature_num, lstm_hidden_dim=64, lstm_layer_num=2, teacher_forcing=False, normalization=True, decomposition_kernel_size=0, dropout=0.1, optimizer='Adam', loss='mse', lr=0.001, metrics=['mse'], seed=None, distributed=False, workers_per_node=1, distributed_backend='ray'):
        # --- data / model / optimiser configuration dicts ---
        self.data_config = dict(past_seq_len=past_seq_len,
                                future_seq_len=future_seq_len,
                                input_feature_num=input_feature_num,
                                output_feature_num=output_feature_num)
        self.model_config = dict(lstm_hidden_dim=lstm_hidden_dim,
                                 lstm_layer_num=lstm_layer_num,
                                 teacher_forcing=teacher_forcing,
                                 dropout=dropout,
                                 normalization=normalization,
                                 decomposition_kernel_size=decomposition_kernel_size)
        self.loss_config = dict(loss=loss)
        self.optim_config = dict(lr=lr, optim=optimizer)
        # --- creator callables handed to the training backend ---
        self.model_creator = model_creator
        self.optimizer_creator = optimizer_creator
        if isinstance(loss, str):
            self.loss_creator = loss_creator
        else:
            # A ready-built loss object was supplied: wrap it in a creator.
            def customized_loss_creator(config):
                return config['loss']
            self.loss_creator = customized_loss_creator
        # --- distribution settings ---
        self.distributed = distributed
        self.remote_distributed_backend = distributed_backend
        self.local_distributed_backend = 'subprocess'
        self.workers_per_node = workers_per_node
        # --- training settings ---
        self.lr = lr
        self.metrics = metrics
        self.seed = seed
        # --- thread / process layout: one process per 8 threads on big hosts ---
        threads = torch.get_num_threads()
        self.thread_num = threads
        self.optimized_model_thread_num = threads
        self.num_processes = max(1, threads // 8) if threads >= 24 else 1
        # --- capability flags consumed by the base class ---
        self.use_ipex = False
        self.onnx_available = True
        self.quantize_available = False
        self.checkpoint_callback = True
        self.use_hpo = True
        self.optimized_model_output_tensor = True
        super().__init__()
def recursive_indicators(condition_func, x, default_indicator=False):
    """Map `condition_func` over the nested structure `x`, producing indicators.

    Leaves that cannot be handled fall back to `default_indicator` (both the
    leaf function and the backup function return it).
    """
    if condition_func is None:
        condition_func = recursive_generic_condition_func
    return recursive_apply(condition_func,
                           (lambda _: default_indicator),
                           x,
                           backup_func=(lambda _: default_indicator))
def all_reduce(py_dict, op='sum', group=None):
    """All-reduce every tensor in `py_dict` across distributed workers.

    The key list is broadcast from rank 0 so all ranks reduce in the same
    order; all tensors are flattened into one buffer so a single collective
    call suffices, then split back to their original shapes.

    Args:
        py_dict: dict mapping keys to tensors (same keys expected on all ranks).
        op: reduction operation, 'sum' or 'mean'.
        group: process group; defaults to the global gloo group.

    Returns:
        OrderedDict of reduced tensors, or `py_dict` unchanged when world size is 1.
    """
    world_size = get_world_size()
    if (world_size == 1):
        return py_dict
    if (group is None):
        group = _get_global_gloo_group()
    if (dist.get_world_size(group) == 1):
        return py_dict
    # Broadcast the key ordering from rank 0 so every rank packs identically.
    py_key = list(py_dict.keys())
    py_key_tensor = pyobj2tensor(py_key)
    dist.broadcast(py_key_tensor, src=0)
    py_key = tensor2pyobj(py_key_tensor)
    # Pack all tensors into one flat buffer -> a single all_reduce call.
    tensor_shapes = [py_dict[k].shape for k in py_key]
    tensor_numels = [py_dict[k].numel() for k in py_key]
    flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
    dist.all_reduce(flatten_tensor, op=_get_reduce_op(op))
    if (op == 'mean'):
        # NOTE(review): divides by the *global* world size, not the group's — confirm intended.
        flatten_tensor /= world_size
    # Unpack the reduced buffer back into per-key tensors with original shapes.
    split_tensors = [x.reshape(shape) for (x, shape) in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes)]
    return OrderedDict({k: v for (k, v) in zip(py_key, split_tensors)})
def test_recursive_find_duplicates_dir_integration(cnn):
    """Integration test: recursive CNN duplicate detection over a nested image dir.

    `cnn` is a fixture providing the CNN-based hasher; the test directory
    holds one base image plus flipped/resized/rotated variants spread across
    nested subfolders.  The similarity scores below are pinned regression
    values; only the *sets* of duplicate filenames are compared, not scores.
    """
    # Expected mapping: image path -> [(duplicate path, similarity), ...].
    expected_duplicates = {str(Path('lvl1/ukbench00120.jpg')): [('ukbench00120_hflip.jpg', 0.9891392), (str(Path('lvl1/lvl2b/ukbench00120_resize.jpg')), 0.), (str(Path('lvl1/lvl2a/ukbench00120_rotation.jpg')), 0.)], 'ukbench00120_hflip.jpg': [(str(Path('lvl1/lvl2a/ukbench00120_rotation.jpg')), 0.9030868), (str(Path('lvl1/ukbench00120.jpg')), 0.9891392), (str(Path('lvl1/lvl2b/ukbench00120_resize.jpg')), 0.9793916)], str(Path('lvl1/lvl2b/ukbench00120_resize.jpg')): [(str(Path('lvl1/lvl2a/ukbench00120_rotation.jpg')), 0.9102372), (str(Path('lvl1/ukbench00120.jpg')), 0.), ('ukbench00120_hflip.jpg', 0.9793916)], str(Path('lvl1/lvl2a/ukbench00120_rotation.jpg')): [('ukbench00120_hflip.jpg', 0.9030868), (str(Path('lvl1/ukbench00120.jpg')), 0.), (str(Path('lvl1/lvl2b/ukbench00120_resize.jpg')), 0.9102372)], str(Path('lvl1/lvl2b/ukbench09268.jpg')): []}
    duplicates = cnn.find_duplicates(image_dir=TEST_IMAGE_DIR_MIXED_NESTED, min_similarity_threshold=0.9, scores=True, outfile=False, recursive=True)
    # Scores are returned as numpy float32.
    assert isinstance(duplicates[str(Path('lvl1/ukbench00120.jpg'))][0][1], np.float32)
    assert (len(duplicates) == len(expected_duplicates))
    # Compare duplicate-name sets only; exact scores may drift across backends.
    for k in duplicates.keys():
        dup_val = duplicates[k]
        expected_val = expected_duplicates[k]
        dup_ret = set(map((lambda x: x[0]), dup_val))
        expected_ret = set(map((lambda x: x[0]), expected_val))
        assert (dup_ret == expected_ret)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (strided/dilated) -> 1x1 expand.

    The residual path optionally passes through `downsample` when spatial
    size or channel count changes.  Output channels = planes * expansion.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(planes)
        # Padding equals dilation so spatial size is preserved (modulo stride).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def _sum_each(self, x, y):
        # Element-wise sum of two equal-length lists of tensors.
        assert len(x) == len(y)
        return [a + b for (a, b) in zip(x, y)]

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def _activation_to_string(activation, precision=2):
    """Format an activation count as a human-readable string suffixed with 'B'."""
    return f'{_num_to_string(activation, precision)}B'
def run_mat(source_root_dir, target_root_dir, imname, cname):
    """Run the MATLAB `convert_data` script for one image/cloth-name pair.

    Starts a fresh MATLAB engine, invokes convert_data with the module-level
    FINE_HEIGHT/FINE_WIDTH target resolution, then shuts the engine down.
    Pure side effect: output is written by the MATLAB script itself.
    """
    eng = matlab.engine.start_matlab()
    # NOTE(review): if convert_data raises, eng.quit() is skipped and the
    # engine process leaks — consider try/finally.
    eng.convert_data(source_root_dir, target_root_dir, imname, cname, FINE_HEIGHT, FINE_WIDTH)
    eng.quit()
# NOTE(review): the original had a bare `_model` expression statement here —
# almost certainly the tail of a truncated `@register_model` decorator.  A bare
# `_model` is at best a no-op and at worst a NameError at import, so it is removed.
def vgg13(pretrained: bool=False, **kwargs: Any) -> VGG:
    """Construct a VGG-13 model via the generic factory.

    Args:
        pretrained: load pretrained weights when True.
        **kwargs: forwarded to the model factory unchanged.
    """
    model_args = dict(**kwargs)
    return _create_vgg('vgg13', pretrained=pretrained, **model_args)
class MeanZero(MeanFunction):
    """Zero mean function: contributes nothing to the GP and has no parameters."""

    def __init__(self):
        pass

    def gpml_function(self):
        # GPML-toolbox representation of the zero mean.
        return '{}'

    def is_thunk(self):
        return True

    def id(self):
        return 'Zero'

    def param_vector(self):
        # No tunable parameters.
        return np.array([])

    def load_param_vector(self, params):
        # Only the empty parameter vector is valid.
        assert len(params) == 0

    def initialise_params(self, sd=1, data_shape=None):
        # Nothing to initialise.
        pass

    def copy(self):
        return MeanZero()

    def latex(self):
        return '{\\emptyset}'

    def syntax(self):
        return colored('MZ', self.depth)

    def pretty_print(self):
        return colored('MZ', self.depth)

    def __repr__(self):
        return 'MeanZero()'
class RockPaperScissorsMed(RockPaperScissors):
    """Medium-sized rock-paper-scissors-style label generalization task."""

    @classmethod
    # FIX: the method takes `cls` but was missing @classmethod, so calling it on
    # the class (RockPaperScissorsMed.compute_labels()) raised a missing-argument
    # error; instance calls are unaffected since `cls` is never used in the body.
    def compute_labels(cls, limit=20):
        """Build (train, dev) label triples over the first `limit` lowercase letters.

        A 5-letter window slides in steps of 2: permutations of (a, b, d) form
        the train set, permutations of (b, c, e) form the dev set.  Dev triples
        containing any symbol never seen in training are filtered out.
        """
        all_labels = string.ascii_lowercase[:limit]
        train = []
        dev = []
        train_vocab = set()
        for i in range(0, (len(all_labels) - 5), 2):
            (a, b, c, d, e) = all_labels[i:(i + 5)]
            for trip in itertools.permutations([a, b, d]):
                train.append(trip)
                train_vocab |= set(trip)
            for trip in itertools.permutations([b, c, e]):
                dev.append(trip)
        # Keep only dev triples whose symbols all occurred during training.
        dev = [(a, b, c) for (a, b, c) in dev if ((a in train_vocab) and (b in train_vocab) and (c in train_vocab))]
        return (train, dev)
def get_harmonics_to_noise_ratio(waveform, sample_rate, min_pitch=75.0, silence_threshold=0.1, periods_per_window=4.5):
    """Estimate the harmonics-to-noise ratio (HNR, in dB) of a 1-D waveform.

    The signal is split into overlapping windows sized by `min_pitch`; each
    window's normalised autocorrelation peak provides a harmonicity
    candidate, silent/unvoiced windows are discarded, and the surviving
    values are converted to dB and averaged.

    Args:
        waveform: 1-D numpy array of audio samples.
        sample_rate: sampling rate in Hz.
        min_pitch: lowest pitch (Hz) to resolve; determines window length.
        silence_threshold: relative peak amplitude below which a frame is silence.
        periods_per_window: window length expressed in pitch periods.

    Returns:
        Mean HNR in dB over voiced frames, or 0 when no voiced frame is found.
    """
    assert (min_pitch > 0), 'Min pitch needs to be > 0'
    assert (0 <= silence_threshold <= 1), 'Silence threshold need to be in [0, 1]'
    # Hop is a quarter window: 75% overlap between consecutive frames.
    hop_length_seconds = (periods_per_window / (4.0 * min_pitch))
    window_length_seconds = (periods_per_window / min_pitch)
    hop_length = int((hop_length_seconds * sample_rate))
    window_length = int((window_length_seconds * sample_rate))
    frames_iterator = range((max(1, int(((waveform.shape[0] / hop_length) + 0.5))) + 1))
    segmented_waveform = [waveform[(i * hop_length):((i * hop_length) + window_length)] for i in frames_iterator]
    # Global peak of the mean-removed signal, used for the silence test below.
    waveform_peak = max(abs((waveform - waveform.mean())))
    hnr_vector = []
    for (index, chunk) in enumerate(segmented_waveform):
        if (chunk.shape[0] > 0):
            thischunk_length = (chunk.shape[0] / sample_rate)
            chunk = (chunk - chunk.mean())
            thischunk_peak = np.max(np.abs(chunk))
            if (thischunk_peak == 0):
                # All-zero chunk: push the 0.5 sentinel (filtered out later).
                hnr_vector.append(0.5)
            else:
                chunk_len = len(chunk)
                hanning_window = np.hanning(chunk_len)
                chunk *= hanning_window
                # Zero-pad to the next power of two for the FFT.
                n_fft = (2 ** int((np.log2(chunk_len) + 1)))
                hanning_window = np.hstack((hanning_window, np.zeros((n_fft - chunk_len))))
                chunk = np.hstack((chunk, np.zeros((n_fft - chunk_len))))
                ffts_outputs = []
                # FFT of (spectrum * conj) gives the autocorrelation of each
                # input; the window's autocorrelation normalises the chunk's.
                for fft_input in [chunk, hanning_window]:
                    fft_output = np.fft.fft(fft_input)
                    r = np.nan_to_num(np.real(np.fft.fft((fft_output * np.conjugate(fft_output))))[:chunk_len])
                    ffts_outputs.append(r)
                r_x = (ffts_outputs[0] / ffts_outputs[1])
                r_x /= r_x[0]
                # Candidate pitch periods = local maxima of the normalised autocorrelation.
                indices = pu.indexes(r_x)
                time_array = np.linspace(0, thischunk_length, r_x.shape[0])
                myfilter = time_array[indices]
                candidate_values = r_x[indices]
                # Keep lags between the Nyquist period and the longest pitch period.
                candidate_values = candidate_values[(myfilter >= (1.0 / (sample_rate / 2.0)))]
                myfilter = myfilter[(myfilter >= (1.0 / (sample_rate / 2.0)))]
                candidate_values = candidate_values[(myfilter <= (1.0 / min_pitch))]
                # Fold values > 1 back into [0, 1].
                for (i, v) in enumerate(candidate_values):
                    if (v > 1.0):
                        candidate_values[i] = (1.0 / v)
                if (candidate_values.shape[0] > 0):
                    # Compare voiced strength vs an unvoiced/silence strength.
                    strengths = [np.max(candidate_values), np.max((0, (2 - ((thischunk_peak / waveform_peak) / silence_threshold))))]
                    if np.argmax(strengths):
                        # Unvoiced wins: sentinel value, filtered out below.
                        hnr_vector.append(0.5)
                    else:
                        hnr_vector.append(strengths[0])
                else:
                    hnr_vector.append(0.5)
    # Drop unvoiced/sentinel frames (<= 0.5).
    hnr_vector = np.array(hnr_vector)[(np.array(hnr_vector) > 0.5)]
    if (hnr_vector.shape[0] == 0):
        return 0
    else:
        # Convert harmonicity fraction to dB.
        hnr_vector = (10.0 * np.log10((hnr_vector / (1.0 - hnr_vector))))
        return np.mean(hnr_vector)
def network_weight_zero_init(net: nn.Module):
    """Re-initialise `net` so its outputs start near zero.

    Conv and Linear weights are drawn from a tiny fan-in-scaled Gaussian
    (std ~= 1e-4 / sqrt(fan_in)); their biases are zeroed; BatchNorm/GroupNorm
    layers are reset to identity (weight=1, bias=0).

    Returns:
        The same module, for call chaining.
    """
    with torch.no_grad():
        for m in net.modules():
            if isinstance(m, nn.Conv2d):
                device = m.weight.device
                # FIX: Conv2d weights are (out_channels, in_channels, kH, kW);
                # the original unpacked them in the wrong order and so scaled
                # by fan-out instead of fan-in.
                (out_channels, in_channels, k1, k2) = m.weight.shape
                m.weight[:] = ((torch.randn(m.weight.shape, device=device) / np.sqrt(((k1 * k2) * in_channels))) * 0.0001)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                # Identity affine transform.
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                device = m.weight.device
                # FIX: Linear weights are (out_features, in_features); scale by fan-in.
                (out_features, in_features) = m.weight.shape
                m.weight[:] = ((torch.randn(m.weight.shape, device=device) / np.sqrt(in_features)) * 0.0001)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.zeros_(m.bias)
            else:
                continue
    return net
class VAE(nn.Module):
    """Gated dense VAE with Bernoulli likelihood.

    Encoder and decoder are two gated-linear layers each (a 'pre' linear
    gated element-wise by a sigmoid-activated 'gate' linear, combined via the
    external Gate module).  The prior is a standard normal over z.
    """

    def __init__(self, args):
        super(VAE, self).__init__()
        self.args = args
        # --- encoder q(z|x): two gated layers, then mean/logvar heads ---
        self.q_z_layers_pre = nn.ModuleList()
        self.q_z_layers_gate = nn.ModuleList()
        self.q_z_layers_pre.append(nn.Linear(np.prod(self.args.input_size), 300))
        self.q_z_layers_gate.append(nn.Linear(np.prod(self.args.input_size), 300))
        self.q_z_layers_pre.append(nn.Linear(300, 300))
        self.q_z_layers_gate.append(nn.Linear(300, 300))
        self.q_z_mean = nn.Linear(300, self.args.z1_size)
        self.q_z_logvar = nn.Linear(300, self.args.z1_size)
        # --- decoder p(x|z): two gated layers, then Bernoulli mean head ---
        self.p_x_layers_pre = nn.ModuleList()
        self.p_x_layers_gate = nn.ModuleList()
        self.p_x_layers_pre.append(nn.Linear(self.args.z1_size, 300))
        self.p_x_layers_gate.append(nn.Linear(self.args.z1_size, 300))
        self.p_x_layers_pre.append(nn.Linear(300, 300))
        self.p_x_layers_gate.append(nn.Linear(300, 300))
        self.p_x_mean = nn.Linear(300, np.prod(self.args.input_size))
        self.sigmoid = nn.Sigmoid()
        self.Gate = Gate()
        # Xavier init for every linear layer.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                xavier_init(m)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I)."""
        std = logvar.mul(0.5).exp_()
        if self.args.cuda:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        return eps.mul(std).add_(mu)

    def calculate_likelihood(self, X, dir, mode='test', S=5000):
        """Importance-sampling estimate of the negative log-likelihood on X.

        Each test point is replicated S times (in mini-batches of at most MB)
        and log p(x) is estimated with logsumexp over the importance weights.
        """
        N_test = X.size(0)
        likelihood_test = []
        MB = 500
        if (S <= MB):
            R = 1
        else:
            # FIX: integer division — R is a range bound; the original used
            # true division, which raises TypeError under Python 3.
            R = (S // MB)
            S = MB
        for j in range(N_test):
            if ((j % 100) == 0):
                print('{:.2f}%'.format(((j / (1.0 * N_test)) * 100)))
            x_single = X[j].unsqueeze(0)
            a = []
            for r in range(0, R):
                x = x_single.expand(S, x_single.size(1))
                (x_mean, x_logvar, z_q, z_q_mean, z_q_logvar) = self.forward(x)
                RE = log_Bernoulli(x, x_mean, dim=1)
                log_p_z = log_Normal_standard(z_q, dim=1)
                log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
                KL = (- (log_p_z - log_q_z))
                a_tmp = (RE - KL)
                a.append(a_tmp.cpu().data.numpy())
            a = np.asarray(a)
            a = np.reshape(a, ((a.shape[0] * a.shape[1]), 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append((likelihood_x - np.log(len(a))))
        likelihood_test = np.array(likelihood_test)
        plot_histogram((- likelihood_test), dir, mode)
        return (- np.mean(likelihood_test))

    def calculate_lower_bound(self, X_full):
        """Average negative ELBO over X_full, computed in mini-batches of 500."""
        lower_bound = 0.0
        RE_all = 0.0
        KL_all = 0.0
        MB = 500
        # FIX: integer division — range() needs an int under Python 3.
        for i in range((X_full.size(0) // MB)):
            x = X_full[(i * MB):((i + 1) * MB)].view((- 1), np.prod(self.args.input_size))
            (x_mean, x_logvar, z_q, z_q_mean, z_q_logvar) = self.forward(x)
            RE = log_Bernoulli(x, x_mean)
            log_p_z = log_Normal_standard(z_q, dim=1)
            log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
            KL = (- torch.sum((log_p_z - log_q_z)))
            # FIX: `.cpu().data[0]` fails on 0-dim tensors in modern PyTorch;
            # `.item()` is the supported scalar extraction since 0.4.
            RE_all += RE.item()
            KL_all += KL.item()
            lower_bound += ((- RE) + KL).item()
        lower_bound = (lower_bound / X_full.size(0))
        return lower_bound

    def q_z(self, x):
        """Encoder: return (mean, logvar) of q(z|x)."""
        h0_pre = self.q_z_layers_pre[0](x)
        h0_gate = self.sigmoid(self.q_z_layers_gate[0](x))
        h0 = self.Gate(h0_pre, h0_gate)
        h1_pre = self.q_z_layers_pre[1](h0)
        h1_gate = self.sigmoid(self.q_z_layers_gate[1](h0))
        h1 = self.Gate(h1_pre, h1_gate)
        z_q_mean = self.q_z_mean(h1)
        z_q_logvar = self.q_z_logvar(h1)
        return (z_q_mean, z_q_logvar)

    def p_x(self, z):
        """Decoder: return (Bernoulli mean, logvar placeholder) of p(x|z)."""
        h0_pre = self.p_x_layers_pre[0](z)
        h0_gate = self.sigmoid(self.p_x_layers_gate[0](z))
        h0 = self.Gate(h0_pre, h0_gate)
        h1_pre = self.p_x_layers_pre[1](h0)
        h1_gate = self.sigmoid(self.p_x_layers_gate[1](h0))
        h1 = self.Gate(h1_pre, h1_gate)
        x_mean = self.sigmoid(self.p_x_mean(h1))
        # Bernoulli likelihood: logvar is unused, kept for interface parity.
        x_logvar = 0.0
        return (x_mean, x_logvar)

    def forward(self, x):
        """Full pass: encode, reparameterize, decode."""
        (z_q_mean, z_q_logvar) = self.q_z(x)
        z_q = self.reparameterize(z_q_mean, z_q_logvar)
        (x_mean, x_logvar) = self.p_x(z_q)
        return (x_mean, x_logvar, z_q, z_q_mean, z_q_logvar)
def compute_f1_all(pred_entities, gt_entities):
    """Per-type and overall precision/recall/F1 for entity predictions.

    `pred_entities[i]` and `gt_entities[i]` are the predicted and gold entity
    lists for example i; each entity is a tuple whose first element is its type.
    Returns a dict mapping type (and 'all') to precision/recall/f1-score.
    """
    gold, predicted, correct = [], [], []
    for idx in range(len(pred_entities)):
        gold.extend(gt_entities[idx])
        predicted.extend(pred_entities[idx])
        # A prediction is correct when it appears verbatim in the gold list.
        correct.extend([entity for entity in pred_entities[idx] if entity in gt_entities[idx]])
    report = {}
    gold_counts = Counter(entity[0] for entity in gold)
    pred_counts = Counter(entity[0] for entity in predicted)
    correct_counts = Counter(entity[0] for entity in correct)
    for entity_type, n_gold in gold_counts.items():
        (recall, precision, f1) = _compute_f1(n_gold,
                                              pred_counts.get(entity_type, 0),
                                              correct_counts.get(entity_type, 0))
        report[entity_type] = {'precision': precision, 'recall': recall, 'f1-score': f1}
    # Micro-averaged totals over every entity regardless of type.
    (recall, precision, f1) = _compute_f1(len(gold), len(predicted), len(correct))
    report['all'] = {'precision': precision, 'recall': recall, 'f1-score': f1}
    return report
def write_current_fig(pprefix):
    """Save the current matplotlib figure as <pprefix>.png and <pprefix>.pdf."""
    log.info(f'write {pprefix}.png')
    # Fixed DPI for the raster copy; the PDF below stays vector.
    plt.savefig(f'{pprefix}.png', dpi=140)
    log.info(f'write {pprefix}.pdf')
    plt.savefig(f'{pprefix}.pdf')
class CustomDataParallel(nn.DataParallel):
    """DataParallel wrapper that forwards unknown attribute access to the wrapped model."""

    def __getattr__(self, key):
        # nn.Module.__getattr__ resolves parameters/buffers/submodules
        # (including 'module' itself); anything it cannot find is looked up
        # on the wrapped model, so callers can use the wrapper transparently.
        try:
            return super().__getattr__(key)
        except AttributeError:
            return getattr(self.module, key)
class LookupDataPool():
    """Registry of named KeywordProcessor instances built from LookupData objects."""

    def __init__(self) -> None:
        self.pool: dict = {}

    def add(self, lookup: LookupData, update: bool=False, case_sensitive: bool=True) -> None:
        """Register `lookup` under its name; `update=True` permits replacement.

        Raises:
            TypeError: when `lookup` is not a LookupData instance.
            LookupDuplicateError: when the name exists and update is False.
        """
        if not isinstance(lookup, LookupData):
            raise TypeError('lookup has to be instance of LookupData')
        if (lookup.name in self.pool) and (not update):
            raise LookupDuplicateError(f"'{lookup.name}' has already been added. Set update=True to update")
        self.pool[lookup.name] = KeywordProcessor(case_sensitive=case_sensitive)
        self.pool[lookup.name].add_keywords_from_dict(lookup.data)
        if lookup.script != 'default':
            # Extend the word-boundary character set for non-default scripts.
            self.pool[lookup.name].non_word_boundaries.update(settings.SCRIPTS[lookup.script]['chars'])
        logger.debug(f'{lookup.name} added to pool')

    def remove(self, lookup_to_remove: str) -> None:
        """Drop a lookup by name; unknown names are ignored silently."""
        if lookup_to_remove not in self.pool:
            return
        del self.pool[lookup_to_remove]
        logger.debug(f'{lookup_to_remove} removed from pool')

    def remove_all(self):
        """Empty the pool."""
        self.pool = {}

    def _add_demo_data(self, case_sensitive: bool=True):
        """Load the bundled demo lookups (cities and countries)."""
        for name, data_file in (('cities', settings.DEMODATA_CITIES),
                                ('countries', settings.DEMODATA_COUNTRIES)):
            self.add(LookupData(name=name, data=load_data_from_file(file=data_file)),
                     case_sensitive=case_sensitive)
        logger.debug(f'demo data loaded for: {list(self.pool.keys())}')
def _canonicalize(smi_str):
    """Round-trip a SMILES string through RDKit to obtain its canonical form."""
    molecule = rdkit_general_ops.get_molecule(smi_str, kekulize=False)
    return rdkit_general_ops.return_canoncailised_smiles_str(molecule, kekuleSmiles=False)
def set_forget_bias_to_one(bias):
    """Fill the forget-gate slice of an LSTM bias vector with 1.0, in place.

    LSTM biases are laid out as four equal chunks (input, forget, cell,
    output); this sets the second quarter — the forget gate — to ones so the
    network initially remembers.
    """
    n = bias.size(0)
    bias.data[n // 4:n // 2].fill_(1.0)
class ResLayer(nn.Sequential):
    """Sequential stage of `num_blocks` residual blocks.

    Builds a projection shortcut (optional avg-pool + 1x1 conv + norm) when
    the stride or channel count changes, then stacks the blocks.  With
    `downsample_first=True` the strided block comes first (ResNet style);
    with False it comes last (Hourglass style).
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, **kwargs):
        self.block = block
        downsample = None
        # A projection shortcut is needed when spatial size or channels change.
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = []
            conv_stride = stride
            if avg_down:
                # Stride via avg-pool so the 1x1 conv keeps stride 1 (ResNet-D style shortcut).
                conv_stride = 1
                downsample.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
            downsample.extend([build_conv_layer(conv_cfg, inplanes, (planes * block.expansion), kernel_size=1, stride=conv_stride, bias=False), build_norm_layer(norm_cfg, (planes * block.expansion))[1]])
            downsample = nn.Sequential(*downsample)
        layers = []
        if downsample_first:
            layers.append(block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
            inplanes = (planes * block.expansion)
            for _ in range(1, num_blocks):
                layers.append(block(inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        else:
            # Downsampling block goes last; preceding blocks keep `inplanes` width.
            for _ in range((num_blocks - 1)):
                layers.append(block(inplanes=inplanes, planes=inplanes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
            layers.append(block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        super(ResLayer, self).__init__(*layers)
def assert_fx_safe(condition: bool, message: str) -> torch.Tensor:
    """Assert `condition` with `message`, except under scripting/FX tracing.

    Symbolic tracers cannot evaluate data-dependent asserts, so when
    TorchScript is scripting or FX is tracing the check is skipped and a
    dummy tensor is returned to keep traced graphs well-formed.
    """
    if (torch.jit.is_scripting() or is_fx_tracing()):
        return torch.zeros(1)
    return _do_assert_fx_safe(condition, message)
def read_ims(auto_src, gold_src):
    """Read system (auto) CoNLL coreference output and its matching gold files.

    Returns:
        Tuple (auto_docs, gold_docs).
    """
    auto_docs = coreference_reading.read_conll_doc(auto_src, None, True, False, False, True)
    gold_docs = coreference_reading.read_conll_matching_files(auto_docs, gold_src)
    return (auto_docs, gold_docs)
class PASS2ACT(object):
    """Rewrite passive-voice sentences into active voice using a spaCy parse.

    Relies on a spaCy pipeline (`nlp`) for dependency parsing and on the
    `en` (pattern.en-style) module for verb conjugation.
    """

    def __init__(self, nlp) -> None:
        super(PASS2ACT, self).__init__()
        self.nlp = nlp

    def pass2act(self, doc, rec=False):
        """Return `doc` with passive sentences rewritten as active voice.

        Sentences lacking either a passive subject or a by-agent are copied
        through unchanged.  `rec=True` marks a recursive call on an embedded
        clause (skips capitalisation and punctuation collection).
        """
        parse = self.nlp(doc)
        newdoc = ''
        for sent in parse.sents:
            # Slots filled while scanning the dependency parse of this sentence.
            subjpass = ''
            subj = ''
            verb = ''
            verbtense = ''
            adverb = {'bef': '', 'aft': ''}
            part = ''
            prep = ''
            agent = ''
            aplural = False
            advcltree = None
            # Two '.' sentinel tokens pad the auxiliary window scan further below.
            aux = list(list(self.nlp('. .').sents)[0])
            xcomp = ''
            punc = '.'
            # --- pass 1: harvest grammatical roles from the dependency tree ---
            for word in sent:
                if (word.dep_ == 'advcl'):
                    if (word.head.dep_ in ('ROOT', 'auxpass')):
                        advcltree = word.subtree
                if (word.dep_ == 'nsubjpass'):
                    if (word.head.dep_ == 'ROOT'):
                        # Lowercase everything except proper nouns.
                        subjpass = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                if (word.dep_ == 'nsubj'):
                    subj = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                    if (word.head.dep_ == 'auxpass'):
                        if (word.head.head.dep_ == 'ROOT'):
                            subjpass = subj
                if (word.dep_ in ('advmod', 'npadvmod', 'oprd')):
                    if (word.head.dep_ == 'ROOT'):
                        # Place the adverb before or after the verb, matching its original side.
                        if (verb == ''):
                            adverb['bef'] = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                        else:
                            adverb['aft'] = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                if (word.dep_ == 'auxpass'):
                    if (word.head.dep_ == 'ROOT'):
                        if (not subjpass):
                            subjpass = subj
                if (word.dep_ in ('aux', 'auxpass', 'neg')):
                    if (word.head.dep_ == 'ROOT'):
                        aux += [word]
                if (word.dep_ == 'ROOT'):
                    verb = word.text
                    # Map the Penn tag of the main verb to a target tense.
                    if (word.tag_ == 'VB'):
                        verbtense = en.INFINITIVE
                    elif (word.tag_ == 'VBD'):
                        verbtense = en.PAST
                    elif (word.tag_ == 'VBG'):
                        verbtense = en.PRESENT
                        # NOTE(review): this assignment is clobbered by
                        # `verbaspect = None` before conjugation below — confirm intended.
                        verbaspect = en.PROGRESSIVE
                    elif (word.tag_ == 'VBN'):
                        verbtense = en.PAST
                    else:
                        try:
                            verbtense = en.tenses(word.text)[0][0]
                        except IndexError:
                            pass
                if (word.dep_ == 'prt'):
                    if (word.head.dep_ == 'ROOT'):
                        part = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                if (word.dep_ == 'prep'):
                    if (word.head.dep_ == 'ROOT'):
                        prep = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                if word.dep_.endswith('obj'):
                    # Object of the 'by'-agent becomes the new active subject.
                    if (word.head.dep_ == 'agent'):
                        if (word.head.head.dep_ == 'ROOT'):
                            agent = ''.join((((w.text + ', ') if (w.dep_ == 'appos') else (w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws)) for w in word.subtree)).strip()
                            aplural = (word.tag_ in ('NNS', 'NNPS'))
                if (word.dep_ in ('xcomp', 'ccomp', 'conj')):
                    if (word.head.dep_ == 'ROOT'):
                        # Recursively convert the embedded clause, preserving a leading 'that'.
                        xcomp = ''.join(((w.text_with_ws.lower() if (w.tag_ not in ('NNP', 'NNPS')) else w.text_with_ws) for w in word.subtree)).strip()
                        that = xcomp.startswith('that')
                        xcomp = self.pass2act(xcomp, True).strip(' .')
                        if ((not xcomp.startswith('that')) and that):
                            xcomp = ('that ' + xcomp)
                if ((word.dep_ == 'punct') and (not rec)):
                    if (word.text != '"'):
                        punc = word.text
            # --- bail out: not passive (or no by-agent) -> keep the sentence ---
            if (subjpass == ''):
                newdoc += (str(sent) + ' ')
                continue
            if (agent == ''):
                newdoc += (str(sent) + ' ')
                continue
            # Swap pronoun case (e.g. 'him' -> 'he') for the new subject/object.
            agent = nouninv(agent)
            subjpass = nouninv(subjpass)
            auxstr = ''
            num = (en.SINGULAR if ((not aplural) or (agent in ('he', 'she'))) else en.PLURAL)
            aux.append(aux[0])
            verbaspect = None
            # --- pass 2: rebuild the auxiliary chain in active voice ---
            # Slide a 4-token window (prev-prev, prev, current, next) over the
            # collected auxiliaries ('.' sentinels pad the ends).
            for (pp, p, a, n) in zip(aux, aux[1:], aux[2:], aux[3:]):
                if (a.lemma_ == '.'):
                    continue
                if (a.lemma_ == 'not'):
                    if (p.lemma_ == 'be'):
                        if (n.lemma_ == 'be'):
                            verbtense = en.tenses(a.text)[0][0]
                            auxstr += (en.conjugate('be', tense=en.tenses(p.text)[0][0], number=num) + ' ')
                            verbaspect = en.PROGRESSIVE
                        else:
                            # Negation without progressive: use do-support.
                            auxstr += (en.conjugate('do', tense=en.tenses(p.text)[0][0], number=num) + ' ')
                            verbtense = en.INFINITIVE
                    auxstr += 'not '
                elif (a.lemma_ == 'be'):
                    if (p.lemma_ == 'be'):
                        verbtense = en.tenses(a.text)[0][0]
                        auxstr += (en.conjugate('be', tense=en.tenses(a.text)[0][0], number=num) + ' ')
                        verbaspect = en.PROGRESSIVE
                    elif (p.tag_ == 'MD'):
                        verbtense = en.INFINITIVE
                elif (a.lemma_ == 'have'):
                    # NOTE(review): this comparison expression has no effect —
                    # presumably `num = ...` was intended; confirm.
                    ((num == en.PLURAL) if (p.tag_ == 'MD') else num)
                    auxstr += (en.conjugate('have', tense=en.tenses(a.text)[0][0], number=num) + ' ')
                    if (n.lemma_ == 'be'):
                        verbaspect = en.PROGRESSIVE
                        verbtense = en.tenses(n.text)[0][0]
                else:
                    auxstr += a.text_with_ws
            auxstr = auxstr.lower().strip()
            # Conjugate the main verb with the tense/aspect derived above.
            if verbaspect:
                verb = en.conjugate(verb, tense=verbtense, aspect=verbaspect)
            else:
                verb = en.conjugate(verb, tense=verbtense)
            # --- adverbial clause: re-conjugate progressive verbs to match ---
            advcl = ''
            if advcltree:
                for w in advcltree:
                    if ((w.pos_ == 'VERB') and (en.tenses(w.text)[0][4] == en.PROGRESSIVE)):
                        advcl += (('which ' + en.conjugate(w.text, tense=en.tenses(verb)[0][0])) + ' ')
                    else:
                        advcl += w.text_with_ws
            # --- assemble the active sentence from the collected pieces ---
            newsent = (' '.join(list(filter(None, [agent, auxstr, adverb['bef'], verb, part, subjpass, adverb['aft'], advcl, prep, xcomp]))) + punc)
            if (not rec):
                newsent = (newsent[0].upper() + newsent[1:])
            newdoc += (newsent + ' ')
        return newdoc
class LocalJobArgs(JobArgs):
    """Job arguments for running on the local machine (no cluster scheduler)."""

    def __init__(self, platform, namespace, job_name):
        super().__init__(platform, namespace, job_name)

    def initilize(self):
        # NOTE(review): method name is misspelled ('initilize') but kept,
        # since external callers invoke it by this exact name.
        self.distribution_strategy = DistributionStrategy.LOCAL
        # Local runs cannot elastically scale workers.
        self.enable_elastic_scheduling = False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.