class BertTokenizer(object):
def __init__(self, vocab_file, do_lower_case=True, max_len=None, never_split=('[UNK]', '[SEP]', '[PAD]', '[CLS]', '[MASK]')):
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = (max_len if (max_len is not None) else int(1e12))
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
ids = []
for token in tokens:
ids.append(self.vocab[token])
if (len(ids) > self.max_len):
raise ValueError('Token indices sequence length is longer than the specified maximum sequence length for this BERT model ({} > {}). Running this sequence through BERT will result in indexing errors'.format(len(ids), self.max_len))
return ids
def convert_ids_to_tokens(self, ids):
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
if (pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP):
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
else:
vocab_file = pretrained_model_name
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
try:
resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error("Model name '{}' was not found in model name list ({}). We assumed '{}' was a path or url but couldn't find any file associated to this path or url.".format(pretrained_model_name, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file))
return None
if (resolved_vocab_file == vocab_file):
logger.info('loading vocabulary file {}'.format(vocab_file))
else:
logger.info('loading vocabulary file {} from cache at {}'.format(vocab_file, resolved_vocab_file))
if (pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP):
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer |
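# Minimal usage sketch (illustrative, not from the source): `vocab.txt` is a
# hypothetical path, and load_vocab / BasicTokenizer / WordpieceTokenizer come
# from the surrounding module.
# tokenizer = BertTokenizer('vocab.txt', do_lower_case=True)
# tokens = tokenizer.tokenize('Hello world')               # basic split, then wordpiece split
# ids = tokenizer.convert_tokens_to_ids(tokens)            # raises ValueError if len(ids) > max_len
# assert tokenizer.convert_ids_to_tokens(ids) == tokens    # ids round-trip back to tokens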
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg('cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
backbone = dict(type='ResNet', depth=18, num_stages=4, out_indices=(3,), norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch')
neck = dict(type='FPN', in_channels=[512], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg('cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue((detector.bbox_head is not None))
self.assertTrue((detector.backbone is not None))
self.assertTrue((not hasattr(detector, 'neck')))
@unittest.skipIf(not torch.cuda.is_available(), 'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(), 'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
assert (len(batch_results) == 2)
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(), 'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
assert isinstance(batch_results, tuple) |
class AnchorHeadSemi(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, voxel_size, point_cloud_range, predict_boxes_when_training=True):
super().__init__(model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range, predict_boxes_when_training=predict_boxes_when_training)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.conv_cls = nn.Conv2d(input_channels, (self.num_anchors_per_location * self.num_class), kernel_size=1)
self.conv_box = nn.Conv2d(input_channels, (self.num_anchors_per_location * self.box_coder.code_size), kernel_size=1)
if (self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None):
self.conv_dir_cls = nn.Conv2d(input_channels, (self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS), kernel_size=1)
else:
self.conv_dir_cls = None
self.init_weights()
self.model_type = None
def init_weights(self):
pi = 0.01
nn.init.constant_(self.conv_cls.bias, (- np.log(((1 - pi) / pi))))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
def forward(self, data_dict):
spatial_features_2d = data_dict['spatial_features_2d']
cls_preds = self.conv_cls(spatial_features_2d)
box_preds = self.conv_box(spatial_features_2d)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous()
box_preds = box_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
if (self.conv_dir_cls is not None):
dir_cls_preds = self.conv_dir_cls(spatial_features_2d)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if (self.model_type == 'origin'):
if self.training:
targets_dict = self.assign_targets(gt_boxes=data_dict['gt_boxes'])
self.forward_ret_dict.update(targets_dict)
if ((not self.training) or self.predict_boxes_when_training):
(batch_cls_preds, batch_box_preds) = self.generate_predicted_boxes(batch_size=data_dict['batch_size'], cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
elif (self.model_type == 'teacher'):
(batch_cls_preds, batch_box_preds) = self.generate_predicted_boxes(batch_size=data_dict['batch_size'], cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
elif (self.model_type == 'student'):
if self.training:
if ('gt_boxes' in data_dict):
targets_dict = self.assign_targets(gt_boxes=data_dict['gt_boxes'])
self.forward_ret_dict.update(targets_dict)
(batch_cls_preds, batch_box_preds) = self.generate_predicted_boxes(batch_size=data_dict['batch_size'], cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
else:
raise Exception('Unsupported model type')
return data_dict |
def test_recarray(simple_dtype, packed_dtype):
elements = [(False, 0, 0.0, (- 0.0)), (True, 1, 1.5, (- 2.5)), (False, 2, 3.0, (- 5.0))]
for (func, dtype) in [(m.create_rec_simple, simple_dtype), (m.create_rec_packed, packed_dtype)]:
arr = func(0)
assert (arr.dtype == dtype)
assert_equal(arr, [], simple_dtype)
assert_equal(arr, [], packed_dtype)
arr = func(3)
assert (arr.dtype == dtype)
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
assert (type(arr[0]) == np.void)
assert (type(arr[0].item()) == tuple)
if (dtype == simple_dtype):
assert (m.print_rec_simple(arr) == ['s:0,0,0,-0', 's:1,1,1.5,-2.5', 's:0,2,3,-5'])
else:
assert (m.print_rec_packed(arr) == ['p:0,0,0,-0', 'p:1,1,1.5,-2.5', 'p:0,2,3,-5'])
nested_dtype = np.dtype([('a', simple_dtype), ('b', packed_dtype)])
arr = m.create_rec_nested(0)
assert (arr.dtype == nested_dtype)
assert_equal(arr, [], nested_dtype)
arr = m.create_rec_nested(3)
assert (arr.dtype == nested_dtype)
assert_equal(arr, [((False, 0, 0.0, (- 0.0)), (True, 1, 1.5, (- 2.5))), ((True, 1, 1.5, (- 2.5)), (False, 2, 3.0, (- 5.0))), ((False, 2, 3.0, (- 5.0)), (True, 3, 4.5, (- 7.5)))], nested_dtype)
assert (m.print_rec_nested(arr) == ['n:a=s:0,0,0,-0;b=p:1,1,1.5,-2.5', 'n:a=s:1,1,1.5,-2.5;b=p:0,2,3,-5', 'n:a=s:0,2,3,-5;b=p:1,3,4.5,-7.5'])
arr = m.create_rec_partial(3)
assert (str(arr.dtype) == partial_dtype_fmt())
partial_dtype = arr.dtype
assert ('' not in arr.dtype.fields)
assert (partial_dtype.itemsize > simple_dtype.itemsize)
assert_equal(arr, elements, simple_dtype)
assert_equal(arr, elements, packed_dtype)
arr = m.create_rec_partial_nested(3)
assert (str(arr.dtype) == partial_nested_fmt())
assert ('' not in arr.dtype.fields)
assert ('' not in arr.dtype.fields['a'][0].fields)
assert (arr.dtype.itemsize > partial_dtype.itemsize)
np.testing.assert_equal(arr['a'], m.create_rec_partial(3)) |
def test_check_parameters_minmax_values_float():
x = torch.tensor([1.1, 2.3, 7.8], dtype=torch.float32)
dtypes = [torch.bool]
_check_parameter(x, 'x', min_value=1.0, max_value=24)
assert_raises(ValueError, _check_parameter, x, 'x', min_value=1.2, max_value=24)
assert_raises(ValueError, _check_parameter, x, 'x', min_value=0.0, max_value=6) |
def _gaussian_cross_kernels(q, x, s):
K_qq = _gaussian_kernel(q, q, s)
K_qx = _gaussian_kernel(q, x, s)
K_xx = _gaussian_kernel(x, x, s)
return (K_qq, K_qx, K_xx) |
class SlimmableAlexNet(BaseModule, SlimmableMixin):
input_shape = [None, 3, 256, 256]
def __init__(self, num_classes=10, track_running_stats=True, bn_type='bn', share_affine=True, width_scale=1.0, slimmabe_ratios=None):
super(SlimmableAlexNet, self).__init__()
self._set_slimmabe_ratios(slimmabe_ratios)
self.bn_type = bn_type
bn_class = get_bn_layer(bn_type)
bn_kwargs = dict(track_running_stats=track_running_stats)
if bn_type.startswith('d'):
bn_kwargs['share_affine'] = share_affine
if track_running_stats:
norm_layer2d = (lambda ch: SwitchableLayer1D(bn_class['2d'], ch, slim_ratios=self.slimmable_ratios, **bn_kwargs))
norm_layer1d = (lambda ch: SwitchableLayer1D(bn_class['1d'], ch, slim_ratios=self.slimmable_ratios, **bn_kwargs))
else:
assert (bn_type == 'bn')
norm_layer2d = (lambda ch: SlimmableBatchNorm2d(ch, affine=True, **bn_kwargs))
norm_layer1d = (lambda ch: SlimmableBatchNorm1d(ch, affine=True, **bn_kwargs))
feature_layers = []
feature_layers += [('conv1', SlimmableConv2d(3, int((64 * width_scale)), kernel_size=11, stride=4, padding=2, non_slimmable_in=True)), ('bn1', norm_layer2d(int((64 * width_scale)))), ('relu1', nn.ReLU(inplace=True)), ('maxpool1', nn.MaxPool2d(kernel_size=3, stride=2)), ('conv2', SlimmableConv2d(int((64 * width_scale)), int((192 * width_scale)), kernel_size=5, padding=2)), ('bn2', norm_layer2d(int((192 * width_scale)))), ('relu2', nn.ReLU(inplace=True)), ('maxpool2', nn.MaxPool2d(kernel_size=3, stride=2)), ('conv3', SlimmableConv2d(int((192 * width_scale)), int((384 * width_scale)), kernel_size=3, padding=1)), ('bn3', norm_layer2d(int((384 * width_scale)))), ('relu3', nn.ReLU(inplace=True)), ('conv4', SlimmableConv2d(int((384 * width_scale)), int((256 * width_scale)), kernel_size=3, padding=1)), ('bn4', norm_layer2d(int((256 * width_scale)))), ('relu4', nn.ReLU(inplace=True)), ('conv5', SlimmableConv2d(int((256 * width_scale)), int((256 * width_scale)), kernel_size=3, padding=1)), ('bn5', norm_layer2d(int((256 * width_scale)))), ('relu5', nn.ReLU(inplace=True)), ('maxpool5', nn.MaxPool2d(kernel_size=3, stride=2))]
self.features = nn.Sequential(OrderedDict(feature_layers))
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
clf_layers = [('fc1', SlimmableLinear(int((((256 * 6) * 6) * width_scale)), int((4096 * width_scale)))), ('bn6', norm_layer1d(int((4096 * width_scale)))), ('relu6', nn.ReLU(inplace=True)), ('fc2', SlimmableLinear(int((4096 * width_scale)), int((4096 * width_scale)))), ('bn7', norm_layer1d(int((4096 * width_scale)))), ('relu7', nn.ReLU(inplace=True)), ('fc3', SlimmableLinear(int((4096 * width_scale)), num_classes, non_slimmable_out=True))]
self.classifier = nn.Sequential(OrderedDict(clf_layers))
def get_module_by_layer(self):
blocks = [[self.features._modules[name] for name in ['conv1', 'bn1', 'relu1', 'maxpool1']], [self.features._modules[name] for name in ['conv2', 'bn2', 'relu2', 'maxpool2']], [self.features._modules[name] for name in ['conv3', 'bn3', 'relu3']], [self.features._modules[name] for name in ['conv4', 'bn4', 'relu4']], [self.features._modules[name] for name in ['conv5', 'bn5', 'relu5', 'maxpool5']], [self.avgpool, nn.Flatten()], [self.classifier._modules[name] for name in ['fc1', 'bn6', 'relu6']], [self.classifier._modules[name] for name in ['fc2', 'bn7', 'relu7']], [self.classifier._modules[name] for name in ['fc3']]]
return blocks
def print_footprint(self):
input_shape = self.input_shape
input_shape[0] = 2
x = torch.rand(input_shape)
batch = x.shape[0]
layers = self.get_module_by_layer()
print(f'input: {x.shape[1:]} => {np.prod(x.shape[1:])}')
for (i_layer, layer) in enumerate(layers):
for m in layer:
x = m(x)
print(f'layer {i_layer}: {np.prod(x.shape[1:]):5d} <= {x.shape[1:]}') |
class Attention(att_model.Attention):
def __init__(self, config):
nn.Module.__init__(self)
self.config = config
self.rnn_size = config.rnn_size
self.att_hid_size = config.att_hid_size
mask_params = {'mask_type': self.config.prune_type, 'mask_init_value': self.config.prune_supermask_init}
self.h2att = MaskedLinear(self.rnn_size, self.att_hid_size, **mask_params)
self.alpha_net = MaskedLinear(self.att_hid_size, 1, **mask_params) |
class ImbalanceCIFAR100(ImbalanceCIFAR10):
base_folder = 'cifar-100-python'
url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
filename = 'cifar-100-python.tar.gz'
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [['train', '16019d7e3df5f24257cddd939b257f8d']]
test_list = [['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc']]
meta = {'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48'}
cls_num = 100 |
def separate_channels(items, n_channels=9, use_note_on_pitch=True):
caches = []
for i in range((n_channels + 1)):
cache = LastCache()
caches.append(cache)
midi_instruments = []
for i in range((n_channels + 1)):
midi_instruments.append(dict())
for (i, ins_items) in enumerate(items):
for item in ins_items:
program = item.program
channel = item.channel
if (item.name == 'key on'):
caches[channel].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0)
elif (item.name == 'set freq'):
key1 = freq2key(caches[channel].last_freq, _round=False)
key2 = freq2key(item.value, _round=False)
if (key1 == key2):
continue
if (key2 < 0):
continue
if (key1 == 0):
key1 = key2
diff = round(((key2 - key1) * PITCHBEND_STEPS))
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = []
if ((abs(diff) > 0) and (abs((caches[channel].pitch_bend + diff)) < PITCHBEND_MAX)):
new_pitch_bend = round((caches[channel].pitch_bend + diff))
midi_instruments[channel][program].append(miditoolkit.PitchBend(pitch=new_pitch_bend, time=item.time))
else:
key = round(key1)
vel = caches[channel].last_vel
assert ((vel >= MIDI_MIN) and (vel < MIDI_MAX)), f'Invalid velocity value {vel}, it should be in [{MIDI_MIN}, {MIDI_MAX}).'
if ((key < MIDI_MIN) or (key >= MIDI_MAX)):
continue
midi_instruments[channel][program].append(miditoolkit.Note(velocity=vel, pitch=key, start=caches[channel].last_sample_time, end=item.time))
new_pitch_bend = round(((key2 - round(key2)) * PITCHBEND_MAX))
midi_instruments[channel][program].append(miditoolkit.PitchBend(pitch=new_pitch_bend, time=item.time))
caches[channel].update(freq=item.value, vel=caches[channel].last_vel, time=item.time, pitch_bend=new_pitch_bend)
elif (item.name == 'key off'):
key1 = freq2key(caches[channel].last_freq, _round=False)
key2 = freq2key(item.pitch, _round=False)
if use_note_on_pitch:
key = round(key1)
else:
key = round(key2)
assert ((item.velocity >= MIDI_MIN) and (item.velocity < MIDI_MAX)), f'Invalid velocity value {item.velocity}, it should be in [{MIDI_MIN}, {MIDI_MAX}).'
if ((key < MIDI_MIN) or (key >= MIDI_MAX)):
continue
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = []
midi_instruments[channel][program].append(miditoolkit.Note(velocity=item.velocity, pitch=key, start=caches[item.channel].last_sample_time, end=item.start))
caches[channel].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0)
elif (item.name == 'drum note on'):
program = DRUM_INS
channel = DRUM_CHANNEL
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = []
midi_instruments[channel][program].append(miditoolkit.Note(velocity=item.velocity, pitch=item.pitch, start=item.start, end=item.end))
return midi_instruments |
class Attention(nn.Module):
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
super().__init__()
inner_dim = (dim_head * heads)
context_dim = default(context_dim, query_dim)
self.scale = (dim_head ** (- 0.5))
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, (inner_dim * 2), bias=False)
self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
def forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
(k, v) = self.to_kv(context).chunk(2, dim=(- 1))
(q, k, v) = map((lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h)), (q, k, v))
sim = (einsum('b i d, b j d -> b i j', q, k) * self.scale)
if exists(mask):
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = (- torch.finfo(sim.dtype).max)
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_((~ mask), max_neg_value)
attn = sim.softmax(dim=(- 1))
out = einsum('b i j, b j d -> b i d', attn, v)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
return self.to_out(out) |
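# Shape sketch (illustrative; assumes the module-level helpers `default` and
# `exists`, plus einops' `rearrange`/`repeat` and `einsum` used above, are in scope):
# attn = Attention(query_dim=320, context_dim=768, heads=8, dim_head=40)
# x = torch.randn(2, 4096, 320)    # queries: (batch, n_query, query_dim)
# ctx = torch.randn(2, 77, 768)    # context: (batch, n_context, context_dim)
# out = attn(x, context=ctx)       # cross-attention output: (2, 4096, 320)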
class ResNet34Fc(nn.Module):
def __init__(self):
super(ResNet34Fc, self).__init__()
model_resnet34 = models.resnet34(pretrained=True)
self.conv1 = model_resnet34.conv1
self.bn1 = model_resnet34.bn1
self.relu = model_resnet34.relu
self.maxpool = model_resnet34.maxpool
self.layer1 = model_resnet34.layer1
self.layer2 = model_resnet34.layer2
self.layer3 = model_resnet34.layer3
self.layer4 = model_resnet34.layer4
self.avgpool = model_resnet34.avgpool
self.__in_features = model_resnet34.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
return x
def output_num(self):
return self.__in_features |
class ONNXRuntimeSegmentor(BaseSegmentor):
def __init__(self, onnx_file: str, cfg: Any, device_id: int):
super(ONNXRuntimeSegmentor, self).__init__()
import onnxruntime as ort
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with ONNXRuntime from source.')
session_options = ort.SessionOptions()
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = (ort.get_device() == 'GPU')
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
for name in self.output_names:
self.io_binding.bind_output(name)
self.cfg = cfg
self.test_mode = cfg.model.test_cfg.mode
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def encode_decode(self, img, img_metas):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def simple_test(self, img: torch.Tensor, img_meta: Iterable, **kwargs) -> list:
device_type = img.device.type
self.io_binding.bind_input(name='input', device_type=device_type, device_id=self.device_id, element_type=np.float32, shape=img.shape, buffer_ptr=img.data_ptr())
self.sess.run_with_iobinding(self.io_binding)
seg_pred = self.io_binding.copy_outputs_to_cpu()[0]
ori_shape = img_meta[0]['ori_shape']
if (not ((ori_shape[0] == seg_pred.shape[(- 2)]) and (ori_shape[1] == seg_pred.shape[(- 1)]))):
seg_pred = torch.from_numpy(seg_pred).float()
seg_pred = torch.nn.functional.interpolate(seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
seg_pred = seg_pred.long().detach().cpu().numpy()
seg_pred = seg_pred[0]
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.') |
@is_pipeline_test
@require_vision
class ImageToTextPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
pipe = pipeline('image-to-text', model=model, tokenizer=tokenizer, image_processor=processor)
examples = [Image.open('./tests/fixtures/tests_samples/COCO/.png'), './tests/fixtures/tests_samples/COCO/.png']
return (pipe, examples)
def run_pipeline_test(self, pipe, examples):
outputs = pipe(examples)
self.assertEqual(outputs, [[{'generated_text': ANY(str)}], [{'generated_text': ANY(str)}]])
@require_tf
def test_small_model_tf(self):
pipe = pipeline('image-to-text', model='hf-internal-testing/tiny-random-vit-gpt2', framework='tf')
image = './tests/fixtures/tests_samples/COCO/.png'
outputs = pipe(image)
self.assertEqual(outputs, [{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}])
outputs = pipe([image, image])
self.assertEqual(outputs, [[{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}], [{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]])
outputs = pipe(image, max_new_tokens=1)
self.assertEqual(outputs, [{'generated_text': 'growth'}])
@require_torch
def test_small_model_pt(self):
pipe = pipeline('image-to-text', model='hf-internal-testing/tiny-random-vit-gpt2')
image = './tests/fixtures/tests_samples/COCO/.png'
outputs = pipe(image)
self.assertEqual(outputs, [{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}])
outputs = pipe([image, image])
self.assertEqual(outputs, [[{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}], [{'generated_text': 'growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO'}]])
@require_torch
def test_large_model_pt(self):
pipe = pipeline('image-to-text', model='ydshieh/vit-gpt2-coco-en')
image = './tests/fixtures/tests_samples/COCO/.png'
outputs = pipe(image)
self.assertEqual(outputs, [{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}])
outputs = pipe([image, image])
self.assertEqual(outputs, [[{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}], [{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}]])
@require_tf
def test_large_model_tf(self):
pipe = pipeline('image-to-text', model='ydshieh/vit-gpt2-coco-en', framework='tf')
image = './tests/fixtures/tests_samples/COCO/.png'
outputs = pipe(image)
self.assertEqual(outputs, [{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}])
outputs = pipe([image, image])
self.assertEqual(outputs, [[{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}], [{'generated_text': 'a cat laying on a blanket next to a cat laying on a bed '}]]) |
def test__find_duplicates_dict_scores_false(cnn):
encoding_map = data_encoding_map()
dict_ret = cnn._find_duplicates_dict(encoding_map, min_similarity_threshold=0.9, scores=False)
assert isinstance(dict_ret['ukbench00002.jpg'], list)
assert (len(dict_ret['ukbench00002.jpg']) == 1)
assert (not isinstance(dict_ret['ukbench00002.jpg'][0], tuple))
assert (dict_ret['ukbench00002.jpg'][0] == 'ukbench00002_dup.jpg') |
def test_clustered_inference():
(n_samples, n_features) = (100, 2000)
support_size = 10
sigma = 5.0
rho = 0.95
n_clusters = 200
margin_size = 5
interior_support = (support_size - margin_size)
extended_support = (support_size + margin_size)
(X_init, y, beta, epsilon) = multivariate_1D_simulation(n_samples=n_samples, n_features=n_features, support_size=support_size, sigma=sigma, rho=rho, shuffle=False, seed=2)
y = (y - np.mean(y))
X_init = (X_init - np.mean(X_init, axis=0))
connectivity = image.grid_to_graph(n_x=n_features, n_y=1, n_z=1)
ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity, linkage='ward')
(beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr) = clustered_inference(X_init, y, ward, n_clusters)
expected = (0.5 * np.ones(n_features))
expected[:support_size] = 0.0
assert_almost_equal(pval_corr[:interior_support], expected[:interior_support])
assert_almost_equal(pval_corr[extended_support:200], expected[extended_support:200], decimal=1)
(n_samples, n_features, n_times) = (200, 2000, 10)
support_size = 10
sigma = 5.0
rho_noise = 0.9
rho_data = 0.9
n_clusters = 200
margin_size = 5
interior_support = (support_size - margin_size)
extended_support = (support_size + margin_size)
(X, Y, beta, noise) = multivariate_temporal_simulation(n_samples=n_samples, n_features=n_features, n_times=n_times, support_size=support_size, sigma=sigma, rho_noise=rho_noise, rho_data=rho_data, shuffle=False)
connectivity = image.grid_to_graph(n_x=n_features, n_y=1, n_z=1)
ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity, linkage='ward')
(beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr) = clustered_inference(X, Y, ward, n_clusters, method='desparsified-group-lasso')
expected = (0.5 * np.ones(n_features))
expected[:support_size] = 0.0
assert_almost_equal(pval_corr[:interior_support], expected[:interior_support], decimal=3)
assert_almost_equal(pval_corr[extended_support:], expected[extended_support:], decimal=1) |
def union_bbox(bbox1, bbox2):
return [min(bbox1[0], bbox2[0]), max(bbox1[1], bbox2[1]), min(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3])] |
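# Hedged note (an assumption, not stated in the source): the min/max pattern above
# only yields a true union when boxes are stored per-axis as [min, max, min, max],
# e.g. [x_min, x_max, y_min, y_max]; the common [x1, y1, x2, y2] layout would need
# [min, min, max, max] instead.
print(union_bbox([0, 2, 0, 2], [1, 3, 1, 3]))  # -> [0, 3, 0, 3] under the [min, max, min, max] layout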
def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
def has_annotations(instance: Instance) -> bool:
return ('annotations' in instance)
def has_only_crowd_anotations(instance: Instance) -> bool:
for ann in instance['annotations']:
if (ann.get('is_crowd', 0) == 0):
return False
return True
def general_keep_instance_predicate(instance: Instance) -> bool:
return (has_annotations(instance) and (not has_only_crowd_anotations(instance)))
if (not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS):
return None
return general_keep_instance_predicate |
@MODELS.register_module()
class AverageFusion(BaseModule):
def __init__(self, init_cfg=None):
super().__init__(init_cfg)
def forward(self, image_features, events_features):
fusion_features = []
for i in range(len(image_features)):
fusion_features.append(((image_features[i] + events_features[i]) / 2))
return fusion_features |
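# Minimal sketch (illustrative; assumes BaseModule from the surrounding mm-style
# codebase is importable): AverageFusion averages matching image/event feature
# maps level by level.
import torch
fusion = AverageFusion()
image_feats = [torch.ones(1, 8, 4, 4), torch.ones(1, 16, 2, 2)]
event_feats = [torch.zeros(1, 8, 4, 4), torch.zeros(1, 16, 2, 2)]
fused = fusion(image_feats, event_feats)
print([f.mean().item() for f in fused])  # [0.5, 0.5] -- element-wise mean of the two modalities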
def taubin_filtering():
knot_mesh = o3d.data.KnotMesh()
mesh_in = o3d.io.read_triangle_mesh(knot_mesh.path)
vertices = np.asarray(mesh_in.vertices)
noise = 5
vertices += np.random.uniform(0, noise, size=vertices.shape)
mesh_in.vertices = o3d.utility.Vector3dVector(vertices)
mesh_in.compute_vertex_normals()
print('Displaying input mesh ...')
o3d.visualization.draw_geometries([mesh_in])
print('Displaying output of Taubin mesh filter after 10 iteration ...')
mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=10)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
print('Displaying output of Taubin mesh filter after 100 iteration ...')
mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=100)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out]) |
def network_check(config: ElasticLaunchConfig, entrypoint: Union[(Callable, str, None)], args: List[Any]) -> bool:
config = copy.deepcopy(config)
config.network_check = False
if (not config.run_id):
run_id = str(uuid.uuid4().int)
logger.warning(f'config has no run_id, generated a random run_id: {run_id}')
config.run_id = run_id
entrypoint_name = _get_entrypoint_name(entrypoint, args)
node_rank = env_utils.get_node_rank()
logger.info(f'''Starting elastic_operator with launch configs:
entrypoint : {entrypoint_name}
min_nodes : {config.min_nodes}
max_nodes : {config.max_nodes}
nproc_per_node : {config.nproc_per_node}
run_id : {config.run_id}
rdzv_backend : {config.rdzv_backend}
rdzv_endpoint : {config.rdzv_endpoint}
rdzv_configs : {config.rdzv_configs}
max_restarts : {config.max_restarts}
monitor_interval : {config.monitor_interval}
log_dir : {config.log_dir}
metrics_cfg : {config.metrics_cfg}
''')
rdzv_parameters = RendezvousParameters(backend=config.rdzv_backend, endpoint=config.rdzv_endpoint, run_id=config.run_id, min_nodes=config.min_nodes, max_nodes=config.max_nodes, **config.rdzv_configs)
master_addr = _get_local_ip()
rdzv_handler = MasterRendezvousHandler(RendezvousName.NETWORK_CHECK, node_rank, rdzv_parameters, local_world_size=config.nproc_per_node)
spec = WorkerSpec(role=config.role, local_world_size=config.nproc_per_node, entrypoint=entrypoint, args=tuple(args), rdzv_handler=rdzv_handler, max_restarts=0, monitor_interval=config.monitor_interval, master_addr=master_addr)
agent = NetworkCheckElasticAgent(node_rank=node_rank, config=config, entrypoint=entrypoint, spec=spec, start_method=config.start_method)
metrics.initialize_metrics(metrics.MetricsConfig(config.metrics_cfg))
result = agent.run()
logger.info('Network check result is %s', result)
return result |
def double_training_trick(train_Xy, val_Xy, test_Xy, inference_vectorizer):
def double(instances):
ret = []
for inst in instances:
original = inst
ret.append(original)
inst = copy.deepcopy(inst)
(inst['I'], inst['C']) = (original['C'], original['I'])
new_ys = []
for y in original['y']:
new_y = ((y[0] * (- 1)), *y[1:])
new_ys.append(new_y)
inst['y'] = new_ys
ret.append(inst)
return ret
return (double(train_Xy), val_Xy, test_Xy) |
class StableBaselines3ObservationWrapper(ObservationWrapper):
def __init__(self, env: CityLearnEnv):
assert env.central_agent, 'StableBaselines3ObservationWrapper is compatible only when env.central_agent = True. First set env.central_agent = True to use this wrapper.'
super().__init__(env)
self.env: CityLearnEnv
@property
def observation_space(self) -> spaces.Box:
return self.env.observation_space[0]
def observation(self, observations: List[List[float]]) -> np.ndarray:
return np.array(observations[0], dtype='float32') |
def grad_scale(x, scale):
y = x
y_grad = (x * scale)
return ((y - y_grad).detach() + y_grad) |
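# A minimal sketch (illustrative, toy values): grad_scale passes x through unchanged
# in the forward pass, but scales its gradient by `scale` in the backward pass
# (the straight-through trick used e.g. in LSQ-style quantization).
import torch
x = torch.tensor([2.0], requires_grad=True)
y = grad_scale(x, 0.5)
y.backward()
print(y.item(), x.grad.item())  # 2.0 (value unchanged) and 0.5 (gradient scaled)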
class ClassifierHead(nn.Module):
def __init__(self, in_channels=512, out_channels=40):
nn.Module.__init__(self)
self.fc = ME.MinkowskiLinear(in_channels, out_channels, bias=True)
self.glob_pool = ME.MinkowskiGlobalMaxPooling()
def forward(self, x):
return self.fc(self.glob_pool(x)) |
@require_torch
class MakeStudentTester(unittest.TestCase):
@cached_property
def teacher_config(self):
return AutoConfig.from_pretrained(TINY_BART)
def test_valid_t5(self):
(student, *_) = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
self.assertEqual(student.config.num_hidden_layers, 1)
def test_asymmetric_t5(self):
(student, *_) = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
def test_same_decoder_small_encoder(self):
(student, *_) = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
self.assertEqual(student.config.encoder_layers, 1)
self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
def test_small_enc_small_dec(self):
(student, *_) = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
self.assertEqual(student.config.encoder_layers, 1)
self.assertEqual(student.config.decoder_layers, 1)
def test_raises_assert(self):
with self.assertRaises(AssertionError):
create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None) |
def run_speed_benchmark():
if os.path.isfile(SPEED_RUN_PICKLE):
print(f'result file {SPEED_RUN_PICKLE} is already present. skipping data generation.')
return
print('\nSpeed test:')
print('testing {} evenly distributed random polynomials'.format(NR_SAMPLES_SPEED_TEST))
print('average timings per polynomial:\n')
print(' {:11s} | {:38s} | {:35s} | {:35s} | {:20s}'.format('parameters', 'setup time (s)', 'eval time (s)', '# operations', 'lucrative after '))
template = '{0:3s} | {1:7s} | {2:10s} | {3:10s} | {4:13s} | {5:10s} | {6:10s} | {7:10s} | {8:10s} | {9:10s} | {10:10s} | {11:10s}'
print(template.format('dim', 'max_deg', 'canonical', 'horner', 'delta', 'canonical', 'horner', 'delta', 'canonical', 'horner', 'delta', ' # evals'))
print(('=' * 160))
all_results = []
for dim in DIM_RANGE:
dim_run_results = []
for maximal_degree in DEGREE_RANGE:
result_single_run = speed_test_run(dim, maximal_degree, NR_SAMPLES_SPEED_TEST, template)
dim_run_results.append(result_single_run)
print()
all_results.append(dim_run_results)
print('writing results to file:', SPEED_RUN_PICKLE)
with open(SPEED_RUN_PICKLE, 'wb') as f:
pickle.dump(all_results, f)
print('...done.\n') |
class MultiInputDecoder(FairseqDecoder):
def __init__(self, dictionary):
super().__init__(dictionary)
def select_decoder(self, mode, **kwargs):
raise NotImplementedError('Model must implement the select_decoder method')
return (None, kwargs)
def forward(self, prev_output_tokens, encoder_out, incremental_state=None, mode='', **kwargs):
(decoder, kwargs) = self.select_decoder(mode, **kwargs)
return decoder(prev_output_tokens, encoder_out, incremental_state=incremental_state, **kwargs) |
def _make_ferminets():
(key, ion_pos, _, init_pos, spin_split, ndense_list) = _get_initial_pos_and_hyperparams()
slog_psis = []
for (cyclic_spins, use_det_resnet, determinant_fn_mode, full_det, num_heads, use_transformer) in [(False, False, models.construct.DeterminantFnMode.SIGN_COVARIANCE, False, 1, False), (False, False, models.construct.DeterminantFnMode.SIGN_COVARIANCE, True, 1, False), (True, True, models.construct.DeterminantFnMode.SIGN_COVARIANCE, False, 1, False), (False, True, models.construct.DeterminantFnMode.PARALLEL_EVEN, False, 1, False), (True, True, models.construct.DeterminantFnMode.PAIRWISE_EVEN, False, 1, False)]:
compute_input_streams = _get_compute_input_streams(ion_pos)
backflow = _get_backflow(spin_split, ndense_list, cyclic_spins, use_transformer, num_heads)
resnet_det_fn = (_get_det_resnet_fn() if use_det_resnet else None)
slog_psi = models.construct.FermiNet(spin_split, compute_input_streams, backflow, 3, models.weights.get_kernel_initializer('he_normal'), models.weights.get_kernel_initializer('lecun_normal'), models.weights.get_kernel_initializer('ones'), 0.0, models.weights.get_bias_initializer('uniform'), orbitals_use_bias=True, isotropic_decay=True, determinant_fn=resnet_det_fn, determinant_fn_mode=determinant_fn_mode, full_det=full_det)
slog_psis.append(slog_psi)
return (key, init_pos, slog_psis) |
class CustomLexer(ExtendedRegexLexer):
name = 'A Lexer for IHeartLA'
functions = ['trace', 'tr', 'vec', 'diag', 'eig', 'conj', 'Re', 'Im', 'inv', 'det', 'svd', 'rank', 'null', 'orth', 'qr', 'sum', '', 'min', 'max', 'argmin', 'argmax', 'sin', 'asin', 'arcsin', 'cos', 'acos', 'arccos', 'tanh', 'cot', 'sec', 'csc', 'atan2', 'exp', 'log', 'ln', 'sqrt']
word_operators = ['for', 'if', 'else']
def ident_callback(lexer, match, ctx):
m = regex.match('[A-Za-z\\p{Ll}\\p{Lu}\\p{Lo}]\\p{M}*', ctx.text[ctx.pos:ctx.end])
if m:
(yield ((ctx.pos + m.start()), Name.Variable, m[0]))
ctx.pos += m.end()
return
else:
print(('Unable to treat %s as identifier\n' % match))
raise
return
tokens = {'root': [('where|given', Keyword.Namespace), ('from', Keyword.Namespace, 'import_state'), ('\\s=\\s', Keyword.Declaration), ('->|1|\\|\\||[\\+\\-/\\^_><T\\|\\*]', Operator), (words(word_operators), Operator.Word), (':', Keyword, 'where_rhs'), (words(functions), Name.Function), ('[()\\[\\],{};]', Punctuation), ('R|Z', Name.Builtin), ('(`)(.*?)(`)', bygroups(Comment, Name.Variable, Comment)), ('[\\u2070\\u00B9\\u00B2\\u00B3\\u2074-\\u2079]|[\\u2080-\\u2089]|\\d+', Literal.Number), ('\\w', ident_callback), ('\\s+?', Text), ('.*\\n', Text)], 'where_rhs': [('\\s*:', Keyword, 'comment'), ('$', Generic, '#pop'), include('root')], 'comment': [('.*$', Comment, '#pop:2')], 'import_state': [('([^:]+?)(\\s*:\\s*)(.*$)', bygroups(Name.Class, Keyword, Name.Function), '#pop')]} |
def resize(input, size=None, scale_factor=None, mode='nearest', align_corners=None, warning=True):
if warning:
if ((size is not None) and align_corners):
(input_h, input_w) = tuple((int(x) for x in input.shape[2:]))
(output_h, output_w) = tuple((int(x) for x in size))
if ((output_h > input_h) or (output_w > input_w)):
if (((output_h > 1) and (output_w > 1) and (input_h > 1) and (input_w > 1)) and ((output_h - 1) % (input_h - 1)) and ((output_w - 1) % (input_w - 1))):
warnings.warn(f'When align_corners={align_corners}, the output would more aligned if input size {(input_h, input_w)} is `x+1` and out size {(output_h, output_w)} is `nx+1`')
return F.interpolate(input, size, scale_factor, mode, align_corners) |
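# Minimal sketch (illustrative): the warning above concerns exact alignability under
# align_corners=True -- output sizes of the form n*(input-1)+1 divide evenly, others do not.
for out_size in (7, 8):  # input spatial size 4 -> 7 is exactly alignable, 8 is not
    aligned = (out_size - 1) % (4 - 1) == 0
    print(out_size, 'exactly alignable' if aligned else 'would trigger the warning above')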
def main():
if torch.cuda.is_available():
dev = (torch.cuda.device_count() - 1)
print('Running on gpu:{}'.format(dev))
else:
print('Running on cpu')
args = parse()
with open(os.path.join('configs', args.config), 'r') as f:
print('loading config file: {}'.format(os.path.join('configs', args.config)))
config_raw = yaml.load(f, Loader=yaml.FullLoader)
config = dict2namespace(config_raw)
config.device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.a:
config.model.augment = True
if args.p:
config.model.positive = True
if (args.z > 0):
config.model.final_layer = True
config.model.feature_size = args.z
if ((not args.transfer) and (not args.semisupervised) and (not args.baseline) and (not args.plot) and (not args.representation)):
print('Training an ICE-BeeM on {}'.format(config.data.dataset))
args.doc = 'transfer'
make_and_set_dirs(args, config)
train(args, config)
if (args.transfer and (not args.baseline) and (not args.plot)):
if (not args.all):
print('Transfer for {} - subset size: {} - seed: {}'.format(config.data.dataset, args.subset_size, args.seed))
args.doc = 'transfer'
make_and_set_dirs(args, config)
transfer(args, config)
else:
new_args = argparse.Namespace(**vars(args))
for n in [0, 500, 1000, 2000, 3000, 4000, 5000, 6000]:
for seed in range(args.seed, (args.n_sims + args.seed)):
print('Transfer for {} - subset size: {} - seed: {}'.format(config.data.dataset, n, seed))
new_args.subset_size = n
new_args.seed = seed
np.random.seed(seed)
torch.manual_seed(seed)
new_args.doc = 'transfer'
make_and_set_dirs(new_args, config)
transfer(new_args, config)
if (config.data.dataset.lower() in ['mnist_transferbaseline', 'cifar10_transferbaseline', 'fashionmnist_transferbaseline', 'cifar100_transferbaseline']):
if (not args.all):
print('Transfer baseline for {} - subset size: {} - seed: {}'.format(config.data.dataset.split('_')[0], args.subset_size, args.seed))
args.doc = 'transferBaseline'
args.doc2 = 'size{}_seed{}'.format(args.subset_size, args.seed)
make_and_set_dirs(args, config)
train(args, config)
else:
new_args = argparse.Namespace(**vars(args))
for n in [0, 500, 1000, 2000, 3000, 4000, 5000, 6000]:
for seed in range(args.seed, (args.n_sims + args.seed)):
print('Transfer baseline for {} - subset size: {} - seed: {}'.format(config.data.dataset.split('_')[0], n, seed))
new_args.subset_size = n
new_args.seed = seed
np.random.seed(seed)
torch.manual_seed(seed)
new_args.doc = 'transferBaseline'
new_args.doc2 = 'size{}_seed{}'.format(n, seed)
make_and_set_dirs(new_args, config)
train(new_args, config)
if (args.transfer and args.baseline and (not args.plot)):
new_args = argparse.Namespace(**vars(args))
new_args.config = ((os.path.splitext(args.config)[0] + '_baseline') + os.path.splitext(args.config)[1])
with open(os.path.join('configs', new_args.config), 'r') as f:
print('loading baseline config file: {}'.format(os.path.join('configs', new_args.config)))
config_raw = yaml.load(f, Loader=yaml.FullLoader)
config = dict2namespace(config_raw)
config.device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
if (not args.all):
print('Transfer baseline for {} - subset size: {} - seed: {}'.format(config.data.dataset.split('_')[0], new_args.subset_size, new_args.seed))
new_args.doc = 'transferBaseline'
new_args.doc2 = 'size{}_seed{}'.format(args.subset_size, args.seed)
make_and_set_dirs(new_args, config)
train(new_args, config)
else:
for n in [0, 500, 1000, 2000, 3000, 4000, 5000, 6000]:
for seed in range(args.seed, (args.n_sims + args.seed)):
print('Transfer baseline for {} - subset size: {} - seed: {}'.format(config.data.dataset.split('_')[0], n, seed))
new_args.subset_size = n
new_args.seed = seed
np.random.seed(seed)
torch.manual_seed(seed)
new_args.doc = 'transferBaseline'
new_args.doc2 = 'size{}_seed{}'.format(n, seed)
make_and_set_dirs(new_args, config)
train(new_args, config)
if (args.plot and (not args.baseline) and (not args.semisupervised) and args.transfer and (not args.representation)):
print('Plotting transfer experiment for {}'.format(config.data.dataset))
args.doc = 'transfer'
args.doc_baseline = 'transferBaseline'
make_and_set_dirs(args, config)
plot_transfer(args, config)
if (args.baseline and (not args.semisupervised) and (not args.transfer) and (not args.representation) and (not args.plot)):
print('Training a baseline EBM on {}'.format(config.data.dataset))
args.doc = 'semisupervisedBaseline'
make_and_set_dirs(args, config)
train(args, config, conditional=False)
if (args.semisupervised and (not args.baseline) and (not args.plot)):
print('Computing semi-supervised accuracy for ICE-BeeM on {}'.format(config.data.dataset))
args.doc = 'transfer'
make_and_set_dirs(args, config)
semisupervised(args, config)
if (args.semisupervised and args.baseline and (not args.plot)):
print('Computing semi-supervised accuracy for baseline EBM on {}'.format(config.data.dataset))
args.doc = 'semisupervisedBaseline'
make_and_set_dirs(args, config)
semisupervised(args, config)
if args.representation:
config.n_labels = (10 if (config.data.dataset.lower().split('_')[0] != 'cifar100') else 100)
if ((not args.mcc) and (not args.baseline) and (not args.plot)):
for seed in range(args.seed, (args.n_sims + args.seed)):
print('Learning representation for {} - seed: {}'.format(config.data.dataset, seed))
new_args = argparse.Namespace(**vars(args))
new_args.seed = seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
new_args.doc = 'representation'
new_args.doc2 = 'seed{}'.format(seed)
make_and_set_dirs(new_args, config)
compute_representations(new_args, config)
if (args.baseline and (not args.mcc) and (not args.plot)):
for seed in range(args.seed, (args.n_sims + args.seed)):
print('Learning baseline representation for {} - seed: {}'.format(config.data.dataset, seed))
new_args = argparse.Namespace(**vars(args))
new_args.seed = seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
new_args.doc = 'representationBaseline'
new_args.doc2 = 'seed{}'.format(seed)
make_and_set_dirs(new_args, config)
compute_representations(new_args, config, conditional=False)
if (args.mcc and (not args.baseline) and (not args.plot)):
if args.all:
for seed in range(args.seed, ((args.n_sims + args.seed) - 1)):
for second_seed in range((seed + 1), (args.n_sims + args.seed)):
print('Computing MCCs for {} - seeds: {} and {}'.format(config.data.dataset, seed, second_seed))
new_args = argparse.Namespace(**vars(args))
new_args.seed = seed
new_args.second_seed = second_seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
new_args.doc = 'representation'
make_and_set_dirs(new_args, config)
compute_mcc(new_args, config)
else:
assert ('second_seed' in vars(args).keys())
print('Computing MCCs for {} - seeds: {} and {}'.format(config.data.dataset, args.seed, args.second_seed))
args.doc = 'representation'
make_and_set_dirs(args, config)
compute_mcc(args, config)
if (args.mcc and args.baseline and (not args.plot)):
if args.all:
for seed in range(args.seed, ((args.n_sims + args.seed) - 1)):
for second_seed in range((seed + 1), (args.n_sims + args.seed)):
print('Computing baseline MCCs for {} - seeds: {} and {}'.format(config.data.dataset, seed, second_seed))
new_args = argparse.Namespace(**vars(args))
new_args.seed = seed
new_args.second_seed = second_seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
new_args.doc = 'representationBaseline'
make_and_set_dirs(new_args, config)
compute_mcc(new_args, config)
else:
assert ('second_seed' in vars(args).keys())
print('Computing baseline MCCs for {} - seeds: {} and {}'.format(config.data.dataset, args.seed, args.second_seed))
args.doc = 'representationBaseline'
make_and_set_dirs(args, config)
compute_mcc(args, config)
if args.plot:
print('Plotting representation experiment for {}'.format(config.data.dataset))
args.doc = 'representation'
args.doc_baseline = 'representationBaseline'
make_and_set_dirs(args, config)
plot_representation(args, config) |
class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
def __init__(self, categories, matching_iou_threshold=0.5):
super(WeightedPascalDetectionEvaluator, self).__init__(categories, matching_iou_threshold=matching_iou_threshold, evaluate_corlocs=False, metric_prefix='WeightedPASCAL', use_weighted_mean_ap=True) |
class MLP_D(nn.Module):
def __init__(self, isize, nz, nc, ndf, ngpu):
super(MLP_D, self).__init__()
self.ngpu = ngpu
main = nn.Sequential(nn.Linear(((nc * isize) * isize), ndf), nn.ReLU(True), nn.Linear(ndf, ndf), nn.ReLU(True), nn.Linear(ndf, ndf), nn.ReLU(True), nn.Linear(ndf, 1))
self.main = main
self.nc = nc
self.isize = isize
self.nz = nz
def forward(self, input):
input = input.view(input.size(0), ((input.size(1) * input.size(2)) * input.size(3)))
if (isinstance(input.data, torch.cuda.FloatTensor) and (self.ngpu > 1)):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
output = output.mean(0)
return output.view(1) |
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print('Overwriting existing file')
else:
print('File exists, skipping download.')
return
tmp_fpath = (fpath + '.tmp')
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = (1024 * 1024)
total_chunks = int((file_size / chunk_size))
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks, unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath) |
def CheckGlobalStatic(filename, clean_lines, linenum, error):
line = clean_lines.elided[linenum]
if (((linenum + 1) < clean_lines.NumLines()) and (not Search('[;({]', line))):
line += clean_lines.elided[(linenum + 1)].strip()
match = Match('((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)', line)
if (match and (not Search('\\bstring\\b(\\s+const)?\\s*\\*\\s*(const\\s+)?\\w', line)) and (not Search('\\boperator\\W', line)) and (not Match('\\s*(<.*>)?(::[a-zA-Z0-9_]+)*\\s*\\(([^"]|$)', match.group(3)))):
error(filename, linenum, 'runtime/string', 4, ('For a static/global string constant, use a C style string instead: "%schar %s[]".' % (match.group(1), match.group(2))))
if Search('\\b([A-Za-z0-9_]*_)\\(\\1\\)', line):
error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') |
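# Illustrative only (hypothetical inputs, not from the source): the kinds of lines
# this cpplint-style check is designed to flag.
flagged_string = 'static const string kName = "foo";'        # runtime/string: suggests `static const char kName[] = "foo";`
flagged_init = 'explicit Foo(int count) : count_(count_) {}'  # runtime/init: member initialized with itself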
def build_fake_yaml():
fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: x\n outputs: op_to_store\n device: cpu\n evaluation:\n accuracy:\n metric:\n topk: 1\n performance:\n warmup: 5\n iteration: 10\n configs:\n cores_per_instance: 4\n num_of_instance: 2\n tuning:\n accuracy_criterion:\n relative: 0.01\n '
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
yaml.dump(y, f)
f.close() |
@register_model
def eca_botnext26ts_256(pretrained=False, **kwargs):
kwargs.setdefault('img_size', 256)
return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) |
def x_u_split_semi_cifar(labels, num_expand_x, num_expand_u, device_ids, server_idxs):
unlabeled_idx = []
unlabeled_idx_list = []
for id in range(len(device_ids)):
unlabeled_idx = device_ids[id]
exapand_unlabeled = ((num_expand_u // len(device_ids[id])) // len(device_ids))
unlabeled_idx = np.hstack([unlabeled_idx for _ in range(exapand_unlabeled)])
if (len(unlabeled_idx) < (num_expand_u // len(device_ids))):
diff = ((num_expand_u // len(device_ids)) - len(unlabeled_idx))
unlabeled_idx = np.hstack((unlabeled_idx, np.random.choice(unlabeled_idx, diff)))
else:
assert (len(unlabeled_idx) == (num_expand_u // len(device_ids)))
unlabeled_idx_list.append(unlabeled_idx)
labeled_idx_list = []
for id in range(len(device_ids)):
labeled_idx = server_idxs[id]
exapand_unlabeled = ((num_expand_u // len(device_ids[id])) // len(device_ids))
labeled_idx = np.hstack([labeled_idx for _ in range(exapand_unlabeled)])
if (len(labeled_idx) < (num_expand_u // len(device_ids))):
diff = ((num_expand_u // len(device_ids)) - len(labeled_idx))
labeled_idx = np.hstack((labeled_idx, np.random.choice(labeled_idx, diff)))
else:
assert (len(labeled_idx) == (num_expand_u // len(device_ids)))
labeled_idx_list.append(labeled_idx)
return (labeled_idx_list, unlabeled_idx_list) |
def ensure_directory(path):
if ((path == '') or (path == '.')):
return
if ((path is not None) and (len(path) > 0)):
assert (not op.isfile(path)), '{} is a file'.format(path)
if ((not os.path.exists(path)) and (not op.islink(path))):
try:
os.makedirs(path)
except:
if os.path.isdir(path):
pass
else:
raise |
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, default=10)
parser.add_argument('files', nargs='*', help='input files')
args = parser.parse_args()
seen = set()
with fileinput.input(args.files, mode='rb') as h:
pool = Pool(args.workers)
results = pool.imap_unordered(get_hashes_and_lines, h, 1000)
for (i, (hash, raw_line)) in enumerate(results):
if (hash not in seen):
seen.add(hash)
sys.stdout.buffer.write(raw_line)
if ((i % 1000000) == 0):
print(i, file=sys.stderr, end='', flush=True)
elif ((i % 100000) == 0):
print('.', file=sys.stderr, end='', flush=True)
print(file=sys.stderr, flush=True) |
class Tagger(object):
def __init__(self):
self._spacy_tagger = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def tokenize_text(self, text: str):
return [t for t in self._spacy_tagger(text)] |
def test_batting_stats_bref_bad_year() -> None:
with pytest.raises(ValueError):
league_batting_stats.batting_stats_bref('NOT A YEAR') |
def train(train_Xy, n_epochs=4, batch_size=4):
tokenizer = RobertaTokenizer.from_pretrained('allenai/biomed_roberta_base')
model = RobertaForSequenceClassification.from_pretrained('allenai/biomed_roberta_base').to(device=device)
from transformers import AdamW
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
best_val = np.inf
train_epoch_loss = 0
for epoch in range(n_epochs):
model.train()
print('on epoch ', epoch)
train_epoch_loss = 0
(batch_X, batch_y) = ([], [])
cur_batch_size = 0
for (i, article) in enumerate(train_Xy):
if ((i % 100) == 0):
print('on article', i)
(cur_X, cur_y) = instances_from_article(article, max_instances=(batch_size - cur_batch_size))
batch_X.extend(cur_X)
batch_y.extend(cur_y)
cur_batch_size += len(cur_X)
if (cur_batch_size >= batch_size):
optimizer.zero_grad()
batch_X_tensor = tokenizer.batch_encode_plus(batch_X[:batch_size], max_length=512, add_special_tokens=True, pad_to_max_length=True)
batch_y_tensor = torch.tensor(batch_y[:batch_size])
(loss, logits) = model(torch.tensor(batch_X_tensor['input_ids']).to(device=device), attention_mask=torch.tensor(batch_X_tensor['attention_mask']).to(device=device), labels=batch_y_tensor.to(device=device))
train_epoch_loss += loss.cpu().detach().numpy()
loss.backward()
optimizer.step()
cur_batch_size = 0
(batch_X, batch_y) = ([], [])
print('total epoch train loss {}'.format(train_epoch_loss))
print('evaluating on val...')
model.eval()
(total_correct, total_preds) = (0, 0)
val_loss = 0
for (j, article) in enumerate(val_Xy):
(val_X, val_y) = instances_from_article(article, max_instances=batch_size)
val_X_tensor = tokenizer.batch_encode_plus(val_X[:batch_size], max_length=512, add_special_tokens=True, pad_to_max_length=True)
val_y_tensor = torch.tensor(val_y[:batch_size])
(loss, logits) = model(torch.tensor(val_X_tensor['input_ids']).to(device=device), attention_mask=torch.tensor(val_X_tensor['attention_mask']).to(device=device), labels=torch.tensor(val_y_tensor).to(device=device))
val_loss += loss.cpu().detach().numpy()
class_preds = torch.argmax(logits, dim=1).detach().cpu()
total_correct += (class_preds == val_y_tensor).sum()
total_preds += len(val_X)
val_acc = (total_correct / float(total_preds))
print('val loss, acc after epoch {} is: {}, {}'.format(epoch, val_loss, val_acc))
if (val_loss < best_val):
print('new best loss: {}'.format(val_loss))
best_val = val_loss
torch.save(model.state_dict(), 'inference.model') |
class SppBackbone(nn.Module):
def __init__(self):
super(SppBackbone, self).__init__()
self.inplanes = 32
self.in_conv = nn.Sequential(nn.Conv2d(3, 16, kernel_size=3, padding=1, stride=2, bias=False), nn.BatchNorm2d(16), nn.ReLU(inplace=True), nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(16), nn.ReLU(inplace=True), nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
self.resblock_1 = self._make_layer(BasicBlock, 64, 3, 2)
self.resblock_2 = self._make_layer(BasicBlock, 128, 3, 2)
self.branch1 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)), nn.Conv2d(128, 32, kernel_size=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)), nn.Conv2d(128, 32, kernel_size=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(nn.AvgPool2d((4, 4), stride=(4, 4)), nn.Conv2d(128, 32, kernel_size=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(nn.AvgPool2d((2, 2), stride=(2, 2)), nn.Conv2d(128, 32, kernel_size=1, bias=False), nn.BatchNorm2d(32), nn.ReLU(inplace=True))
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x: NestedTensor):
(_, _, h, w) = x.left.shape
src_stereo = torch.cat([x.left, x.right], dim=0)
output = self.in_conv(src_stereo)
output_1 = self.resblock_1(output)
output_2 = self.resblock_2(output_1)
(h_spp, w_spp) = (math.ceil((h / 16)), math.ceil((w / 16)))
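# Upsample every pooled branch back to 1/16 of the input resolution so the four 32-channel maps can be concatenated along the channel dimension.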
spp_1 = self.branch1(output_2)
spp_1 = F.interpolate(spp_1, size=(h_spp, w_spp), mode='bilinear', align_corners=False)
spp_2 = self.branch2(output_2)
spp_2 = F.interpolate(spp_2, size=(h_spp, w_spp), mode='bilinear', align_corners=False)
spp_3 = self.branch3(output_2)
spp_3 = F.interpolate(spp_3, size=(h_spp, w_spp), mode='bilinear', align_corners=False)
spp_4 = self.branch4(output_2)
spp_4 = F.interpolate(spp_4, size=(h_spp, w_spp), mode='bilinear', align_corners=False)
output_3 = torch.cat([spp_1, spp_2, spp_3, spp_4], dim=1)
return [src_stereo, output_1, output_2, output_3] |
def save_model(model, optimizer, epoch, args):
os.system('mkdir -p {}'.format(args.work_dirs))
if (optimizer is not None):
torch.save({'net': model.state_dict(), 'optim': optimizer.state_dict(), 'epoch': epoch}, os.path.join(args.work_dirs, '{}.pth'.format(epoch)))
else:
torch.save({'net': model.state_dict(), 'epoch': epoch}, os.path.join(args.work_dirs, '{}.pth'.format(epoch))) |
class DatasetFolder(VisionDataset):
def __init__(self, root, loader, extensions=None, transform_common=None, transform_parallel=None, target_transform=None, is_valid_file=None):
super(DatasetFolder, self).__init__(root, transform_common=transform_common, transform_parallel=transform_parallel, target_transform=target_transform)
(classes, class_to_idx) = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if (len(samples) == 0):
raise RuntimeError(((('Found 0 files in subfolders of: ' + self.root) + '\nSupported extensions are: ') + ','.join(extensions)))
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
def _find_classes(self, dir):
if (sys.version_info >= (3, 5)):
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return (classes, class_to_idx)
def __getitem__(self, index):
(path, target) = self.samples[index]
sample = self.loader(path)
if (self.transform_common is not None):
sample = self.transform_common(sample)
if (self.transform_parallel is not None):
assert isinstance(self.transform_parallel, list)
transformed_sample = [transform(sample) for transform in self.transform_parallel]
if (self.target_transform is not None):
target = self.target_transform(target)
return (transformed_sample, target)
def __len__(self):
return len(self.samples) |
def get_quad_double_solutions(vrblvl=0):
nbrsols = number_quad_double_solutions(vrblvl)
if (vrblvl > 0):
print('number of solutions retrieved :', nbrsols)
result = []
if (nbrsols > 0):
sol = get_next_quad_double_solution(1, vrblvl)
if (vrblvl > 0):
print('the first solution :\n', sol)
result.append(sol)
idx = 1
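# Retrieve the remaining solutions by advancing the solution cursor one entry at a time.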
for _ in range(1, nbrsols):
idx = move_quad_double_solution_cursor(idx, vrblvl)
if (vrblvl > 0):
print('the next index :', idx)
sol = get_next_quad_double_solution(idx, vrblvl)
if (vrblvl > 0):
print('the solution at index', idx, ':\n', sol)
result.append(sol)
return result |
def main(Pd_l=[0.0, 0.0]):
Nh_l = [100, 50]
number_of_class = 10
Nout = number_of_class
((X_train, Y_train), (X_test, Y_test)) = Data_func()
model = DNN(X_train.shape[1], Nh_l, Pd_l, Nout)
history = model.fit(X_train, Y_train, epochs=100, batch_size=100, validation_split=0.2)
performance_test = model.evaluate(X_test, Y_test, batch_size=100)
print('Test Loss and Accuracy ->', performance_test)
plot_acc(history, '(a) ')
plt.show()
plot_loss(history, '(b) ')
plt.show() |
class RandomHorizontalFlip(object):
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, target, target2=None):
if (random.random() < self.prob):
image = F.hflip(image)
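# The target is assumed to expose a transpose(method) interface (e.g. a BoxList-style container), with 0 denoting a horizontal flip.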
target = target.transpose(0)
if (not (target2 is None)):
target2 = target2.transpose(0)
if (target2 is None):
return (image, target)
else:
return (image, target, target2) |
_immediately
def writer(args, finish_queue_lst):
(_, query_id_memmap) = get_embed_memmap(args.query_embedding_dir, args.embedding_dim)
with open(args.output_path, 'w') as outFile:
for qid in query_id_memmap:
score_docid_lst = []
for q in finish_queue_lst:
score_docid_lst = (score_docid_lst + q.get())
score_docid_lst = sorted(score_docid_lst, reverse=True)
for (rank_idx, (score, para_id)) in enumerate(score_docid_lst[:args.hit]):
outFile.write(f'{qid} {para_id} {rank_idx + 1}\n')
def main():
is_performance = True
is_int8 = False
output_file = 'benchmark.txt'
sequence_len = 0
iterations = 10
warmup = 5
batch_size = [16, 32]
instance_cores = [[4, 7]]
allocator_mode = [1]
model_list = ['bert_mini_mrpc', 'distilroberta_base_wnli', 'distilbert_base_uncased_sst2', 'roberta_base_mrpc', 'bert_base_nli_mean_tokens_stsb', 'bert_base_sparse_mrpc', 'distilbert_base_uncased_mrpc', 'bert_mini_sst2', 'bert_base_mrpc', 'minilm_l6_h384_uncased_sst2', 'distilbert_base_uncased_emotion', 'paraphrase_xlm_r_multilingual_v1_stsb', 'finbert_financial_phrasebank', 'bert_large_squad']
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--batch', '-b', help='batch size 1,2,3: --batch 1 2 3 ', type=int, nargs='+', dest='batch')
parser.add_argument('--allocator', '-a', help=(('allocator 1,5: --allocator 1 5' + '(0:direct 1:cycle,this one is default 2:unified 3:jemalloc+direct 4:jemalloc+cycle ') + ' 5:jemalloc+unified)'), type=int, nargs='+', dest='allocator')
parser.add_argument('--instance_cores', '-i', help='--instance_cores 4x7 1x28 , it means 4instance 7 cores and 1 instance 28 cores', type=str, nargs='+', dest='i_c')
parser.add_argument('--model', '-m', help=(((('--model bert_mini_mrpc,distilbert_base_uncased_sst2,roberta_base_mrpc,' + 'bert_base_nli_mean_tokens_stsb,bert_base_sparse_mrpc,distilbert_base_uncased_mrpc,') + 'bert_mini_sst2,bert_base_mrpc,minilm_l6_h384_uncased_sst2,') + 'distilbert_base_uncased_emotion,paraphrase_xlm_r_multilingual_v1_stsb,') + 'finbert_financial_phrasebank,bert_large_squad'), type=str, nargs='+', dest='model_name')
parser.add_argument('--warmup', '-w', help='warmup 10 times: --warmup 10 ', type=int, dest='warmup')
parser.add_argument('--iterations', '-e', help='execute 50 times: --iterations 50 ', type=int, dest='iterations')
parser.add_argument('--seq_len', '-s', help='you can only input one int', type=int, dest='seq_len')
parser.add_argument('--int8', type=int, dest='int8')
parser.add_argument('--is_performance', '-p', help='1: performance mode, 0: accuracy mode', type=int, dest='is_performance')
parser.add_argument('--label_file', '-l', help='--only bert large need this path', type=str, dest='label_file')
parser.add_argument('--vocab_file', '-v', help='--only bert large need this path', type=str, dest='vocab_file')
parser.add_argument('--output_file', '-o', help='outputfile: --output_file benchmark.txt', type=str, dest='output_file')
args = parser.parse_args()
if args.batch:
batch_size = []
for batch_val in args.batch:
batch_size.append(batch_val)
if args.allocator:
allocator_mode = []
for allocator_val in args.allocator:
allocator_mode.append(allocator_val)
if args.i_c:
instance_cores = []
ic_val = []
for ic_val in args.i_c:
ic_value = ic_val.split('x')
tmp_list = [int(ic_value[0]), int(ic_value[1])]
instance_cores.append(tmp_list)
if args.model_name:
model_list = []
for model_val in args.model_name:
model_list.append(model_val)
if args.warmup:
warmup = args.warmup
if args.iterations:
iterations = args.iterations
if (args.int8 == 1):
is_int8 = True
if (args.is_performance == 0):
is_performance = False
label_file = ''
if args.label_file:
label_file = args.label_file
vocab_file = ''
if args.vocab_file:
vocab_file = args.vocab_file
if args.output_file:
output_file = args.output_file
if args.seq_len:
sequence_len = args.seq_len
test_all(is_performance, model_list, batch_size, instance_cores, allocator_mode, sequence_len, warmup, iterations, is_int8, label_file, vocab_file, output_file) |
def gen_latex_table(table):
content = '\\begin{table*}\n'
content += tabulate(table[1:], headers=table[0], tablefmt='latex')
content += '\n\\end{table*}'
return content |
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo', save_images=False, save_videos=False, show_image=True):
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
accs = []
n_frame = 0
(timer_avgs, timer_calls) = ([], [])
for seq in seqs:
output_dir = (os.path.join(data_root, '..', 'outputs', exp_name, seq) if (save_images or save_videos) else None)
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[(meta_info.find('frameRate') + 10):meta_info.find('\nseqLength')])
(nf, ta, tc) = eval_seq(opt, dataloader, data_type, result_filename, save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = (all_time / np.sum(timer_calls))
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, (1.0 / avg_time)))
metrics = mm.metrics.motchallenge_metrics
mh = mm.metrics.create()
summary = Evaluator.get_summary(accs, seqs, metrics)
strsummary = mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)
print(strsummary)
Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name))) |
def process_sdf(sdf_path, table, progress=True):
supplier = Chem.SDMolSupplier(sdf_path)
molecules = []
fragments = []
linkers = []
out_table = []
uuid = 0
supplier = (tqdm(supplier, total=len(supplier)) if progress else supplier)
for mol in supplier:
mol_name = mol.GetProp('_Name')
mol_smi = Chem.MolToSmiles(mol)
mol.SetProp('_Name', mol_smi)
for (linker_smi, frags_smi) in table[(table.molecule == mol_name)][['linker', 'fragments']].values:
try:
(frag1, frag2, linker) = prepare_fragments_and_linker(frags_smi, linker_smi, mol)
except Exception as e:
print(f'{mol_smi} | {linker_smi} | {frags_smi} : {e}')
continue
frags = Chem.CombineMols(frag1, frag2)
anchors_idx = get_anchors_idx(frags)
if (len(anchors_idx) != 2):
print(f'{mol_smi} | {linker_smi} | {frags_smi} : found {len(anchors_idx)} anchors')
continue
molecules.append(mol)
fragments.append(frags)
linkers.append(linker)
out_table.append({'uuid': uuid, 'molecule': mol_smi, 'fragments': Chem.MolToSmiles(frags), 'linker': Chem.MolToSmiles(linker), 'anchor_1': anchors_idx[0], 'anchor_2': anchors_idx[1], 'energy': mol.GetProp('_Energy')})
uuid += 1
return (molecules, fragments, linkers, pd.DataFrame(out_table)) |
class DispAgg(nn.Module):
def __init__(self, maxdisp=192):
super(DispAgg, self).__init__()
self.maxdisp = maxdisp
self.LGA3 = LGA3(radius=2)
self.LGA2 = LGA2(radius=2)
self.LGA = LGA(radius=2)
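# Softmin assigns higher weight to lower (cost-like) values along the disparity dimension.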
self.softmax = nn.Softmin(dim=1)
self.disparity = DisparityRegression(maxdisp=self.maxdisp)
self.conv32x1 = nn.Conv3d(32, 1, (3, 3, 3), (1, 1, 1), (1, 1, 1), bias=False)
def lga(self, x, g):
g = F.normalize(g, p=1, dim=1)
x = self.LGA2(x, g)
return x
def forward(self, x, lg1, lg2):
x = F.interpolate(self.conv32x1(x), [(self.maxdisp + 1), (x.size()[3] * 3), (x.size()[4] * 3)], mode='trilinear', align_corners=False)
x = torch.squeeze(x, 1)
assert (lg1.size() == lg2.size())
x = self.lga(x, lg1)
x = self.softmax(x)
x = self.lga(x, lg2)
x = F.normalize(x, p=1, dim=1)
return self.disparity(x) |
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args |
def build_model(x, is_training, config):
return backend(spec_frontend(x, is_training, config, 16), is_training, config, 500) |
def convert_y_domain(mpl_plot_bounds, mpl_max_y_bounds):
mpl_y_dom = [mpl_plot_bounds[1], (mpl_plot_bounds[1] + mpl_plot_bounds[3])]
plotting_height = (mpl_max_y_bounds[1] - mpl_max_y_bounds[0])
y0 = ((mpl_y_dom[0] - mpl_max_y_bounds[0]) / plotting_height)
y1 = ((mpl_y_dom[1] - mpl_max_y_bounds[0]) / plotting_height)
return [y0, y1] |
class TestMaskFormer(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_path = 'maskformer/maskformer_r50_ms-16xb1-75e_coco.py'
model_cfg = get_detector_cfg(cfg_path)
base_channels = 32
model_cfg.backbone.depth = 18
model_cfg.backbone.init_cfg = None
model_cfg.backbone.base_channels = base_channels
model_cfg.panoptic_head.in_channels = [(base_channels * (2 ** i)) for i in range(4)]
model_cfg.panoptic_head.feat_channels = base_channels
model_cfg.panoptic_head.out_channels = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.layer_cfg.ffn_cfg.feedforward_channels = (base_channels * 8)
model_cfg.panoptic_head.pixel_decoder.positional_encoding.num_feats = (base_channels // 2)
model_cfg.panoptic_head.positional_encoding.num_feats = (base_channels // 2)
model_cfg.panoptic_head.transformer_decoder.layer_cfg.self_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.layer_cfg.cross_attn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.layer_cfg.ffn_cfg.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.layer_cfg.ffn_cfg.feedforward_channels = (base_channels * 8)
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
detector.init_weights()
assert detector.backbone
assert detector.panoptic_head
@parameterized.expand([('cpu',), ('cuda',)])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, image_shapes=[(3, 128, 127), (3, 91, 92)], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu',), ('cuda',)])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, image_shapes=[(3, 128, 127), (3, 91, 92)], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu',), ('cuda',)])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = MODELS.build(model_cfg)
if ((device == 'cuda') and (not torch.cuda.is_available())):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]], sem_seg_output_strides=1, with_mask=True, with_semantic=True)
data = detector.data_preprocessor(packed_inputs, False)
out = detector.forward(**data, mode='tensor')
self.assertIsInstance(out, tuple) |
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--input_model', type=str, required=False, default='MRPC.zip')
parser.add_argument('--output_model', type=str, required=True)
parser.add_argument('--max_len', type=int, default=128, help='Maximum length of the sentence pairs')
return parser.parse_args() |
def cat_id_to_desc(cat_id):
if isinstance(cat_id, (list, tuple)):
return tuple((_cat_descs[c] for c in cat_id))
else:
return _cat_descs[cat_id] |
class ResNetAttention(nn.Module):
def __init__(self, label_dim=527, pretrain=True):
super(ResNetAttention, self).__init__()
self.model = torchvision.models.resnet50(pretrained=False)
if (pretrain == False):
print('ResNet50 Model Trained from Scratch (ImageNet Pretraining NOT Used).')
else:
print('Now Use ImageNet Pretrained ResNet50 Model.')
self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.model.fc = torch.nn.Identity()
self.model.avgpool = torch.nn.Identity()
self.attention = Attention(2048, label_dim, att_activation='sigmoid', cla_activation='sigmoid')
self.avgpool = nn.AvgPool2d((4, 1))
def forward(self, x):
x = x.unsqueeze(1)
x = x.transpose(2, 3)
batch_size = x.shape[0]
x = self.model(x)
x = x.reshape([batch_size, 2048, 4, 33])
x = self.avgpool(x)
x = x.transpose(2, 3)
(out, norm_att) = self.attention(x)
return out |
def cross_entropy(outputs, targets, exp=1, size_average=True, eps=1e-05):
out = torch.nn.functional.softmax(outputs, dim=1)
tar = torch.nn.functional.softmax(targets, dim=1)
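# When exp != 1, both distributions are raised to the power exp and renormalized, a temperature-like sharpening (exp > 1) or softening (exp < 1).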
if (exp != 1):
out = out.pow(exp)
out = (out / out.sum(1).view((- 1), 1).expand_as(out))
tar = tar.pow(exp)
tar = (tar / tar.sum(1).view((- 1), 1).expand_as(tar))
out = (out + (eps / out.size(1)))
out = (out / out.sum(1).view((- 1), 1).expand_as(out))
ce = (- (tar * out.log()).sum(1))
if size_average:
ce = ce.mean()
return ce |
class PolynomialEncoderTransformer(AutotabularPreprocessingAlgorithm):
def __init__(self, cols=None, random_state: Optional[np.random.RandomState]=None):
self.cols = cols
self.random_state = random_state
def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE]=None) -> 'PolynomialEncoderTransformer':
self.preprocessor = PnEncoder(cols=self.cols)
self.preprocessor.fit(X)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if (self.preprocessor is None):
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
return {'shortname': 'PolynomialEncoderTransformer', 'name': 'PolynomialEncoder Transformer', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
return ConfigurationSpace() |
class JointPlayerPolicy(OpenSpielPolicy):
def __init__(self, game, policies):
player_ids = [0, 1]
super(JointPlayerPolicy, self).__init__(game, player_ids)
self._policies = policies
self._obs = {'info_state': [None, None], 'legal_actions': [None, None]}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
player_policy: OpenSpielPolicy = self._policies[cur_player]
return player_policy.action_probabilities(state=state, player_id=cur_player) |
def entry_test_corrupt(cfg, model_path=''):
model = get_model(cfg)
model.to(DEVICE)
print(model)
if (torch.cuda.device_count() > 1):
model = nn.DataParallel(model)
(optimizer, lr_sched, bnm_sched) = get_optimizer(cfg.EXP.OPTIMIZER, cfg.TRAIN, model)
model = load_model_opt_sched(model, optimizer, lr_sched, bnm_sched, model_path)
model.eval()
def test_corrupt(cfg, model, split):
loader_test = create_dataloader(split=split, cfg=cfg)
return validate(cfg.EXP.TASK, loader_test, model, cfg.EXP.DATASET)
eval_corrupt_wrapper(model, test_corrupt, {'cfg': cfg}) |
class ConvType(Enum):
HYPERCUBE = (0, 'HYPERCUBE')
SPATIAL_HYPERCUBE = (1, 'SPATIAL_HYPERCUBE')
SPATIO_TEMPORAL_HYPERCUBE = (2, 'SPATIO_TEMPORAL_HYPERCUBE')
HYPERCROSS = (3, 'HYPERCROSS')
SPATIAL_HYPERCROSS = (4, 'SPATIAL_HYPERCROSS')
SPATIO_TEMPORAL_HYPERCROSS = (5, 'SPATIO_TEMPORAL_HYPERCROSS')
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = (6, 'SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS ')
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value |
def index_select_ND(source: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
index_size = index.size()
suffix_dim = source.size()[1:]
final_size = (index_size + suffix_dim)
target = source.index_select(dim=0, index=index.view((- 1)))
target = target.view(final_size)
return target |
class DefaultConfigs():
def __init__(self, model, server_env=None, dim=2):
self.model = model
self.dim = dim
self.select_prototype_subset = None
self.backbone_path = 'models/i3dbackbone.py'
self.source_dir = os.path.dirname(os.path.realpath(__file__))
self.input_df_name = 'info_df.pickle'
self.model_path = 'models/{}.py'.format(self.model)
if server_env:
self.source_dir = '/home/jaegerp/code/mamma_code/medicaldetectiontoolkit'
self.seed = 0
self.n_workers = 6
self.class_specific_seg_flag = False
self.weight_decay = 0.0
self.relu = 'relu'
self.custom_init = False
self.operate_stride1 = False
self.n_cv_splits = 5
self.n_probabilistic_samples = None
self.test_aug = True
self.hold_out_test_set = False
self.ensemble_folds = False
self.box_color_palette = {'det': 'b', 'gt': 'r', 'neg_class': 'purple', 'prop': 'w', 'pos_class': 'g', 'pos_anchor': 'c', 'neg_anchor': 'c'}
self.scan_det_thresh = False
self.plot_stat_curves = True
self.per_patient_ap = True
self.merge_3D_iou = 0.1
self.n_monitoring_figures = 1
self.assign_values_to_extra_figure = {}
self.frcnn_mode = False
self.return_masks_in_val = False
self.return_masks_in_test = False
self.sixth_pooling = False
self.n_latent_dims = 0 |
def fine_tune(args):
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import AdamW, AutoModelForSequenceClassification
train_dataset = load_dataset_from_local(args.train_data_path, args.model_name_or_path)
val_dataset = load_dataset_from_local(args.data_path, args.model_name_or_path)
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=True)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
optim = AdamW(model.parameters(), lr=5e-05)
results = {'eval_acc': 0}
global_step = (- 1)
for epoch in range(3):
all_labels = []
all_preds = []
for (idx, batch) in enumerate(train_loader):
global_step += 1
optim.zero_grad()
labels = batch.pop('labels')
inputs = batch
model.train()
outputs = model(**inputs, labels=labels)
loss = outputs.loss
logits = outputs.logits
all_labels.append(labels.numpy())
all_preds.append(np.argmax(logits.detach().numpy(), axis=1))
cur_acc = np.mean((np.concatenate(all_labels, axis=0) == np.concatenate(all_preds, axis=0)))
print(' Current acc: %s' % round(cur_acc, 4))
loss.backward()
print(f' Loss: {loss.item()}')
optim.step()
if ((global_step % 100) == 0):
best_acc = results['eval_acc']
cur_acc = evaluate(model, val_loader)
if (cur_acc > best_acc):
results['eval_acc'] = cur_acc
best_acc = results['eval_acc']
print(' Best acc: %s' % round(best_acc, 4))
checkpoint_prefix = 'checkpoint-best-acc'
output_dir = os.path.join('{}'.format(checkpoint_prefix))
if (not os.path.exists(output_dir)):
os.makedirs(output_dir)
model_to_save = (model.module if hasattr(model, 'module') else model)
model.config.to_json_file('{}/config.json'.format(checkpoint_prefix))
output_dir = os.path.join(output_dir, '{}'.format('pytorch_model.bin'))
torch.save(model_to_save.state_dict(), output_dir)
print('Saving model checkpoint to %s' % output_dir)
def get_formants(waveform, sample_rate):
myformants = estimate_formants_lpc(waveform, sample_rate)
formants_dict = {'f1': myformants[2], 'f2': myformants[3], 'f3': myformants[4], 'f4': myformants[5]}
return formants_dict |
class Sequence(BaseLoader):
def __init__(self, split, name, regex='*.jpg', lmdb_env=None):
super(Sequence, self).__init__(split, osp.join(cfg.PATH.SEQUENCES, name), regex, lmdb_env=lmdb_env) |
def getLabel(d, argres):
lbs = []
for i in range(len(argres)):
lbs.append(d[argres[i]])
return lbs |
class SinCUTModel(CUTModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser = CUTModel.modify_commandline_options(parser, is_train)
parser.add_argument('--lambda_R1', type=float, default=1.0, help='weight for the R1 gradient penalty')
parser.add_argument('--lambda_identity', type=float, default=1.0, help='the "identity preservation loss"')
parser.set_defaults(nce_includes_all_negatives_from_minibatch=True, dataset_mode='singleimage', netG='stylegan2', stylegan2_G_num_downsampling=1, netD='stylegan2', gan_mode='nonsaturating', num_patches=1, nce_layers='0,2,4', lambda_HDCE=4.0, ngf=10, ndf=8, lr=0.002, beta1=0.0, beta2=0.99, load_size=1024, crop_size=64, preprocess='zoom_and_patch')
if is_train:
parser.set_defaults(preprocess='zoom_and_patch', batch_size=16, save_epoch_freq=1, save_latest_freq=20000, n_epochs=8, n_epochs_decay=8)
else:
parser.set_defaults(preprocess='none', batch_size=1, num_test=1)
return parser
def __init__(self, opt):
super().__init__(opt)
if self.isTrain:
if (opt.lambda_R1 > 0.0):
self.loss_names += ['D_R1']
if (opt.lambda_identity > 0.0):
self.loss_names += ['idt']
def compute_D_loss(self):
self.real_B.requires_grad_()
GAN_loss_D = super().compute_D_loss()
self.loss_D_R1 = self.R1_loss(self.pred_real, self.real_B)
self.loss_D = (GAN_loss_D + self.loss_D_R1)
return self.loss_D
def compute_G_loss(self):
CUT_loss_G = super().compute_G_loss()
self.loss_idt = (torch.nn.functional.l1_loss(self.idt_B, self.real_B) * self.opt.lambda_identity)
return (CUT_loss_G + self.loss_idt)
def R1_loss(self, real_pred, real_img):
(grad_real,) = torch.autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True, retain_graph=True)
grad_penalty = grad_real.pow(2).view(grad_real.shape[0], (- 1)).sum(1).mean()
return (grad_penalty * (self.opt.lambda_R1 * 0.5)) |
def get_map(waypoint_tuple_list):
origin_map = np.zeros((6000, 6000, 3), dtype='uint8')
origin_map.fill(255)
origin_map = Image.fromarray(origin_map)
return origin_map |
def rotationx(theta):
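# 4x4 homogeneous rotation matrix about the x-axis; theta is given in degrees.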
return np.array([[1.0, 0.0, 0.0, 0.0], [0.0, np.cos(((theta / 180) * np.pi)), np.sin(((theta / 180) * np.pi)), 0.0], [0.0, (- np.sin(((theta / 180) * np.pi))), np.cos(((theta / 180) * np.pi)), 0.0], [0.0, 0.0, 0.0, 1.0]]) |
class TSP(Environment[State]):
def __init__(self, generator: Optional[Generator]=None, reward_fn: Optional[RewardFn]=None, viewer: Optional[Viewer[State]]=None):
self.generator = (generator or UniformGenerator(num_cities=20))
self.num_cities = self.generator.num_cities
self.reward_fn = (reward_fn or DenseReward())
self._viewer = (viewer or TSPViewer(name='TSP', render_mode='human'))
def __repr__(self) -> str:
return f'TSP environment with {self.num_cities} cities.'
def reset(self, key: PRNGKey) -> Tuple[(State, TimeStep[Observation])]:
state = self.generator(key)
timestep = restart(observation=self._state_to_observation(state))
return (state, timestep)
def step(self, state: State, action: chex.Numeric) -> Tuple[(State, TimeStep[Observation])]:
is_valid = (~ state.visited_mask[action])
next_state = jax.lax.cond(is_valid, self._update_state, (lambda *_: state), state, action)
reward = self.reward_fn(state, action, next_state, is_valid)
observation = self._state_to_observation(next_state)
is_done = ((next_state.num_visited == self.num_cities) | (~ is_valid))
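# The episode terminates once every city has been visited or an already-visited city is chosen.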
timestep = jax.lax.cond(is_done, termination, transition, reward, observation)
return (next_state, timestep)
def observation_spec(self) -> specs.Spec[Observation]:
coordinates = specs.BoundedArray(shape=(self.num_cities, 2), minimum=0.0, maximum=1.0, dtype=float, name='coordinates')
position = specs.DiscreteArray(self.num_cities, dtype=jnp.int32, name='position')
trajectory = specs.BoundedArray(shape=(self.num_cities,), dtype=jnp.int32, minimum=(- 1), maximum=(self.num_cities - 1), name='trajectory')
action_mask = specs.BoundedArray(shape=(self.num_cities,), dtype=bool, minimum=False, maximum=True, name='action_mask')
return specs.Spec(Observation, 'ObservationSpec', coordinates=coordinates, position=position, trajectory=trajectory, action_mask=action_mask)
def action_spec(self) -> specs.DiscreteArray:
return specs.DiscreteArray(self.num_cities, name='action')
def render(self, state: State) -> Optional[NDArray]:
return self._viewer.render(state)
def animate(self, states: Sequence[State], interval: int=200, save_path: Optional[str]=None) -> matplotlib.animation.FuncAnimation:
return self._viewer.animate(states, interval, save_path)
def close(self) -> None:
self._viewer.close()
def _update_state(self, state: State, action: chex.Numeric) -> State:
return State(coordinates=state.coordinates, position=action, visited_mask=state.visited_mask.at[action].set(True), trajectory=state.trajectory.at[state.num_visited].set(action), num_visited=(state.num_visited + 1), key=state.key)
def _state_to_observation(self, state: State) -> Observation:
return Observation(coordinates=state.coordinates, position=state.position, trajectory=state.trajectory, action_mask=(~ state.visited_mask)) |
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
if ((torch.max(indices) >= sequence_length) or (torch.min(indices) < 0)):
print(f'All elements in indices should be in range (0, {(sequence_length - 1)})')
offsets = (get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length)
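# The offsets shift each batch element's indices into a single flattened (batch_size * sequence_length) index space.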
for _ in range((len(indices.size()) - 1)):
offsets = offsets.unsqueeze(1)
offset_indices = (indices + offsets)
offset_indices = offset_indices.view((- 1))
return offset_indices |
class BitGroupNormActivation(nn.GroupNorm):
def __init__(self, config, num_channels, eps=1e-05, affine=True, apply_activation=True):
super(BitGroupNormActivation, self).__init__(config.num_groups, num_channels, eps=eps, affine=affine)
if apply_activation:
self.activation = ACT2FN[config.hidden_act]
else:
self.activation = nn.Identity()
def forward(self, hidden_state):
hidden_state = nn.functional.group_norm(hidden_state, self.num_groups, self.weight, self.bias, self.eps)
hidden_state = self.activation(hidden_state)
return hidden_state |
def _a3_tab2(brd):
return ((((((- 0.0326) * (brd ** 4.0)) + (0.1816 * (brd ** 3.0))) - (0.2943 * (brd ** 2.0))) - (0.6329 * brd)) + 2.3193) |
def test_quadpack():
from galpy.util.quadpack import dblquad
int = dblquad((lambda y, x: ((4.0 * x) * y)), 0.0, 1.0, (lambda z: 0.0), (lambda z: 1.0))
assert (numpy.fabs((int[0] - 1.0)) < int[1]), 'galpy.util.quadpack.dblquad did not work as expected'
return None |
def read_swag_examples(input_file, is_training):
with open(input_file, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
lines = []
for line in reader:
if (sys.version_info[0] == 2):
line = list((unicode(cell, 'utf-8') for cell in line))
lines.append(line)
if (is_training and (lines[0][(- 1)] != 'label')):
raise ValueError('For training, the input file must contain a label column.')
examples = [SwagExample(swag_id=line[2], context_sentence=line[4], start_ending=line[5], ending_0=line[7], ending_1=line[8], ending_2=line[9], ending_3=line[10], label=(int(line[11]) if is_training else None)) for line in lines[1:]]
return examples |
class TestSNNBiasFit(TrainSNN, GenFullyObsSigmoidSNN, TestBase, unittest.TestCase):
def setUp(self):
self.n_neurons = 2
self.n_epochs = 10
self.sample_size = 500
self.length = 50
def preprocess(self):
self.trainable_model.params['kernel_weight'].data = deepcopy(self.gen_model.params['kernel_weight'].data)
self.trainable_model.params['kernel_weight'].requires_grad = False
def check_fit(self):
print(' * true snn\n', self.gen_model)
print(' * learned snn\n', self.trainable_model)
self.assertTrue(torch.allclose(self.gen_model.params['bias'], self.trainable_model.params['bias'], atol=0.2)) |
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if ('log_time' in kw):
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int(((te - ts) * 1000))
else:
print(('%r \n %2.2f ms' % (method, ((te - ts) * 1000))))
return result
return timed |
def get_epsilon_sigma_uff(m1, m2):
n1 = m1.GetNumAtoms()
n2 = m2.GetNumAtoms()
(vdw_epsilon, vdw_sigma) = (np.zeros((n1, n2)), np.zeros((n1, n2)))
m_combine = CombineMols(m1, m2)
for i1 in range(n1):
for i2 in range(n2):
# In the combined molecule, atoms of m2 occupy indices n1..(n1 + n2 - 1), so the second atom index is offset by n1.
param = GetUFFVdWParams(m_combine, i1, (n1 + i2))
if (param is None):
continue
(d, e) = param
vdw_epsilon[(i1, i2)] = e
vdw_sigma[(i1, i2)] = d
return (vdw_epsilon, vdw_sigma) |
class SummarizationDatasetReaderPkl(DatasetReader):
def __init__(self, source_token_indexers: Dict[(str, TokenIndexer)]=None, dir: str=None, lazy: bool=True, single_oracle=True, fix_edu_num=None, trim_sent_oracle: bool=True, vocab_path: str='', save_to: str=None, dbg: bool=False) -> None:
super().__init__(lazy)
self.word_token_indexers = (source_token_indexers or {'tokens': SingleIdTokenIndexer()})
self.single_oracle = single_oracle
self.fix_edu_num = fix_edu_num
self.vocab = None
self.trim_oracle = trim_sent_oracle
self.build_vocab(vocab_path, save_to=save_to)
self.dir = dir
self.dbg = dbg
def build_vocab(self, file_path, save_to: str=None):
if os.path.isdir(save_to):
try:
self.vocab = Vocabulary.from_files(save_to)
print('Load vocab success')
logging.info('Vocab size: {}'.format(int(self.vocab.get_vocab_size())))
return
except:
print('Failed in loading pre-saved vocab.')
raise NotImplementedError
instances = []
with open(file_path, 'r') as fd:
for line in fd:
data_dict = json.loads(line)
doc_str = data_dict['doc']
allen_token_word_in_doc = TextField([Token(word) for word in doc_str.split()], self.word_token_indexers)
instances.append(Instance({'text': allen_token_word_in_doc}))
self.vocab = Vocabulary.from_instances(instances, min_count={'tokens': 10})
if (save_to is not None):
self.vocab.save_to_files(save_to)
logging.info('Vocab size: {}'.format(int(self.vocab.get_vocab_size())))
def _read(self, file_path: str) -> Iterable[Instance]:
assert (self.vocab is not None)
print('Into the read function. FP: {}'.format(file_path))
if file_path.startswith('train'):
files = [x for x in os.listdir(self.dir) if x.startswith(file_path)]
random.shuffle(files)
if self.dbg:
files = files[:1]
else:
files = files[:6]
elif (file_path.startswith('test') or file_path.startswith('dev')):
files = [x for x in os.listdir(self.dir) if x.startswith(file_path)]
for file in files:
print('Reading {}'.format(file))
logging.info('Reading {}'.format(file))
f = open(os.path.join(self.dir, file), 'rb')
data = pickle.load(f)
for instance_fields in data:
if self.trim_oracle:
sent_oracle = instance_fields['_sent_oracle']
else:
sent_oracle = instance_fields['_non_compression_sent_oracle']
instance_fields.pop('_sent_oracle')
instance_fields.pop('_non_compression_sent_oracle')
(sent_label_list, sent_rouge_list) = filter_oracle_label(self.single_oracle, self.fix_edu_num, sent_oracle)
def edit_label_rouge(_label_list, _rouge_list):
_label_list = [x for x in _label_list]
_max_len = max([len(x) for x in _label_list])
for (idx, label) in enumerate(_label_list):
if (len(label) < _max_len):
_label_list[idx] = (_label_list[idx] + ([(- 1)] * (_max_len - len(label))))
np_gold_label = np.asarray(_label_list, dtype=np.int64)
f = ArrayField(array=np_gold_label, padding_value=(- 1))
r = ArrayField(np.asarray(_rouge_list, dtype=np.float32))
return (f, r)
if (sent_label_list and sent_rouge_list):
(label, rouge) = edit_label_rouge(_label_list=sent_label_list, _rouge_list=sent_rouge_list)
instance_fields['sent_label'] = label
instance_fields['sent_rouge'] = rouge
else:
raise NotImplementedError
(yield self.text_to_instance(instance_fields))
def text_to_instance(self, instance_fields) -> Instance:
return Instance(instance_fields) |
def main(args):
cfg = setup(args)
model = build_model(cfg)
logger.info('Model:\n{}'.format(model))
if args.eval_only:
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
return do_test(cfg, model)
distributed = (comm.get_world_size() > 1)
if distributed:
model = DistributedDataParallel(model, device_ids=[comm.get_local_rank()], broadcast_buffers=False)
do_train(cfg, model)
return do_test(cfg, model) |
@is_pt_tf_cross_test
@require_sentencepiece
@require_tokenizers
class TFPegasusIntegrationTests(unittest.TestCase):
src_text = [PGE_ARTICLE, XSUM_ENTRY_LONGER]
expected_text = ["California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to reduce the risk of wildfires.", 'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.']
model_name = 'google/pegasus-xsum'
@property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@property
def model(self):
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
assert (self.expected_text == generated_words)
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer.prepare_seq2seq_batch(src_texts=self.src_text, **tokenizer_kwargs, return_tensors='tf')
generated_ids = self.model.generate(model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True)
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
def test_batch_generation(self):
self._assert_generated_batch_equal_expected() |
def select_using_loss(batch: Union[(torch.Tensor, torch.Tensor)], batch_idx: int, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule', keep: float=0.5, scale_factor: float=1, loss_fn: Callable=None) -> Tuple[(torch.Tensor, torch.Tensor)]:
INTERPOLATE_MODES = {3: 'linear', 4: 'bilinear', 5: 'trilinear'}
(input, target) = (batch[0], batch[1])
interp_mode = 'bilinear'
if (scale_factor > 1):
invalidInputError(False, 'scale_factor must be <= 1')
if (scale_factor != 1):
if (input.dim() not in INTERPOLATE_MODES):
invalidInputError(False, f'Input must be 3D, 4D, or 5D if scale_factor != 1, got {input.dim()}')
interp_mode = INTERPOLATE_MODES[input.dim()]
with torch.no_grad():
N = input.shape[0]
if (scale_factor < 1):
X_scaled = F.interpolate(input, scale_factor=scale_factor, mode=interp_mode, align_corners=False, recompute_scale_factor=False)
else:
X_scaled = input
if (loss_fn is None):
invalidInputError(False, 'loss_fn must be passed explicitly to the class.')
else:
losses = loss_fn(trainer.model(X_scaled), target)
if (not (len(losses) == len(target))):
invalidInputError(False, 'Losses have wrong dimension, maybe they are reduced. Please offer unreduced losses which have the same dimension with batch_size. It can be passed by ``loss_fn=`` when you initialize the class.')
sorted_idx = torch.argsort(torch.Tensor(losses))
n_select = int((keep * N))
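# Sampling probability grows with loss rank, so higher-loss examples are kept more often; keep=1 reduces to keeping the whole batch.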
percs = (np.arange(0.5, N, 1) / N)
probs = (percs ** ((1.0 / keep) - 1.0))
probs = (probs / np.sum(probs))
select_percs_idx = np.random.choice(N, n_select, replace=False, p=probs)
select_idx = sorted_idx[list(select_percs_idx)]
return (input[select_idx], target[select_idx]) |
def adjust_beat_tracked_data(downbeat_fp, midi_obj):
downbeat = get_downbeats(downbeat_fp)
print(downbeat_fp)
tps = (735 * 60)
changed_spb = []
prev = downbeat[0]
bpms = []
for db in downbeat[1:]:
bpm = int(((1 / (db - prev)) * 60))
changed_spb.append((db - prev))
tick = int((db * tps))
midi_obj.tempo_changes.append(TempoChange(bpm, tick))
prev = db
bpms.append(bpm)
if (len(bpms) != 0):
bpm = max(bpms, key=bpms.count)
else:
bpm = 150
tick = int((downbeat[0] * tps))
midi_obj.tempo_changes.append(TempoChange(bpm, tick))
midi_obj.tempo_changes.sort(key=(lambda x: (x.time, x.tempo)))
if (len(changed_spb) != 0):
new_tpb = int((tps * np.median(np.array(changed_spb))))
if (new_tpb > 32767):
new_tpb /= 2
new_tpb = int(round(new_tpb, (- 1)))
if (new_tpb < (- 32768)):
print(downbeat_fp)
midi_obj.ticks_per_beat = new_tpb
print(new_tpb)
return midi_obj |
class GATConv(nn.Module):
def __init__(self, in_channels: int, out_channels: int, bias: bool=True, use_bn: bool=False, drop_rate: float=0.5, atten_neg_slope: float=0.2, is_last: bool=False):
super().__init__()
self.is_last = is_last
self.bn = (nn.BatchNorm1d(out_channels) if use_bn else None)
self.atten_dropout = nn.Dropout(drop_rate)
self.atten_act = nn.LeakyReLU(atten_neg_slope)
self.act = nn.ELU(inplace=True)
self.theta = nn.Linear(in_channels, out_channels, bias=bias)
self.atten_src = nn.Linear(out_channels, 1, bias=False)
self.atten_dst = nn.Linear(out_channels, 1, bias=False)
def forward(self, X: torch.Tensor, g: Graph) -> torch.Tensor:
X = self.theta(X)
x_for_src = self.atten_src(X)
x_for_dst = self.atten_dst(X)
e_atten_score = (x_for_src[g.e_src] + x_for_dst[g.e_dst])
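# Edge attention logits are the sum of the projected source- and destination-node scores, following the GAT formulation.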
e_atten_score = self.atten_dropout(self.atten_act(e_atten_score).squeeze())
e_atten_score = torch.clamp(e_atten_score, min=0.001, max=5)
X = g.v2v(X, aggr='softmax_then_sum', e_weight=e_atten_score)
if (not self.is_last):
X = self.act(X)
if (self.bn is not None):
X = self.bn(X)
return X |
def test_img_self_att():
fake_feature = Variable(torch.randn(16, ((32 * 7) * 7)))
fake_feature = fake_feature.view(16, (- 1), 7, 7)
img_self_attention = ImageSelfAttention(32)
out = img_self_attention(fake_feature)
print(out.size()) |
def normalized_columns_initializer(weights, std=1.0):
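# Draw Gaussian weights and rescale each row to have L2 norm std, as in the normalized-columns initializer common in A3C-style heads.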
out = torch.randn(weights.size())
out *= (std / torch.sqrt(out.pow(2).sum(1).expand_as(out)))
return out |
class TFAutoModelForMaskedLM():
def __init__(self, *args, **kwargs):
requires_tf(self)
def from_pretrained(self, *args, **kwargs):
requires_tf(self) |
class EfficientNetV2S(extractor.BaseModule):
def __init__(self, config, name):
super(EfficientNetV2S, self).__init__()
self.name = name
drop_rate = config['dropout']
self.features = timm.create_model('efficientnetv2_s', drop_rate=drop_rate)
self.n_features = 0
def forward(self, x):
return self.features(x) |
@BACKBONE_REGISTRY.register()
def build_fcos_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
if cfg.MODEL.BACKBONE.ANTI_ALIAS:
bottom_up = build_resnet_lpf_backbone(cfg, input_shape)
elif (cfg.MODEL.RESNETS.DEFORM_INTERVAL > 1):
bottom_up = build_resnet_interval_backbone(cfg, input_shape)
elif cfg.MODEL.MOBILENET:
bottom_up = build_mnv2_backbone(cfg, input_shape)
else:
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
top_levels = cfg.MODEL.FCOS.TOP_LEVELS
in_channels_top = out_channels
if (top_levels == 2):
top_block = LastLevelP6P7(in_channels_top, out_channels, 'p5')
if (top_levels == 1):
top_block = LastLevelP6(in_channels_top, out_channels, 'p5')
elif (top_levels == 0):
top_block = None
backbone = FPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=top_block, fuse_type=cfg.MODEL.FPN.FUSE_TYPE)
return backbone |
class Loader(object):
def __call__(self, model, pattern_config=None):
framework = get_model_fwk_name(model)
if (framework == 'tensorflow'):
if isinstance(model, str):
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with open(model, 'rb') as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
config = tf.compat.v1.ConfigProto()
model = tf.compat.v1.Session(graph=graph, config=config)
# Extract the node attr from onnxruntime.
if (framework == 'onnxruntime'):
if isinstance(model, str):
model = onnx.load(model)
try:
from ..onnx_utils import ONNX_OPTIMIZER_PASS
optimize_level = int(os.getenv('ONNX_OPTIMIZER_LEVEL', 1))
passes = [v for (k, v) in ONNX_OPTIMIZER_PASS.items() if (k <= optimize_level)]
model = onnxoptimizer.optimize(model, passes, fixed_point=False)
onnx.save(model, 'optmodel.onnx')
logger.info('Try to optimize onnx model use onnxoptimizer and optimize passes are {}'.format(passes))
except BaseException:
pass
if (framework == 'torch'):
if isinstance(model, str):
model = torch.jit.load(model)
model = torch.jit.freeze(model.eval())
else:
import io
model = torch.jit.load(io.BytesIO(model.save_to_buffer()))
model = torch.jit.freeze(model.eval())
return (model, framework) |
class Trainer(object):
def __init__(self, config, train_data_loader, test_data_loader):
self.config = config
self.train_data_loader = train_data_loader
self.test_data_loader = test_data_loader
self.start_step = 0
self.tensorboard = None
self._build_model()
if (config.num_gpu > 0):
self.NoiseGenerator = DataParallelWithCallback(self.NoiseGenerator.cuda(), device_ids=range(config.num_gpu))
self.Classifier = DataParallelWithCallback(self.Classifier.cuda(), device_ids=range(config.num_gpu))
if config.load_path:
self._load_model()
self.FGSM = get_fgsm(self.config.dataset)
self.PGD = get_pgd(self.config.dataset)
self.CW = get_cw(self.config.dataset)
def _build_model(self):
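# Generator input channels: image channels, plus accumulated-noise channels when g_method == 3, plus gradient channels when g_use_grad is set.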
noise_channel_size = ((3 if self.config.is_rgb else 1) * ((1 + (1 if (self.config.g_method == 3) else 0)) + (1 if self.config.g_use_grad else 0)))
self.NoiseGenerator = NoiseGenerator(self.config.g_base_channel_dim, noise_channel_size, self.config.g_z_dim, self.config.g_deeper_layer, self.config.num_classes, (3 if self.config.is_rgb else 1))
self.Classifier = Classifier(num_classes=self.config.num_classes, classifier_name=self.config.f_classifier_name, dataset=self.config.dataset, pretrained=self.config.f_pretrain, pretrained_dir=self.config.pretrained_dir)
self.NoiseGenerator.apply(weights_init_normal)
if (not self.config.f_pretrain):
self.Classifier.apply(weights_init_normal)
def _load_model(self):
print('[*] Load models from {}...'.format(self.config.load_path))
paths = glob(os.path.join(self.config.load_path, 'Classifier_*.pth'))
paths.sort()
if (len(paths) == 0):
path = os.path.join(self.config.load_path, 'Classifier.pth')
if (not os.path.exists(path)):
print('[!] No checkpoint found in {}...'.format(self.config.load_path))
return
self.start_step = 0
else:
idxes = [int(os.path.basename(path.split('.')[(- 2)].split('_')[(- 1)])) for path in paths]
self.start_step = max(idxes)
if (self.config.num_gpu == 0):
map_location = (lambda storage, loc: storage)
else:
map_location = None
if (self.config.f_update_style != (- 1)):
bad_classifier_state = torch.load('{}/Classifier_{}.pth'.format(self.config.load_path, self.start_step), map_location=map_location)
starts_with_module = False
for key in bad_classifier_state.keys():
if key.startswith('module.'):
starts_with_module = True
break
if (starts_with_module and (self.config.num_gpu < 1)):
correct_classifier_state = {k[7:]: v for (k, v) in bad_classifier_state.items()}
else:
correct_classifier_state = bad_classifier_state
self.Classifier.load_state_dict(correct_classifier_state)
if (self.config.f_update_style != (- 1)):
bad_generator_state = torch.load('{}/NoiseGen_{}.pth'.format(self.config.load_path, self.start_step), map_location=map_location)
else:
bad_generator_state = torch.load('{}/Generator.pth'.format(self.config.load_path), map_location=map_location)
starts_with_module = False
for key in bad_generator_state.keys():
if key.startswith('module.'):
starts_with_module = True
break
if (starts_with_module and (self.config.num_gpu < 1)):
correct_generator_state = {k[7:]: v for (k, v) in bad_generator_state.items()}
else:
correct_generator_state = bad_generator_state
self.NoiseGenerator.load_state_dict(correct_generator_state)
def _save_model(self, step):
print('[*] Save models to {}...'.format(self.config.model_dir))
torch.save(self.Classifier.state_dict(), '{}/Classifier_{}.pth'.format(self.config.model_dir, step))
torch.save(self.NoiseGenerator.state_dict(), '{}/NoiseGen_{}.pth'.format(self.config.model_dir, step))
def _merge_noise(self, sum_noise, cur_noise, eps_step, eps_all):
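# Scale the current noise by the per-step epsilon, add it to the running sum, and clamp the total to the overall epsilon ball.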
cur_noise = (cur_noise * eps_step)
return torch.clamp((sum_noise + cur_noise), ((- 1.0) * eps_all), (1.0 * eps_all))
def _cross_entropy_loss(self, noise_class_output, label, pure_batch, adv_mult=1.0):
log_prob = F.log_softmax(noise_class_output, dim=1)
weight = torch.ones_like(label).float()
weight[pure_batch:] *= adv_mult
output = F.nll_loss(log_prob, label, reduction='none')
return torch.mean((weight * output))
def _compute_acc(self, logits, labels):
(_max_val, max_idx) = torch.max(logits, 1)
return torch.mean(torch.eq(max_idx, labels).double())
def _dsgan_loss(self, noise, output, single_batch, stability=1e-08):
if (noise is None):
return None
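# Diversity-sensitive term: mean ratio of output differences to latent-noise differences across the two halves of the doubled batch.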
numerator = torch.mean(torch.abs((output[:single_batch] - output[single_batch:])), dim=[_ for _ in range(1, len(output.shape))])
denominator = torch.mean(torch.abs((noise[:single_batch] - noise[single_batch:])), dim=[_ for _ in range(1, len(noise.shape))])
our_term = torch.mean((numerator / (denominator + stability)))
return our_term
def train(self):
if (self.config.g_optimizer == 'adam'):
g_optimizer = torch.optim.Adam(self.NoiseGenerator.parameters(), lr=self.config.g_lr, betas=(self.config.g_beta1, self.config.g_beta2), weight_decay=self.config.weight_decay)
elif (self.config.g_optimizer == 'sgd'):
g_optimizer = torch.optim.SGD(self.NoiseGenerator.parameters(), lr=self.config.g_lr, momentum=self.config.g_momentum, weight_decay=self.config.weight_decay)
else:
raise Exception("[!] Optimizer for the generator should be ['adam', 'sgd']")
if (self.config.f_update_style == 2):
if (self.start_step != 0):
for group in g_optimizer.param_groups:
group.setdefault('initial_lr', self.config.g_lr)
g_scheduler = torch.optim.lr_scheduler.StepLR(g_optimizer, step_size=(self.config.max_step // 2), gamma=self.config.lr_gamma, last_epoch=((- 1) if (self.start_step == 0) else self.start_step))
else:
g_scheduler = None
if (self.config.f_optimizer == 'adam'):
f_optimizer = torch.optim.Adam(self.Classifier.parameters(), lr=self.config.f_lr, betas=(self.config.f_beta1, self.config.f_beta2), weight_decay=self.config.weight_decay)
elif (self.config.f_optimizer == 'sgd'):
f_optimizer = torch.optim.SGD(self.Classifier.parameters(), lr=self.config.f_lr, momentum=self.config.f_momentum, weight_decay=self.config.weight_decay)
else:
raise Exception("[!] Optimizer for the generator should be ['adam', 'sgd']")
f_scheduler = torch.optim.lr_scheduler.StepLR(f_optimizer, step_size=(self.config.max_step // 2), gamma=self.config.lr_gamma, last_epoch=((- 1) if (self.start_step == 0) else self.start_step))
if (self.start_step != 0):
for group in f_optimizer.param_groups:
group.setdefault('initial_lr', self.config.f_lr)
loader = iter(self.train_data_loader)
self.tensorboard = SummaryWriter(self.config.model_dir)
self.tensorboard.add_text(tag='argument', text_string=str(self.config.__dict__))
for step in trange(self.start_step, self.config.max_step, ncols=80):
try:
data = next(loader)
except StopIteration:
loader = iter(self.train_data_loader)
data = next(loader)
real_img = self._get_variable(data[0].type(torch.FloatTensor))
if ((not self.config.is_rgb) and (len(real_img.shape) == 3)):
real_img = torch.unsqueeze(real_img, 1)
label = self._get_variable(data[1].type(torch.LongTensor))
single_batch_size = label.size(0)
if (f_scheduler is not None):
f_scheduler.step()
if (g_scheduler is not None):
g_scheduler.step()
if ((step < 1000) and (self.config.f_classifier_name == 'lenet')):
self.Classifier.train()
self.Classifier.zero_grad()
class_output = self.Classifier(real_img)
cls_loss = self._cross_entropy_loss(class_output, label, single_batch_size)
cls_loss.backward()
f_optimizer.step()
continue
self.Classifier.eval()
self.Classifier.zero_grad()
grad_input = real_img.detach()
grad_input.requires_grad = True
class_output = self.Classifier.forward(grad_input)
cls_loss = self._cross_entropy_loss(class_output, label, single_batch_size)
grad_loss = cls_loss
if self.config.g_use_grad:
grad_loss.backward()
f_grad = grad_input.grad
if self.config.g_normalize_grad:
f_grad_norm = (f_grad + 1e-15)
f_grad = (f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True))
f_grad = f_grad.detach()
double_real_img = torch.cat((real_img, real_img), 0).detach()
double_label = torch.cat((label, label), 0).detach()
if ((self.config.g_method % 2) == 1):
double_adv_sum = torch.zeros_like(double_real_img)
else:
double_adv_sum = None
if self.config.g_use_grad:
double_adv_grad = torch.cat((f_grad, f_grad), 0)
else:
double_adv_grad = None
if (self.config.g_z_dim > 0):
if (self.config.num_gpu > 0):
g_z = torch.cuda.FloatTensor((single_batch_size * 2), self.config.g_z_dim).normal_()
else:
g_z = torch.FloatTensor((single_batch_size * 2), self.config.g_z_dim).normal_()
else:
g_z = None
self.NoiseGenerator.train()
self.Classifier.eval()
self.NoiseGenerator.zero_grad()
proxy_loss_sum = 0.0
dsgan_loss_sum = 0.0
update_list = []
if (self.config.g_mini_update_style not in [0, 1, 2, 3]):
raise Exception('[!] g_mini_update_style should be in [0,1,2,3]')
for g_iter_step_no in range(self.config.train_g_iter):
if (not self.config.use_cross_entropy_for_g):
print('[!] We cannot train our generator without cross_entropy')
break
img_grad_noise = double_real_img
if self.config.g_use_grad:
img_grad_noise = torch.cat((double_real_img, double_adv_grad), 1)
if (self.config.g_method == 3):
img_grad_noise = torch.cat((img_grad_noise, double_adv_sum), 1)
noise_output_for_g = self.NoiseGenerator(img_grad_noise, double_label, g_z)
if ((self.config.g_method % 2) == 1):
clamp_noise = self._merge_noise(double_adv_sum, noise_output_for_g, (self.config.epsilon * self.config.g_ministep_size), self.config.epsilon)
else:
clamp_noise = (self.config.epsilon * noise_output_for_g)
adv_img_for_g = torch.clamp((double_real_img.detach() + clamp_noise), 0.0, 1.0)
copy_for_grad = adv_img_for_g.detach()
copy_for_grad.requires_grad = True
if (((self.config.g_mini_update_style % 2) == 0) or ((g_iter_step_no + 1) == self.config.train_g_iter)):
self.Classifier.zero_grad()
noise_class_output_for_g = self.Classifier.forward(adv_img_for_g)
proxy_loss = 0.0
if self.config.use_cross_entropy_for_g:
ce_loss = self._cross_entropy_loss(noise_class_output_for_g, double_label, single_batch_size)
proxy_loss -= ce_loss
proxy_loss_sum += proxy_loss
if ((self.config.g_z_dim > 0) and ((self.config.g_mini_update_style >= 2) or ((g_iter_step_no + 1) == self.config.train_g_iter))):
dsgan_magnitude = self._dsgan_loss(g_z, adv_img_for_g, single_batch_size)
if (self.config.dsgan_lambda > 0.0):
dsgan_loss = (((- 1.0) * self.config.dsgan_lambda) * dsgan_magnitude)
else:
dsgan_loss = 0.0
dsgan_loss_sum += dsgan_loss
if ((g_iter_step_no + 1) != self.config.train_g_iter):
if self.config.g_use_grad:
self.Classifier.zero_grad()
grad_output_for_g = self.Classifier.forward(copy_for_grad)
grad_ce_loss = self._cross_entropy_loss(grad_output_for_g, double_label, single_batch_size)
grad_loss = grad_ce_loss
grad_loss.backward()
f_grad = copy_for_grad.grad
if self.config.g_normalize_grad:
f_grad_norm = (f_grad + 1e-15)
f_grad = (f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True))
double_adv_grad = f_grad.detach()
if (double_adv_sum is not None):
double_adv_sum = clamp_noise
update_list.append(adv_img_for_g.detach())
g_loss_sum = (proxy_loss_sum + dsgan_loss_sum)
g_loss_sum.backward()
nn.utils.clip_grad_norm_(self.NoiseGenerator.parameters(), 1.0)
g_optimizer.step()
if (self.config.f_update_style == 1):
f_label_list = [torch.cat((label, label, label), 0)]
f_update_list = [torch.cat((real_img, update_list[(- 1)]), 0)]
elif (self.config.f_update_style == 2):
f_label_list = [double_label, label]
f_update_list = [update_list[(- 1)], real_img]
elif (self.config.f_update_style == (- 1)):
if ((step % self.config.save_step) == (self.config.save_step - 1)):
self._save_model(step)
self.defence_regular_eval(iter_step=step)
continue
else:
raise Exception('[!] f_update_style should be [1: single, 2: twice]')
self.Classifier.train()
noise_class_output_for_debugging = None
noise_class_loss_for_debugging = None
real_pred_sum = 0.0
fake_pred_sum = 0.0
for (image_for_f, label_for_f) in zip(f_update_list, f_label_list):
self.Classifier.zero_grad()
noise_class_output = self.Classifier(image_for_f)
if (noise_class_output_for_debugging is None):
noise_class_output_for_debugging = noise_class_output
cls_loss = self._cross_entropy_loss(noise_class_output, label_for_f, single_batch_size)
if (noise_class_loss_for_debugging is None):
noise_class_loss_for_debugging = cls_loss
f_loss = cls_loss
f_loss.backward()
nn.utils.clip_grad_norm_(self.Classifier.parameters(), 1.0)
f_optimizer.step()
f_acc = self._compute_acc(class_output[(- single_batch_size):], label).data
self.tensorboard.add_scalar('train/f_loss', f_loss.data, step)
self.tensorboard.add_scalar('train/f_acc', f_acc, step)
if (self.config.dsgan_lambda > 0.0):
self.tensorboard.add_scalar('train/lambda', self.config.dsgan_lambda, step)
fg_acc = self._compute_acc(noise_class_output_for_debugging[((- single_batch_size) * 2):], double_label).data
fg_loss = self._cross_entropy_loss(noise_class_output_for_debugging[((- single_batch_size) * 2):], double_label, single_batch_size)
self.tensorboard.add_scalar('train/fg_cls_loss', fg_loss.data, step)
self.tensorboard.add_scalar('train/fg_acc', fg_acc, step)
if ((step % self.config.log_step) == 0):
print('')
print('[{}/{}] Acc_F: {:.4f} F_loss: {:.4f} Acc_FG: {:.4f} cls_loss: {:.4f}'.format(step, self.config.max_step, f_acc, f_loss.data, fg_acc, fg_loss.data))
if (self.config.train_g_iter > 0):
self.tensorboard.add_scalar('train/proxy_loss_sum', proxy_loss_sum.data, step)
self.tensorboard.add_scalar('train/proxy_loss_last', proxy_loss.data, step)
if ((self.config.g_z_dim > 0) and (dsgan_magnitude is not None)):
if (self.config.dsgan_lambda > 0.0):
self.tensorboard.add_scalar('train/dsgan_loss_sum', dsgan_loss_sum.data, step)
self.tensorboard.add_scalar('train/dsgan_loss_last', dsgan_magnitude.data, step)
if ((step % self.config.log_step) == 0):
print('[{}/{}] our_loss: {:.4f} proxy_loss: {:.4f}'.format(step, self.config.max_step, dsgan_magnitude.data, proxy_loss.data))
self.tensorboard.add_scalar('train/g_loss_sum', g_loss_sum.data, step)
if ((step % self.config.save_step) == (self.config.save_step - 1)):
if self.config.g_use_grad:
slice1 = update_list[(- 1)][:single_batch_size]
slice2 = update_list[(- 1)][single_batch_size:]
grad_abs = torch.abs(double_adv_grad)
grad_min = torch.min(grad_abs)
grad_rescale = (grad_abs - grad_min)
grad_max = torch.max(grad_rescale)
grad_rescale /= grad_max
grad_slice1 = grad_rescale[:single_batch_size]
grad_slice2 = grad_rescale[single_batch_size:]
self.tensorboard.add_image('train/pair1', tvutils.make_grid(torch.cat((real_img[:15], slice1[:15], slice2[:15], grad_slice1[:15], grad_slice2[:15]), 0), nrow=15), step)
self.tensorboard.add_image('train/pair2', tvutils.make_grid(torch.cat((real_img[15:30], slice1[15:30], slice2[15:30], grad_slice1[15:30], grad_slice2[15:30]), 0), nrow=15), step)
self._save_model(step)
self.defence_regular_eval(iter_step=step)
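# Evaluates the (frozen) classifier on a precomputed tensor of attacked images, reporting mean accuracy over single_batch_size chunks.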
def _test_classifier(self, image_tensor, label_tensor, iter_step=0, method_name='PGD'):
total_acc_f = []
num_items = len(label_tensor)
self.Classifier.eval()
for index in range(0, num_items, self.config.single_batch_size):
adv_img = image_tensor[index:min((index + self.config.single_batch_size), num_items)]
label = label_tensor[index:min((index + self.config.single_batch_size), num_items)]
logits = self.Classifier.forward(adv_img)
acc_f = self._compute_acc(logits, label)
total_acc_f.append(acc_f.data)
performance = (sum(total_acc_f) / len(total_acc_f))
print('[{} / {}] Acc: {:.4f}'.format(method_name, iter_step, performance))
if (self.tensorboard is not None):
self.tensorboard.add_scalar('test/{}_acc'.format(method_name), performance, iter_step)
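# Runs the trained noise generator over the test set, measures clean vs. adversarial accuracy, and assembles a per-class visualisation grid that is written out as a PDF.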
def get_sample_pdf_of_checkpoint(self, default_z_iter=10):
loader = iter(self.test_data_loader)
test_dir = os.path.join(self.config.model_dir, 'test')
if (not os.path.exists(test_dir)):
os.makedirs(test_dir)
self.Classifier.eval()
self.NoiseGenerator.eval()
total_acc_f = []
total_acc_g = []
real_img_arr = []
real_label_arr = []
adv_img_arr = []
adv_att_arr = []
for step in trange(len(self.test_data_loader), ncols=80):
try:
data = next(loader)
except StopIteration:
print('[!] Test sample generation finished. Samples are in {}'.format(test_dir))
break
real_img = self._get_variable(data[0].type(torch.FloatTensor))
if ((not self.config.is_rgb) and (len(real_img.shape) == 3)):
real_img = torch.unsqueeze(real_img, 1)
label = self._get_variable(data[1].type(torch.LongTensor))
single_batch_size = label.size(0)
self.Classifier.zero_grad()
grad_input = real_img.detach()
grad_input.requires_grad = True
class_output = self.Classifier.forward(grad_input)
f_loss = self._cross_entropy_loss(class_output, label, single_batch_size)
f_loss.backward()
if self.config.g_use_grad:
f_grad = grad_input.grad
if self.config.g_normalize_grad:
f_grad_norm = (f_grad + 1e-15)
f_grad = (f_grad / f_grad_norm.norm(dim=(2, 3), keepdim=True))
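# When the generator is stochastic (g_z_dim > 0), draw default_z_iter different z vectors per image so the saved grid shows the diversity of the generated attacks.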
num_iter_z = (default_z_iter if (self.config.g_z_dim > 0) else 1)
adv_img_inner_arr = []
adv_att_inner_arr = []
for _ in range(num_iter_z):
adv_grad = f_grad.detach()
if ((self.config.g_method % 2) == 1):
adv_sum = torch.zeros_like(real_img)
else:
adv_sum = None
if (self.config.g_z_dim > 0):
if (self.config.num_gpu > 0):
g_z = torch.cuda.FloatTensor(single_batch_size, self.config.g_z_dim).normal_()
else:
g_z = torch.FloatTensor(single_batch_size, self.config.g_z_dim).normal_()
else:
g_z = None
self.NoiseGenerator.zero_grad()
for g_iter_step_no in range(self.config.train_g_iter):
img_grad_noise = real_img
if self.config.g_use_grad:
img_grad_noise = torch.cat((img_grad_noise, adv_grad), 1)
if (self.config.g_method == 3):
img_grad_noise = torch.cat((img_grad_noise, adv_sum), 1)
noise_output = self.NoiseGenerator.forward(img_grad_noise, label, g_z)
if ((self.config.g_method % 2) == 1):
clamp_noise = self._merge_noise(adv_sum, noise_output, (self.config.epsilon * self.config.g_ministep_size), self.config.epsilon)
else:
clamp_noise = (self.config.epsilon * noise_output)
adv_img_for_g = torch.clamp((real_img.detach() + clamp_noise), 0.0, 1.0)
copy_for_grad = adv_img_for_g.detach()
copy_for_grad.requires_grad = True
if ((g_iter_step_no + 1) != self.config.train_g_iter):
if self.config.g_use_grad:
self.Classifier.zero_grad()
grad_output_for_g = self.Classifier.forward(copy_for_grad)
grad_ce_loss = self._cross_entropy_loss(grad_output_for_g, label, single_batch_size)
grad_loss = grad_ce_loss
grad_loss.backward()
f_inner_grad = copy_for_grad.grad
if self.config.g_normalize_grad:
f_inner_grad_norm = (f_inner_grad + 1e-15)
f_inner_grad = (f_inner_grad / f_inner_grad_norm.norm(dim=(2, 3), keepdim=True))
adv_grad = f_inner_grad.detach()
if (adv_sum is not None):
adv_sum = clamp_noise
target_image = adv_img_for_g.detach()
target_attack = clamp_noise.detach()
adv_img_inner_arr.append(target_image.detach().data)
adv_att_inner_arr.append(target_attack.detach().data)
self.Classifier.zero_grad()
class_output = self.Classifier.forward(real_img)
noise_class_output = self.Classifier.forward(target_image)
acc_f = self._compute_acc(class_output, label)
acc_g = self._compute_acc(noise_class_output, label)
total_acc_f.append(acc_f.data)
total_acc_g.append(acc_g.data)
real_img_arr.append(real_img.unsqueeze(1).detach().data)
real_label_arr.append(label.data)
adv_img_arr.append(torch.transpose(torch.stack(adv_img_inner_arr), 0, 1))
adv_att_arr.append(torch.transpose(torch.stack(adv_att_inner_arr), 0, 1))
print('[{}] Acc_F: {:.4f}, Acc_FG: {:.4f}'.format(test_dir, (sum(total_acc_f) / len(total_acc_f)), (sum(total_acc_g) / len(total_acc_g))))
print('Converting the results into numpy format.')
real_img_arr = torch.cat(real_img_arr, 0)
orig_data_cpu = real_img_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()
real_label_arr = torch.cat(real_label_arr, 0)
orig_label_cpu = real_label_arr.to(dtype=torch.int16).cpu().numpy()
adv_img_arr = torch.cat(adv_img_arr, 0)
adv_img_cpu = adv_img_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()
adv_att_arr = torch.clamp(((1.0 + (torch.cat(adv_att_arr, 0) / self.config.epsilon)) / 2.0), 0.0, 1.0)
adv_att_cpu = adv_att_arr.mul(255).clamp(0, 255).byte().permute(0, 1, 3, 4, 2).cpu().numpy()
print('start generating a pdf file')
item_dict_for_pdf = {}
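# For each class keep the example whose attack patterns vary the most across z samples (largest mean pairwise squared distance), then tile its adversarial images and attack patterns into one row of the final figure.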
for (real, label, img, att) in zip(orig_data_cpu, orig_label_cpu, adv_img_cpu, adv_att_cpu):
current_std = np.reshape(att[4:], [6, (- 1)])
current_std = (np.expand_dims(current_std, 1) - np.expand_dims(current_std, 0))
current_std = np.mean((np.sum((current_std * current_std), axis=(- 1)) * (1 - np.eye(6))))
temp_arr = np.concatenate([img[4:], att[4:]], axis=0)
temp_arr = np.transpose(temp_arr, (1, 0, 2, 3))
shape = temp_arr.shape
if ((label not in item_dict_for_pdf) or (item_dict_for_pdf[label][0] < current_std)):
if (shape[3] == 1):
item_dict_for_pdf[label] = [current_std, np.reshape(temp_arr, (shape[0], (shape[1] * shape[2])))]
else:
item_dict_for_pdf[label] = [current_std, np.reshape(temp_arr, (shape[0], (shape[1] * shape[2]), shape[3]))]
sorted_list = [item_dict_for_pdf[_][1] for _ in range(self.config.num_classes)]
output = np.concatenate(sorted_list, axis=0)
print('start saving it in {} as vis_{}.pdf'.format(self.config.log_dir, self.config.model_name))
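# Note: scipy.misc.imsave has been removed from recent SciPy releases; on newer environments an equivalent writer (e.g. imageio or PIL) would be needed instead.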
import scipy.misc
scipy.misc.imsave(os.path.join(self.config.log_dir, 'vis_{}.pdf'.format(self.config.model_name)), output)
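# Generates adversarial test batches with a single attack (FGSM, PGD, CW, or the unmodified images) and reports the classifier's accuracy on them.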
def _run_single_attack(self, iter_step=0, method_name='PGD'):
test_dir = os.path.join(self.config.model_dir, 'test')
if (not os.path.exists(test_dir)):
os.makedirs(test_dir)
loader = iter(self.test_data_loader)
steps_required_per_epoch = len(loader)
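# Expensive attacks can be tagged with a '_slow' suffix, in which case only 5 batches are evaluated.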
if method_name.endswith('_slow'):
steps_required_per_epoch = 5
print(steps_required_per_epoch)
print('[Info] Start running {} for step {}'.format(method_name, iter_step))
output_list = []
target_list = []
self.Classifier.eval()
for step in range(steps_required_per_epoch):
try:
data = next(loader)
except StopIteration:
loader = iter(self.test_data_loader)
data = next(loader)
input_img = self._get_variable(data[0].type(torch.FloatTensor))
if ((not self.config.is_rgb) and (len(input_img.shape) == 3)):
input_img = torch.unsqueeze(input_img, 1)
target_label = self._get_variable(data[1].type(torch.LongTensor))
single_batch_size = target_label.size(0)
if (method_name == 'FGSM'):
adv_result = run_fgsm(self.FGSM, self.Classifier, input_img, target_label, self.config.epsilon)
elif (method_name == 'PGD'):
adv_result = run_pgd(self.PGD, self.Classifier, input_img, target_label, self.config.epsilon, self.config.test_iter_steps)
elif (method_name == 'CW'):
adv_result = run_cw(self.CW, self.Classifier, input_img, target_label)
elif (method_name == 'ORIGINAL'):
adv_result = input_img
output_list.append(adv_result)
target_list.append(target_label)
output_tensor = torch.cat(output_list, dim=0)
label_tensor = torch.cat(target_list, dim=0)
if self.config.test_save_adv:
np.save('{}/attack_{}_step{}_img.npy'.format(test_dir, method_name, iter_step), output_tensor.permute(0, 2, 3, 1).cpu().numpy())
np.save('{}/attack_{}_step{}_label.npy'.format(test_dir, method_name, iter_step), label_tensor.cpu().numpy())
self._test_classifier(output_tensor, label_tensor, iter_step, method_name)
def defence_regular_eval(self, iter_step=0):
self.Classifier.eval()
self._run_single_attack(iter_step, 'FGSM')
self._run_single_attack(iter_step, 'PGD')
self._run_single_attack(iter_step, 'ORIGINAL')
self.Classifier.train()
return
def defence_over_cnw(self, iter_step=0):
self.Classifier.eval()
self._run_single_attack(iter_step, 'CW')
self.Classifier.train()
return
def _get_variable(self, inputs):
if (self.config.num_gpu > 0):
out = Variable(inputs.cuda())
else:
out = Variable(inputs)
return out |
class WhereConditionNode(StmtNode):
def __init__(self, parse_info=None, raw_text=None):
super().__init__(IRNodeType.WhereCondition, parse_info=parse_info, raw_text=raw_text)
self.id = []
self.type = None
self.desc = None
def get_type_dict(self):
ret = {}
for name in self.id:
ret[name.get_main_id()] = self.type.la_type
return ret |