code stringlengths 101 5.91M |
|---|
def bmm_maybe_select(A, B, index):
    """Select rows of B (when A holds int64 ids) or batch-multiply A with B.

    When A is a 1-D int64 tensor, it is used together with ``index`` to pick
    single rows out of the 3-D weight tensor B via a flattened index_select.
    Otherwise A is dense and a batched matrix product with the slices of B
    selected by ``index`` is returned.
    """
    is_id_lookup = A.dtype == th.int64 and len(A.shape) == 1
    if is_id_lookup:
        # Flatten B to 2-D, then turn (matrix, row) pairs into flat row ids.
        flat_B = B.view(-1, B.shape[2])
        flat_rows = index * flat_B.shape[1] + A
        return flat_B.index_select(0, flat_rows)
    selected = B.index_select(0, index)
    return th.bmm(A.unsqueeze(1), selected).squeeze()
def test_create_affine_identity_file():
    """Write an identity affine transform file into a temp FaceWarper dir; return its path."""
    out_dir = os.path.join(tempfile.gettempdir(), 'FaceWarper')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_path = os.path.join(out_dir, 'facewarper_affine_identity.txt')
    write_affine_identity(out_path)
    return out_path
class Bert4WS(Bert4WSFunction, nn.Module):
    """BERT encoder + linear emission layer + CRF for Chinese word segmentation."""

    def __init__(self, vocabulary, embedding_size=768, hidden_dropout_prob=0.1, bert_model='hfl/chinese-roberta-wwm-ext', device=torch.device('cuda')):
        super().__init__()
        # The label inventory is derived directly from the tag vocabulary.
        self.vocabulary = vocabulary
        self.label_size = len(vocabulary)
        self.embedding_size = embedding_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.device = device
        # Pretrained encoder, followed by dropout and a per-token classifier.
        self.bert = BertModel.from_pretrained(bert_model)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.classifier = nn.Linear(embedding_size, self.label_size)
        # CRF layer constrained to transitions that are valid for the tag set.
        self.crf = ConditionalRandomField(self.label_size, include_start_end_trans=True, allowed_transitions=allowed_transitions(vocabulary, include_start_end=True))
class Backbone(nn.Module):
    """ResNet-style feature backbone over 6-channel (two-image) input.

    The stem convolution takes 6 input channels split into 2 groups, so each
    3-channel half of the input is filtered independently.
    """

    def __init__(self, block, layers, zero_init_residual=False):
        # block: residual block class exposing an `expansion` attribute.
        # layers: number of blocks in each of the four stages.
        super(Backbone, self).__init__()
        self.inplanes = 128
        self.conv1 = nn.Conv2d(6, 128, kernel_size=7, stride=2, padding=3, groups=2, bias=False)
        self.bn1 = nn.BatchNorm2d(128)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 128, layers[0])
        self.layer2 = self._make_layer(block, 256, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 1024, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): fc is sized for 512 * expansion inputs while layer4
        # produces 1024 * expansion channels — confirm whether fc is actually
        # used by the (not visible here) forward pass or replaced by callers.
        self.fc = nn.Linear((512 * block.expansion), 1000)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each bottleneck so the residual branch
            # starts as an identity mapping.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; only the first may downsample."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match spatial size / channel count.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
def set_z3_state(seed=None):
    """Configure Z3's global solver parameters for randomized search.

    NOTE(review): ``seed`` defaults to None yet is forwarded directly as the
    SMT and SAT random seeds — confirm callers always pass an int, since Z3
    expects integer seed values.
    """
    # Single varargs call: alternating parameter-name / value pairs.
    z3.set_param('smt.phase_selection', 5, 'smt.arith.random_initial_value', True, 'smt.random_seed', seed, 'sat.random_seed', seed, 'sat.phase', 'random', 'memory_max_size', (50 * 1024))
class MLP_Softmax(nn.Module):
    """A plain MLP whose output is normalized with a softmax over dim 2."""

    def __init__(self, input_size, embedding_size, output_size, dropout=0):
        super(MLP_Softmax, self).__init__()
        # Reuse MLP_Plain for the body; the softmax turns logits into
        # distributions along dimension 2 (so inputs are at least 3-D).
        stages = [MLP_Plain(input_size, embedding_size, output_size, dropout), nn.Softmax(dim=2)]
        self.mlp = nn.Sequential(*stages)

    def forward(self, input):
        """Apply the MLP followed by the softmax to *input*."""
        return self.mlp(input)
def inpainting_inference(model, masked_img, mask):
    """Run one inpainting forward pass on a masked image file.

    Args:
        model: inpainting model; the device of its first parameter decides
            where the batch is placed.
        masked_img: path to the masked input image.
        mask: path to the mask file.

    Returns:
        The model's 'fake_img' output.
    """
    device = next(model.parameters()).device
    # Test-time pipeline: load image + mask, reflect-pad, normalize to
    # roughly [-1, 1], compose the masked image, then collect into tensors.
    infer_pipeline = [dict(type='LoadImageFromFile', key='masked_img'), dict(type='LoadMask', mask_mode='file', mask_config=dict()), dict(type='Pad', keys=['masked_img', 'mask'], mode='reflect'), dict(type='Normalize', keys=['masked_img'], mean=([127.5] * 3), std=([127.5] * 3), to_rgb=False), dict(type='GetMaskedImage', img_name='masked_img'), dict(type='Collect', keys=['masked_img', 'mask'], meta_keys=['masked_img_path']), dict(type='ImageToTensor', keys=['masked_img', 'mask'])]
    test_pipeline = Compose(infer_pipeline)
    data = dict(masked_img_path=masked_img, mask_path=mask)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if ('cuda' in str(device)):
        # scatter moves the collated batch onto the GPU.
        data = scatter(data, [device])[0]
    else:
        # CPU path: drop the meta entry the model cannot consume directly.
        data.pop('meta')
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['fake_img']
class MatchingNotFoundError(Exception):
    """Raised when two id collections could not be fully matched.

    Attributes:
        missingIdsIn1: ids absent from the first collection.
        missingIdsIn2: ids absent from the second collection.
        namemissingIdsIn1: display name of the first collection.
        namemissingIdsIn2: display name of the second collection.
    """

    def __init__(self, missingIdsIn1: List[str], missingIdsIn2: List[str], namemissingIdsIn1: str, namemissingIdsIn2: str):
        self.missingIdsIn1 = missingIdsIn1
        self.missingIdsIn2 = missingIdsIn2
        self.namemissingIdsIn1 = namemissingIdsIn1
        self.namemissingIdsIn2 = namemissingIdsIn2
        super().__init__(f'Missing ids from matching {self.namemissingIdsIn1} and {self.namemissingIdsIn2}')

    def __str__(self):
        return self.getString()

    def getString(self):
        """Build the multi-line report listing missing ids on both sides."""
        # Bug fix: user-facing typo 'misssing' corrected to 'missing'.
        string = f'Exception: Missing ids from matching {self.namemissingIdsIn1} and {self.namemissingIdsIn2}\n'
        string += f'missing ids in {self.namemissingIdsIn1}: {self.missingIdsIn1}\n'
        string += f'missing ids in {self.namemissingIdsIn2}: {self.missingIdsIn2}\n'
        return string
class BottleneckBlock(ResNetBlockBase):
    """ResNeSt-style bottleneck: 1x1 reduce -> 3x3 (or split-attention) -> 1x1 expand.

    When ``radix > 1`` the middle convolution is a SplAtConv2d split-attention
    layer; ``avd`` / ``avg_down`` enable average-pool downsampling variants.
    """

    def __init__(self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm='BN', stride_in_1x1=False, dilation=1, avd=False, avg_down=False, radix=2, bottleneck_width=64):
        super().__init__(in_channels, out_channels, stride)
        # avd only takes effect when this block actually downsamples.
        self.avd = (avd and (stride > 1))
        self.avg_down = avg_down
        self.radix = radix
        cardinality = num_groups
        # Width of the middle conv, scaled by bottleneck_width and group count.
        group_width = (int((bottleneck_channels * (bottleneck_width / 64.0))) * cardinality)
        if (in_channels != out_channels):
            if self.avg_down:
                # ResNet-D style shortcut: average-pool, then 1x1 conv at stride 1.
                self.shortcut_avgpool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False)
                self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False, norm=get_norm(norm, out_channels))
            else:
                self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels))
        else:
            self.shortcut = None
        # Place the stride on either the 1x1 or the 3x3 conv, per the flag.
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, group_width, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, group_width))
        if (self.radix > 1):
            from .splat import SplAtConv2d
            self.conv2 = SplAtConv2d(group_width, group_width, kernel_size=3, stride=(1 if self.avd else stride_3x3), padding=dilation, dilation=dilation, groups=cardinality, bias=False, radix=self.radix, norm=norm)
        else:
            self.conv2 = Conv2d(group_width, group_width, kernel_size=3, stride=(1 if self.avd else stride_3x3), padding=(1 * dilation), bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, group_width))
        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
        self.conv3 = Conv2d(group_width, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels))
        if (self.radix > 1):
            # SplAtConv2d initializes its own weights; init only the plain convs.
            for layer in [self.conv1, self.conv3, self.shortcut]:
                if (layer is not None):
                    weight_init.c2_msra_fill(layer)
        else:
            for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
                if (layer is not None):
                    weight_init.c2_msra_fill(layer)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)
        # NOTE(review): both branches below are identical, and the relu_ that
        # follows is applied unconditionally — upstream ResNeSt applies that
        # relu only in the non-split-attention branch. Confirm this flattened
        # control flow matches the intended behavior.
        if (self.radix > 1):
            out = self.conv2(out)
        else:
            out = self.conv2(out)
        out = F.relu_(out)
        if self.avd:
            out = self.avd_layer(out)
        out = self.conv3(out)
        if (self.shortcut is not None):
            if self.avg_down:
                x = self.shortcut_avgpool(x)
            shortcut = self.shortcut(x)
        else:
            shortcut = x
        out += shortcut
        out = F.relu_(out)
        return out
class HfApiLoginTest(HfApiCommonTest):
    """Login endpoint tests against the shared ``self._api`` client fixture."""

    def test_login_invalid(self):
        # A wrong password must surface as an HTTPError from the API client.
        with self.assertRaises(HTTPError):
            self._api.login(username=USER, password='fake')

    def test_login_valid(self):
        # Valid credentials yield an auth token string.
        token = self._api.login(username=USER, password=PASS)
        self.assertIsInstance(token, str)
class CSRMatrix3d(CSXMatrix3d):
    """Batched CSR (compressed sparse row) matrix: a stack of 2-D CSR matrices.

    Accepts either a list of scipy sparse matrices or raw CSR component
    lists, in the layout the CSXMatrix3d base class expects.
    """

    def __init__(self, inp, shape=None, device=None):
        if ((type(inp) == list) and isinstance(inp[0], ssp.spmatrix)):
            # List of scipy matrices: infer the maximal (rows, cols) so that
            # every batch element fits, or validate a caller-supplied shape.
            max_shape = [0, 0]
            for s in inp:
                max_shape[0] = max(max_shape[0], s.shape[0])
                max_shape[1] = max(max_shape[1], s.shape[1])
            if (shape is None):
                shape = tuple(([len(inp)] + max_shape))
            else:
                assert (shape[0] == len(inp))
                assert (shape[1] <= max_shape[0])
                assert (shape[2] <= max_shape[1])
        elif (type(inp) == list):
            # Raw component lists: inp[0] holds column indices, inp[1] the row
            # pointer array; shape is required and sanity-checked against them.
            assert (shape is not None)
            batch = shape[0]
            row = ((len(inp[1]) - 1) // batch)
            col = _max(inp[0])
            assert (shape[1] == row)
            assert (shape[2] >= col)
        super(CSRMatrix3d, self).__init__(inp, shape, device)

    def sptype(self):
        """Sparse storage type tag for this class."""
        return 'csr'

    def transpose(self, keep_type=False):
        """Transpose each matrix in the batch.

        With keep_type=False this is cheap: the same data is reinterpreted as
        a CSC matrix with swapped row/col dims. With keep_type=True the data
        is materially transposed via scipy and returned as CSR again.
        """
        if (not keep_type):
            shape_t = list(self.shape)
            tmp = shape_t[1]
            shape_t[1] = shape_t[2]
            shape_t[2] = tmp
            return CSCMatrix3d(self.as_list(), shape=shape_t, device=self.device)
        else:
            coo = []
            for sp in self.as_ssp():
                coo.append(sp.transpose().tocoo().astype(sp.dtype))
            return CSRMatrix3d(coo, device=self.device)

    def dot(self, other, *args, **kwargs):
        """Batched sparse product (delegates to the module-level dot)."""
        return dot(self, other, *args, **kwargs)

    def dotdiag(self, other):
        """Multiply each matrix by a diagonal matrix given as per-batch vectors.

        Args:
            other: dense tensor of diagonals; shape (batch, cols) — the shape
                asserts require other.shape[1] == self.shape[2].

        Returns:
            A new CSRMatrix3d with the same shape (presumably scaled values;
            the actual kernel is sparse_dot.csr_dot_diag).
        """
        assert (self.shape[0] == other.shape[0]), 'Batch size mismatch'
        assert (self.shape[2] == other.shape[1]), 'Matrix shape mismatch'
        batch_size = self.shape[0]
        out_h = self.shape[1]
        out_w = self.shape[2]
        result = sparse_dot.csr_dot_diag(*self.as_list(), other, batch_size, out_h, out_w)
        ret = CSRMatrix3d(result, shape=self.shape)
        return ret
class Metadata():
    """Index of cached audio frames for one dataset partition.

    Computes, per stem, how many training windows can start within its audio
    so that flat frame indices can be mapped back to (stem, offset) pairs.
    """

    def __init__(self, name, partition):
        self.name = name
        self.stems = penn.load.partition(name)[partition]
        self.files = [((penn.CACHE_DIR / name) / f'{stem}-audio.npy') for stem in self.stems]
        # mmap avoids loading full audio arrays just to measure their length.
        self.frames = [penn.convert.samples_to_frames(len(np.load(file, mmap_mode='r'))) for file in self.files]
        # Every stem must be long enough to hold one full training window.
        assert all(((frame >= penn.NUM_TRAINING_FRAMES) for frame in self.frames))
        # Convert raw frame counts to counts of valid window *start* positions.
        self.frames = [(frame - (penn.NUM_TRAINING_FRAMES - 1)) for frame in self.frames]
        self.offsets = np.cumsum(self.frames)
        self.total = self.offsets[(- 1)]

    def voiced_indices(self):
        """Flat indices (into the concatenated window space) of voiced starts."""
        files = [((penn.CACHE_DIR / self.name) / f'{stem}-voiced.npy') for stem in self.stems]
        offset = 0
        indices = []
        for file in files:
            voiced = np.load(file)
            if (penn.NUM_TRAINING_FRAMES > 1):
                # Trim trailing positions where a full window no longer fits.
                voiced = voiced[:(- (penn.NUM_TRAINING_FRAMES - 1))]
            indices.extend(list((voiced.nonzero()[0] + offset)))
            offset += len(voiced)
        return indices
def test_hard_intersection() -> None:
    """TFIntersection should reduce to TFHardIntersection on these box pairs."""
    first_box = TFBoxTensor(tf.Variable([[[1, 1], [3, 5]], [[1, 1], [3, 3]]], dtype=tf.float32))
    second_box = TFBoxTensor(tf.Variable([[[2, 0], [6, 2]], [[3, 2], [4, 4]]], dtype=tf.float32))
    expected = TFHardIntersection()(first_box, second_box)
    actual = TFIntersection()(first_box, second_box)
    assert actual == expected
# NOTE(review): the line below looks like a mangled decorator — probably
# `@pytest.mark.timeout(10)` in the original source; as written it is a
# syntax error. Confirm against the original and restore the decorator.
.timeout(10)
def test_init_with_env_updates():
    """Multiprocessing sampler collects >= 160 transitions across task-updated envs."""
    max_path_length = 16
    env = GarageEnv(PointEnv())
    # Scripted policy replays a fixed action sequence of length max_path_length.
    policy = FixedPolicy(env.spec, scripted_actions=[env.action_space.sample() for _ in range(max_path_length)])
    tasks = SetTaskSampler((lambda : GarageEnv(PointEnv())))
    n_workers = 8
    workers = WorkerFactory(seed=100, max_path_length=max_path_length, n_workers=n_workers)
    # One freshly sampled env per worker.
    sampler = MultiprocessingSampler.from_worker_factory(workers, policy, envs=tasks.sample(n_workers))
    rollouts = sampler.obtain_samples(0, 160, policy)
    assert (sum(rollouts.lengths) >= 160)
    sampler.shutdown_worker()
    env.close()
class Word2Vec(BaseModule):
    """Skip-gram-style word/context embedding scorer.

    Two embedding tables (word and context) are maintained; scores are dot
    products between word vectors and (masked) context vectors.
    """

    def __init__(self, TEXT=None, embedding_dim=50, batch_size=10, n_gram=4, **kwargs):
        super(Word2Vec, self).__init__()
        self.batch_size = batch_size
        self.n_gram = n_gram
        # Vocabulary size comes from TEXT.itos (index-to-string list).
        self.vocab_size = len(TEXT.itos)
        self.embedding_dim = embedding_dim
        self.embeddings_word = nn.Embedding(self.vocab_size, self.embedding_dim)
        self.embeddings_context = nn.Embedding(self.vocab_size, self.embedding_dim)

    def get_masked_embeddings(self, embeddings, idx, mask):
        """Look up `idx` in `embeddings` and zero entries where mask is 0."""
        return (embeddings(idx) * mask.unsqueeze((- 1)))

    def word_similarity(self, w1, w2):
        """Dot-product similarity between two word ids (no gradients)."""
        with torch.no_grad():
            word1 = self.embeddings_word(w1)
            word2 = self.embeddings_word(w2)
            return torch.sum((word1 * word2))

    def conditional_similarity(self, w1, w2):
        # Same metric as word_similarity; kept as a separate API entry point.
        return self.word_similarity(w1, w2)

    def forward(self, idx_word, idx_context, context_mask, train=True):
        """Score each word against its masked context embeddings.

        NOTE(review): context_mask is unsqueezed here AND again inside
        get_masked_embeddings, adding two trailing singleton dims — confirm
        the double expansion is intended.
        """
        context = self.get_masked_embeddings(self.embeddings_context, idx_context, context_mask.unsqueeze((- 1)))
        word = self.embeddings_word(idx_word)
        score = torch.sum((word.unsqueeze(1).unsqueeze(1) * context), dim=(- 1))
        return score
def background_command_waiter(command, popen_object, require_zero_status):
    """Wait for *popen_object* to finish and report a nonzero exit status.

    Args:
        command: the command string (used only for the log message).
        popen_object: a started subprocess.Popen instance to wait on.
        require_zero_status: if True, a nonzero exit logs an error and
            interrupts the main thread; otherwise it only logs a warning.
    """
    popen_object.communicate()
    # Bug fix: `is not 0` compared identity, not value (and is a SyntaxWarning
    # on modern Python). Also stop shadowing the builtin name `str`.
    if popen_object.returncode != 0:
        message = 'Command exited with status {0}: {1}'.format(popen_object.returncode, command)
        if require_zero_status:
            logger.error(message)
            # This runs in a helper thread; propagate failure to the main thread.
            thread_module.interrupt_main()
        else:
            logger.warning(message)
def random_drop_coordinate(grasp_pose, drop_pose, z=0.3):
    """Build a drop frame with randomized x/y position and fixed height *z*.

    Keeps the orientation of *drop_pose* while sampling x and y via
    random_drop_axis_coordinate relative to the grasp pose.
    """
    rand_x = random_drop_axis_coordinate(grasp_pose, axis_idx=0, axis_corner=0.43, axis_range=0.3)
    rand_y = random_drop_axis_coordinate(grasp_pose, axis_idx=1, axis_corner=(- 0.0), axis_range=(- 0.22))
    return kdl.Frame(drop_pose.M, kdl.Vector(rand_x, rand_y, z))
class ScribblerDilate128(nn.Module):
    """Encoder/decoder generator with a dilated stem and parallel residual branches.

    The main branch downsamples twice, applies a residual stack, and
    upsamples back; it is concatenated with a shallow residual side branch
    before a final upsampling + tanh produces the 3-channel output.
    """

    def __init__(self, input_nc, output_nc, ngf):
        # input_nc/output_nc: input/output channel counts; ngf: base width.
        super(ScribblerDilate128, self).__init__()
        self.conv = nn.Conv2d
        self.batch_norm = nn.BatchNorm2d
        self.ngf = ngf
        self.res_block = ResidualBlock
        self.dilate_block = DilationBlock
        self.biup = UpsamplingBlock
        self.concat = ConcatTable
        self.model = self.create_model(input_nc, output_nc)

    def create_test_model(self, input_nc, output_nc):
        """Minimal single-residual-block model (debug/testing helper)."""
        model = nn.Sequential()
        ngf = self.ngf
        model.add_module('res_block_1', self.res_block(output_nc))
        return model

    def create_model(self, input_nc, output_nc):
        """Assemble the full generator graph.

        NOTE(review): output_nc is unused — the final upsampling hard-codes
        3 output channels. Confirm whether output_nc was meant to be used.
        """
        model = nn.Sequential()
        ngf = self.ngf
        # Dilated stem: input_nc -> ngf.
        model.add_module('conv_1', self.dilate_block(input_nc, ngf))
        model.add_module('batch_1', self.batch_norm(ngf))
        model.add_module('norm_1', nn.ReLU(True))
        # Main branch: downsample to 8*ngf, residual stack, upsample back to ngf.
        block1 = nn.Sequential()
        block1.add_module('res_block_1', self.res_block(ngf))
        block1.add_module('conv_2', self.conv(ngf, (ngf * 2), 3, 2, 1))
        block1.add_module('batch_2', self.batch_norm((ngf * 2)))
        block1.add_module('norm_2', nn.ReLU(True))
        block1.add_module('res_block_2', self.res_block((ngf * 2)))
        block1.add_module('conv_3', self.conv((ngf * 2), (ngf * 4), 3, 2, 1))
        block1.add_module('batch_3', self.batch_norm((ngf * 4)))
        block1.add_module('norm_3', nn.ReLU(True))
        block1.add_module('res_block_3', self.res_block((ngf * 4)))
        block1.add_module('conv_4', self.conv((ngf * 4), (ngf * 8), 3, 1, 1))
        block1.add_module('batch_4', self.batch_norm((ngf * 8)))
        block1.add_module('norm_4', nn.ReLU(True))
        block1.add_module('res_block_4', self.res_block((ngf * 8)))
        block1.add_module('res_block_5', self.res_block((ngf * 8)))
        block1.add_module('res_block_6', self.res_block((ngf * 8)))
        block1.add_module('res_block_7', self.res_block((ngf * 8)))
        block1.add_module('res_block_8', self.res_block((ngf * 8)))
        block1.add_module('upsampl_1', self.biup((ngf * 8), (ngf * 4), 3, 1, 1, dil=1))
        block1.add_module('batch_5', self.batch_norm((ngf * 4)))
        block1.add_module('norm_5', nn.ReLU(True))
        block1.add_module('res_block_9', self.res_block((ngf * 4)))
        block1.add_module('upsampl_2', self.biup((ngf * 4), (ngf * 2), 3, 1, 1, dil=1))
        block1.add_module('batch_6', self.batch_norm((ngf * 2)))
        block1.add_module('norm_6', nn.ReLU(True))
        block1.add_module('res_block_11', self.res_block((ngf * 2)))
        block1.add_module('conv_7', self.conv((ngf * 2), ngf, 3, 1, 1))
        block1.add_module('batch_7', self.batch_norm(ngf))
        block1.add_module('norm_7', nn.ReLU(True))
        # Side branch: three residual blocks at ngf width.
        block2 = nn.Sequential()
        block2.add_module('res_block_13', self.res_block(ngf))
        block2.add_module('res_block_14', self.res_block(ngf))
        block2.add_module('res_block_15', self.res_block(ngf))
        # Concatenate both branches (2*ngf channels) and project to 3 channels.
        mlp = self.concat(block1, block2)
        model.add_module('concat', mlp)
        model.add_module('upsampl_4', self.biup((2 * ngf), 3, 3, 1, 1, dil=3))
        model.add_module('tanh', nn.Tanh())
        return model

    def forward(self, input):
        return self.model(input)
class WandBProgressBarWrapper(BaseProgressBar):
    """Progress-bar decorator that mirrors logged stats to Weights & Biases.

    All calls are forwarded to ``wrapped_bar``; when the wandb package is
    available, stats and config are additionally sent to wandb.

    NOTE(review): ``wandb_project`` and ``run_name`` are accepted but never
    used — a ``wandb.init(project=..., name=...)`` call appears to be
    missing. Confirm against the original implementation.
    """

    def __init__(self, wrapped_bar, wandb_project, run_name=None):
        self.wrapped_bar = wrapped_bar
        if (wandb is None):
            logger.warning('wandb not found, pip install wandb')
            return

    def __iter__(self):
        return iter(self.wrapped_bar)

    def log(self, stats, tag=None, step=None):
        """Log to wandb (if present), then forward to the wrapped bar."""
        self._log_to_wandb(stats, tag, step)
        self.wrapped_bar.log(stats, tag=tag, step=step)

    def print(self, stats, tag=None, step=None):
        """Print via the wrapped bar, mirroring stats to wandb first."""
        self._log_to_wandb(stats, tag, step)
        self.wrapped_bar.print(stats, tag=tag, step=step)

    def update_config(self, config):
        """Push config to wandb (if present) and to the wrapped bar."""
        if (wandb is not None):
            wandb.config.update(config)
        self.wrapped_bar.update_config(config)

    def _log_to_wandb(self, stats, tag=None, step=None):
        # Silently no-op when wandb is unavailable.
        if (wandb is None):
            return
        if (step is None):
            step = stats['num_updates']
        prefix = ('' if (tag is None) else (tag + '/'))
        # 'num_updates' is the step counter, not a metric — exclude it.
        for key in (stats.keys() - {'num_updates'}):
            if isinstance(stats[key], AverageMeter):
                wandb.log({(prefix + key): stats[key].val}, step=step)
            elif isinstance(stats[key], Number):
                wandb.log({(prefix + key): stats[key]}, step=step)
def test_pydoc():
    """The pybind11 test module must expose its name/doc and render under pydoc."""
    import pydoc
    import pybind11_tests
    module = pybind11_tests
    assert module.__name__ == 'pybind11_tests'
    assert module.__doc__ == 'pybind11 test module'
    # docmodule returns the rendered help text; any truthy result passes.
    assert pydoc.text.docmodule(module)
def se_resnet_50(pretrained=False, **kwargs):
    """Construct an SE-ResNet-50 (Bottleneck blocks, stage depths [3, 4, 6, 3]).

    NOTE(review): the ``pretrained`` flag is accepted but never used — no
    checkpoint is loaded here. Confirm whether weight loading was intended.
    """
    model = SENet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return model
def ground_truth_to_binary(ground_truth):
    """Threshold soft token labels to {0, 1}, passing through the -100 ignore marker.

    Args:
        ground_truth: iterable of instances, each an iterable of token labels.
            A label equal to -100 (the usual loss ignore index) is kept as-is;
            every other value is binarized with a 0.5 threshold.

    Returns:
        A list of lists with values in {-100, 0, 1}.
    """
    def _binarize(label):
        # Preserve the ignore marker; threshold everything else at 0.5.
        if label == -100:
            return -100
        return 1 if label > 0.5 else 0

    # The original indexed loops never used their indices; comprehensions
    # express the same element-wise mapping directly.
    return [[_binarize(label) for label in instance] for instance in ground_truth]
class FlashSentenceEncoderLayer(nn.Module):
    """FLASH-style encoder layer: pre-norm + gated attention unit + residual."""

    def __init__(self, embedding_dim: int=512, hidden_dim: int=1024, z_dim: int=128, dropout: float=0.0, attention_dropout: float=0.0, hidden_dropout: float=0.0, norm_type: str='layernorm', max_positions: int=1024, export: bool=False) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout_module = FairseqDropout(dropout, module_name=self.__class__.__name__)
        self.gau = self.build_gated_attention_unit(embedding_dim, hidden_dim, z_dim, attention_dropout, hidden_dropout, max_positions)
        self.pre_norm = self.build_norm_layer(norm_type, embedding_dim, export)

    def build_norm_layer(self, norm_type, embedding_dim, export):
        """Construct the pre-normalization layer ('layernorm' or 'rmsnorm')."""
        if (norm_type == 'layernorm'):
            return LayerNorm(embedding_dim, export=export)
        elif (norm_type == 'rmsnorm'):
            return RMSNorm(embedding_dim, export=export)
        else:
            raise ValueError('Unknown norm type: {}'.format(norm_type))

    def build_gated_attention_unit(self, embedding_dim, hidden_dim, z_dim, attention_dropout, hidden_dropout, max_positions):
        """Factory for the GAU, so subclasses can swap the attention variant."""
        return GatedAttentionUnit(embed_dim=embedding_dim, zdim=z_dim, hdim=hidden_dim, attention_dropout=attention_dropout, hidden_dropout=hidden_dropout, max_positions=max_positions)

    def normalize(self, x):
        # BatchNorm1d normalizes over the channel dim, so a 3-D input must
        # be permuted so channels land in dim 1, then permuted back.
        if isinstance(self.pre_norm, nn.BatchNorm1d):
            assert (x.dim() == 3)
            x = x.permute(1, 2, 0)
            x = self.pre_norm(x)
            return x.permute(2, 0, 1)
        else:
            return self.pre_norm(x)

    def forward(self, x: torch.Tensor, x_padding_mask: Optional[torch.Tensor]=None):
        """Pre-norm residual block: x + dropout(GAU(norm(x))).

        Returns:
            (output tensor, attention weights from the GAU).
        """
        residual = x
        x = self.normalize(x)
        (x, attn) = self.gau(x, x_padding_mask)
        x = self.dropout_module(x)
        x = (residual + x)
        return (x, attn)
# NOTE(review): the line below looks like a mangled decorator — probably
# `@pytest.mark.skipif(...)` in the original source; as written it is a
# syntax error. Confirm against the original and restore the decorator.
.skipif((sys.platform == 'linux'), reason='This test checks multiprocessing override on non-linux platforms.')
def test_encode_images_num_workers_default_override_on_nonlinux(cnn, mocker):
    """On non-linux, encode_images must force num_workers=0 regardless of the request."""
    num_enc_workers = 4
    gen_batches_mocker = mocker.patch('imagededup.methods.cnn.CNN._get_cnn_features_batch')
    result = cnn.encode_images(TEST_IMAGE_DIR, num_enc_workers=num_enc_workers)
    # The requested 4 workers must be overridden to 0 on this platform.
    gen_batches_mocker.assert_called_once_with(image_dir=TEST_IMAGE_DIR, recursive=False, num_workers=0)
def configure_dims(params):
    """Probe the environment once to discover observation/action/goal sizes.

    Returns a dict with 'o', 'u', 'g' dimensions plus one 'info_<key>'
    entry per field in the env's step info dict.
    """
    env = cached_make_env(params['make_env'])
    env.reset()
    obs, _, _, info = env.step(env.action_space.sample())
    dims = {
        'o': obs['observation'].shape[0],
        'u': env.action_space.shape[0],
        'g': obs['desired_goal'].shape[0],
    }
    for key, value in info.items():
        arr = np.array(value)
        if arr.ndim == 0:
            # Promote scalars to length-1 vectors so every entry has a size.
            arr = arr.reshape(1)
        dims['info_{}'.format(key)] = arr.shape[0]
    return dims
def wipe_and_exit(config):
    """Delete tensorboard/checkpoint directories and the log file, then exit.

    Args:
        config: object exposing TENSORBOARD_DIR and CHECKPOINT_FOLDER
            (directories) and LOG_FILE (a regular file path).

    Raises:
        SystemExit: always, via exit(0), after the cleanup attempts.
    """
    if os.path.exists(config.TENSORBOARD_DIR):
        print('Removing tensorboard directory...')
        shutil.rmtree(config.TENSORBOARD_DIR, ignore_errors=True)
    if os.path.exists(config.CHECKPOINT_FOLDER):
        print('Removing checkpoint folder...')
        shutil.rmtree(config.CHECKPOINT_FOLDER, ignore_errors=True)
    if os.path.exists(config.LOG_FILE):
        print('Removing log file...')
        # Bug fix: shutil.rmtree only removes directories; with
        # ignore_errors=True it silently left the log FILE in place.
        try:
            os.remove(config.LOG_FILE)
        except OSError:
            pass  # mirror the previous best-effort (ignore_errors) behavior
    exit(0)
class COCO(_COCO):
    """Thin wrapper over pycocotools' COCO adding snake_case accessor aliases.

    NOTE(review): the version check below is a *lexicographic* string
    comparison meant to detect mmpycocotools (versioned 12.x); note that
    e.g. '2.0' >= '12.0.2' is also True as strings, so official pycocotools
    builds can trigger the warning too — confirm the intended condition.
    """

    def __init__(self, annotation_file=None):
        if (getattr(pycocotools, '__version__', '0') >= '12.0.2'):
            warnings.warn('mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', UserWarning)
        super().__init__(annotation_file=annotation_file)
        # snake_case aliases for upstream camelCase index maps.
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Alias of getAnnIds (the list defaults are only read, never mutated)."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Alias of getCatIds."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Alias of getImgIds."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Alias of loadAnns."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Alias of loadCats."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Alias of loadImgs."""
        return self.loadImgs(ids)
def load_model_and_data(fname):
    """Load a serialized training dump and rebuild its dataset wrapper.

    Args:
        fname: path to a torch-saved dump exposing .model and .dataset_params.

    Returns:
        (model, data) where data is a DataInstructionEmbedding initialized
        from the dump's dataset parameters.

    NOTE: torch.load unpickles arbitrary objects — only use on trusted files.
    """
    dump = torch.load(fname)
    data = dt.DataInstructionEmbedding()
    data.read_meta_data()
    data.load_dataset_params(dump.dataset_params)
    return (dump.model, data)
class MilestonesFinetuning(BaseFinetuning):
    """Two-stage finetuning schedule for a Lightning module's feature extractor.

    The extractor is frozen before training; at the first milestone epoch its
    last 5 layers are unfrozen, at the second milestone the remaining layers
    are unfrozen as well.
    """

    def __init__(self, milestones: tuple=(5, 10), train_bn: bool=False):
        super().__init__()
        self.milestones = milestones
        self.train_bn = train_bn

    def freeze_before_training(self, pl_module: LightningModule):
        # Freeze the whole feature extractor before the first epoch.
        self.freeze(modules=pl_module.feature_extractor, train_bn=self.train_bn)

    def finetune_function(self, pl_module: LightningModule, epoch: int, optimizer: Optimizer, opt_idx: int):
        if (epoch == self.milestones[0]):
            # Unfreeze the last 5 layers and add them to the optimizer.
            self.unfreeze_and_add_param_group(modules=pl_module.feature_extractor[(- 5):], optimizer=optimizer, train_bn=self.train_bn)
        elif (epoch == self.milestones[1]):
            # Unfreeze everything that remains.
            self.unfreeze_and_add_param_group(modules=pl_module.feature_extractor[:(- 5)], optimizer=optimizer, train_bn=self.train_bn)
class FakeProvider(BaseProvider):
    """Provider serving a fixed roster of fake/simulated backends for tests."""

    def __init__(self):
        self._backends = [FakeQasmSimulator(), FakeTenerife(), FakeMelbourne(), FakeRueschlikon(), FakeTokyo(), FakeOpenPulse2Q()]
        super().__init__()

    def get_backend(self, name=None, **kwargs):
        """Return the backend called *name*, or the first backend when no name is given.

        Raises:
            QiskitBackendNotFoundError: if *name* matches no known backend.
        """
        if not name:
            return self._backends[0]
        matches = [candidate for candidate in self._backends if candidate.name() == name]
        if not matches:
            raise QiskitBackendNotFoundError()
        return matches[0]

    def backends(self, name=None, **kwargs):
        """Return every available fake backend (the *name* filter is ignored)."""
        return self._backends
def match_xxz(match_dict):
    """Match reference articles to keys of match_dict by token overlap.

    Reads the hard-coded test articles file, tokenizes each line (first 52
    space-separated tokens), and assigns the first key whose overlap ratio
    exceeds 0.85; a matched key is removed from match_dict so it is used at
    most once.

    NOTE(review): ``joint = toks.union(val)`` makes len(joint) >= len(toks),
    so ``rat >= 1`` and the 0.85 threshold always passes on the first key —
    ``intersection`` was probably intended. Confirm before relying on this.

    Returns:
        (output_list, meta_sents): the matched key per line ('' if none) and
        the per-line sentence lists split on '<S_SEP>'.
    """
    path = '/backup3/jcxu/data/xxz-latent/test.article'
    with open(path, 'r') as fd:
        lines = fd.read().splitlines()
    line_num = len(lines)
    output_list = ['' for _ in range(line_num)]
    # feat_lines is allocated but not populated in the visible code.
    feat_lines = [[] for _ in range(line_num)]
    meta_sents = []
    for (idx, l) in enumerate(lines):
        sents = l.split('<S_SEP>')
        meta_sents.append(sents)
        l = l.replace('<S_SEP>', '')
        toks = l.split(' ')[:52]
        toks = set([x for x in toks if (x != '')])
        for (key, val) in match_dict.items():
            joint = toks.union(val)
            rat = (len(joint) / len(toks))
            if (rat > 0.85):
                output_list[idx] = key
                # Delete-then-break avoids mutating the dict mid-iteration.
                del match_dict[key]
                break
    print('remain')
    print(match_dict)
    print('match')
    print(output_list)
    return (output_list, meta_sents)
def wrap_sys_argv_cmd(cmd: str, pre):
    """Pretty-print a command line as 'pre prog' plus backslash-wrapped flag/value pairs.

    Argument tokens after the program name are grouped two at a time, one
    pair per line, with continuation lines indented to align after the
    prefix and program name.
    """
    prog, *rest = cmd.split(' ')
    flag_value_pairs = ['{} {}'.format(flag, value) for flag, value in zip(rest[::2], rest[1::2])]
    # Continuation indent: width of "<pre> <prog> ".
    continuation = (len(pre) + len(prog) + 2) * ' '
    joined = (' \\\n' + continuation).join(flag_value_pairs)
    return '{} {} {}'.format(pre, prog, joined)
# NOTE(review): this bare call looks like a mangled class decorator — probably
# `@_registry(operator_type='TransposeBatchMatMul')` in the original source;
# as written the registration result is discarded. Confirm and restore.
_registry(operator_type='TransposeBatchMatMul')
class TransposeBatchMatMul(Operator):
    """Marker operator type for transposed batch matrix multiplication."""

    def __init__(self):
        super().__init__()
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC (Microsoft Research Paraphrase Corpus) GLUE task."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # This processor API is deprecated; warn once at construction.
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensor dict (idx/sentence1/sentence2/label)."""
        return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))

    def get_train_examples(self, data_dir):
        logger.info('LOOKING AT {}'.format(os.path.join(data_dir, 'train.tsv')))
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """MRPC is binary classification."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Convert TSV rows into InputExamples; row 0 is the header row."""
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue
            guid = ('%s-%s' % (set_type, i))
            # Columns: label at index 0, sentence pair at indices 3 and 4.
            text_a = line[3]
            text_b = line[4]
            # The test split carries no gold labels.
            label = (None if (set_type == 'test') else line[0])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class StringPool():
    """Interning pool mapping strings to stable integer ids for BBA serialisation.

    Id 0 is reserved for the empty string; ids below ``known_id_count`` are
    constants loaded from a generated header and are not re-serialised.
    """

    def __init__(self):
        self.strs = {'': 0}
        self.known_id_count = 1

    def read_constids(self, file: str):
        """Load fixed ids from lines of the form ``X(name)`` in *file*.

        Each constant must receive exactly the next sequential id; this is
        asserted so the pool stays in sync with the generated header.
        """
        idx = 1
        with open(file, 'r') as f:
            for line in f:
                l = line.strip()
                if (not l.startswith('X(')):
                    continue
                l = l[2:]
                assert l.endswith(')'), l
                l = l[:(- 1)].strip()
                i = self.id(l)
                assert (i.index == idx), (i, idx, l)
                idx += 1
        self.known_id_count = idx

    def id(self, val: str):
        """Return the IdString for *val*, interning it if previously unseen."""
        if (val in self.strs):
            return IdString(self.strs[val])
        else:
            idx = len(self.strs)
            self.strs[val] = idx
            return IdString(idx)

    def serialise_lists(self, context: str, bba: BBAWriter):
        """Emit the non-constant strings, ordered by id, under a label."""
        bba.label(f'{context}_strs')
        for (s, idx) in sorted(self.strs.items(), key=(lambda x: x[1])):
            if (idx < self.known_id_count):
                # Constant ids are already known to the consumer; skip their text.
                continue
            bba.str(s)

    def serialise(self, context: str, bba: BBAWriter):
        """Emit the constant count and a slice referencing the string list."""
        bba.u32(self.known_id_count)
        bba.slice(f'{context}_strs', (len(self.strs) - self.known_id_count))
def restore_op():
    """Undo instrumentation by restoring the original tensor/torch/F operations.

    Reverses a prior patching step that replaced tensor magic methods,
    top-level torch functions, and F (presumably torch.nn.functional) ops
    with wrappers, using the raw_* backups captured at patch time.
    """
    global tensor_magic_op_supported, raw_tensor_magic_op, torch_op_supported, raw_torch_op, func_op_sopprted, raw_func_op
    global tensor_target
    for op_name in tensor_magic_op_supported:
        setattr(tensor_target, op_name, raw_tensor_magic_op[op_name])
    for op_name in torch_op_supported:
        setattr(torch, op_name, raw_torch_op[op_name])
    # NOTE: 'func_op_sopprted' (sic) matches the module-level variable name.
    for op_name in func_op_sopprted:
        setattr(F, op_name, raw_func_op[op_name])
def parse_args():
    """Parse CLI arguments for the print-config tool.

    Returns:
        argparse.Namespace with `config` (config file path), `save_path`
        (optional output path), and `cfg_options` (key=value overrides
        merged into the config via DictAction).
    """
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    parser.add_argument('--save-path', default=None, help='save path of whole config, suffixed with .py, .json or .yml')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    args = parser.parse_args()
    return args
def imgtensor2im(image_tensor, imtype=np.uint8):
    """Convert a normalized CHW image tensor to an HWC numpy image.

    The tensor is de-normalized via the module-level inv_normalize, scaled by
    255, and inputs with fewer than 3 channels are replicated to 3.
    """
    image_numpy = inv_normalize(image_tensor).cpu().float().numpy()
    # CHW -> HWC and scale to the pixel range.
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) * 255.0)
    if (image_numpy.shape[2] < 3):
        # NOTE(review): dstack of [img] * 3 triples the channel count — a
        # 2-channel input would yield 6 channels, not 3. Confirm inputs are
        # single-channel whenever this branch is taken.
        image_numpy = np.dstack(([image_numpy] * 3))
    return image_numpy.astype(imtype)
def split_911(amr):
    """Replace every '911' token in *amr* with the date pair '09', '11'.

    Repeats until no '911' token remains; each replacement goes through
    amr.replace_span with CD part-of-speech tags and DATE NER tags.
    """
    while True:
        found = next((pos for pos, tok in enumerate(amr.tokens) if tok == '911'), None)
        if found is None:
            return
        amr.replace_span([found], ['09', '11'], ['CD', 'CD'], ['DATE', 'DATE'])
def halve_range_str(range_str):
    """Halve every integer in a comma-separated list of colon-delimited ranges.

    Each component is integer-divided by two and clamped to a minimum of 1,
    e.g. '4:8,3' -> '2:4,1'.
    """
    halved = []
    for piece in range_str.split(','):
        components = (max(1, int(part) // 2) for part in piece.split(':'))
        halved.append(':'.join(str(value) for value in components))
    return ','.join(halved)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
    """Yield (short_name, long_name, rendered diagnosis) for every regex hit in *msg*.

    Each diagnosis template is prefixed with a file:line header and formatted
    with the named groups of every match found by _FindAllMatches.
    """
    for regex, diagnosis in diagnoses:
        if not re.search(regex, msg):
            continue
        templated = '%(file)s:%(line)s:' + diagnosis
        for match in _FindAllMatches(regex, msg):
            yield short_name, long_name, templated % match.groupdict()
def print_model_parameters(model, only_num=True):
    """Print a summary of a model's parameters.

    Args:
        model: a torch.nn.Module.
        only_num: when False, also print each named parameter's shape and
            trainability before the total count.
    """
    print('Model Parameter')
    if not only_num:
        for name, param in model.named_parameters():
            print(name, param.shape, param.requires_grad)
    total = sum(param.nelement() for param in model.parameters())
    print('Total params num: {}'.format(total))
    print('Finish Parameter')
def partial_load(pretrained_dict, model, skip_keys=[], log=False):
    """Load into *model* only the pretrained weights whose keys match.

    A pretrained entry is copied when its key exists in the model's state
    dict and contains none of the *skip_keys* substrings; all other model
    parameters keep their current values.

    NOTE: the mutable default for skip_keys is preserved for interface
    compatibility; it is only read, never mutated.
    """
    model_dict = model.state_dict()

    def _accepted(key):
        # Key must exist in the target and avoid every skip substring.
        return key in model_dict and not any(skip in key for skip in skip_keys)

    filtered_dict = {key: value for key, value in pretrained_dict.items() if _accepted(key)}
    skipped_keys = [key for key in pretrained_dict if key not in filtered_dict]
    unload_keys = [key for key in model_dict if key not in pretrained_dict]
    model_dict.update(filtered_dict)
    model.load_state_dict(model_dict)
    if log:
        print('\nSkipped keys: ', skipped_keys)
        print('\nLoading keys: ', filtered_dict.keys())
        print('\nUnLoaded keys: ', unload_keys)
class LSUNDataset(Dataset):
    """Wrap (image, label) pairs, resizing each image on access.

    Note: `transform` is stored for interface compatibility but is not
    applied in the visible code — __getitem__ only resizes.
    """

    def __init__(self, data, transform, size=(32, 32)):
        self.data = data
        self.transform = transform
        self.size = size

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        image, label = self.data[idx]
        resized = TF.resize(image, self.size)
        return (resized, label)
class ctx_eval(object):
    """Context manager that runs a module in eval mode, restoring state on exit.

    Note: training is switched off in __init__ (at construction time), not
    in __enter__ — the module's state changes as soon as the object exists.
    """

    def __init__(self, module):
        # Capture the current training flags before disabling them.
        self.prev_training_state = get_module_training_state(module)
        self.module = module
        set_module_training_off(module)

    def __enter__(self):
        pass

    def __exit__(self, *args):
        # Restore exactly the flags captured at construction; never suppress
        # exceptions (returning False re-raises anything in flight).
        set_module_training_state(self.module, self.prev_training_state)
        return False
# NOTE(review): this bare call looks like a mangled decorator — probably
# `@_builder('audio_caption')` in the original source; as written its result
# is discarded. Confirm against the upstream code.
_builder('audio_caption')
class AudioCapBuilder(BaseDatasetBuilder):
    """Dataset builder for audio-captioning (Clotho) splits.

    NOTE(review): train_dataset_cls is also the *Eval* dataset class —
    confirm a dedicated training dataset class was not intended.
    """
    train_dataset_cls = AudioCaptionEvalDataset
    eval_dataset_cls = AudioCaptionEvalDataset
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/clotho/defaults_cap.yaml'}

    def build(self):
        """Instantiate one dataset per configured split (train/val/test).

        Annotation paths are resolved to absolute cache paths, the audio
        storage root is validated (warning only), and each split gets the
        matching train/eval text and audio processors.
        """
        self.build_processors()
        build_info = self.config.build_info
        ann_info = build_info.annotations
        aud_info = build_info.get('audios')
        datasets = dict()
        for split in ann_info.keys():
            if (split not in ['train', 'val', 'test']):
                continue
            is_train = (split == 'train')
            # Pick the processors matching this split.
            text_processor = (self.text_processors['train'] if is_train else self.text_processors['eval'])
            aud_processor = (self.aud_processors['train'] if is_train else self.aud_processors['eval'])
            ann_paths = ann_info.get(split).storage
            if isinstance(ann_paths, str):
                ann_paths = [ann_paths]
            # Resolve relative annotation paths against the cache directory.
            abs_ann_paths = []
            for ann_path in ann_paths:
                if (not os.path.isabs(ann_path)):
                    ann_path = utils.get_cache_path(ann_path)
                abs_ann_paths.append(ann_path)
            ann_paths = abs_ann_paths
            aud_path = aud_info.storage
            if (not os.path.isabs(aud_path)):
                aud_path = utils.get_cache_path(aud_path)
            if (not os.path.exists(aud_path)):
                # Non-fatal: downstream loading will fail later if truly absent.
                warnings.warn('storage path {} does not exist.'.format(aud_path))
            dataset_cls = (self.train_dataset_cls if is_train else self.eval_dataset_cls)
            datasets[split] = dataset_cls(text_processor=text_processor, aud_processor=aud_processor, ann_paths=ann_paths, aud_root=aud_path)
        return datasets
class SetMinus(AbstractDistribution):
    """Distribution over base's support minus everything hold_out contains.

    Samples are drawn from *base* by rejection: any draw contained in
    *hold_out* is discarded and redrawn, up to _MAX_TRIES attempts.
    """

    def __init__(self, base, hold_out):
        self.base = base
        self.hold_out = hold_out
        self._keys = base.keys
        if (not hold_out.keys.issubset(self._keys)):
            raise ValueError('Keys {} of hold_out is not a subset of keys {} of SetMinus base distribution.'.format(hold_out.keys, base.keys))

    def sample(self, rng=None):
        """Rejection-sample from base until the draw falls outside hold_out.

        Raises:
            ValueError: if _MAX_TRIES consecutive draws all fell in hold_out.
        """
        rng = self._get_rng(rng)
        tries = 0
        while (tries < _MAX_TRIES):
            tries += 1
            sample = self.base.sample(rng=rng)
            if (not self.hold_out.contains(sample)):
                return sample
        # Bug fix: error message read 'number of tried' — corrected to 'tries'.
        raise ValueError('Maximum number of tries exceeded when trying to sample from {}.'.format(str(self)))

    def contains(self, spec):
        """True iff *spec* is in base but not in hold_out."""
        return (self.base.contains(spec) and (not self.hold_out.contains(spec)))

    def to_str(self, indent):
        """Render a nested, indented description of this distribution."""
        s = ((((((indent * ' ') + '<SetMinus:\n') + ((indent + 1) * ' ')) + 'base=\n{},\n') + ((indent + 1) * ' ')) + 'hold_out=\n{}>').format(self.base.to_str((indent + 2)), self.hold_out.to_str((indent + 2)))
        return s

    def keys(self):
        # NOTE(review): returns the attribute captured from base, yet `keys`
        # is accessed as an *attribute* elsewhere (base.keys, hold_out.keys)
        # — confirm whether this should be a property.
        return self._keys
def load_cifar100(data_dir, use_augmentation=False):
    """Build the CIFAR-100 train/test datasets.

    Args:
        data_dir: root directory where the dataset is stored/downloaded.
        use_augmentation: if True, apply random crop/flip/rotation to the
            training split; otherwise the train split uses the same
            tensor-only transform as the test split.

    Returns:
        (train_dataset, test_dataset) tuple of torchvision datasets.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    if use_augmentation:
        train_tf = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(0.5),
            transforms.RandomRotation(15),
            transforms.ToTensor(),
        ])
    else:
        train_tf = to_tensor
    train_dataset = torchvision.datasets.CIFAR100(root=data_dir, train=True, download=True, transform=train_tf)
    test_dataset = torchvision.datasets.CIFAR100(root=data_dir, train=False, download=True, transform=to_tensor)
    return (train_dataset, test_dataset)
def build_errors(options):
    """Build a Theano graph computing pairwise errors between sentence and
    image embeddings.

    Args:
        options: dict of model options; options['method'] selects the
            error type ('order' or anything else for dot-product).

    Returns:
        ([s_emb, im_emb], errs): the two symbolic input matrices and the
        symbolic error matrix (one row per sentence, one column per image).
    """
    # Symbolic inputs: one embedding per row.
    s_emb = tensor.matrix('s_emb', dtype='float32')
    im_emb = tensor.matrix('im_emb', dtype='float32')
    errs = None
    if (options['method'] == 'order'):
        # Order-embedding error: for each sentence i, sum the order
        # violations against every image (computed row-by-row via theano.map
        # since order_violations works on one sentence at a time).
        indices = tensor.arange(s_emb.shape[0])
        (errs, _) = theano.map((lambda i, s, im: order_violations(s[i], im, options).sum(axis=1).flatten()), sequences=[indices], non_sequences=[s_emb, im_emb])
    else:
        # Default: negated inner products, so higher similarity = lower error.
        errs = (- tensor.dot(s_emb, im_emb.T))
    return ([s_emb, im_emb], errs)
class TransformerLanguageModelConfig(FairseqDataclass):
    """Configuration dataclass for the Transformer language model.

    Each field's `metadata['help']` carries the CLI help text; defaults
    mirror the original argparse flags. Fields assigned with `II(...)` are
    interpolated at runtime from the task/common config sections.
    """
    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(default='relu', metadata={'help': 'activation function to use'})
    # --- dropout settings ---
    dropout: float = field(default=0.1, metadata={'help': 'dropout probability'})
    attention_dropout: float = field(default=0.0, metadata={'help': 'dropout probability for attention weights'})
    activation_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
    relu_dropout: float = field(default=0.0, metadata={'help': 'dropout probability after activation in FFN.'})
    # --- decoder architecture ---
    decoder_embed_dim: int = field(default=512, metadata={'help': 'decoder embedding dimension'})
    decoder_output_dim: int = field(default=512, metadata={'help': 'decoder output dimension'})
    decoder_input_dim: int = field(default=512, metadata={'help': 'decoder input dimension'})
    decoder_ffn_embed_dim: int = field(default=2048, metadata={'help': 'decoder embedding dimension for FFN'})
    decoder_layers: int = field(default=6, metadata={'help': 'num decoder layers'})
    decoder_attention_heads: int = field(default=8, metadata={'help': 'num decoder attention heads'})
    decoder_normalize_before: bool = field(default=False, metadata={'help': 'apply layernorm before each decoder block'})
    no_decoder_final_norm: bool = field(default=False, metadata={'help': "don't add an extra layernorm after the last decoder block"})
    # --- adaptive softmax / adaptive input ---
    adaptive_softmax_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'})
    adaptive_softmax_dropout: float = field(default=0, metadata={'help': 'sets adaptive softmax dropout for the tail projections'})
    adaptive_softmax_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
    no_token_positional_embeddings: bool = field(default=False, metadata={'help': 'if set, disables positional embeddings (outside self attention)'})
    share_decoder_input_output_embed: bool = field(default=False, metadata={'help': 'share decoder input and output embeddings'})
    # --- character-level token embeddings ---
    character_embeddings: bool = field(default=False, metadata={'help': 'if set, uses character embedding convolutions to produce token embeddings'})
    character_filters: str = field(default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', metadata={'help': 'size of character embeddings'})
    character_embedding_dim: int = field(default=4, metadata={'help': 'size of character embeddings'})
    char_embedder_highway_layers: int = field(default=2, metadata={'help': 'number of highway layers for character token embeddder'})
    adaptive_input: bool = field(default=False, metadata={'help': 'if set, uses adaptive input'})
    adaptive_input_factor: float = field(default=4, metadata={'help': 'adaptive input factor'})
    adaptive_input_cutoff: Optional[str] = field(default=None, metadata={'help': 'comma separated list of adaptive input cutoff points.'})
    tie_adaptive_weights: bool = field(default=False, metadata={'help': 'if set, ties the weights of adaptive softmax and adaptive input'})
    tie_adaptive_proj: bool = field(default=False, metadata={'help': 'if set, ties the projection weights of adaptive softmax and adaptive input'})
    # --- embeddings / normalization ---
    decoder_learned_pos: bool = field(default=False, metadata={'help': 'use learned positional embeddings in the decoder'})
    layernorm_embedding: bool = field(default=False, metadata={'help': 'add layernorm to embedding'})
    no_scale_embedding: bool = field(default=False, metadata={'help': 'if True, dont scale embeddings'})
    # --- memory/compute trade-offs ---
    checkpoint_activations: bool = field(default=False, metadata={'help': 'checkpoint activations at each layer'})
    offload_activations: bool = field(default=False, metadata={'help': 'move checkpointed activations to CPU after they are used.'})
    decoder_layerdrop: float = field(default=0.0, metadata={'help': 'LayerDrop probability for decoder'})
    decoder_layers_to_keep: Optional[str] = field(default=None, metadata={'help': 'which layers to *keep* when pruning as a comma-separated list'})
    # --- quantization noise ---
    quant_noise_pq: float = field(default=0.0, metadata={'help': 'iterative PQ quantization noise at training time'})
    quant_noise_pq_block_size: int = field(default=8, metadata={'help': 'block size of quantization noise at training time'})
    quant_noise_scalar: float = field(default=0.0, metadata={'help': 'scalar quantization noise and scalar quantization at training time'})
    min_params_to_wrap: int = field(default=DEFAULT_MIN_PARAMS_TO_WRAP, metadata={'help': 'minimum number of params for a layer to be wrapped with FSDP() when training with --ddp-backend=fully_sharded. Smaller values will improve memory efficiency, but may make torch.distributed communication less efficient due to smaller input sizes. This option is set to 0 (i.e., always wrap) when --checkpoint-activations or --offload-activations are passed.'})
    # --- BASE layers ---
    base_layers: Optional[int] = field(default=0, metadata={'help': 'number of BASE layers in total'})
    base_sublayers: Optional[int] = field(default=1, metadata={'help': 'number of sublayers in each BASE layer'})
    base_shuffle: Optional[int] = field(default=1, metadata={'help': 'shuffle tokens between workers before computing assignment'})
    # --- values interpolated from other config sections ---
    add_bos_token: bool = II('task.add_bos_token')
    tokens_per_sample: int = II('task.tokens_per_sample')
    max_target_positions: Optional[int] = II('task.max_target_positions')
    tpu: bool = II('common.tpu')
def compute_overall_iou(pred, target, num_classes):
    """Per-shape mean part IoU for part segmentation.

    Args:
        pred: (batch, num_points, num_classes) tensor of per-point scores.
        target: (batch, num_points) tensor of ground-truth part labels.
        num_classes: total number of part classes.

    Returns:
        List with one float per shape: the mean IoU over the parts that
        actually appear in that shape's ground truth.
    """
    predicted_labels = pred.max(dim=2)[1].cpu().data.numpy()
    gt_labels = target.cpu().data.numpy()
    shape_ious = []
    for idx in range(pred.size(0)):
        part_ious = []
        for cls in range(num_classes):
            pred_mask = (predicted_labels[idx] == cls)
            gt_mask = (gt_labels[idx] == cls)
            intersection = np.sum(np.logical_and(pred_mask, gt_mask))
            union = np.sum(np.logical_or(pred_mask, gt_mask))
            # Only parts present in the ground truth contribute to the mean.
            if np.sum(gt_mask) != 0:
                part_ious.append(intersection / float(union))
        shape_ious.append(np.mean(part_ious))
    return shape_ious
class Generator(LearningModule):
    """Generator network: a deconvolutional net over an embedded latent
    distribution, producing `self.data` as its output.

    NOTE(review): __init__ never calls super().__init__(); presumably
    LearningModule needs no base initialization -- confirm.
    """

    def __init__(self, args, dist, nc, z=None, source=None, mode='train', bnkwargs={}, gen_transform=None):
        # NOTE(review): mutable default bnkwargs={} is shared across calls;
        # it appears to be read-only here (passed through as **bnkwargs).
        N = self.net = Net(source=source, name='Generator')
        self.set_mode(mode)
        # Embed the latent distribution into network inputs (+ weights).
        h_and_weights = dist.embed_data()
        # Use running batch-norm statistics at test time.
        bn_use_ave = (mode == 'test')
        (self.data, _) = get_deconvnet(image_size=args.crop_resize, name=args.gen_net)(h_and_weights, N=N, nout=nc, size=args.gen_net_size, num_fc=args.net_fc, fc_dims=args.net_fc_dims, nonlin=args.deconv_nonlin, bn_use_ave=bn_use_ave, ksize=args.deconv_ksize, **bnkwargs)
        if (gen_transform is not None):
            # Optional post-processing of the generated output.
            self.data = Output(gen_transform(self.data.value), shape=self.data.shape)
def get_line_style(line):
    """Extract a style dict (alpha/color/linewidth/dasharray/zorder) from a
    matplotlib Line2D-like object."""
    alpha = line.get_alpha()
    if alpha is None:
        # matplotlib reports None for "fully opaque"; normalize to 1.
        alpha = 1
    return {
        'alpha': alpha,
        'color': color_to_hex(line.get_color()),
        'linewidth': line.get_linewidth(),
        'dasharray': get_dasharray(line),
        'zorder': line.get_zorder(),
    }
class InferenceBase(nn.Module):
    """Small MLP applied position-wise over the last input dimension.

    With a falsy hidden_dim (0/None) this is a single linear layer;
    otherwise it is a two-layer MLP with a ReLU in between.
    """

    def __init__(self, input_dim, output_dim, hidden_dim):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        if hidden_dim:
            self.layer = nn.Sequential(
                nn.Linear(input_dim, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, output_dim),
            )
        else:
            self.layer = nn.Sequential(nn.Linear(input_dim, output_dim))

    def forward(self, inputs):
        """Apply the MLP to the last dim, preserving all leading dims."""
        leading_shape = inputs.size()[:-1]
        flat = inputs.view(-1, inputs.size(-1))
        out = self.layer(flat)
        return out.view(*leading_shape, -1)

    def get_output_dim(self, input_dim):
        """Output feature size (independent of input_dim)."""
        return self.output_dim
def evaluate_svm(train_features, train_labels, test_features, test_labels):
    """Fit a linear SVM on the train split and return test accuracy in [0, 1]."""
    classifier = LinearSVC()
    classifier.fit(train_features, train_labels)
    predictions = classifier.predict(test_features)
    num_correct = np.sum(test_labels == predictions) * 1.0
    return num_correct / predictions.shape[0]
class ResFCNetBase(ResNetBase):
    """Sparse residual FCN: a 3-level encoder whose levels are upsampled back
    to full resolution and concatenated before a final 1x1 conv.

    NOTE(review): `self.inplanes` is mutated by each `_make_layer` call, so
    the construction order of the layers below is significant.
    """
    # Output resolution equals input resolution (stride-1 output).
    OUT_PIXEL_DIST = 1

    def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
        super(ResFCNetBase, self).__init__(in_channels, out_channels, config, D)

    def network_initialization(self, in_channels, out_channels, config, D):
        """Build all layers. D=3 is spatial-only; D=4 adds a temporal axis."""
        net_metadata = self.net_metadata
        def space_n_time_m(n, m):
            # For D=4, spatial dims get n and the temporal dim gets m.
            return (n if (D == 3) else [n, n, n, m])
        if (D == 4):
            self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
        # Stem at full resolution.
        self.inplanes = self.PLANES[0]
        self.conv1 = conv(in_channels, self.inplanes, pixel_dist=1, kernel_size=space_n_time_m(5, 1), stride=1, dilation=1, bias=False, D=D, net_metadata=net_metadata)
        self.bn1 = nn.BatchNorm1d(self.inplanes)
        self.block1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0], pixel_dist=1)
        # Level 2: downsample by 2, then a transpose conv back to stride 1.
        self.conv2p1s2 = conv(self.inplanes, self.inplanes, pixel_dist=1, kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bn2 = nn.BatchNorm1d(self.inplanes)
        self.block2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1], pixel_dist=space_n_time_m(2, 1))
        self.convtr2p2s2 = conv_tr(self.inplanes, self.PLANES[1], pixel_dist=space_n_time_m(2, 1), kernel_size=space_n_time_m(2, 1), upsample_stride=space_n_time_m(2, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bntr2 = nn.BatchNorm1d(self.PLANES[1])
        # Level 3: downsample to stride 4, then upsample 4x back to stride 1.
        self.conv3p2s2 = conv(self.inplanes, self.inplanes, pixel_dist=space_n_time_m(2, 1), kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bn3 = nn.BatchNorm1d(self.inplanes)
        self.block3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2], pixel_dist=space_n_time_m(4, 1))
        self.convtr3p4s4 = conv_tr(self.inplanes, self.PLANES[2], pixel_dist=space_n_time_m(4, 1), kernel_size=space_n_time_m(4, 1), upsample_stride=space_n_time_m(4, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bntr3 = nn.BatchNorm1d(self.PLANES[2])
        # Level 4: downsample to stride 8, then upsample 8x back to stride 1.
        self.conv4p4s2 = conv(self.inplanes, self.inplanes, pixel_dist=space_n_time_m(4, 1), kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bn4 = nn.BatchNorm1d(self.inplanes)
        self.block4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3], pixel_dist=space_n_time_m(8, 1))
        self.convtr4p8s8 = conv_tr(self.inplanes, self.PLANES[3], pixel_dist=space_n_time_m(8, 1), kernel_size=space_n_time_m(8, 1), upsample_stride=space_n_time_m(8, 1), dilation=1, conv_type=ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS, bias=False, D=D, net_metadata=net_metadata)
        self.bntr4 = nn.BatchNorm1d(self.PLANES[3])
        self.relu = nn.ReLU(inplace=True)
        # Final 1x1 conv over the concatenation of all upsampled branches
        # plus the full-resolution block1 features.
        self.final = conv((sum(self.PLANES[1:4]) + (self.PLANES[0] * self.BLOCK.expansion)), out_channels, pixel_dist=1, kernel_size=1, stride=1, dilation=1, bias=True, D=D, net_metadata=net_metadata)

    def forward(self, x):
        # Stem + full-resolution branch.
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out_b1 = self.block1(out)
        # Stride-2 branch, upsampled back to stride 1 as out_b2p1.
        out = self.conv2p1s2(out_b1)
        out = self.bn2(out)
        out = self.relu(out)
        out_b2 = self.block2(out)
        out = self.convtr2p2s2(out_b2)
        out = self.bntr2(out)
        out_b2p1 = self.relu(out)
        # Stride-4 branch: note it continues from out_b2 (the stride-2
        # features), not from the upsampled out_b2p1.
        out = self.conv3p2s2(out_b2)
        out = self.bn3(out)
        out = self.relu(out)
        out_b3 = self.block3(out)
        out = self.convtr3p4s4(out_b3)
        out = self.bntr3(out)
        out_b3p1 = self.relu(out)
        # Stride-8 branch, continuing from out_b3.
        out = self.conv4p4s2(out_b3)
        out = self.bn4(out)
        out = self.relu(out)
        out_b4 = self.block4(out)
        out = self.convtr4p8s8(out_b4)
        out = self.bntr4(out)
        out_b4p1 = self.relu(out)
        if self.USE_VALID_CONV:
            # NOTE(review): USE_VALID_CONV and unpool1 are defined elsewhere
            # (subclass or base); not visible in this file chunk -- confirm.
            out_b1 = self.unpool1(out_b1)
        # Fuse all branches at full resolution and classify.
        out = torch.cat((out_b4p1, out_b3p1, out_b2p1, out_b1), dim=1)
        return self.final(out)
def extract_archive(file_path, path='.', archive_format='auto'):
    """Extract a tar or zip archive.

    Args:
        file_path: path to the archive.
        path: destination directory.
        archive_format: 'auto' (try tar then zip), 'tar', 'zip', a list of
            those, or None (no-op). Unknown entries are skipped.

    Returns:
        True if an archive matching one of the requested formats was found
        and extracted, False otherwise.
    """
    if archive_format is None:
        return False
    if archive_format == 'auto':
        archive_format = ['tar', 'zip']
    if isinstance(archive_format, str):
        archive_format = [archive_format]
    for archive_type in archive_format:
        if archive_type == 'tar':
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        elif archive_type == 'zip':
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile
        else:
            # Fixed: an unknown format previously fell through with
            # open_fn/is_match_fn unbound, raising UnboundLocalError.
            continue
        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
                    # Remove a partially extracted tree before re-raising.
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False
def postprocess1(x):
    """Restore and expand AMR graphs for a single file.

    Args:
        x: (file_path, util_dir) tuple.

    Side effects:
        Writes `<file_path>.post`; a temporary `<file_path>.frame` file is
        created and removed along the way.
    """
    (file_path, util_dir) = x
    # Fixed: the debug print previously read the global `args.util_dir`
    # instead of the util_dir unpacked from `x`.
    print(file_path, util_dir)
    node_utils = NU.from_json(util_dir, 0)
    nr = NodeRestore(node_utils)
    # Pass 1: restore node frames into an intermediate .frame file.
    with open((file_path + '.frame'), 'w', encoding='utf-8') as f:
        for amr in nr.restore_file(file_path):
            f.write((str(amr) + '\n\n'))
    # Pass 2: expand the restored graphs into the final .post file.
    expander = Expander(util_dir=util_dir)
    with open((file_path + '.post'), 'w', encoding='utf-8') as f:
        for amr in expander.expand_file((file_path + '.frame')):
            f.write((str(amr) + '\n\n'))
    os.remove((file_path + '.frame'))
class SupportVectorComponentTest(BaseRegressionComponentTest):
    """Regression-component test configuration for the LinearSVR wrapper."""

    __test__ = True

    # Expected scores per dataset configuration; '*_places' entries give the
    # assertion precision and None marks configurations that are skipped.
    res = {
        'default_boston': 0.,
        'default_boston_places': 2,
        'default_boston_iterative': None,
        'default_boston_sparse': 0.,
        'default_boston_sparse_places': 2,
        'default_boston_iterative_sparse': None,
        'default_diabetes': 0.,
        'default_diabetes_iterative': None,
        'default_diabetes_sparse': 0.,
        'default_diabetes_iterative_sparse': None,
    }

    # Reference sklearn estimator and the wrapped component under test.
    sk_mod = sklearn.svm.LinearSVR
    module = LibLinear_SVR
def make_encoder(encoder_type, obs_shape, feature_dim, num_layers, num_filters, output_logits=False):
    """Instantiate an encoder by name from the _AVAILABLE_ENCODERS registry.

    Raises:
        ValueError: if encoder_type is not registered. (Previously an
        `assert`, which is silently stripped under `python -O`.)
    """
    if encoder_type not in _AVAILABLE_ENCODERS:
        raise ValueError('unknown encoder type: {!r} (available: {})'.format(encoder_type, sorted(_AVAILABLE_ENCODERS)))
    return _AVAILABLE_ENCODERS[encoder_type](obs_shape, feature_dim, num_layers, num_filters, output_logits)
class Adam(torch.optim.Optimizer):
    """Adam optimizer (Kingma & Ba, 2014) with optional AMSGrad.

    Note: weight decay here is applied directly to the parameters
    (decoupled, AdamW-style: p -= lr * weight_decay * p) rather than being
    added to the gradient.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # Fixed: use the keyword add_/addcmul_/addcdiv_ signatures;
                # the old positional (scalar, tensor) form was deprecated and
                # is removed in current PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                if amsgrad:
                    # Keep the running maximum of the second-moment estimate.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - (beta1 ** state['step'])
                bias_correction2 = 1 - (beta2 ** state['step'])
                step_size = (group['lr'] * math.sqrt(bias_correction2)) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled weight decay applied directly to the weights.
                    p.data.add_(p.data, alpha=((- group['weight_decay']) * group['lr']))
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
        return loss
def shuffle_iterator(iterator: typing.Iterator, queue_size: int) -> typing.Iterable[typing.Any]:
    """Approximately shuffle an iterator using a bounded reservoir.

    Up to `queue_size` items are buffered; each yielded item is picked
    uniformly at random from the buffer and replaced with the next input
    item, so larger queues give a better shuffle. Warns if the input has
    fewer elements than the queue size.
    """
    reservoir = []
    try:
        while len(reservoir) < queue_size:
            reservoir.append(next(iterator))
    except StopIteration:
        warnings.warn(f'Number of elements in the iterator is less than the queue size (N={queue_size}).')
    while reservoir:
        pick = np.random.randint(len(reservoir))
        try:
            chosen = reservoir[pick]
            reservoir[pick] = next(iterator)
            yield chosen
        except StopIteration:
            # Source exhausted: drain the buffer in random order.
            yield reservoir.pop(pick)
class Continuous(AbstractDistribution):
    """Uniform distribution over [minval, maxval) for a single spec key."""

    def __init__(self, key, minval, maxval, dtype='float32'):
        self.key = key
        self.minval = minval
        self.maxval = maxval
        self.dtype = dtype

    def sample(self, rng=None):
        """Draw {key: value} with value ~ Uniform[minval, maxval), cast to dtype."""
        rng = self._get_rng(rng)
        out = rng.uniform(low=self.minval, high=self.maxval)
        # Fixed: np.cast[...] was removed in NumPy 2.0; np.asarray with an
        # explicit dtype is the documented equivalent.
        out = np.asarray(out, dtype=self.dtype)
        return {self.key: out}

    def contains(self, spec):
        """True iff spec[self.key] lies in [minval, maxval).

        Raises:
            KeyError: if self.key is missing from spec.
        """
        if self.key not in spec:
            raise KeyError('key {} is not in spec {}, but must be to evaluate containment.'.format(self.key, spec))
        return (spec[self.key] >= self.minval) and (spec[self.key] < self.maxval)

    def to_str(self, indent):
        """Indented, human-readable description of this distribution."""
        # Fixed typo in the rendered string: 'mival' -> 'minval'.
        s = '<Continuous: key={}, minval={}, maxval={}, dtype={}>'.format(self.key, self.minval, self.maxval, self.dtype)
        return ((indent * ' ') + s)

    def keys(self):
        """Set containing the single key this distribution produces."""
        return set([self.key])
def ALDA_loss(ad_out_score, labels_source, softmax_out, weight_type=1, threshold=0.9):
    """Compute the ALDA (adversarial-learned loss for domain adaptation) losses.

    Args:
        ad_out_score: (2*batch, class_num) discriminator scores; the first
            half of the batch is source, the second half target.
        labels_source: (batch,) ground-truth source labels.
        softmax_out: (2*batch, class_num) classifier softmax outputs.
        weight_type: unused here; kept for interface compatibility.
        threshold: confidence threshold for selecting target pseudo-labels.

    Returns:
        (adv_loss, reg_loss, correct_loss) tuple of scalar tensors.
    """
    ad_out = torch.sigmoid(ad_out_score)
    batch_size = (ad_out.size(0) // 2)
    class_num = ad_out.size(1)
    # One-hot masks for source labels and source/target argmax predictions.
    labels_source_mask = torch.zeros(batch_size, class_num).to(ad_out.device).scatter_(1, labels_source.unsqueeze(1), 1)
    probs_source = softmax_out[:batch_size].detach()
    probs_target = softmax_out[batch_size:].detach()
    (maxpred, argpred) = torch.max(probs_source, dim=1)
    preds_source_mask = torch.zeros(batch_size, class_num).to(ad_out.device).scatter_(1, argpred.unsqueeze(1), 1)
    (maxpred, argpred) = torch.max(probs_target, dim=1)
    preds_target_mask = torch.zeros(batch_size, class_num).to(ad_out.device).scatter_(1, argpred.unsqueeze(1), 1)
    # Keep only confident target pseudo-labels.
    target_mask = (maxpred > threshold)
    preds_target_mask = torch.where(target_mask.unsqueeze(1), preds_target_mask, torch.zeros(1).to(ad_out.device))
    confusion_matrix = create_matrix(class_num)
    # Fixed: use ad_out.device instead of a hard-coded .cuda() so the loss
    # also works on CPU (consistent with the rest of this function).
    ant_eye = (1 - torch.eye(class_num)).to(ad_out.device).unsqueeze(0)
    confusion_matrix = ((ant_eye / (class_num - 1)) + torch.mul(confusion_matrix.unsqueeze(0), ad_out.unsqueeze(1)))
    preds_mask = torch.cat([preds_source_mask, preds_target_mask], dim=0)
    loss_pred = torch.mul(confusion_matrix, preds_mask.unsqueeze(1)).sum(dim=2)
    loss_target = ((1 - preds_target_mask) / (class_num - 1))
    loss_target = torch.cat([labels_source_mask, loss_target], dim=0)
    # loss_pred must be a valid Bernoulli parameter for BCELoss.
    if (not ((loss_pred >= 0).all() and (loss_pred <= 1).all())):
        raise AssertionError
    # All source samples plus confident target samples enter the adv loss.
    mask = torch.cat([(maxpred >= 0), target_mask], dim=0)
    adv_loss = nn.BCELoss(reduction='none')(loss_pred, loss_target)[mask]
    adv_loss = (torch.sum(adv_loss) / mask.float().sum())
    # Regularizer: plain cross-entropy of the discriminator on source labels.
    reg_loss = nn.CrossEntropyLoss()(ad_out_score[:batch_size], labels_source)
    target_probs = (1.0 * softmax_out[batch_size:])
    correct_target = torch.mul(confusion_matrix.detach()[batch_size:], preds_target_mask.unsqueeze(1)).sum(dim=2)
    correct_loss = (- torch.mul(target_probs, correct_target))
    correct_loss = torch.mean(correct_loss[target_mask])
    return (adv_loss, reg_loss, correct_loss)
def get_from_cache(url: str, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[(Dict, str, None)]=None, local_files_only=False) -> Optional[str]:
    """Download `url` into the cache (keyed by url + ETag) and return the
    local file path, or return an already-cached copy.

    Args:
        url: remote resource to fetch.
        cache_dir: cache directory (defaults to TRANSFORMERS_CACHE).
        force_download: re-download even if a cached copy exists.
        proxies: forwarded to requests.
        etag_timeout: timeout (s) for the HEAD request fetching the ETag.
        resume_download: resume a partial `.incomplete` download if present.
        user_agent: extra user-agent info for the request headers.
        local_files_only: never hit the network; only use cached files.

    Returns:
        Path to the cached file.

    Raises:
        ValueError: when offline/local-only and no cached candidate exists.
    """
    import contextlib  # local import; used for the resumable download manager
    if (cache_dir is None):
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    url_to_download = url
    etag = None
    if (not local_files_only):
        try:
            # NOTE(review): this line was truncated in the revision under
            # review; reconstructed from the upstream implementation.
            headers = {'user-agent': http_user_agent(user_agent)}
            r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout)
            r.raise_for_status()
            etag = (r.headers.get('X-Linked-Etag') or r.headers.get('ETag'))
            if (etag is None):
                raise OSError("Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility.")
            # Follow a single redirect manually: cache under the original
            # URL but download from the redirect target.
            if (300 <= r.status_code <= 399):
                url_to_download = r.headers['Location']
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):
            # Offline or unreachable: fall back to whatever is cached.
            pass
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    if (etag is None):
        # No network info available: look for any cached candidate.
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [file for file in fnmatch.filter(os.listdir(cache_dir), (filename.split('.')[0] + '.*')) if ((not file.endswith('.json')) and (not file.endswith('.lock')))]
            if (len(matching_files) > 0):
                return os.path.join(cache_dir, matching_files[(- 1)])
            elif local_files_only:
                raise ValueError("Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable model look-ups and downloads online, set 'local_files_only' to False.")
            else:
                raise ValueError('Connection error, and we cannot find the requested files in the cached path. Please try again or make sure your Internet connection is on.')
    if (os.path.exists(cache_path) and (not force_download)):
        return cache_path
    # Serialize concurrent downloads of the same resource.
    lock_path = (cache_path + '.lock')
    with FileLock(lock_path):
        # Another process may have finished the download while we waited.
        if (os.path.exists(cache_path) and (not force_download)):
            return cache_path
        if resume_download:
            incomplete_path = (cache_path + '.incomplete')
            # Fixed: the generator must be wrapped as a context manager to
            # be usable in the `with temp_file_manager()` statement below.
            @contextlib.contextmanager
            def _resumable_file_manager() -> 'io.BufferedWriter':
                with open(incomplete_path, 'ab') as f:
                    (yield f)
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, mode='wb', dir=cache_dir, delete=False)
            resume_size = 0
        with temp_file_manager() as temp_file:
            logger.info('%s not found in cache or force_download set to True, downloading to %s', url, temp_file.name)
            # NOTE(review): this call was truncated in the revision under
            # review; reconstructed from the upstream implementation.
            http_get(url_to_download, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        logger.info('storing %s in cache at %s', url, cache_path)
        os.replace(temp_file.name, cache_path)
        logger.info('creating metadata file for %s', cache_path)
        meta = {'url': url, 'etag': etag}
        meta_path = (cache_path + '.json')
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)
    return cache_path
class ExperimentPlanner3D_v21_16GB(ExperimentPlanner3D_v21):
    """nnU-Net v2.1 3D experiment planner variant targeting 16GB GPUs:
    same planning logic as the base planner but with a larger VRAM budget
    (and therefore larger patch sizes)."""

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21_16GB, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        # Distinct identifiers so the 16GB plans don't clash with defaults.
        self.data_identifier = 'nnUNetData_plans_v2.1_16GB'
        self.plans_fname = join(self.preprocessed_output_folder, 'nnUNetPlansv2.1_16GB_plans_3D.pkl')

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases, num_modalities, num_classes):
        """Derive patch size, batch size and pooling configuration for one
        resolution stage, shrinking the patch until the estimated VRAM
        consumption fits the 16GB budget."""
        # Median shape resampled to the target spacing.
        new_median_shape = np.round(((original_spacing / current_spacing) * original_shape)).astype(int)
        dataset_num_voxels = (np.prod(new_median_shape) * num_cases)
        # Initial isotropic-in-mm patch proposal, normalized and scaled so
        # the smallest edge is 512 voxels, then clipped to the median shape.
        input_patch_size = (1 / np.array(current_spacing))
        input_patch_size /= input_patch_size.mean()
        input_patch_size *= ((1 / min(input_patch_size)) * 512)
        input_patch_size = np.round(input_patch_size).astype(int)
        input_patch_size = [min(i, j) for (i, j) in zip(input_patch_size, new_median_shape)]
        (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, shape_must_be_divisible_by) = get_pool_and_conv_props(current_spacing, input_patch_size, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
        # VRAM budget: base reference scaled by 16/8.5 (16GB vs the default
        # ~8.5GB assumption baked into the reference constant).
        ref = ((Generic_UNet.use_this_for_batch_size_computation_3D * 16) / 8.5)
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, self.unet_base_num_features, self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # Iteratively shrink the axis that is largest relative to the median
        # shape until the estimated consumption fits the budget.
        while (here > ref):
            axis_to_be_reduced = np.argsort((new_shp / new_median_shape))[(- 1)]
            # Probe the divisibility constraint that would apply after
            # shrinking, then apply the properly-sized reduction.
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            (_, _, _, _, shape_must_be_divisible_by_new) = get_pool_and_conv_props(current_spacing, tmp, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            (network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, shape_must_be_divisible_by) = get_pool_and_conv_props(current_spacing, new_shp, self.unet_featuremap_min_edge_length, self.unet_max_numpool)
            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, self.unet_base_num_features, self.unet_max_num_filters, num_modalities, num_classes, pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        input_patch_size = new_shp
        # Spend any leftover budget on a larger batch, capped so one batch
        # never covers more than a fixed fraction of the dataset.
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D
        batch_size = int(np.floor((max((ref / here), 1) * batch_size)))
        max_batch_size = np.round(((self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels) / np.prod(input_patch_size, dtype=np.int64))).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = max(1, min(batch_size, max_batch_size))
        # Anisotropic patches (first axis much thinner) trigger pseudo-2D
        # data augmentation.
        do_dummy_2D_data_aug = ((max(input_patch_size) / input_patch_size[0]) > self.anisotropy_threshold)
        plan = {'batch_size': batch_size, 'num_pool_per_axis': network_num_pool_per_axis, 'patch_size': input_patch_size, 'median_patient_size_in_voxels': new_median_shape, 'current_spacing': current_spacing, 'original_spacing': original_spacing, 'do_dummy_2D_data_aug': do_dummy_2D_data_aug, 'pool_op_kernel_sizes': pool_op_kernel_sizes, 'conv_kernel_sizes': conv_kernel_sizes}
        return plan
class DukeMTMCreID(ImageDataset):
    """DukeMTMC-reID person re-identification dataset.

    Directory layout (under `dataset_dir`):
        DukeMTMC-reID/bounding_box_train  -- training images
        DukeMTMC-reID/query               -- query images
        DukeMTMC-reID/bounding_box_test   -- gallery images

    Image filenames encode person id and camera id as '<pid>_c<camid>...'.
    """
    dataset_dir = ''
    # Fixed: this URL string literal was truncated (unterminated) in the
    # revision under review; restored to the publicly documented download
    # location -- confirm against upstream.
    dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'

    def __init__(self, root='', **kwargs):
        self.root = osp.abspath(osp.expanduser(root))
        self.dataset_dir = osp.join(self.root, self.dataset_dir)
        self.download_dataset(self.dataset_dir, self.dataset_url)
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')
        required_files = [self.dataset_dir, self.train_dir, self.query_dir, self.gallery_dir]
        self.check_before_run(required_files)
        train = self.process_dir(self.train_dir, relabel=True)
        query = self.process_dir(self.query_dir, relabel=False)
        gallery = self.process_dir(self.gallery_dir, relabel=False)
        super(DukeMTMCreID, self).__init__(train, query, gallery, **kwargs)

    def process_dir(self, dir_path, relabel=False):
        """Parse (img_path, pid, camid) triples from one split directory.

        Args:
            dir_path: directory containing '*.jpg' images.
            relabel: if True, remap person ids to contiguous labels
                (required for the training split).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(sorted(pid_container))}
        data = []
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            # Camera ids in filenames are 1-based (1..8); convert to 0-based.
            assert (1 <= camid <= 8)
            camid -= 1
            if relabel:
                pid = pid2label[pid]
            data.append((img_path, pid, camid))
        return data
class CAdd(Layer):
    """BigDL CAdd layer: adds a learnable bias of the given size."""

    def __init__(self, size, bRegularizer=None, bigdl_type='float'):
        super(CAdd, self).__init__(None, bigdl_type, size, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Configure weight/bias initialization; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value,
                      weight_init_method, bias_init_method)
        return self
def precision(input, target):
    """Mean per-sample precision for thresholded (>0.5) binary predictions.

    Args:
        input: (batch, ...) tensor of probabilities/scores.
        target: same-shaped tensor of {0, 1} ground truth.

    Returns:
        Scalar tensor: mean over the batch of TP / (TP + FP).
    """
    axes = tuple(range(1, input.dim()))
    binary_input = (input > 0.5).float()
    true_positives = (binary_input * target).sum(dim=axes)
    all_positive_calls = binary_input.sum(dim=axes)
    # Fixed: guard against division by zero when a sample has no positive
    # predictions -- such samples previously produced NaN and poisoned the
    # mean; they now contribute a precision of 0. (Counts are 0 or >= 1, so
    # the clamp leaves nonzero denominators untouched.)
    per_sample = true_positives / all_positive_calls.clamp(min=1e-08)
    return per_sample.mean()
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    """Unit tests for the DownBlock2D UNet block (setup and the generic
    checking logic live in UNetBlockTesterMixin)."""
    block_class = DownBlock2D
    block_type = 'down'
    def test_output(self):
        # Golden output values for the mixin's fixed-seed forward pass;
        # do not edit without regenerating them.
        expected_slice = [(- 0.0232), (- 0.9869), 0.8054, (- 0.0637), (- 0.1688), (- 1.4264), 0.447, (- 1.3394), 0.0904]
        super().test_output(expected_slice)
def get_Xy(data, D=12):
    """Build a sliding-window supervised dataset from a 1-D series.

    Args:
        data: sequence of length N (must satisfy N > D).
        D: window length; each sample is D consecutive values and the
            target is the value immediately following the window.

    Returns:
        (X, y): X of shape (N - D, D, 1) and y of shape (N - D,).
    """
    N = len(data)
    assert (N > D), 'N should be larger than D, where N is len(data)'
    X_l = []
    y_l = []
    # Fixed off-by-one: range(N - D - 1) skipped the final window whose
    # target is data[N - 1]; range(N - D) uses every available sample.
    for ii in range(N - D):
        X_l.append(data[ii:(ii + D)])
        y_l.append(data[(ii + D)])
    X = np.array(X_l)
    X = X.reshape(X.shape[0], X.shape[1], 1)
    y = np.array(y_l)
    print(X.shape, y.shape)
    return (X, y)
def efficientnet_b6b(in_size=(528, 528), **kwargs):
    """Construct the EfficientNet-B6b model (TF-compatible B6 variant).

    Args:
        in_size: spatial input size (height, width).
        **kwargs: forwarded to get_efficientnet.
    """
    return get_efficientnet(version='b6', in_size=in_size, tf_mode=True,
                            bn_eps=0.001, model_name='efficientnet_b6b',
                            **kwargs)
def choose_requirement(primary, secondary):
    """Return str(primary) if its distribution is installed, else secondary.

    The distribution name is taken from `primary` by stripping any version
    specifier (everything from the first of `!<>=` onward).
    """
    name = re.split('[!<>=]', primary)[0]
    try:
        get_distribution(name)
    except DistributionNotFound:
        return secondary
    return str(primary)
def __main__():
    """CLI entry point: parse --config_file and run the benchmark.

    Raises:
        ValueError: wrapping any error raised during setup or execution,
        with the original exception chained as the cause.
    """
    parser = argparse.ArgumentParser(description='BioTorch')
    parser.add_argument('--config_file', help='Path to the configuration file')
    try:
        args = parser.parse_args()
        benchmark = Benchmark(args.config_file)
        if (benchmark.benchmark_mode == 'training'):
            benchmark.run()
        else:
            benchmark.run_eval()
    except Exception as e:
        # `e.message` only existed on Python 2 exceptions; str(e) covers it.
        message = 'an unexpected error occurred: {}: {}'.format(type(e).__name__, str(e))
        # Chain the original exception so its traceback is preserved.
        raise ValueError(message) from e
def resize(input, size=None, scale_factor=None, mode='nearest', align_corners=None, warning=True):
    """Wrapper around F.interpolate that warns about align_corners pitfalls.

    When upsampling with align_corners=True, the output is only exactly
    aligned if out_size = n * (in_size - 1) + 1; otherwise a warning is
    emitted (set warning=False to silence it).
    """
    if warning:
        if (size is not None) and align_corners:
            (input_h, input_w) = tuple(int(x) for x in input.shape[2:])
            (output_h, output_w) = tuple(int(x) for x in size)
            # Fixed: the second comparison previously tested
            # `output_w > output_h` (a typo) instead of `output_w > input_w`.
            if (output_h > input_h) or (output_w > input_w):
                if ((output_h > 1) and (output_w > 1) and (input_h > 1) and (input_w > 1)) and ((output_h - 1) % (input_h - 1)) and ((output_w - 1) % (input_w - 1)):
                    warnings.warn(f'When align_corners={align_corners}, the output would more aligned if input size {(input_h, input_w)} is `x+1` and out size {(output_h, output_w)} is `nx+1`')
    # F.interpolate rejects torch.Size; convert to a plain tuple of ints.
    if isinstance(size, torch.Size):
        size = tuple(int(x) for x in size)
    return F.interpolate(input, size, scale_factor, mode, align_corners)
def train(args, train_dataset, model, tokenizer, criterion):
    """Run the full training loop for a multimodal classifier.

    Supports single-GPU, multi-GPU (DataParallel) and distributed
    (DistributedDataParallel) training, optional apex fp16, gradient
    accumulation, periodic TensorBoard logging, checkpointing, and
    early stopping on micro-F1 via ``evaluate``.

    Returns:
        Tuple of (global_step, mean training loss per optimization step).
    """
    # Only the main process (rank -1 means non-distributed) writes TensorBoard summaries.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn, num_workers=args.num_workers)
    # Derive total optimization steps either from an explicit max_steps or from epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Exclude biases and LayerNorm weights from weight decay, as is standard for BERT-style models.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            # NOTE(review): the message looks truncated ("from to") — likely lost a URL.
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Wrap for multi-GPU / distributed *after* any apex fp16 initialization.
    if (args.n_gpu > 1):
        model = nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    (tr_loss, logging_loss) = (0.0, 0.0)
    (best_f1, n_no_improve) = (0, 0)
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            # Batch layout is fixed by collate_fn: presumably
            # (input_ids, attention_mask, input_modal, modal_start, modal_end, labels) — TODO confirm.
            labels = batch[5]
            inputs = {'input_ids': batch[0], 'input_modal': batch[2], 'attention_mask': batch[1], 'modal_start_tokens': batch[3], 'modal_end_tokens': batch[4]}
            outputs = model(**inputs)
            logits = outputs[0]
            loss = criterion(logits, labels)
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps micro-batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic evaluation + TensorBoard logging (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    logs = {}
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer, criterion)
                        for (key, value) in results.items():
                            eval_key = 'eval_{}'.format(key)
                            logs[eval_key] = value
                    loss_scalar = ((tr_loss - logging_loss) / args.logging_steps)
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    for (key, value) in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{'step': global_step}}))
                # Periodic checkpointing (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving so keys stay stable.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
        # End-of-epoch evaluation drives early stopping on micro-F1 (non-distributed only).
        if (args.local_rank == (- 1)):
            results = evaluate(args, model, tokenizer, criterion)
            if (results['micro_f1'] > best_f1):
                best_f1 = results['micro_f1']
                n_no_improve = 0
            else:
                n_no_improve += 1
            if (n_no_improve > args.patience):
                train_iterator.close()
                break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def diaresnet50b(**kwargs):
    """Build the DIA-ResNet-50b model (``conv1_stride=False`` variant).

    All keyword arguments are forwarded unchanged to ``get_diaresnet``.
    """
    return get_diaresnet(blocks=50, conv1_stride=False, model_name='diaresnet50b', **kwargs)
@_UTILS.register_module()  # BUG FIX: the '@' was missing, so the call's result was discarded and the class never registered
class PseudoSampler(BaseSampler):
    """A sampler that performs no sampling: it returns the assigner's
    positive/negative split unchanged, wrapped in a ``SamplingResult``."""

    def __init__(self, **kwargs):
        # Stateless; BaseSampler.__init__ is deliberately not called.
        pass

    def _sample_pos(self, **kwargs):
        """Unused: positives come straight from the assign result."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Unused: negatives come straight from the assign result."""
        raise NotImplementedError

    def sample(self, assign_result: AssignResult, pred_instances: InstanceData, gt_instances: InstanceData, *args, **kwargs):
        """Return positive/negative prior indices directly from ``assign_result``.

        ``gt_inds > 0`` marks priors assigned to a ground-truth box;
        ``gt_inds == 0`` marks background priors.
        """
        gt_bboxes = gt_instances.bboxes
        priors = pred_instances.priors
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False).squeeze((- 1)).unique()
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False).squeeze((- 1)).unique()
        # No priors are synthesized from ground truth here, so all flags stay 0.
        gt_flags = priors.new_zeros(priors.shape[0], dtype=torch.uint8)
        sampling_result = SamplingResult(pos_inds=pos_inds, neg_inds=neg_inds, priors=priors, gt_bboxes=gt_bboxes, assign_result=assign_result, gt_flags=gt_flags, avg_factor_with_neg=False)
        return sampling_result
class TestTransforms(unittest.TestCase):
    """Unit tests for the augmentation/transform APIs (detectron2-style ``T.*``)."""

    def setUp(self):
        setup_logger()

    def test_apply_rotated_boxes(self):
        """Default augmentations applied to a rotated box match precomputed values."""
        np.random.seed(125)
        cfg = get_cfg()
        is_train = True
        augs = detection_utils.build_augmentation(cfg, is_train)
        image = np.random.rand(200, 300)
        (image, transforms) = T.apply_augmentations(augs, image)
        image_shape = image.shape[:2]
        assert (image_shape == (800, 1200))
        # (cx, cy, w, h, angle) rotated-box annotation.
        annotation = {'bbox': [179, 97, 62, 40, (- 56)]}
        boxes = np.array([annotation['bbox']], dtype=np.float64)
        transformed_bbox = transforms.apply_rotated_box(boxes)[0]
        expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64)
        err_msg = 'transformed_bbox = {}, expected {}'.format(transformed_bbox, expected_bbox)
        assert np.allclose(transformed_bbox, expected_bbox), err_msg

    def test_resize_and_crop(self):
        """ResizeScale + FixedSizeCrop + RandomFlip produce the expected boxes and polygons."""
        np.random.seed(125)
        min_scale = 0.2
        max_scale = 2.0
        target_height = 1100
        target_width = 1000
        resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width)
        fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width))
        hflip_aug = T.RandomFlip()
        augs = [resize_aug, fixed_size_crop_aug, hflip_aug]
        original_image = np.random.rand(900, 800)
        (image, transforms) = T.apply_augmentations(augs, original_image)
        image_shape = image.shape[:2]
        self.assertEqual((1100, 1000), image_shape)
        # Axis-aligned boxes in (x1, y1, x2, y2) form.
        boxes = np.array([[91, 46, 144, 111], [523, 251, 614, 295]], dtype=np.float64)
        transformed_bboxs = transforms.apply_box(boxes)
        expected_bboxs = np.array([[895.42, 33., 933.91125, 80.66], [554.0825, 182., 620.17125, 214.]], dtype=np.float64)
        err_msg = 'transformed_bbox = {}, expected {}'.format(transformed_bboxs, expected_bboxs)
        self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg)
        polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]])
        transformed_polygons = transforms.apply_polygons([polygon])
        expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]])
        self.assertEqual(1, len(transformed_polygons))
        err_msg = 'transformed_polygon = {}, expected {}'.format(transformed_polygons[0], expected_polygon)
        self.assertTrue(polygon_allclose(transformed_polygons[0], expected_polygon), err_msg)

    def test_apply_rotated_boxes_unequal_scaling_factor(self):
        """Non-uniform H/W scaling adjusts rotated-box angles as expected."""
        np.random.seed(125)
        (h, w) = (400, 200)
        (newh, neww) = (800, 800)
        image = np.random.rand(h, w)
        augs = []
        augs.append(T.Resize(shape=(newh, neww)))
        (image, transforms) = T.apply_augmentations(augs, image)
        image_shape = image.shape[:2]
        assert (image_shape == (newh, neww))
        boxes = np.array([[150, 100, 40, 20, 0], [150, 100, 40, 20, 30], [150, 100, 40, 20, 90], [150, 100, 40, 20, (- 90)]], dtype=np.float64)
        transformed_boxes = transforms.apply_rotated_box(boxes)
        expected_bboxes = np.array([[600, 200, 160, 40, 0], [600, 200, 144., 52., 49.], [600, 200, 80, 80, 90], [600, 200, 80, 80, (- 90)]], dtype=np.float64)
        err_msg = 'transformed_boxes = {}, expected {}'.format(transformed_boxes, expected_bboxes)
        assert np.allclose(transformed_boxes, expected_bboxes), err_msg

    def test_print_augmentation(self):
        """``str()`` of augmentations reproduces their constructor arguments."""
        t = T.RandomCrop('relative', (100, 100))
        self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))")
        t0 = T.RandomFlip(prob=0.5)
        self.assertEqual(str(t0), 'RandomFlip(prob=0.5)')
        t1 = T.RandomFlip()
        self.assertEqual(str(t1), 'RandomFlip()')
        t = T.AugmentationList([t0, t1])
        self.assertEqual(str(t), f'AugmentationList[{t0}, {t1}]')

    def test_random_apply_prob_out_of_range_check(self):
        """RandomApply rejects probabilities outside [0, 1]."""
        test_probabilities = {0.0: True, 0.5: True, 1.0: True, (- 0.01): False, 1.01: False}
        for (given_probability, is_valid) in test_probabilities.items():
            if (not is_valid):
                self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability)
            else:
                T.RandomApply(T.NoOpTransform(), prob=given_probability)

    def test_random_apply_wrapping_aug_probability_occured_evaluation(self):
        """When the random draw fires, a wrapped Augmentation's get_transform is invoked."""
        transform_mock = mock.MagicMock(name='MockTransform', spec=T.Augmentation)
        image_mock = mock.MagicMock(name='MockImage')
        random_apply = T.RandomApply(transform_mock, prob=0.001)
        with mock.patch.object(random_apply, '_rand_range', return_value=0.0001):
            transform = random_apply.get_transform(image_mock)
        transform_mock.get_transform.assert_called_once_with(image_mock)
        self.assertIsNot(transform, transform_mock)

    def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self):
        """When the random draw fires, a wrapped plain Transform is returned as-is."""
        transform_mock = mock.MagicMock(name='MockTransform', spec=T.Transform)
        image_mock = mock.MagicMock(name='MockImage')
        random_apply = T.RandomApply(transform_mock, prob=0.001)
        with mock.patch.object(random_apply, '_rand_range', return_value=0.0001):
            transform = random_apply.get_transform(image_mock)
        self.assertIs(transform, transform_mock)

    def test_random_apply_probability_not_occured_evaluation(self):
        """When the random draw misses, RandomApply yields a NoOpTransform."""
        transform_mock = mock.MagicMock(name='MockTransform', spec=T.Augmentation)
        image_mock = mock.MagicMock(name='MockImage')
        random_apply = T.RandomApply(transform_mock, prob=0.001)
        with mock.patch.object(random_apply, '_rand_range', return_value=0.9):
            transform = random_apply.get_transform(image_mock)
        transform_mock.get_transform.assert_not_called()
        self.assertIsInstance(transform, T.NoOpTransform)

    def test_augmentation_input_args(self):
        """Augmentations receive the AugInput attributes named in get_transform's signature."""
        input_shape = (100, 100)
        output_shape = (50, 50)

        class TG1(T.Augmentation):
            def get_transform(self, image, sem_seg):
                return T.ResizeTransform(input_shape[0], input_shape[1], output_shape[0], output_shape[1])

        class TG2(T.Augmentation):
            def get_transform(self, image):
                # Check that TG1 has already resized the image.
                assert (image.shape[:2] == output_shape)
                return T.HFlipTransform(output_shape[1])
        image = np.random.rand(*input_shape).astype('float32')
        sem_seg = (np.random.rand(*input_shape) < 0.5).astype('uint8')
        inputs = T.AugInput(image, sem_seg=sem_seg)
        tfms = inputs.apply_augmentations([TG1(), TG2()])
        self.assertIsInstance(tfms[0], T.ResizeTransform)
        self.assertIsInstance(tfms[1], T.HFlipTransform)
        self.assertTrue((inputs.image.shape[:2] == output_shape))
        self.assertTrue((inputs.sem_seg.shape[:2] == output_shape))

        class TG3(T.Augmentation):
            def get_transform(self, image, nonexist):
                pass
        # Requesting an attribute AugInput does not carry must fail loudly.
        with self.assertRaises(AttributeError):
            inputs.apply_augmentations([TG3()])

    def test_augmentation_list(self):
        """AugmentationList instances can be nested inside each other."""
        input_shape = (100, 100)
        image = np.random.rand(*input_shape).astype('float32')
        sem_seg = (np.random.rand(*input_shape) < 0.5).astype('uint8')
        inputs = T.AugInput(image, sem_seg=sem_seg)
        augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)])
        _ = T.AugmentationList([augs, T.Resize(30)])(inputs)

    def test_color_transforms(self):
        """ColorTransform is a no-op with identity fn; PILColorTransform matches PIL output."""
        rand_img = (np.random.random((100, 100, 3)) * 255)
        rand_img = rand_img.astype('uint8')
        noop_transform = T.ColorTransform((lambda img: img))
        self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img)))
        magnitude = np.random.randint(0, 256)
        solarize_transform = T.PILColorTransform((lambda img: ImageOps.solarize(img, magnitude)))
        expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude)
        self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img)))

    def test_resize_transform(self):
        """ResizeTransform preserves channel layout across 2-D and 3-D inputs."""
        input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
        output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
        for (in_shape, out_shape) in zip(input_shapes, output_shapes):
            in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
            tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1])
            out_img = tfm.apply_image(in_img)
            self.assertEqual(out_img.shape, out_shape)

    def test_resize_shorted_edge_scriptable(self):
        """ResizeShortestEdge.get_output_shape works identically under torch.jit.script."""
        def f(image):
            (newh, neww) = T.ResizeShortestEdge.get_output_shape(image.shape[(- 2)], image.shape[(- 1)], 80, 133)
            return F.interpolate(image.unsqueeze(0), size=(newh, neww))
        input = torch.randn(3, 10, 10)
        script_f = torch.jit.script(f)
        self.assertTrue(torch.allclose(f(input), script_f(input)))
        # Aspect ratio extreme enough that the max-size cap (133) kicks in.
        input = torch.randn(3, 8, 100)
        self.assertTrue(torch.allclose(f(input), script_f(input)))

    def test_extent_transform(self):
        """ExtentTransform resamples a sub-rectangle to the requested output size."""
        input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)]
        src_rect = (20, 20, 80, 80)
        output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)]
        for (in_shape, out_shape) in zip(input_shapes, output_shapes):
            in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8)
            tfm = T.ExtentTransform(src_rect, out_shape[:2])
            out_img = tfm.apply_image(in_img)
            self.assertTrue((out_img.shape == out_shape))
def _get_pool_dask(n_workers=None, maybe_create=False):
try:
from dask.distributed import get_client
except ImportError:
if (not maybe_create):
return None
else:
raise
try:
client = get_client()
except ValueError:
if (not maybe_create):
return None
from dask.distributed import LocalCluster, Client
import tempfile
import shutil
local_directory = tempfile.mkdtemp()
lc = LocalCluster(n_workers=n_workers, threads_per_worker=1, local_directory=local_directory, memory_limit=0)
client = Client(lc)
warnings.warn('Parallel specified but no existing global dask client found... created one (with {} workers).'.format(get_n_workers(client)))
def delete_local_dask_directory():
shutil.rmtree(local_directory, ignore_errors=True)
if (n_workers is not None):
current_n_workers = get_n_workers(client)
if (n_workers != current_n_workers):
warnings.warn("Found existing client (with {} workers which) doesn't match the requested {}... using it instead.".format(current_n_workers, n_workers))
return client |
class RealmPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder for ``RealmPreTrainedModel``.

    Instantiating it checks that the required backend (PyTorch) is
    installed and raises a helpful error when it is not.
    """
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # All constructor arguments are ignored; only the backend check runs.
        requires_backends(self, self._backends)
@_arg_scope  # BUG FIX: decorator lost its '@' in the source; NOTE(review): original name was likely tf.contrib's `add_arg_scope` — confirm
def dropout(inputs, keep_prob=0.5, noise_shape=None, is_training=True, outputs_collections=None, scope=None, seed=None):
    """Apply dropout to ``inputs`` via the core Dropout layer.

    Args:
        inputs: tensor to apply dropout to.
        keep_prob: probability of *keeping* a unit (converted to a drop rate below).
        noise_shape: optional shape of the binary dropout mask.
        is_training: dropout is only active during training.
        outputs_collections: collections the output tensor is added to.
        scope: optional variable scope name (defaults to 'Dropout').
        seed: optional RNG seed for the dropout mask.

    Returns:
        The (possibly dropped-out) output tensor, registered in ``outputs_collections``.
    """
    with variable_scope.variable_scope(scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # core_layers.Dropout expects a drop *rate*, hence 1 - keep_prob.
        layer = core_layers.Dropout(rate=(1 - keep_prob), noise_shape=noise_shape, seed=seed, name=sc.name, _scope=sc)
        outputs = layer.apply(inputs, training=is_training)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def get_checkpoint_history_callback(outdir, config, dataset, comet_experiment, horovod_enabled, is_hpo_run=False):
    """Assemble the Keras callbacks for checkpointing, validation history and TensorBoard.

    Args:
        outdir: run output directory; weights/history/logs subfolders are created under it.
        config: run configuration dict; reads the 'callbacks' section and the
            do_checkpoint_callback / do_validation_callback toggles.
        dataset: object exposing ``tensorflow_dataset`` used for the validation callback.
        comet_experiment: optional Comet experiment forwarded to the history callback.
        horovod_enabled: when True, only rank 0 checkpoints and the history callback is skipped.
        is_hpo_run: forwarded to CustomCallback for hyperparameter-optimization runs.

    Returns:
        List of configured callback instances.
    """
    callbacks = []
    # Checkpointing: single-process runs always; under horovod only rank 0 writes.
    if ((not horovod_enabled) or (hvd.rank() == 0)):
        cp_dir = (Path(outdir) / 'weights')
        cp_dir.mkdir(parents=True, exist_ok=True)
        # Epoch number and val_loss are baked into the filenames.
        cp_callback = ModelOptimizerCheckpoint(filepath=str((cp_dir / 'weights-{epoch:02d}-{val_loss:.6f}.hdf5')), save_weights_only=True, verbose=1, monitor=config['callbacks']['checkpoint']['monitor'], save_best_only=False)
        # Optimizer state is pickled alongside the weights for exact resume.
        cp_callback.opt_path = str((cp_dir / 'opt-{epoch:02d}-{val_loss:.6f}.pkl'))
        if config.get('do_checkpoint_callback', True):
            callbacks += [cp_callback]
    # Validation-history callback is not horovod-aware, so it is skipped in distributed runs.
    if (not horovod_enabled):
        history_path = (Path(outdir) / 'history')
        history_path.mkdir(parents=True, exist_ok=True)
        history_path = str(history_path)
        cb = CustomCallback(history_path, dataset.tensorflow_dataset.take(config['validation_num_events']), config, plot_freq=config['callbacks']['plot_freq'], horovod_enabled=horovod_enabled, comet_experiment=comet_experiment, is_hpo_run=is_hpo_run)
        if config.get('do_validation_callback', True):
            callbacks += [cb]
    tb = CustomTensorBoard(log_dir=(outdir + '/logs'), histogram_freq=config['callbacks']['tensorboard']['hist_freq'], write_graph=False, write_images=False, update_freq='batch', profile_batch=(config['callbacks']['tensorboard']['profile_batch'] if ('profile_batch' in config['callbacks']['tensorboard'].keys()) else 0), dump_history=config['callbacks']['tensorboard']['dump_history'])
    # HACK: rename the class so downstream tooling treats it as the stock TensorBoard callback.
    tb.__class__.__name__ = 'TensorBoard'
    callbacks += [tb]
    return callbacks
def format_hep(citation_elements):
    """Normalize HEP-style report numbers in place.

    For every REPORTNUMBER element whose report number starts with a known
    arXiv subject prefix ('hep-th-', 'astro-ph-', ...), the trailing dash of
    the prefix is replaced by a slash (e.g. 'hep-th-9901001' -> 'hep-th/9901001').

    Returns the same list, mutated.
    """
    known_prefixes = ('astro-ph-', 'hep-th-', 'hep-ph-', 'hep-ex-', 'hep-lat-', 'math-ph-')
    for element in citation_elements:
        if element['type'] != 'REPORTNUMBER':
            continue
        for prefix in known_prefixes:
            current = element['report_num']
            if current.startswith(prefix):
                head = current[:len(prefix) - 1]
                tail = current[len(prefix):]
                element['report_num'] = head + '/' + tail
    return citation_elements
def translate(input, steps):
    """Run a sequence of style-translation steps on one image.

    NOTE(review): relies on module-level globals defined elsewhere in the file
    (transform, device, noise_dim, and the networks E/M/F/T/G) — confirm they
    are initialized before calling.

    Args:
        input: path to the source image file.
        steps: list of dicts; each has 'type' ('latent-guided' or
            'reference-guided'), a 'tag', and type-specific keys
            ('seed'/'attribute' or 'reference').

    Returns:
        HWC float numpy array in [0, 1].
    """
    # Encode the source image; `transform` presumably yields a CHW tensor — TODO confirm.
    x = transform(Image.open(input).convert('RGB')).unsqueeze(0).to(device)
    c = E(x)  # content code from the encoder
    c_trg = c
    for j in range(len(steps)):
        step = steps[j]
        if (step['type'] == 'latent-guided'):
            # Optionally reseed CPU and CUDA RNGs for reproducible style sampling.
            if (step['seed'] is not None):
                torch.manual_seed(step['seed'])
                torch.cuda.manual_seed(step['seed'])
            z = torch.randn(1, noise_dim).to(device)
            s_trg = M(z, step['tag'], step['attribute'])  # style code from the mapping network
        elif (step['type'] == 'reference-guided'):
            reference = transform(Image.open(step['reference']).convert('RGB')).unsqueeze(0).to(device)
            s_trg = F(reference, step['tag'])  # style code extracted from the reference image
        # NOTE(review): an unrecognized first step type leaves s_trg undefined (NameError).
        c_trg = T(c_trg, s_trg, step['tag'])  # inject the style into the content code
    x_trg = G(c_trg)  # decode back to image space
    # Map the generator output from [-1, 1] to [0, 1] and convert to HWC numpy.
    output = x_trg.squeeze(0).cpu().permute(1, 2, 0).add(1).mul((1 / 2)).clamp(0, 1).numpy()
    return output
def mobilenetv1_w4a4_imagenet(target_platform=None):
    """Build the 4-bit (w4a4) MobileNet-v1 ImageNet example overlay.

    Resolves the target platform, locates the matching bitfile (plus runtime
    weights on boards that ship them), and wraps everything in a
    ``FINNExampleOverlay`` clocked at 185 MHz.
    """
    platform = resolve_target_platform(target_platform)
    mode = get_driver_mode()
    model_name = 'mobilenetv1-w4a4'
    bitfile = find_bitfile(model_name, platform)
    # Only the ZCU104 build ships separate runtime weights.
    weight_dir = find_runtime_weights(model_name, platform) if (platform in ['ZCU104']) else ''
    return FINNExampleOverlay(bitfile, mode, _imagenet_top5inds_io_shape_dict, runtime_weight_dir=weight_dir, fclk_mhz=185.0)
def build_pixel_decoder(cfg, input_shape):
    """Instantiate the pixel decoder named by ``cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME``.

    The selected head must expose a callable ``forward_features`` method so it
    can serve as a pure mask-feature extractor; otherwise a ValueError is raised.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    decoder = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    if not callable(getattr(decoder, 'forward_features', None)):
        raise ValueError(f'Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. Please implement forward_features for {name} to only return mask features.')
    return decoder
def main():
    """Seed numpy's RNG and launch the experiment with FLAGS-derived settings."""
    np.random.seed(SEED)
    run_kwargs = {
        'data_fn': FLAGS.data_fn,
        'prop_missing': FLAGS.prop_missing,
        'max_num_feature': FLAGS.max_num_feature,
        'feature_selection': FLAGS.feature_selection,
        'data_dir': FLAGS.data_dir,
        'out_dir': FLAGS.out_dir,
    }
    run(**run_kwargs)
def audio_featurize(wavfile):
    """Extract a 104-dimensional summary feature vector from an audio file.

    Loads the file with librosa, computes 13 MFCCs and their first-order
    deltas, and summarizes each coefficient row with four statistics.

    Args:
        wavfile: path to an audio file readable by ``librosa.load``.

    Returns:
        np.ndarray of shape (104,): for each of the 13 MFCC rows then the
        13 delta rows, in order: mean, std, min, max (same layout and order
        as the previous hand-unrolled implementation).
    """
    hop_length = 512
    n_fft = 2048  # NOTE(review): not passed to any call below — confirm whether it was meant to be used.
    (y, sr) = librosa.load(wavfile)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
    mfcc_delta = librosa.feature.delta(mfcc)
    # Replaces 104 hand-unrolled expressions with an equivalent loop:
    # identical statistics in the identical order.
    stats = []
    for matrix in (mfcc, mfcc_delta):
        for row in matrix:
            stats.extend((np.mean(row), np.std(row), np.amin(row), np.amax(row)))
    return np.array(stats)
class MAESTROProber(bench.ProberForBertSeqLabel):
    """Frame-level binary prober: tracks precision/recall/F1 per split,
    thresholded at ``cfg.frame_threshold``."""

    def __init__(self, cfg):
        super().__init__(cfg)

    def init_metrics(self):
        """Create per-split binary precision/recall/F1 torchmetrics objects."""
        self.all_metrics = set()
        for split in ['train', 'valid', 'test']:
            setattr(self, f'{split}_prec', torchmetrics.Precision(task='binary', threshold=self.cfg.frame_threshold))
            self.all_metrics.add('prec')
            setattr(self, f'{split}_recall', torchmetrics.Recall(task='binary', threshold=self.cfg.frame_threshold))
            self.all_metrics.add('recall')
            setattr(self, f'{split}_f1', torchmetrics.F1Score(task='binary', threshold=self.cfg.frame_threshold))
            self.all_metrics.add('f1')

    @torch.no_grad()  # BUG FIX: source had a bare `_grad()` statement; presumably a mangled `@torch.no_grad()` decorator — confirm
    def update_metrics(self, split, y, y_pred):
        """Accumulate this batch into the split's metrics (logits are sigmoided first)."""
        y = y.int()
        y_pred = y_pred.detach()
        y_pred = torch.sigmoid(y_pred)
        # Metrics operate on flat per-frame predictions.
        y_flat = torch.flatten(y)
        y_pred_flat = torch.flatten(y_pred)
        getattr(self, f'{split}_prec').update(y_pred_flat, y_flat)
        getattr(self, f'{split}_recall').update(y_pred_flat, y_flat)
        getattr(self, f'{split}_f1').update(y_pred_flat, y_flat)

    @torch.no_grad()  # BUG FIX: same mangled decorator as above — confirm
    def log_metrics(self, split):
        """Log and reset each of the split's metrics (synced across processes)."""
        self.log(f'{split}_prec', getattr(self, f'{split}_prec').compute(), sync_dist=True)
        getattr(self, f'{split}_prec').reset()
        self.log(f'{split}_recall', getattr(self, f'{split}_recall').compute(), sync_dist=True)
        getattr(self, f'{split}_recall').reset()
        self.log(f'{split}_f1', getattr(self, f'{split}_f1').compute(), sync_dist=True)
        getattr(self, f'{split}_f1').reset()
class Writer(object):
    """Minimal indenting line writer over any file-like object."""

    def __init__(self, out):
        # out: any object with a ``write`` method; indentation starts at zero spaces.
        self.out = out
        self.indent = 0

    def writeln(self, s):
        """Write ``s`` prefixed by the current indentation, followed by a newline."""
        prefix = ' ' * self.indent
        self.out.write(f'{prefix}{s}\n')
def create_model(use_selfatt=False, use_fc=False, dropout=None, stage1_weights=False, dataset=None, log_dir=None, test=False, *args):
    """Build a ResNet-50 feature extractor, optionally warm-started from stage-1 weights.

    Args:
        use_selfatt: enable the modulated self-attention variant.
        use_fc: keep the classifier fc layer on the backbone.
        dropout: dropout setting forwarded to the backbone (None disables it).
        stage1_weights: load stage-1 checkpoint weights (requires ``dataset``).
        dataset: dataset name used to locate the default stage-1 log directory.
        log_dir: explicit log directory; its parent is searched for a ``stage1`` folder.
        test: when True, skip all weight loading.
        *args: ignored; kept for call compatibility with sibling factories.

    Returns:
        The constructed (and possibly weight-initialized) ResNet-50 module.
    """
    print('Loading Scratch ResNet 50 Feature Model.')
    # BUG FIX: the ``dropout`` argument was silently ignored (a literal None
    # was passed to ResNet); forward the caller's value instead.
    resnet50 = ResNet(Bottleneck, [3, 4, 6, 3], use_modulatedatt=use_selfatt, use_fc=use_fc, dropout=dropout)
    if (not test):
        if stage1_weights:
            assert dataset
            print(('Loading %s Stage 1 ResNet 10 Weights.' % dataset))
            # Prefer a stage1 folder next to the provided log_dir, else the default layout.
            if (log_dir is not None):
                weight_dir = path.join('/'.join(log_dir.split('/')[:(- 1)]), 'stage1')
            else:
                weight_dir = ('./logs/%s/stage1' % dataset)
            print(('==> Loading weights from %s' % weight_dir))
            resnet50 = init_weights(model=resnet50, weights_path=path.join(weight_dir, 'final_model_checkpoint.pth'))
        else:
            print('No Pretrained Weights For Feature Model.')
    return resnet50
def plot_partial_trajectory(trajectory, partial_observed_trajectory, mean_trajectory=None):
    """Plot an inferred trajectory against its partially observed prefix.

    Args:
        trajectory: 2-row array-like of (x, y) coordinates for the inferred path.
        partial_observed_trajectory: 2-row array of (x, y) observed points; its
            second dimension is the number of observed samples (used in the label).
        mean_trajectory: optional 2-row array-like of (x, y) for the mean path.
    """
    fig = plt.figure()
    # Observed prefix as a thick solid line, inferred continuation dashed.
    plt.plot(partial_observed_trajectory[0], partial_observed_trajectory[1], color='#6ba3ff', label='Observed', linewidth=3.0)
    plt.plot(trajectory[0], trajectory[1], '--', color='#ff6a6a', label='Inferred', linewidth=2.0)
    if (mean_trajectory is not None):
        plt.plot(mean_trajectory[0], mean_trajectory[1], color='#85d87f', label='Mean')
    fig.suptitle('Probable trajectory')
    plt.legend()
    # Annotate the observed-sample count in axes-relative coordinates.
    plt.text(0.01, 0.7, ('Observed samples: ' + str(partial_observed_trajectory.shape[1])), transform=fig.axes[0].transAxes)
    plt.show()
class TestSamplingRandomMapIterator(unittest.TestCase, TestCheckpointableIterator):
    """Checks SamplingRandomMapIterator against a locally replayed RNG sequence."""

    def setUp(self):
        """Build the iterator under test plus its expected output.

        The expected values are computed by replaying the exact same seeded
        ``Random`` draw sequence the iterator will perform (one ``random()``
        call per item, in item order).
        """
        source_items = list(range(53))
        seed = 1

        def transform(random: Random, item: int):
            return (item + random.random())

        replay_rng = Random()
        replay_rng.seed(seed)
        expected = []
        for value in source_items:
            expected.append(value + replay_rng.random())
        self.expected_result = expected
        self.iterator = SamplingRandomMapIterator(NativeCheckpointableIterator(source_items), transform=transform, seed=seed)
class Contiguous(Layer):
    """BigDL layer wrapper that forces its input tensor to be contiguous in memory."""

    def __init__(self, bigdl_type='float'):
        # No extra configuration: the backing (JVM-side) layer is created with
        # defaults; bigdl_type selects the numeric precision.
        super(Contiguous, self).__init__(None, bigdl_type)
@torch.no_grad()  # BUG FIX: source had a bare `_grad()` statement; presumably a mangled `@torch.no_grad()` decorator — confirm
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a HuggingFace model dump.

    Args:
        checkpoint_path: path to the ParlAI ``.pt`` checkpoint (contains a 'model' state dict).
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_json_path: JSON file describing the target BlenderbotConfig.
    """
    # NOTE(review): torch.load unpickles arbitrary objects — only run on trusted checkpoints.
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    # Rename every ParlAI key to the HF convention; keys that do not map into
    # the HF model are recorded in ``failures`` (and skipped).
    for (k, v) in sd.items():
        if (k in IGNORE_KEYS):
            continue
        new_k = rename_state_dict_key(k)
        if (new_k not in valid_keys):
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    # Pre-LayerNorm checkpoints use different LayerNorm key names.
    if cfg.normalize_before:
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
def graph_ASWTModelComp2():
    """Render the stacked-bar comparison of stopping strategies from the
    ASWTModel_comp4 source table and write it to a .pgf image."""
    filename = 'graph_sources/ASWTModel_comp4.txt'
    categories = []
    aswt_stop = []
    standard_stop = []
    patient_stop = []
    mind_stop = []
    aveges_stop = []
    with open(filename, 'r') as fh:
        # Row 0 is a header; columns 6-10 hold the per-strategy values.
        for row_number, raw_line in enumerate(fh):
            fields = raw_line.split(',')
            if row_number == 0:
                continue
            categories.append(fields[0])
            standard_stop.append(fields[6])
            aswt_stop.append(fields[7])
            patient_stop.append(fields[8])
            mind_stop.append(fields[9])
            aveges_stop.append(fields[10])
    # Convert the collected string columns to float arrays in one pass.
    (aswt_stop, standard_stop, patient_stop, mind_stop, aveges_stop) = (
        np.array(column).astype(float)
        for column in (aswt_stop, standard_stop, patient_stop, mind_stop, aveges_stop)
    )
    full_series = [standard_stop, aswt_stop, patient_stop, mind_stop, aveges_stop]
    series_labels = ['Performance Stopping', 'ASWS Stopping', 'Patience Stopping', 'Minimum Diff Stopping', 'Average Diff Stopping']
    graph_stacked_bar(categories, full_series, series_labels, 'graph_images/ASWTStandardComp4.pgf', loc='upper right')
@_module  # BUG FIX: decorator lost its '@'; NOTE(review): name looks truncated (likely `@<REGISTRY>.register_module()`) — confirm against the registry
class SmoothL1Loss(nn.Module):
    """Smooth-L1 loss module: delegates to ``smooth_l1_loss`` and scales by ``loss_weight``.

    Args:
        beta: transition point between the L2 and L1 regimes.
        reduction: default reduction ('none' | 'mean' | 'sum').
        loss_weight: scalar multiplier applied to the final loss.
    """

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted smooth-L1 loss.

        ``reduction_override`` (when given) takes precedence over the module's
        configured reduction; ``weight``/``avg_factor`` are forwarded to the
        underlying ``smooth_l1_loss`` helper.
        """
        assert (reduction_override in (None, 'none', 'mean', 'sum'))
        reduction = (reduction_override if reduction_override else self.reduction)
        loss_bbox = (self.loss_weight * smooth_l1_loss(pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs))
        return loss_bbox
def construct_query_and_database_sets(base_path, runs_folder, folders, pointcloud_fols, filenames, p, output_name):
    """Build database/query splits for place-recognition evaluation and pickle them.

    NOTE(review): relies on module-level names defined elsewhere in the file
    (check_in_test_set, output_to_file, x_width, y_width) — confirm availability.

    Args:
        base_path: dataset root directory.
        runs_folder: subfolder containing one folder per run.
        folders: run folder names (parallel to ``filenames``).
        pointcloud_fols: per-run subpath holding the .bin point clouds.
        filenames: per-run CSV files with 'timestamp', 'northing', 'easting' columns.
        p: region parameter forwarded to ``check_in_test_set``.
        output_name: prefix for the two output pickle files.
    """
    # Pass 1: build one KD-tree per run over database coordinates (and one over test coords).
    database_trees = []
    test_trees = []
    for (folder, filename) in zip(folders, filenames):
        print(folder)
        df_database = pd.DataFrame(columns=['file', 'northing', 'easting'])
        df_test = pd.DataFrame(columns=['file', 'northing', 'easting'])
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        for (index, row) in df_locations.iterrows():
            (in_test, in_buffer) = check_in_test_set(row['northing'], row['easting'], p, x_width, y_width)
            # NOTE(review): DataFrame._append is a private/deprecated pandas API — consider pd.concat.
            if in_test:
                df_test = df_test._append(row, ignore_index=True)
            # Every row goes into the database; test rows additionally go into the test split.
            df_database = df_database._append(row, ignore_index=True)
        database_tree = KDTree(df_database[['northing', 'easting']])
        test_tree = KDTree(df_test[['northing', 'easting']])
        database_trees.append(database_tree)
        test_trees.append(test_tree)
    # Pass 2: build per-run dicts mapping index -> {query file path, coordinates}.
    test_sets = []
    database_sets = []
    for (folder, filename) in zip(folders, filenames):
        database = {}
        test = {}
        df_locations = pd.read_csv(os.path.join(base_path, runs_folder, folder, filename), sep=',')
        # Turn each timestamp into the full relative path of its .bin point cloud.
        df_locations['timestamp'] = ((((runs_folder + folder) + pointcloud_fols) + df_locations['timestamp'].astype(str)) + '.bin')
        df_locations = df_locations.rename(columns={'timestamp': 'file'})
        for (index, row) in df_locations.iterrows():
            (in_test, in_buffer) = check_in_test_set(row['northing'], row['easting'], p, x_width, y_width)
            if in_test:
                test[len(test.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
            database[len(database.keys())] = {'query': row['file'], 'northing': row['northing'], 'easting': row['easting']}
        database_sets.append(database)
        test_sets.append(test)
    # Pass 3: for every query, store the indices of database entries (from *other*
    # runs) within a 10 m radius under the database run's index.
    for i in range(len(database_sets)):
        tree = database_trees[i]
        for j in range(len(test_sets)):
            if (i == j):
                continue
            for key in range(len(test_sets[j].keys())):
                coor = np.array([[test_sets[j][key]['northing'], test_sets[j][key]['easting']]])
                index = tree.query_radius(coor, r=10)
                test_sets[j][key][i] = index[0].tolist()
    output_to_file(database_sets, (output_name + '_evaluation_database.pickle'))
    output_to_file(test_sets, (output_name + '_evaluation_query.pickle'))
class TransformerEncoderBase(FairseqEncoder):
def __init__(self, cfg, dictionary, embed_tokens, return_fc=False):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.dropout_module = FairseqDropout(cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__))
self.encoder_layerdrop = cfg.encoder.layerdrop
self.return_fc = return_fc
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = (1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim))
self.embed_positions = (PositionalEmbedding(cfg.max_source_positions, embed_dim, self.padding_idx, learned=cfg.encoder.learned_pos) if (not cfg.no_token_positional_embeddings) else None)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
if ((not cfg.adaptive_input) and (cfg.quant_noise.pq > 0)):
self.quant_noise = apply_quant_noise_(nn.Linear(embed_dim, embed_dim, bias=False), cfg.quant_noise.pq, cfg.quant_noise.pq_block_size)
else:
self.quant_noise = None
if (self.encoder_layerdrop > 0.0):
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend([self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)])
self.num_layers = len(self.layers)
if cfg.encoder.normalize_before:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
def build_encoder_layer(self, cfg):
layer = transformer_layer.TransformerEncoderLayerBase(cfg, return_fc=self.return_fc)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
min_params_to_wrap = (cfg.min_params_to_wrap if (not checkpoint) else 0)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(self, src_tokens, token_embedding: Optional[torch.Tensor]=None):
if (token_embedding is None):
token_embedding = self.embed_tokens(src_tokens)
x = embed = (self.embed_scale * token_embedding)
if (self.embed_positions is not None):
x = (embed + self.embed_positions(src_tokens))
if (self.layernorm_embedding is not None):
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if (self.quant_noise is not None):
x = self.quant_noise(x)
return (x, embed)
def forward(self, src_tokens, src_lengths: Optional[torch.Tensor]=None, return_all_hiddens: bool=False, token_embeddings: Optional[torch.Tensor]=None):
return self.forward_scriptable(src_tokens, src_lengths, return_all_hiddens, token_embeddings)
    def forward_scriptable(self, src_tokens, src_lengths: Optional[torch.Tensor]=None, return_all_hiddens: bool=False, token_embeddings: Optional[torch.Tensor]=None):
        """Scriptable encoder forward pass.

        Args:
            src_tokens: token id tensor; presumably (batch, src_len) — the
                padding mask and length computations below assume the time
                axis is dim 1.
            src_lengths: ignored on entry; recomputed below from the padding
                mask before being returned.
            return_all_hiddens: also collect per-layer hidden states (only
                outside TorchScript scripting for the per-layer appends).
            token_embeddings: optional precomputed embeddings forwarded to
                ``forward_embedding``.

        Returns:
            Dict of lists (lists keep the structure TorchScript-friendly):
            ``encoder_out``, ``encoder_padding_mask``, ``encoder_embedding``,
            ``encoder_states``, ``fc_results``, ``src_tokens`` (always
            empty), ``src_lengths``.
        """
        # True where src_tokens equals the pad index.
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        # On XLA, always take the padding path instead of branching on the
        # data-dependent .any() result.
        has_pads = ((src_tokens.device.type == 'xla') or encoder_padding_mask.any())
        (x, encoder_embedding) = self.forward_embedding(src_tokens, token_embeddings)
        if has_pads:
            # Zero out embeddings at padded positions.
            x = (x * (1 - encoder_padding_mask.unsqueeze((- 1)).type_as(x)))
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        encoder_states = []
        fc_results = []
        if return_all_hiddens:
            encoder_states.append(x)
        for layer in self.layers:
            # Pass the mask only when there actually is padding.
            lr = layer(x, encoder_padding_mask=(encoder_padding_mask if has_pads else None))
            if (isinstance(lr, tuple) and (len(lr) == 2)):
                # Layer returned (output, fc_result) — presumably when the
                # layer was built with return_fc; TODO confirm.
                (x, fc_result) = lr
            else:
                x = lr
                fc_result = None
            if (return_all_hiddens and (not torch.jit.is_scripting())):
                assert (encoder_states is not None)
                encoder_states.append(x)
                fc_results.append(fc_result)
        if (self.layer_norm is not None):
            x = self.layer_norm(x)
        # Recompute true lengths from the pad mask; reshape to B x 1 so the
        # output dict always carries a consistently-shaped tensor.
        src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape((- 1), 1).contiguous()
        return {'encoder_out': [x], 'encoder_padding_mask': [encoder_padding_mask], 'encoder_embedding': [encoder_embedding], 'encoder_states': encoder_states, 'fc_results': fc_results, 'src_tokens': [], 'src_lengths': [src_lengths]}
.export
def reorder_encoder_out(self, encoder_out: Dict[(str, List[Tensor])], new_order):
if (len(encoder_out['encoder_out']) == 0):
new_encoder_out = []
else:
new_encoder_out = [encoder_out['encoder_out'][0].index_select(1, new_order)]
if (len(encoder_out['encoder_padding_mask']) == 0):
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [encoder_out['encoder_padding_mask'][0].index_select(0, new_order)]
if (len(encoder_out['encoder_embedding']) == 0):
new_encoder_embedding = []
else:
new_encoder_embedding = [encoder_out['encoder_embedding'][0].index_select(0, new_order)]
if (len(encoder_out['src_tokens']) == 0):
src_tokens = []
else:
src_tokens = [encoder_out['src_tokens'][0].index_select(0, new_order)]
if (len(encoder_out['src_lengths']) == 0):
src_lengths = []
else:
src_lengths = [encoder_out['src_lengths'][0].index_select(0, new_order)]
encoder_states = encoder_out['encoder_states']
if (len(encoder_states) > 0):
for (idx, state) in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {'encoder_out': new_encoder_out, 'encoder_padding_mask': new_encoder_padding_mask, 'encoder_embedding': new_encoder_embedding, 'encoder_states': encoder_states, 'src_tokens': src_tokens, 'src_lengths': src_lengths}
def max_positions(self):
if (self.embed_positions is None):
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if (weights_key in state_dict):
print('deleting {0}'.format(weights_key))
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(self.num_layers):
self.layers[i].upgrade_state_dict_named(state_dict, '{}.layers.{}'.format(name, i))
version_key = '{}.version'.format(name)
if (utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2):
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict |
# NOTE(review): the three lines below were stray web-page residue pasted into
# the file ("Subsets and Splits", dataset-viewer boilerplate), not Python —
# commented out so the module parses:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.