code stringlengths 101 5.91M |
|---|
def test_train_learning_rate_sinescheduler(train_data_fx, model_fx):
    """Smoke-test training with the 'sineexponentialdecay' LR scheduler.

    Relies on project fixtures `train_data_fx` (inputs/targets pair) and
    `model_fx` (a trainable model); only checks that train() accepts this
    scheduler configuration without raising.
    """
    # NOTE(review): the returned history `h` is never asserted on.
    h = model_fx.train(train_data_fx[0], train_data_fx[1], epochs=10, learning_rate={'scheduler': 'sineexponentialdecay', 'initial_learning_rate': 0.001, 'final_learning_rate': 0.0001, 'decay_epochs': 100, 'sine_freq': 2, 'sine_decay_rate': 0.5})
# NOTE(review): the decorator name below was truncated during extraction
# (presumably a dataset-registry decorator such as `@register_dataset(NAME)`).
_dataset(NAME)
class KITTIMaskedDiosDataset(KITTIMturkersInstanceDataset):
    """KITTI instance dataset that delegates mask handling to a wrapped
    Pascal-style masked dataset.

    `self.pascal_masked_dataset` is presumably created by the parent class —
    it is not visible here; TODO confirm.
    """

    def __init__(self, config, subset, name=NAME):
        super().__init__(config, subset, name)

    def get_extraction_keys(self):
        # Delegate to the wrapped masked dataset.
        return self.pascal_masked_dataset.get_extraction_keys()

    def postproc_example_before_assembly(self, tensors):
        return self.pascal_masked_dataset.postproc_example_before_assembly(tensors)

    def use_segmentation_mask(self, res):
        self.pascal_masked_dataset.use_segmentation_mask(res)

    def postproc_annotation(self, ann_filename, ann):
        """Wrap the parent's mask into the DataKeys dict expected downstream."""
        mask = super().postproc_annotation(ann_filename, ann)
        return {DataKeys.SEGMENTATION_LABELS: mask, DataKeys.RAW_SEGMENTATION_LABELS: mask, DataKeys.IMAGE_FILENAMES: ann_filename}
class FeatureExtractor(object):
    """Feature extractor wrapping a re-id style backbone.

    Builds the model, optionally loads pretrained weights, and converts
    image paths / numpy arrays / torch tensors into feature vectors.
    NOTE(review): `device` must be a string — it is passed to
    str.startswith before being converted via torch.device.
    """

    def __init__(self, model_name='', model_path='', image_size=(256, 128), pixel_mean=[0.485, 0.456, 0.406], pixel_std=[0.229, 0.224, 0.225], pixel_norm=True, device='cuda', verbose=True):
        # num_classes=1: only the feature output is used, not the classifier.
        model = build_model(model_name, num_classes=1, pretrained=True, use_gpu=device.startswith('cuda'))
        model.eval()
        if verbose:
            (num_params, flops) = compute_model_complexity(model, (1, 3, image_size[0], image_size[1]))
            print('Model: {}'.format(model_name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))
        if (model_path and check_isfile(model_path)):
            load_pretrained_weights(model, model_path)
        # Preprocessing pipeline: resize -> tensor -> optional normalisation.
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        if pixel_norm:
            transforms += [T.Normalize(mean=pixel_mean, std=pixel_std)]
        preprocess = T.Compose(transforms)
        to_pil = T.ToPILImage()
        device = torch.device(device)
        model.to(device)
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device

    def __call__(self, input):
        """Extract features from `input`.

        `input` may be a list of paths and/or numpy arrays, a single path,
        a single numpy array, or a torch tensor (3-D tensors get a batch
        dimension added; tensors are assumed already preprocessed — TODO
        confirm). Returns the model's output for the assembled batch.
        """
        if isinstance(input, list):
            images = []
            for element in input:
                if isinstance(element, str):
                    image = Image.open(element).convert('RGB')
                elif isinstance(element, np.ndarray):
                    image = self.to_pil(element)
                else:
                    raise TypeError('Type of each element must belong to [str | numpy.ndarray]')
                image = self.preprocess(image)
                images.append(image)
            images = torch.stack(images, dim=0)
            images = images.to(self.device)
        elif isinstance(input, str):
            image = Image.open(input).convert('RGB')
            image = self.preprocess(image)
            images = image.unsqueeze(0).to(self.device)
        elif isinstance(input, np.ndarray):
            image = self.to_pil(input)
            image = self.preprocess(image)
            images = image.unsqueeze(0).to(self.device)
        elif isinstance(input, torch.Tensor):
            if (input.dim() == 3):
                input = input.unsqueeze(0)
            images = input.to(self.device)
        else:
            raise NotImplementedError
        with torch.no_grad():
            features = self.model(images)
        return features
def test_validate_parameters_invalid_language():
    """validate_parameters must reject an unknown language code with ValueError."""
    with pytest.raises(ValueError):
        loader.validate_parameters('assin', 'invalid_language', 'full')
class PackagePickler(_Pickler):
    """Pickler that resolves globals through an Importer instead of the
    default module lookup, so objects are named relative to a package
    environment. The byte emission mirrors pickle.Pickler.save_global."""

    # Own dispatch copy so overriding FunctionType does not affect _Pickler.
    dispatch = _Pickler.dispatch.copy()

    def __init__(self, importer: Importer, *args, **kwargs):
        self.importer = importer
        super().__init__(*args, **kwargs)

    def save_global(self, obj, name=None):
        """Emit pickle opcodes referencing `obj` by (module, qualname).

        Raises PicklingError when the importer cannot name the object or
        when the name is not ASCII under protocols < 3.
        """
        write = self.write
        memo = self.memo
        try:
            (module_name, name) = self.importer.get_name(obj, name)
        except (ObjNotFoundError, ObjMismatchError) as err:
            raise PicklingError(f"Can't pickle {obj}: {str(err)}") from None
        module = self.importer.import_module(module_name)
        (_, parent) = _getattribute(module, name)
        if (self.proto >= 2):
            # Registered extension codes get the compact EXT opcodes.
            code = _extension_registry.get((module_name, name))
            if code:
                assert (code > 0)
                if (code <= 255):
                    write((EXT1 + pack('<B', code)))
                elif (code <= 65535):
                    write((EXT2 + pack('<H', code)))
                else:
                    write((EXT4 + pack('<i', code)))
                return
        lastname = name.rpartition('.')[2]
        if (parent is module):
            name = lastname
        if (self.proto >= 4):
            self.save(module_name)
            self.save(name)
            write(STACK_GLOBAL)
        elif (parent is not module):
            # Nested attribute: reconstruct via getattr(parent, lastname).
            self.save_reduce(getattr, (parent, lastname))
        elif (self.proto >= 3):
            write(((((GLOBAL + bytes(module_name, 'utf-8')) + b'\n') + bytes(name, 'utf-8')) + b'\n'))
        else:
            if self.fix_imports:
                # Map Python 3 names back to their Python 2 equivalents.
                r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
                r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
                if ((module_name, name) in r_name_mapping):
                    (module_name, name) = r_name_mapping[(module_name, name)]
                elif (module_name in r_import_mapping):
                    module_name = r_import_mapping[module_name]
            try:
                write(((((GLOBAL + bytes(module_name, 'ascii')) + b'\n') + bytes(name, 'ascii')) + b'\n'))
            except UnicodeEncodeError:
                raise PicklingError(("can't pickle global identifier '%s.%s' using pickle protocol %i" % (module, name, self.proto))) from None
        self.memoize(obj)

    # Route all function pickling through save_global.
    dispatch[FunctionType] = save_global
def linalg_solve(A: dace.float64[(100, 100)], B: dace.float64[(100, 10)]):
    """Solve the linear system A @ X = B for X (DaCe-annotated kernel)."""
    solution = np.linalg.solve(A, B)
    return solution
def test3d_8n_ub():
    """3-D 8-nearest-neighbour query with a 10 km distance upper bound.

    Neighbours beyond the bound are reported with distance inf and the
    sentinel index n (= number of data points). Fix: `np.Inf` was removed
    in NumPy 2.0 — use the canonical `np.inf`.
    """
    query_pts = np.array([[787014.438, (- 340616.906), 6313018.0], [751763.125, (- 59925.969), 6326205.5], [769957.188, (- 202418.125), 6321069.5]])
    kdtree = KDTree(data_pts_real)
    (dist, idx) = kdtree.query(query_pts, k=8, distance_upper_bound=10000.0, sqr_dists=False)
    # NOTE(review): the leading `1.` in the second row looks truncated by
    # extraction (its neighbours are O(1000)); kept as-is pending the
    # original fixture.
    exp_dist = np.array([[0.0, 4052.50235, 4073.89794, 8082.01128, 8170.63009, np.inf, np.inf, np.inf], [1., 2702.16896, 2714.31274, 5395.37066, 5437.9321, 8078.55631, 8171.1997, np.inf], [141.424892, 3255.00021, 3442.84958, 6580.19346, 6810.38455, 9891.40135, np.inf, np.inf]])
    n = 100  # sentinel index for "no neighbour within the bound"
    exp_idx = np.array([[7, 8, 6, 9, 5, n, n, n], [93, 94, 92, 95, 91, 96, 90, n], [45, 46, 44, 47, 43, 48, n, n]])
    assert np.array_equal(idx, exp_idx)
    assert np.allclose(dist, exp_dist)
def update_args(doc: str, beg: int, prefix: str=' ') -> str:
    """Insert BITS_ARG into an argument docstring section starting at `beg`,
    dropping any 'approximate'/'inplace' argument entries found on the way.

    NOTE(review): original indentation was lost during extraction; the
    nesting below (scan up to 10 args until a blank line, then insert) is a
    reconstruction — confirm against the original source.
    """
    prefix += ' '
    for i in range(10):
        # Stop scanning at the first blank line (end of the args section).
        if ((res := re_blank_line.match(doc, beg)) is not None):
            (beg, end) = res.span(0)
            break
        # No further argument entries: nothing to update.
        if ((res := re_arg.search(doc, beg)) is None):
            return doc
        prefix = res.group(1)
        beg = (res.end(0) + 1)
        if (res.group(2) in ('approximate', 'inplace')):
            # Remove this argument entry entirely.
            (beg, end) = res.span(0)
            doc = (doc[:beg] + doc[(end + 1):])
    # Splice the BITS_ARG text in at the stopping point.
    doc = ((doc[:beg] + (prefix + BITS_ARG)) + doc[beg:])
    return doc
class Mixed_3c(nn.Module):
    """Inception-style 3D mixing block: four parallel branches over a
    256-channel input, concatenated along the channel axis
    (128 + 192 + 96 + 64 = 480 output channels)."""

    def __init__(self):
        super(Mixed_3c, self).__init__()
        # 1x1x1 bottleneck only.
        self.branch0 = nn.Sequential(BasicConv3d(256, 128, kernel_size=1, stride=1))
        # 1x1x1 bottleneck followed by a separable 3x3x3 convolution.
        self.branch1 = nn.Sequential(BasicConv3d(256, 128, kernel_size=1, stride=1), SepConv3d(128, 192, kernel_size=3, stride=1, padding=1))
        self.branch2 = nn.Sequential(BasicConv3d(256, 32, kernel_size=1, stride=1), SepConv3d(32, 96, kernel_size=3, stride=1, padding=1))
        # Max-pool branch with a 1x1x1 projection.
        self.branch3 = nn.Sequential(nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1), BasicConv3d(256, 64, kernel_size=1, stride=1))

    def forward(self, x):
        branches = (self.branch0, self.branch1, self.branch2, self.branch3)
        outputs = [branch(x) for branch in branches]
        return torch.cat(tuple(outputs), 1)
# NOTE(review): decorator truncated during extraction — likely a uarray
# `@_dispatch` registration as used by scipy.fft's backend system.
_dispatch
def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None):
    """Multi-dimensional inverse real FFT — uarray dispatch stub.

    Returns the Dispatchable argument tuple; the actual computation is
    supplied by whichever backend is registered, not by this function.
    """
    return (Dispatchable(x, np.ndarray),)
_module
class NonLinearNeckSimCLRDense(nn.Module):
    """SimCLR-style non-linear projection neck built from 1x1 convolutions,
    with an optional self-attention step after fc0.

    Fix: the attention branch read `q k.transpose(...)` and `(attn v)`,
    which is not valid Python — the matrix-multiply operator `@` was
    missing from both expressions.
    """

    def __init__(self, in_channels, hid_channels, out_channels, num_layers=2, sync_bn=True, with_bias=False, with_last_bn=True, with_avg_pool=True, with_attn=False):
        super(NonLinearNeckSimCLRDense, self).__init__()
        self.sync_bn = sync_bn
        self.with_last_bn = with_last_bn
        self.with_avg_pool = with_avg_pool
        self.with_attn = with_attn
        if with_avg_pool:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Older torch SyncBN required expanding inputs to 4-D; flag kept for parity.
        if (version.parse(torch.__version__) < version.parse('1.4.0')):
            self.expand_for_syncbn = True
        else:
            self.expand_for_syncbn = False
        self.relu = nn.ReLU(inplace=True)
        # fc0 triples the channels when attention is enabled (q, k, v packed).
        self.fc0 = nn.Conv2d(in_channels, ((hid_channels * 3) if with_attn else hid_channels), 1, 1, bias=with_bias)
        if sync_bn:
            (_, self.bn0) = build_norm_layer(dict(type='SyncBN'), hid_channels)
        else:
            self.bn0 = nn.BatchNorm1d(hid_channels)
        self.fc_names = []
        self.bn_names = []
        for i in range(1, num_layers):
            this_channels = (out_channels if (i == (num_layers - 1)) else hid_channels)
            self.add_module('fc{}'.format(i), nn.Conv2d(hid_channels, this_channels, 1, 1, bias=with_bias))
            self.fc_names.append('fc{}'.format(i))
            if ((i != (num_layers - 1)) or self.with_last_bn):
                if sync_bn:
                    self.add_module('bn{}'.format(i), build_norm_layer(dict(type='SyncBN'), this_channels)[1])
                else:
                    self.add_module('bn{}'.format(i), nn.BatchNorm1d(this_channels))
                self.bn_names.append('bn{}'.format(i))
            else:
                self.bn_names.append(None)

    def init_weights(self, init_linear='normal', std=0.01, bias=0.0):
        """Initialise conv/linear weights ('normal' or 'kaiming') and reset
        norm-layer affine parameters to weight=1, bias=0."""
        assert (init_linear in ['normal', 'kaiming']), 'Undefined init_linear: {}'.format(init_linear)
        for m in self.modules():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                if (init_linear == 'normal'):
                    normal_init(m, std=std, bias=bias)
                else:
                    kaiming_init(m, mode='fan_in', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)):
                if (m.weight is not None):
                    nn.init.constant_(m.weight, 1)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        assert (len(x) == 1)
        x = x[0]
        if self.with_avg_pool:
            x = self.avgpool(x)
        x = self.fc0(x)
        if self.with_attn:
            # Hard-coded for 7x7 feature maps: 49 positions, 8 heads of 512
            # dims (assumes hid_channels == 4096 — TODO confirm).
            B = x.shape[0]
            qkv = x.permute(0, 2, 3, 1).reshape(B, 49, 3, 8, 512).permute(2, 0, 3, 1, 4)
            (q, k, v) = (qkv[0], qkv[1], qkv[2])
            # Scaled dot-product attention; `@` restored (was missing).
            attn = (q @ k.transpose(-2, -1)) * (512 ** (-0.5))
            attn = attn.softmax(dim=(-1))
            x = (attn @ v).transpose(2, 3).reshape(B, 4096, 7, 7)
        x = self.bn0(x)
        for (fc_name, bn_name) in zip(self.fc_names, self.bn_names):
            fc = getattr(self, fc_name)
            x = self.relu(x)
            x = fc(x)
            if (bn_name is not None):
                bn = getattr(self, bn_name)
                x = bn(x)
        return [x]
def main():
    """Convert the English CoNLL-03 NER corpus into the processed data dir."""
    paths = get_default_paths()
    conll_dir = os.path.join(paths['NERBASE'], 'english', 'en_conll03')
    output_dir = paths['NER_DATA_DIR']
    process_dataset('en_conll03', conll_dir, output_dir)
def _assert_valid_lists(groundtruth_list, predicted_list):
assert (len(groundtruth_list) == len(predicted_list))
for unique_element in np.unique(groundtruth_list).tolist():
assert (unique_element in [0, 1]) |
class MOTDataReader():
    """Pairs MOT-challenge frame images with their per-frame detections.

    Frames live in `image_folder` as zero-padded 000001.jpg, ...; the
    detection file is a headerless CSV whose column 0 is the frame id and
    column 6 a detection confidence.
    """

    def __init__(self, image_folder, detection_file_name, min_confidence=None):
        self.image_folder = image_folder
        self.detection_file_name = detection_file_name
        self.image_format = os.path.join(self.image_folder, '{0:06d}.jpg')
        self.detection = pd.read_csv(self.detection_file_name, header=None)
        if (min_confidence is not None):
            # Drop detections at or below the confidence threshold.
            self.detection = self.detection[(self.detection[6] > min_confidence)]
        self.detection_group = self.detection.groupby(0)
        self.detection_group_keys = list(self.detection_group.indices.keys())

    def __len__(self):
        return len(self.detection_group_keys)

    def get_detection_by_index(self, index):
        # `index` is a 1-based frame id, not a positional index.
        # NOTE(review): `index > len(...)` looks like it is meant as a range
        # check on frame ids; the count()==0 test is what actually guards
        # missing frames — confirm against callers.
        if ((index > len(self.detection_group_keys)) or (self.detection_group_keys.count(index) == 0)):
            return None
        return self.detection_group.get_group(index).values

    def get_image_by_index(self, index):
        if (index > len(self.detection_group_keys)):
            return None
        return cv2.imread(self.image_format.format(index))

    def __getitem__(self, item):
        # Dataset items are 0-based; frame ids are 1-based, hence item + 1.
        return (self.get_image_by_index((item + 1)), self.get_detection_by_index((item + 1)))
def model_creator(data, name, dtypes):
    """Build a model via the factory registered under `name`, if any.

    Returns the factory's result when `name` is registered; otherwise falls
    back to `(data, None)`. `dtypes` is accepted but unused here.
    """
    try:
        factory = _model_creator_list[name]
    except KeyError:
        return (data, None)
    return factory(data)
class Vocab():
    """Token/index vocabulary with special tokens and an optional embedding.

    Index order: special tokens first (unknown, padding, bos, eos, then any
    reserved tokens), followed by the remaining entries of *list_of_tokens*
    in first-seen order.

    Fix: the accessor methods at the bottom had lost their `@property`
    decorators during extraction — the tell-tale is the duplicated
    `embedding` definition, which only makes sense as a property + setter
    pair. They are restored here.
    """

    def __init__(self, list_of_tokens: List[str]=None, padding_token: str='<pad>', unknown_token: str='<unk>', bos_token: str='<bos>', eos_token: str='<eos>', reserved_tokens: List[str]=None, token_to_idx: Dict[(str, int)]=None):
        self._unknown_token = unknown_token
        self._padding_token = padding_token
        self._bos_token = bos_token
        self._eos_token = eos_token
        self._reserved_tokens = reserved_tokens
        self._special_tokens = []
        # Only keep the special tokens that were actually provided (truthy).
        for tkn in [self._unknown_token, self._padding_token, self._bos_token, self._eos_token]:
            if tkn:
                self._special_tokens.append(tkn)
        if self._reserved_tokens:
            self._special_tokens.extend(self._reserved_tokens)
        if list_of_tokens:
            # Append user tokens that are not already special tokens.
            self._special_tokens.extend(list(filter((lambda elm: (elm not in self._special_tokens)), list_of_tokens)))
        (self._token_to_idx, self._idx_to_token) = self._build(self._special_tokens)
        if token_to_idx:
            self._sort_index_according_to_user_specification(token_to_idx)
        self._embedding = None

    def to_indices(self, tokens: Union[(str, List[str])]) -> Union[(int, List[int])]:
        """Map a token or list of tokens to indices; unknowns map to the
        unknown token's index."""
        if isinstance(tokens, list):
            return [(self._token_to_idx[tkn] if (tkn in self._token_to_idx) else self._token_to_idx[self._unknown_token]) for tkn in tokens]
        else:
            return (self._token_to_idx[tokens] if (tokens in self._token_to_idx) else self._token_to_idx[self._unknown_token])

    def to_tokens(self, indices: Union[(int, List[int])]) -> Union[(str, List[str])]:
        """Map an index or list of indices back to tokens (raises IndexError
        for out-of-range indices)."""
        if isinstance(indices, list):
            return [self._idx_to_token[idx] for idx in indices]
        else:
            return self._idx_to_token[indices]

    def _build(self, list_of_tokens):
        # Forward map is a dict; reverse map is simply the ordered list.
        token_to_idx = {tkn: idx for (idx, tkn) in enumerate(list_of_tokens)}
        idx_to_token = list_of_tokens
        return (token_to_idx, idx_to_token)

    def _sort_index_according_to_user_specification(self, token_to_idx):
        """Swap entries so user-pinned tokens land at their requested indices."""
        if (not set(token_to_idx.keys()).issubset(self._token_to_idx.keys())):
            raise ValueError('User-specified token_to_idx mapping can only contain tokens that will be part of the vocabulary.')
        if (len(set(token_to_idx.values())) != len(token_to_idx)):
            raise ValueError('User-specified indices must not contain duplicates.')
        if ((min(token_to_idx.values()) < 0) or (max(token_to_idx.values()) >= len(self._token_to_idx))):
            raise ValueError('User-specified indices must not be < 0 or >= the number of tokens that will be in the vocabulary. The current vocab contains {}tokens.'.format(len(self._token_to_idx)))
        for (token, new_idx) in token_to_idx.items():
            old_idx = self._token_to_idx[token]
            ousted_token = self._idx_to_token[new_idx]
            self._token_to_idx[token] = new_idx
            self._token_to_idx[ousted_token] = old_idx
            self._idx_to_token[old_idx] = ousted_token
            self._idx_to_token[new_idx] = token

    def __len__(self):
        return len(self._token_to_idx)

    @property
    def token_to_idx(self):
        """dict: token -> index."""
        return self._token_to_idx

    @property
    def idx_to_token(self):
        """list: index -> token."""
        return self._idx_to_token

    @property
    def padding_token(self):
        return self._padding_token

    @property
    def unknown_token(self):
        return self._unknown_token

    @property
    def bos_token(self):
        return self._bos_token

    @property
    def eos_token(self):
        return self._eos_token

    @property
    def embedding(self):
        return self._embedding

    @embedding.setter
    def embedding(self, array):
        self._embedding = array
def test_ufunc_add_out():
    """ufunc_add_out(A, B, C) must write elementwise A + B into C in place."""
    A = np.random.randint(10, size=(10,), dtype=np.int32)
    B = np.random.randint(10, size=(10,), dtype=np.int32)
    # Output buffer is uninitialised on purpose; the call must fill it.
    C = np.empty((10,), dtype=np.int32)
    ufunc_add_out(A, B, C)
    assert np.array_equal((A + B), C)
def test_model(test_dl, model, scaler):
    """Evaluate `model` over `test_dl` on CUDA and return
    (x_input, truth, predicted): per-batch lists of inverse-scaled numpy
    arrays for the FIRST sample of each batch only, each shaped (seq_len, 1).

    NOTE(review): requires a CUDA device; `mask` from the loader and the
    `step` counter are unused.
    """
    x_input = []
    truth = []
    predicted = []
    with torch.no_grad():
        model.eval()
        step = 0
        for (x, y, mask) in test_dl:
            x = x.to('cuda')
            y = y.to('cuda')
            output = model(x).float()
            # Move everything back to CPU before converting to numpy.
            x = x.to('cpu')
            y = y.to('cpu')
            output = output.to('cpu')
            x_input.append(scaler.inverse_transform(np.reshape(np.array(x[0].view((- 1)).numpy()), (x.shape[1], 1))))
            truth.append(scaler.inverse_transform(np.reshape(np.array(y[0].view((- 1)).numpy()), (y.shape[1], 1))))
            predicted.append(scaler.inverse_transform(np.reshape(np.array(output[0].view((- 1)).numpy()), (output.shape[1], 1))))
    return (x_input, truth, predicted)
class VideoLDMUpBlock(CrossAttnUpBlock2D):
    """Cross-attention up block extended with a per-layer temporal 3D conv
    and temporal attention for video latent diffusion.

    kwargs must include 'out_channels' and 'num_layers' (forwarded to
    CrossAttnUpBlock2D and reused here). Fix: the original left the
    `num_layers` local unused and re-read kwargs in the loop header.
    """

    def __init__(self, *args, n_frames=8, n_temp_heads=8, **kwargs):
        super().__init__(*args, **kwargs)
        out_channels = kwargs['out_channels']
        num_layers = kwargs['num_layers']
        cross_attn_dim = kwargs.get('cross_attention_dim')
        conv3ds = []
        tempo_attns = []
        # One temporal conv + temporal attention per resnet layer.
        for _ in range(num_layers):
            conv3ds.append(Conv3DLayer(in_dim=out_channels, out_dim=out_channels, n_frames=n_frames))
            tempo_attns.append(TemporalAttentionLayer(dim=out_channels, n_frames=n_frames, n_heads=n_temp_heads, kv_dim=cross_attn_dim))
        self.conv3ds = nn.ModuleList(conv3ds)
        self.tempo_attns = nn.ModuleList(tempo_attns)

    def forward(self, hidden_states: torch.FloatTensor, res_hidden_states_tuple: Tuple[(torch.FloatTensor, ...)], temb: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, upsample_size: Optional[int]=None, attention_mask: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None):
        """Per layer: concat skip -> resnet -> conv3d -> cross-attn ->
        temporal-attn, consuming skips from the end of
        res_hidden_states_tuple; finally apply upsamplers if present."""
        for (resnet, conv3d, attn, tempo_attn) in zip(self.resnets, self.conv3ds, self.attentions, self.tempo_attns):
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            hidden_states = resnet(hidden_states, temb)
            hidden_states = conv3d(hidden_states)
            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs).sample
            hidden_states = tempo_attn(hidden_states, encoder_hidden_states)
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)
        return hidden_states
def download_protein_folder(bucket_name, local_dir=None):
    """Download every object under the 'protein' prefix of an S3 bucket.

    Files land under `local_dir` (prefix stripped) or, when `local_dir` is
    None, at paths mirroring the object keys. Folder placeholder keys
    (ending in '/') only get their directory created.

    Fixes: `os.makedirs(..., exist_ok=True)` closes the check-then-create
    race of the original `os.path.exists` guard, and an empty dirname (a
    bare filename target) no longer reaches makedirs.
    """
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    for obj in bucket.objects.filter(Prefix='protein'):
        target = (obj.key if (local_dir is None) else os.path.join(local_dir, os.path.relpath(obj.key, 'protein')))
        parent = os.path.dirname(target)
        if parent:
            os.makedirs(parent, exist_ok=True)
        if obj.key.endswith('/'):
            # Directory placeholder: nothing to download.
            continue
        bucket.download_file(obj.key, target)
def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, overwrite_b=False, check_finite=True):
    """Generalized Schur (QZ) decomposition of the pair (A, B) via LAPACK *gges.

    Returns (result, typecode) where `result` is the raw gges output tuple.
    Raises ValueError for invalid arguments and LinAlgError for LAPACK
    failures. Fix: `.astype(np.int)` used the `np.int` alias that was
    removed in NumPy 1.24 — replaced with a plain int() conversion.
    """
    if (sort is not None):
        # Sorting was removed from qz(); ordqz provides it.
        raise ValueError("The 'sort' input of qz() has to be None and will be removed in a future release. Use ordqz instead.")
    if (output not in ['real', 'complex', 'r', 'c']):
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(A)
        b1 = asarray_chkfinite(B)
    else:
        a1 = np.asarray(A)
        b1 = np.asarray(B)
    (a_m, a_n) = a1.shape
    (b_m, b_n) = b1.shape
    if (not (a_m == a_n == b_m == b_n)):
        raise ValueError('Array dimensions must be square and agree')
    # Promote each operand to single/double complex when complex output is requested.
    typa = a1.dtype.char
    if ((output in ['complex', 'c']) and (typa not in ['F', 'D'])):
        if (typa in _double_precision):
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if ((output in ['complex', 'c']) and (typb not in ['F', 'D'])):
        if (typb in _double_precision):
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'
    # Safe to overwrite when the conversion above already made copies.
    overwrite_a = (overwrite_a or _datacopied(a1, A))
    overwrite_b = (overwrite_b or _datacopied(b1, B))
    (gges,) = get_lapack_funcs(('gges',), (a1, b1))
    if ((lwork is None) or (lwork == (- 1))):
        # Workspace-size query (lwork=-1); the optimal size comes back in
        # the second-to-last result slot.
        result = gges((lambda x: None), a1, b1, lwork=(- 1))
        lwork = int(result[(- 2)][0].real)
    sfunction = (lambda x: None)  # dummy SELECT callback (sorting disabled)
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a, overwrite_b=overwrite_b, sort_t=0)
    info = result[(- 1)]
    if (info < 0):
        raise ValueError('Illegal value in argument {} of gges'.format((- info)))
    elif ((info > 0) and (info <= a_n)):
        warnings.warn('The QZ iteration failed. (a,b) are not in Schur form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be correct for J={},...,N'.format((info - 1)), LinAlgWarning, stacklevel=3)
    elif (info == (a_n + 1)):
        raise LinAlgError('Something other than QZ iteration failed')
    elif (info == (a_n + 2)):
        raise LinAlgError('After reordering, roundoff changed values of some complex eigenvalues so that leading eigenvalues in the Generalized Schur form no longer satisfy sort=True. This could also be due to scaling.')
    elif (info == (a_n + 3)):
        raise LinAlgError('Reordering failed in <s,d,c,z>tgsen')
    return (result, gges.typecode)
_decorator(False)
def is_404(html):
    """Heuristically decide whether an HTML payload is a 404 page.

    Returns True on any matching heuristic; AttributeError (e.g. a page with
    no <title>) is treated as "not a 404".

    NOTE(review): the first string literal was corrupted during extraction
    (`' in html` was unterminated, a syntax error); `'404'` is a best-guess
    reconstruction — confirm against the original marker text.
    """
    soup = BeautifulSoup(html, 'lxml')
    try:
        if ('404' in html):
            return True
        elif (soup.title.text == '404'):
            return True
        elif (html == ''):
            return True
        elif (',' in html):
            return True
        else:
            return False
    except AttributeError:
        # Missing <title> or similar: assume the page is fine.
        return False
class ImageAugmentation(Layer):
    """Layer applying a random affine jitter to a batch of images.

    Uses TF1-era APIs (tf.truncated_normal) plus a project `images_transform`
    helper. Input x is a pair: x[0] = images, x[1] = noise stddev.
    """

    def call(self, x):
        one = tf.fill([tf.shape(x[0])[0], 1], 1.0)
        zero = tf.fill([tf.shape(x[0])[0], 1], 0.0)
        # Identity affine transform: 8 projective parameters per image.
        transforms = tf.concat([one, zero, zero, zero, one, zero, zero, zero], axis=1)
        # Truncated-normal noise on the first 6 (affine) parameters only.
        rands = tf.concat([tf.truncated_normal([tf.shape(x[0])[0], 6], stddev=x[1]), zero, zero], axis=1)
        return images_transform(x[0], (transforms + rands), interpolation='BILINEAR')
def gpu_mem_usage():
    """Return the peak GPU memory allocated by torch, in GiB (0.0 without CUDA)."""
    bytes_used = torch.cuda.max_memory_allocated() if torch.cuda.is_available() else 0
    return bytes_used / (1024 ** 3)
class GPT2Tokenizer(object):
    """GPT-2 BPE tokenizer with BERT-style special tokens.

    The symbol table is built so that [PAD]/[CLS]/[SEP]/[UNK] occupy the
    first ids, followed by the entries of the GPT-2 vocabulary dump, then
    [MASK] and any user-supplied special tokens.
    """

    def __init__(self, vocab_file=None, special_tokens=None):
        self.pad_token = '[PAD]'
        self.sep_token = '[SEP]'
        self.unk_token = '[UNK]'
        self.cls_token = '[CLS]'
        # symbols: id -> token, indices: token -> id, count: occurrence counts.
        self.symbols = []
        self.count = []
        self.indices = {}
        self.pad_token_id = self.add_symbol(self.pad_token)
        self.cls_token_id = self.add_symbol(self.cls_token)
        self.sep_token_id = self.add_symbol(self.sep_token)
        self.unk_token_id = self.add_symbol(self.unk_token)
        self.gpt2_encoder = load_vocab(vocab_file)
        self.bpe = get_encoder(self.gpt2_encoder['encoder'], self.gpt2_encoder['vocab'])
        # Populate the symbol table from the dumped (token, count) pairs.
        for (w, n) in self.gpt2_encoder['dict_map']:
            self.add_symbol(w, n)
        self.mask_token = '[MASK]'
        self.mask_id = self.add_symbol(self.mask_token)
        self.special_tokens = ['[MASK]', '[SEP]', '[PAD]', '[UNK]', '[CLS]']
        if (special_tokens is not None):
            for t in special_tokens:
                self.add_special_token(t)
        self.vocab = self.indices
        self.ids_to_tokens = self.symbols

    def tokenize(self, text):
        """BPE-encode `text` and return the ids as a list of strings."""
        bpe = self._encode(text)
        return [t for t in bpe.split(' ') if t]

    def convert_tokens_to_ids(self, tokens):
        return [self.vocab[t] for t in tokens]

    def convert_ids_to_tokens(self, ids):
        tokens = []
        for i in ids:
            tokens.append(self.ids_to_tokens[i])
        return tokens

    def split_to_words(self, text):
        return self.bpe.split_to_words(text)

    def decode(self, tokens):
        # NOTE(review): `tokens` here are BPE ids as strings (as produced by
        # tokenize()); special-token strings are filtered out before decoding.
        return self.bpe.decode([int(t) for t in tokens if (t not in self.special_tokens)])

    def add_special_token(self, token):
        """Register `token` as special and return its id."""
        self.special_tokens.append(token)
        return self.add_symbol(token)

    def part_of_whole_word(self, token, is_bos=False):
        """True when `token` does not start a new word (no leading space,
        not a lone whitespace/control/punctuation character)."""
        if is_bos:
            return True
        s = self._decode(token)
        if ((len(s) == 1) and (_is_whitespace(list(s)[0]) or _is_control(list(s)[0]) or _is_punctuation(list(s)[0]))):
            return False
        return (not s.startswith(' '))

    def sym(self, id):
        # id -> token string.
        return self.ids_to_tokens[id]

    def id(self, sym):
        # token string -> id.
        return self.vocab[sym]

    def _encode(self, x: str) -> str:
        return ' '.join(map(str, self.bpe.encode(x)))

    def _decode(self, x: str) -> str:
        return self.bpe.decode(map(int, x.split()))

    def add_symbol(self, word, n=1):
        """Add `word` with count `n`, or bump its count; return its id."""
        if (word in self.indices):
            idx = self.indices[word]
            self.count[idx] = (self.count[idx] + n)
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def save_pretrained(self, path: str, filename_prefix: str=None):
        """Serialise the raw GPT-2 encoder dump with torch.save; returns the
        written path as a 1-tuple (transformers-style API)."""
        import torch
        filename = VOCAB_FILES_NAMES[list(VOCAB_FILES_NAMES.keys())[0]]
        if (filename_prefix is not None):
            filename = ((filename_prefix + '-') + filename)
        full_path = os.path.join(path, filename)
        torch.save(self.gpt2_encoder, full_path)
        return (full_path,)
def register_Ns3ChannelParams_methods(root_module, cls):
    """Register ns3::ChannelParams constructors and instance attributes on the
    pybindgen class wrapper `cls` (`root_module` is unused here)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ChannelParams const &', 'arg0')])
    # doubleVector_t members.
    for attr_name in ('m_delaySpread', 'm_doppler', 'm_powerFraction'):
        cls.add_instance_attribute(attr_name, 'ns3::doubleVector_t', is_const=False)
    # complex2DVector_t members.
    for attr_name in ('m_rxSpatialMatrix', 'm_txSpatialMatrix'):
        cls.add_instance_attribute(attr_name, 'ns3::complex2DVector_t', is_const=False)
    return
class _EnvironWrapper(_Environ):
    """os.environ-like mapping that prints a colourised diff to stderr on
    every mutation and records changed keys in the module-level
    _CHANGED_ENV dict.

    Fix: None comparisons now use `is None` instead of `== None` (PEP 8;
    also robust against values with exotic __eq__).
    """

    def __setitem__(self, name: str, value: str) -> None:
        orig = self.get(name, None)
        _Environ.__setitem__(self, name, value)
        new = self[name]
        self._print_diff(name, orig, new)

    def __delitem__(self, name: str) -> None:
        orig = self.get(name, None)
        _Environ.__delitem__(self, name)
        new = self.get(name, None)
        self._print_diff(name, orig, new)

    def pop(self, name: str, default: Optional[str]=None) -> Optional[str]:
        orig = self.get(name, None)
        value = _Environ.pop(self, name, default)
        new = self.get(name, None)
        self._print_diff(name, orig, new)
        return value

    def _print_diff(self, name, orig, new):
        """Print one add/remove/change line per mutation; no-op when unchanged."""
        G = escape_codes['bold_green']
        R = escape_codes['bold_red']
        N = escape_codes['reset']
        if (orig == new):
            return
        _CHANGED_ENV[name] = new
        p = (lambda v: print(v, file=sys.stderr, flush=True))
        if orig is None:
            p(f'{G}:: ENV+ {name}={new}{N}')
        elif new is None:
            p(f'{R}:: ENV- {name}={orig}{N}')
        elif new.startswith(orig):
            # Value grew at the end: highlight only the appended suffix.
            kept = len(orig)
            p(f'{G}:: ENV{N} {name}={new[:kept]}{G}{new[kept:]}{N}')
        elif new.endswith(orig):
            # Value grew at the front: highlight only the prepended part.
            added = (len(new) - len(orig))
            p(f'{G}:: ENV{N} {name}={G}{new[:added]}{N}{new[added:]}')
        else:
            p(f'{R}:: ENV- {name}={orig}{N}')
            p(f'{G}:: ENV+ {name}={new}{N}')

    def get_changed_envs(self):
        """Return a snapshot dict of all keys changed through this wrapper."""
        return dict(_CHANGED_ENV)
class DCGANTest(tf.test.TestCase):
    """Shape/graph tests for the DCGAN generator and discriminator.

    NOTE(review): uses TF1-era APIs (tf.set_random_seed, placeholders) and
    `xrange`, a Python 2 builtin — presumably aliased elsewhere in the
    original file; confirm the target Python version.
    """

    def test_generator_run(self):
        # Smoke test: the generator graph must execute without error.
        tf.set_random_seed(1234)
        noise = tf.random_normal([100, 64])
        (image, _) = dcgan.generator(noise)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            image.eval()

    def test_generator_graph(self):
        tf.set_random_seed(1234)
        for (i, batch_size) in zip(xrange(3, 7), xrange(3, 8)):
            tf.reset_default_graph()
            final_size = (2 ** i)
            noise = tf.random_normal([batch_size, 64])
            (image, end_points) = dcgan.generator(noise, depth=32, final_size=final_size)
            self.assertAllEqual([batch_size, final_size, final_size, 3], image.shape.as_list())
            # One deconv endpoint per upsampling step plus the logits layer.
            expected_names = ([('deconv%i' % j) for j in xrange(1, i)] + ['logits'])
            self.assertSetEqual(set(expected_names), set(end_points.keys()))
            for j in range(1, i):
                # Channel count halves with each deconv layer.
                layer = end_points[('deconv%i' % j)]
                self.assertEqual((32 * (2 ** ((i - j) - 1))), layer.get_shape().as_list()[(- 1)])

    def test_generator_invalid_input(self):
        wrong_dim_input = tf.zeros([5, 32, 32])
        with self.assertRaises(ValueError):
            dcgan.generator(wrong_dim_input)
        correct_input = tf.zeros([3, 2])
        with self.assertRaisesRegexp(ValueError, 'must be a power of 2'):
            dcgan.generator(correct_input, final_size=30)
        with self.assertRaisesRegexp(ValueError, 'must be greater than 8'):
            dcgan.generator(correct_input, final_size=4)

    def test_discriminator_run(self):
        # Smoke test: the discriminator graph must execute without error.
        image = tf.random_uniform([5, 32, 32, 3], (- 1), 1)
        (output, _) = dcgan.discriminator(image)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output.eval()

    def test_discriminator_graph(self):
        for (i, batch_size) in zip(xrange(1, 6), xrange(3, 8)):
            tf.reset_default_graph()
            img_w = (2 ** i)
            image = tf.random_uniform([batch_size, img_w, img_w, 3], (- 1), 1)
            (output, end_points) = dcgan.discriminator(image, depth=32)
            self.assertAllEqual([batch_size, 1], output.get_shape().as_list())
            expected_names = ([('conv%i' % j) for j in xrange(1, (i + 1))] + ['logits'])
            self.assertSetEqual(set(expected_names), set(end_points.keys()))
            for j in range(1, (i + 1)):
                # Channel count doubles with each conv layer.
                layer = end_points[('conv%i' % j)]
                self.assertEqual((32 * (2 ** (j - 1))), layer.get_shape().as_list()[(- 1)])

    def test_discriminator_invalid_input(self):
        wrong_dim_img = tf.zeros([5, 32, 32])
        with self.assertRaises(ValueError):
            dcgan.discriminator(wrong_dim_img)
        spatially_undefined_shape = tf.placeholder(tf.float32, [5, 32, None, 3])
        with self.assertRaises(ValueError):
            dcgan.discriminator(spatially_undefined_shape)
        not_square = tf.zeros([5, 32, 16, 3])
        with self.assertRaisesRegexp(ValueError, 'not have equal width and height'):
            dcgan.discriminator(not_square)
        not_power_2 = tf.zeros([5, 30, 30, 3])
        with self.assertRaisesRegexp(ValueError, 'not a power of 2'):
            dcgan.discriminator(not_power_2)
def print_csv(fname):
    """Print the number of rows contained in the CSV file `fname`."""
    with open(fname, 'r') as csv_file:
        row_count = sum(1 for _ in csv.reader(csv_file, delimiter=','))
    print('there are ', row_count, ' rows in the csv file')
class DataLoader(torch.utils.data.DataLoader):
    """KBQA question loader: parses '[head entity]'-annotated questions,
    tokenizes them with a BERT tokenizer, and precomputes a 2-hop entity
    range around the head entity from the fbwq_full training triples.
    """

    def __init__(self, input_dir, fn, bert_name, ent2id, rel2id, batch_size, training=False):
        print('Reading questions from {}'.format(fn))
        self.tokenizer = AutoTokenizer.from_pretrained(bert_name)
        self.ent2id = ent2id
        self.rel2id = rel2id
        self.id2ent = invert_dict(ent2id)
        self.id2rel = invert_dict(rel2id)
        # sub_map: subject -> [(predicate, object)]; so_map: (s, o) -> [p].
        sub_map = defaultdict(list)
        so_map = defaultdict(list)
        for line in open(os.path.join(input_dir, 'fbwq_full/train.txt')):
            l = line.strip().split('\t')
            s = l[0].strip()
            p = l[1].strip()
            o = l[2].strip()
            sub_map[s].append((p, o))
            so_map[(s, o)].append(p)
        data = []
        for line in open(fn):
            line = line.strip()
            if (line == ''):
                continue
            line = line.split('\t')
            if (len(line) != 2):
                continue
            # Question format: "text [head entity] more text\tans1|ans2|..."
            question = line[0].split('[')
            question_1 = question[0]
            question_2 = question[1].split(']')
            head = question_2[0].strip()
            question_2 = question_2[1]
            # NOTE(review): only the text before '[' is kept as the question;
            # `question_2` (text after ']') is discarded — confirm intended.
            question = question_1.strip()
            ans = line[1].split('|')
            # Collect all entities reachable within 2 hops of the head.
            entity_range = set()
            for (p, o) in sub_map[head]:
                entity_range.add(o)
                for (p2, o2) in sub_map[o]:
                    entity_range.add(o2)
            entity_range = [ent2id[o] for o in entity_range]
            head = [ent2id[head]]
            question = self.tokenizer(question.strip(), max_length=64, padding='max_length', return_tensors='pt')
            ans = [ent2id[a] for a in ans]
            data.append([head, question, ans, entity_range])
        print('data number: {}'.format(len(data)))
        dataset = Dataset(data, ent2id)
        super().__init__(dataset, batch_size=batch_size, shuffle=training, collate_fn=collate)
def cost(factor, goldfactors):
    """Dispatch to the cost function selected by `options.cost`.

    Supported values: 'hamming' and 'recall'; anything else raises.
    """
    cost_type = options.cost
    if cost_type == 'hamming':
        return hamming_cost(factor, goldfactors)
    if cost_type == 'recall':
        return recall_oriented_cost(factor, goldfactors)
    raise Exception('undefined cost type', cost_type)
.filterwarnings('ignore::sklearn.exceptions.FitFailedWarning')
.filterwarnings('ignore:Scoring failed:UserWarning')
.filterwarnings('ignore:One or more of the:UserWarning')
.parametrize('HalvingSearch', (HalvingGridSearchCV, HalvingRandomSearchCV))
.parametrize('fail_at', ('fit', 'predict'))
def test_nan_handling(HalvingSearch, fail_at):
    """Halving searches must never pick a NaN-scoring candidate as best and
    must rank all NaN scores jointly last.

    Parametrised over (HalvingGridSearchCV, HalvingRandomSearchCV) and over
    failing in fit vs predict; the pytest mark decorators were stripped
    during extraction (see the fragments just above this function).
    """
    n_samples = 1000
    (X, y) = make_classification(n_samples=n_samples, random_state=0)
    search = HalvingSearch(SometimesFailClassifier(), {f'fail_{fail_at}': [False, True], 'a': range(3)}, resource='n_estimators', max_resources=6, min_resources=1, factor=2)
    search.fit(X, y)
    # The winning candidate must be one that did not fail.
    assert (not search.best_params_[f'fail_{fail_at}'])
    scores = search.cv_results_['mean_test_score']
    ranks = search.cv_results_['rank_test_score']
    # Some candidates must actually have failed (NaN score) ...
    assert np.isnan(scores).any()
    # ... all NaN scores share one rank, and it is the worst one.
    unique_nan_ranks = np.unique(ranks[np.isnan(scores)])
    assert (unique_nan_ranks.shape[0] == 1)
    assert (unique_nan_ranks[0] >= ranks).all()
def generate_alignment(sequences, ep=0.0, op=1.53):
    """Align `sequences` with MAFFT (globalpair, up to 1000 iterations) and
    return the parsed alignment.

    `ep`/`op` are MAFFT's offset and gap-opening penalties. Raises when the
    mafft subprocess exits non-zero.

    Fixes: the failure path called `print(stderr, sys.stderr)`, which printed
    the stream object to stdout instead of writing to stderr; the bare
    `except:` now catches only CalledProcessError and chains it.
    """
    with tempfile.TemporaryDirectory() as tmp:
        tmp_fasta_path = (tmp + '/tmp.fasta')
        write_partitioned_fasta(tmp_fasta_path, sequences)
        align_out = subprocess.run(['mafft', '--thread', '8', '--maxiterate', '1000', '--globalpair', '--ep', str(ep), '--op', str(op), tmp_fasta_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            align_out.check_returncode()
        except subprocess.CalledProcessError as err:
            print(align_out.stderr.decode('utf-8', errors='replace'), file=sys.stderr)
            raise Exception('MAFFT failed with exit code {}'.format(align_out.returncode)) from err
        return parse_fasta_string(align_out.stdout.decode('utf-8'), True)
def test0():
    """Benchmark distance() on a 1500x1500 grid with a single negative seed
    cell; prints the elapsed time and returns the distance field.

    Fix: removed a circular level-set computation (and its radius constant)
    that was immediately overwritten by np.ones_like and therefore dead.
    """
    N = 1500
    (X, Y) = np.meshgrid(np.linspace((- 1), 1, N), np.linspace((- 1), 1, N))
    dx = [(2.0 / (N - 1)), (2.0 / (N - 1))]
    # Level set: +1 everywhere except one negative seed cell in the corner.
    phi = np.ones_like(X)
    phi[0][0] = (- 1)
    t0 = time.time()
    d = distance(phi, dx)
    t1 = time.time()
    print('benchmark time', (t1 - t0))
    return d
_scheduler('multi_step')
class MultiStepScheduler(PythiaScheduler):
    """Multi-step LR decay with an optional linear warmup.

    kwargs must provide use_warmup, lr_steps (sorted iteration milestones),
    lr_ratio, warmup_factor, and warmup_iterations (when warming up).
    `last_epoch` counts iterations here, not epochs.
    """

    def __init__(self, optimizer, *args, **kwargs):
        self.use_warmup = kwargs['use_warmup']
        self.lr_steps = kwargs['lr_steps']
        self.lr_ratio = kwargs['lr_ratio']
        self.warmup_iterations = (kwargs['warmup_iterations'] if self.use_warmup else 0)
        self.warmup_factor = kwargs['warmup_factor']
        # Warmup must end before the first decay milestone.
        assert (self.warmup_iterations < self.lr_steps[0])
        super().__init__(optimizer)

    def get_lr(self):
        if ((self.last_epoch <= self.warmup_iterations) and (self.use_warmup is True)):
            # Linear ramp from warmup_factor * base_lr up to base_lr.
            alpha = (float(self.last_epoch) / float(self.warmup_iterations))
            lr_ratio = ((self.warmup_factor * (1.0 - alpha)) + alpha)
            return [(base_lr * lr_ratio) for base_lr in self.base_lrs]
        else:
            # Multiply by lr_ratio once per milestone already passed.
            return [(base_lr * (self.lr_ratio ** bisect_right(self.lr_steps, self.last_epoch))) for base_lr in self.base_lrs]
def sensitive_topics_classifier(batch_size, data):
    """Score `data` for sensitive topics; returns (scores, meta_data)."""
    # Imported lazily so the heavy checker is only loaded when used.
    from .sensitive_checker import sensitive_scorer
    return sensitive_scorer(batch_size, data)
(scope='module')
def test_data_xy():
    """Fixture data: [X, Y] meshgrid over x in [-1, 1] and y in [0, 2], 11 points each."""
    xs = np.linspace(-1.0, 1.0, 11)
    ys = np.linspace(0.0, 2.0, 11)
    grid = np.meshgrid(xs, ys)
    return list(grid)
class HGFilter(nn.Module):
    """Stacked-hourglass image filter: a downsampling conv stem followed by
    `opt.num_stack` hourglass modules, each emitting an `opt.hourglass_dim`-channel
    feature map. `forward` returns all per-stack outputs plus two intermediate
    feature tensors."""
    def __init__(self, opt):
        super(HGFilter, self).__init__()
        self.num_modules = opt.num_stack
        self.opt = opt
        # Stem: 7x7 stride-2 conv, RGB -> 64 channels.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        # Normalization for the stem; `opt.norm` selects batch vs group norm.
        if (self.opt.norm == 'batch'):
            self.bn1 = nn.BatchNorm2d(64)
        elif (self.opt.norm == 'group'):
            self.bn1 = nn.GroupNorm(32, 64)
        # Second downsampling stage: strided conv ('conv64'/'conv128') or
        # average pooling ('ave_pool', applied in forward()).
        if (self.opt.hg_down == 'conv64'):
            self.conv2 = ConvBlock(64, 64, self.opt.norm)
            self.down_conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'conv128'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
            self.down_conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'ave_pool'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
        else:
            raise NameError('Unknown Fan Filter setting!')
        self.conv3 = ConvBlock(128, 128, self.opt.norm)
        self.conv4 = ConvBlock(128, 256, self.opt.norm)
        # One hourglass + head per stack, registered by name so forward() can
        # fetch them from self._modules.
        for hg_module in range(self.num_modules):
            self.add_module(('m' + str(hg_module)), HourGlass(1, opt.num_hourglass, 256, self.opt.norm))
            self.add_module(('top_m_' + str(hg_module)), ConvBlock(256, 256, self.opt.norm))
            self.add_module(('conv_last' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            if (self.opt.norm == 'batch'):
                self.add_module(('bn_end' + str(hg_module)), nn.BatchNorm2d(256))
            elif (self.opt.norm == 'group'):
                self.add_module(('bn_end' + str(hg_module)), nn.GroupNorm(32, 256))
            # 'l<i>': per-stack output head producing opt.hourglass_dim channels.
            self.add_module(('l' + str(hg_module)), nn.Conv2d(256, opt.hourglass_dim, kernel_size=1, stride=1, padding=0))
            # Intermediate-supervision remix layers for all but the last stack.
            if (hg_module < (self.num_modules - 1)):
                self.add_module(('bl' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module(('al' + str(hg_module)), nn.Conv2d(opt.hourglass_dim, 256, kernel_size=1, stride=1, padding=0))
    def forward(self, x):
        """Run the stem and every hourglass stack.

        Returns (outputs, tmpx, normx): per-stack prediction maps, the detached
        post-stem features, and the features after the second downsampling stage.
        """
        x = F.relu(self.bn1(self.conv1(x)), True)
        tmpx = x
        if (self.opt.hg_down == 'ave_pool'):
            x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        elif (self.opt.hg_down in ['conv64', 'conv128']):
            x = self.conv2(x)
            x = self.down_conv2(x)
        else:
            raise NameError('Unknown Fan Filter setting!')
        normx = x
        x = self.conv3(x)
        x = self.conv4(x)
        previous = x
        outputs = []
        for i in range(self.num_modules):
            hg = self._modules[('m' + str(i))](previous)
            ll = hg
            ll = self._modules[('top_m_' + str(i))](ll)
            ll = F.relu(self._modules[('bn_end' + str(i))](self._modules[('conv_last' + str(i))](ll)), True)
            tmp_out = self._modules[('l' + str(i))](ll)
            outputs.append(tmp_out)
            # Residual remix feeds the next stack (intermediate supervision).
            if (i < (self.num_modules - 1)):
                ll = self._modules[('bl' + str(i))](ll)
                tmp_out_ = self._modules[('al' + str(i))](tmp_out)
                previous = ((previous + ll) + tmp_out_)
        return (outputs, tmpx.detach(), normx)
_dispatch
def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False, workers=None):
    """Multimethod stub for the N-D inverse DST: marks `x` as the dispatchable array."""
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
def get_evaluation_extra_data_key(evaluation_id):
    """Storage key for an evaluation's auxiliary binary blob."""
    return f'evaluations/{evaluation_id}_data.bytes'
def point_from(arr):
    """Map normalized [0, 1] coordinates in `arr` to a pixel-space gr.Point."""
    px = int(arr[0] * WIDTH)
    py = int(arr[1] * HEIGHT)
    return gr.Point(px, py)
def test_estimator_getstate_using_slots_error_message():
    """Both __getstate__ and pickling must reject estimators that use __slots__."""
    class _Slotted:
        __slots__ = ('x',)

    class _SlottedEstimator(BaseEstimator, _Slotted):
        pass

    msg = 'You cannot use `__slots__` in objects inheriting from `sklearn.base.BaseEstimator`'
    with pytest.raises(TypeError, match=msg):
        _SlottedEstimator().__getstate__()
    with pytest.raises(TypeError, match=msg):
        pickle.dumps(_SlottedEstimator())
def keyword_ifelse(A: dace.float32[N], B: dace.float32[N], C: dace.int32):
    # DaCe program: fills B from A according to the scalar selector C.
    # (Left structurally untouched: DaCe parses this AST to build the SDFG.)
    if (C == 0):
        # C == 0: elementwise negation.
        B[:] = (- A[:])
    elif (C == 1):
        # C == 1: elementwise square.
        B[:] = (A[:] * A[:])
    else:
        # Any other C: plain copy.
        B[:] = A
class TestSuiteAssertionCheckedCoverageFunction(TestSuiteCoverageFunction):
    """Coverage function scoring the assertion-checked coverage of a test suite."""

    def compute_coverage(self, individual) -> float:
        # Run the suite, merge its execution traces, and score them against
        # the subject properties known to the executor's tracer.
        execution_results = self._run_test_suite_chromosome(individual)
        merged = analyze_results(execution_results)
        subject_properties = self._executor.tracer.get_subject_properties()
        return compute_assertion_checked_coverage(merged, subject_properties)
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """Model output container holding the final hidden states plus optional
    cached key/values, per-layer hidden states, and (cross-)attention weights.
    All fields except `last_hidden_state` default to None when the
    corresponding outputs were not requested."""
    last_hidden_state: torch.FloatTensor
    past_key_values: Optional[List[torch.FloatTensor]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
.pure
def test_cast_float_to_long(sdfg_name):
    """ONNX Cast float32 -> int64 must expand to a pure (Map-based) implementation."""
    sdfg = dace.SDFG(sdfg_name)
    sdfg.add_array('X', [2, 4], dace.float32)
    sdfg.add_array('__return', [2, 4], dace.int64)
    state = sdfg.add_state()
    x_read = state.add_access('X')
    out_write = state.add_access('__return')
    cast_node = donnx.ONNXCast('Cast')
    cast_node.to = converters.typeclass_to_onnx_tensor_type_int(dace.int64)
    state.add_node(cast_node)
    state.add_edge(x_read, None, cast_node, 'input', sdfg.make_array_memlet('X'))
    state.add_edge(cast_node, 'output', out_write, None, sdfg.make_array_memlet('__return'))
    inp = np.random.normal(scale=10, size=(2, 4)).astype(np.float32)
    sdfg.expand_library_nodes()
    # The pure expansion materializes as at least one Map in the SDFG.
    assert any(isinstance(n, dace.nodes.MapEntry) for (n, _) in sdfg.all_nodes_recursive())
    result = sdfg(X=inp)
    assert_allclose(inp.astype(np.int64), result)
def load(path: PathLike) -> dict[str, Any]:
    """Read a TOML config file.

    Missing file: create the config directory and return {}.
    Invalid TOML: return {} (best-effort, errors are deliberately swallowed).
    """
    try:
        with open(path, 'rb') as config_file:
            return tomli.load(config_file)
    except FileNotFoundError:
        _try_make_config_directory(path)
        return {}
    except tomli.TOMLDecodeError:
        return {}
def conv3(in_planes, out_planes, stride=2):
    """Three stacked 3x3 conv + PReLU pairs; only the first conv strides."""
    layers = [
        nn.Conv2d(in_planes, out_planes, 3, stride, 1),
        nn.PReLU(out_planes),
        nn.Conv2d(out_planes, out_planes, 3, 1, 1),
        nn.PReLU(out_planes),
        nn.Conv2d(out_planes, out_planes, 3, 1, 1),
        nn.PReLU(out_planes),
    ]
    return nn.Sequential(*layers)
def make_and_restore_model(*_, arch, dataset, resume_path=None, parallel=True, pytorch_pretrained=False):
    """Build an AttackerModel and optionally restore weights from a checkpoint.

    Args:
        arch: model name (looked up via dataset.get_model) or an nn.Module.
        dataset: dataset object providing get_model().
        resume_path: optional checkpoint path; raises ValueError if missing.
        parallel: wrap in DataParallel and move to CUDA after loading.
        pytorch_pretrained: forwarded to dataset.get_model for string archs.

    Returns:
        (model, checkpoint) where checkpoint is None when not resuming.
    """
    classifier_model = (dataset.get_model(arch, pytorch_pretrained) if isinstance(arch, str) else arch)
    model = AttackerModel(classifier_model, dataset)
    checkpoint = None
    if resume_path:
        if os.path.isfile(resume_path):
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = ch.load(resume_path, pickle_module=dill)
            # Older checkpoints store weights under 'state_dict', newer under 'model'.
            state_dict_path = 'model'
            if (not ('model' in checkpoint)):
                state_dict_path = 'state_dict'
            sd = checkpoint[state_dict_path]
            # Strip the DataParallel 'module.' prefix only where it is present.
            # The old unconditional slice chopped 7 characters off EVERY key,
            # corrupting keys saved without the prefix.
            sd = {(k[len('module.'):] if k.startswith('module.') else k): v for (k, v) in sd.items()}
            model.load_state_dict(sd)
            if parallel:
                model = ch.nn.DataParallel(model)
            model = model.cuda()
            print("=> loaded checkpoint '{}' (epoch {})".format(resume_path, checkpoint['epoch']))
        else:
            error_msg = "=> no checkpoint found at '{}'".format(resume_path)
            raise ValueError(error_msg)
    return (model, checkpoint)
def run_demo(model='inpainting', data='mnist', category=0, p_rem=1, type_rem='uniform', Delta=0.001, seed=0, max_iter=1000, save_fig=False, block=False):
    """Run an expectation-propagation demo (inpainting or denoising) on MNIST-like data.

    Returns a dict with the run configuration, the observed signal `y`, the
    ground truth `x`, and the EP reconstruction `x_pred`.
    """
    if model == 'inpainting':
        model_params = {'name': 'inpainting', 'N': 784, 'p_rem': p_rem, 'type': type_rem}
    elif model == 'denoising':
        model_params = {'name': 'denoising', 'N': 784, 'Delta': Delta}
    else:
        raise NotImplementedError('Models available for demo: [inpainting, denoising]')
    if data not in ('mnist', 'fashion_mnist'):
        raise NotImplementedError('Dataset available for demo: [mnist, fashion_mnist]')
    data_params = {'name': data, 'category': category}
    prior_params = {'name': 'VAE', 'type': data, 'id': '20_relu_400_sigmoid_784_bias'}
    EP = Model_Prior(model_params=model_params, data_params=data_params, prior_params=prior_params, seed=seed)
    EP.setup()
    (mse_ep, mse) = EP.run_ep(max_iter=max_iter)
    if model == 'inpainting':
        # For inpainting, report the partially observed image as y.
        EP.y_true['y'] = EP.y_inp
    return {'model': model, 'Delta': Delta, 'p_rem': p_rem, 'category': category, 'seed': seed, 'y': EP.y_true['y'], 'x': EP.x_true['x'], 'x_pred': EP.x_pred['x']}
def _len(L):
try:
return L.cardinality()
except AttributeError:
return len(L) |
def show_test_anomaly_results(base: Path):
    """Streamlit page: slider over saved '<idx> - test_anomaly_results.png' images in `base`."""
    indices = []
    for png in base.glob('* - test_anomaly_results.png'):
        prefix, _ = png.stem.split(' - ')
        indices.append(int(prefix))
    indices.sort()
    # Assume indices are evenly spaced; use the first gap as the slider step.
    chosen = st.slider(label=' ', min_value=min(indices), max_value=max(indices), value=min(indices), step=(indices[1] - indices[0]))
    result_img = Image.open(base / f'{chosen} - test_anomaly_results.png')
    plt.figure(figsize=(9, 3))
    plt.imshow(result_img)
    plt.axis('off')
    plt.tight_layout()
    st.pyplot()
def create_model_7(input_shape):
    """Model with two parallel randomly-initialized BatchNorms on one input;
    the second branch passes through ReLU and the branches are summed."""
    inputs = Input(shape=input_shape, name='input1')
    branch_a = BatchNormalization(gamma_initializer='random_normal', beta_initializer='random_normal', name='bn1')(inputs)
    branch_b = BatchNormalization(gamma_initializer='random_normal', beta_initializer='random_normal', name='bn2')(inputs)
    branch_b = ReLU()(branch_b)
    outputs = branch_a + branch_b
    return keras.Model(inputs=inputs, outputs=outputs)
def check_yaml_vs_script(hparam_file, script_file):
    """Verify every variable declared in a hyperparameter YAML is used by its script.

    Returns True when no unused variables are found; False otherwise (including
    when either file is missing). Findings are printed as they are detected.
    """
    print('Checking %s...' % hparam_file)
    # Both files must exist before any comparison is possible.
    for required in (hparam_file, script_file):
        if not os.path.exists(required):
            print('File %s not found!' % (required,))
            return False
    declared = get_yaml_var(hparam_file)
    used = detect_script_vars(script_file, declared)
    # Run-options consumed by the generic training loop rather than the script.
    run_opt_keys = ['debug', 'debug_batches', 'debug_epochs', 'device', 'cpu', 'data_parallel_backend', 'distributed_launch', 'distributed_backend', 'find_unused_parameters', 'jit_module_keys', 'auto_mix_prec', 'max_grad_norm', 'nonfinite_patience', 'noprogressbar', 'ckpt_interval_minutes', 'grad_accumulation_factor', 'optimizer_step_limit']
    unused = list(set(declared) - set(used) - set(run_opt_keys))
    for name in unused:
        print('\tERROR: variable "%s" not used in %s!' % (name, script_file))
    return len(unused) == 0
def build_optimizer(params, train_steps, precision):
    """Create a TF optimizer from a params dict.

    Pops schedule/EMA/clipping keys out of the dict, attaches a learning-rate
    schedule, optionally wraps with a moving-average optimizer (TF Addons) and,
    for 'mixed_float16', with a dynamic LossScaleOptimizer.
    """
    opt_params = dict(deepcopy(params))
    lr_params = opt_params.pop('lr_params', None)
    use_moving_average = opt_params.pop('use_moving_average', None)
    moving_average_decay = opt_params.pop('moving_average_decay', None)
    # Clipping keys are handled elsewhere; drop them before tf.optimizers.get.
    opt_params.pop('global_clipnorm', None)
    opt_params.pop('clipnorm', None)
    opt_params['learning_rate'] = get_learning_rate_schedule(train_steps, lr_params)
    optimizer = tf.optimizers.get({'class_name': opt_params['name'], 'config': opt_params})
    if use_moving_average:
        try:
            import tensorflow_addons as tfa
            optimizer = tfa.optimizers.MovingAverage(optimizer=optimizer, average_decay=moving_average_decay, dynamic_decay=True)
        except ImportError:
            logging.warning('Failed to import TensorFlow Addons, building optimizer with `use_moving_average=False`')
    if precision == 'mixed_float16':
        logging.info('Wrapping optimizer with `LossScaleOptimizer` for AMP training')
        optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)
    logging.info('Optimizer Config:\n{}'.format(json.dumps(optimizer.get_config(), indent=4)))
    return optimizer
def test_depthwise_separable_conv():
    """Exercise DepthwiseSeparableConvModule configuration options: norm/act
    placement, spectral norm, padding modes, and per-branch overrides."""
    # in_channels must be divisible into groups for the depthwise conv.
    with pytest.raises(AssertionError):
        DepthwiseSeparableConvModule(4, 8, 2, groups=2)
    # Defaults: depthwise groups == in_channels, 1x1 pointwise, no norm, ReLU.
    conv = DepthwiseSeparableConvModule(3, 8, 2)
    assert (conv.depthwise_conv.conv.groups == 3)
    assert (conv.pointwise_conv.conv.kernel_size == (1, 1))
    assert (not conv.depthwise_conv.with_norm)
    assert (not conv.pointwise_conv.with_norm)
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Norm applied only to the depthwise branch.
    conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (not conv.pointwise_conv.with_norm)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Norm applied only to the pointwise branch.
    conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
    assert (not conv.depthwise_conv.with_norm)
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Custom op order still produces the expected output shape.
    conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Spectral norm adds a 'weight_orig' parameter to both convs.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
    assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Non-default padding mode uses an explicit padding layer.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Activation override on the depthwise branch only.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Activation override on the pointwise branch only.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
def check_pdf_logpdf_at_endpoints(distfn, args, msg):
    """Check that logpdf == log(pdf) at the support endpoints of `distfn`.

    `msg` is a caller-supplied label included in the assertion error message.
    """
    points = np.array([0, 1])
    vals = distfn.ppf(points, *args)
    vals = vals[np.isfinite(vals)]
    with suppress_warnings() as sup:
        # Endpoint evaluation may legitimately divide by zero / take log(0).
        suppress_messages = ['divide by zero encountered in true_divide', 'divide by zero encountered in log', 'divide by zero encountered in power', 'invalid value encountered in add', 'invalid value encountered in subtract', 'invalid value encountered in multiply']
        # BUGFIX: the original loop variable was also named `msg`, shadowing
        # the caller's label, so the assertion error reported the last warning
        # filter string instead of the distribution under test.
        for warn_msg in suppress_messages:
            sup.filter(category=RuntimeWarning, message=warn_msg)
        pdf = distfn.pdf(vals, *args)
        logpdf = distfn.logpdf(vals, *args)
        # Compare only where both quantities are finite and pdf is nonzero.
        pdf = pdf[((pdf != 0) & np.isfinite(pdf))]
        logpdf = logpdf[np.isfinite(logpdf)]
        msg += ' - logpdf-log(pdf) relationship'
        npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
def _random_linkability_attack(n_synthetic: int, n_attacks: int, n_neighbors: int) -> LinkabilityIndexes:
    """Baseline linkability attack: two independent draws of random links."""
    def _draw():
        return _random_links(n_synthetic=n_synthetic, n_attacks=n_attacks, n_neighbors=n_neighbors)
    # Keyword args are evaluated left to right, matching the original call order.
    return LinkabilityIndexes(idx_0=_draw(), idx_1=_draw())
class GaussianLatentVAE(VAEBase):
    """VAE base with a diagonal-Gaussian latent: reparameterized sampling,
    standard-normal-prior KL, and mean-as-encoding at eval time."""
    def __init__(self, representation_size):
        super().__init__(representation_size)
        # Standard-normal latent prior parameters (numpy, used by samplers).
        self.dist_mu = np.zeros(self.representation_size)
        self.dist_std = np.ones(self.representation_size)
    def rsample(self, latent_distribution_params):
        """Reparameterized sample: mu + std * eps, with std = exp(logvar / 2)."""
        (mu, logvar) = latent_distribution_params
        stds = (0.5 * logvar).exp()
        epsilon = ptu.randn(*mu.size())
        latents = ((epsilon * stds) + mu)
        return latents
    def reparameterize(self, latent_distribution_params):
        """Sample during training; return the mean deterministically at eval."""
        if self.training:
            return self.rsample(latent_distribution_params)
        else:
            return latent_distribution_params[0]
    def kl_divergence(self, latent_distribution_params):
        """Mean KL(q(z|x) || N(0, I)) over the batch, summed over latent dims."""
        (mu, logvar) = latent_distribution_params
        return ((- 0.5) * torch.sum((((1 + logvar) - mu.pow(2)) - logvar.exp()), dim=1).mean())
    def get_encoding_from_latent_distribution_params(self, latent_distribution_params):
        """Deterministic encoding: the posterior mean, moved to CPU."""
        return latent_distribution_params[0].cpu()
.expansion
class ExpandCholeskyOpenBLAS(ExpandTransformation):
    """Expansion of the Cholesky library node targeting the OpenBLAS backend."""
    environments = [blas_environments.openblas.OpenBLAS]
    def expansion(node, parent_state, parent_sdfg, **kwargs):
        # Delegate SDFG construction to the shared helper, tagged for OpenBLAS.
        return _make_sdfg(node, parent_state, parent_sdfg, 'OpenBLAS')
class KerasModelBuilder(ModelBuilder):
    """Builds a Keras sequential model from model states, with optional
    multi-GPU (int) or mirrored-strategy (list of devices) support."""

    def __init__(self, inputs_op, output_op, model_compile_dict, model_space=None, gpus=None, **kwargs):
        self.model_compile_dict = model_compile_dict
        self.input_node = inputs_op
        self.output_node = output_op
        self.model_space = model_space
        self.gpus = gpus

    def __call__(self, model_states):
        # Arguments shared by all build paths.
        common = dict(model_states=model_states, input_state=self.input_node, output_state=self.output_node, model_compile_dict=self.model_compile_dict, model_space=self.model_space)
        if (self.gpus is None) or (self.gpus == 1):
            model = build_sequential_model(**common)
        elif type(self.gpus) is int:
            model = build_multi_gpu_sequential_model(gpus=self.gpus, **common)
        elif type(self.gpus) is list:
            mirrored_strategy = tf.distribute.MirroredStrategy(devices=self.gpus)
            with mirrored_strategy.scope():
                model = build_sequential_model(**common)
        return model
def criteria_3_is_valid(index_date, start_date, baseline):
    """True when at least `baseline` days elapsed between start_date and index_date."""
    elapsed_days = (index_date - start_date).days
    return elapsed_days >= baseline
def get_trainable_quantizer_weights_config(n: BaseNode, weights_quantization_candidates: List[TrainableQuantizerCandidateConfig]=None) -> TrainableQuantizerWeightsConfig:
    """Build a TrainableQuantizerWeightsConfig from a node's final weights
    quantization config; logs an error if the node has none."""
    if (n.final_weights_quantization_cfg is None):
        Logger.error(f'Node must have final_weights_quantization_cfg in order to build quantizer configuration')
    cfg = n.final_weights_quantization_cfg
    return TrainableQuantizerWeightsConfig(cfg.weights_quantization_method,
                                           cfg.weights_n_bits,
                                           cfg.weights_quantization_params,
                                           cfg.enable_weights_quantization,
                                           cfg.weights_channels_axis,
                                           cfg.weights_per_channel_threshold,
                                           cfg.min_threshold,
                                           weights_quantization_candidates)
def subtokenizer(identifier):
    """Split an identifier into subtokens.

    First splits on '.', '_' and '-', then on camelCase and ACRONYMWord
    boundaries within each part.
    """
    camel_splitter = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
    parts = re.split('[._\\-]', identifier)
    # findall on a pattern without capturing groups yields the full matches,
    # equivalent to [m.group(0) for m in finditer(...)].
    return [token for part in parts for token in camel_splitter.findall(part)]
def _extract_return_annotation(sigstr, has_return_anno):
if (not has_return_anno):
return ''
return sigstr.split(')')[1] |
class TrainOptions(BaseOptions):
    """Command-line options used only during training (extends BaseOptions)."""
    def __init__(self):
        super().__init__()
        # Marks this option set as the training configuration.
        self.isTrain = True
    def initialize(self, parser):
        """Add training-specific arguments on top of the base options; returns the parser."""
        super().initialize(parser)
        parser.add_argument('--continue_train', type=util.str2bool, default=False, help='resume training from last checkpoint')
        parser.add_argument('--pretrained_name', type=str, default=None, help='Load weights from the checkpoint of another experiment')
        return parser
def TietzeGraph():
    """Construct the Tietze graph (12 vertices, cubic) with a two-ring embedding."""
    # Chords plus the two cycles added below give every vertex degree 3.
    g = Graph([(0, 9), (3, 10), (6, 11), (1, 5), (2, 7), (4, 8)], name='Tietze Graph')
    g.add_cycle(list(range(9)))
    g.add_cycle([9, 10, 11])
    # Outer ring of 9 vertices, inner triangle at half radius.
    g._circle_embedding(list(range(9)))
    g._circle_embedding([9, 10, 11], radius=0.5)
    return g
def replace_params(hf_params, tf_params, key_mapping):
    """Copy TF checkpoint tensors into a HF state dict in place.

    `key_mapping` maps TF variable names to HF parameter names; unmapped TF
    keys are skipped. Each tensor is converted to the torch layout expected
    for its kind (conv kernels permuted, dense kernels transposed, etc.).
    """
    # (The original also built `list(hf_params.keys())` and discarded it.)
    for (key, value) in tf_params.items():
        if (key not in key_mapping):
            continue
        hf_key = key_mapping[key]
        if (('_conv' in key) and ('kernel' in key)):
            # Conv kernels: TF HWIO -> torch OIHW.
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif ('embeddings' in key):
            new_hf_value = torch.from_numpy(value)
        elif ('depthwise_kernel' in key):
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif ('kernel' in key):
            # Dense kernels are transposed between TF and torch.
            new_hf_value = torch.from_numpy(np.transpose(value))
        elif ('temperature' in key):
            new_hf_value = value
        elif (('bn/gamma' in key) or ('bn/beta' in key)):
            # BUGFIX: the original condition was `('bn/gamma' or ('bn/beta' in
            # key))` -- the non-empty literal is always truthy, so EVERY key
            # reaching this point was transposed and squeezed and the `else`
            # branch below was dead code.
            new_hf_value = torch.from_numpy(np.transpose(value)).squeeze()
        else:
            new_hf_value = torch.from_numpy(value)
        hf_params[hf_key].copy_(new_hf_value)
class ObjectOnNode(NodeEnumerator):
    """Enumerates every node in the environment that sits ON a given surface node."""

    def __init__(self, node: Node):
        self.surface_node = node

    def enumerate(self, state: EnvironmentState, **kwargs):
        for candidate in state.get_nodes():
            # Built inside the loop (as before) so each evaluation is independent.
            query = ExistsRelation(NodeInstance(candidate), Relation.ON, NodeInstanceFilter(self.surface_node))
            if state.evaluate(query):
                yield candidate
def save_np_arrays(arrays, directory, filename):
    """Persist numpy arrays as metadata under directory/filename via the numpy serializer."""
    save_metadata(arrays, directory, default=numpy_serialize, filename=filename)
def densenet161(pretrained=False, progress=True, device='cpu', **kwargs):
    """DenseNet-161: growth rate 48, block config (6, 12, 36, 24), 96 initial features."""
    block_config = (6, 12, 36, 24)
    return _densenet('densenet161', 48, block_config, 96, pretrained, progress, device, **kwargs)
class BaseResolver():
    """Maps YAML nodes to tags via implicit (regexp on scalar values) and
    path-based resolvers. Registration mutates per-class registries;
    descend/ascend maintain a stack of active path matches during composition.

    NOTE(review): in upstream PyYAML, add_implicit_resolver and
    add_path_resolver are @classmethod; the decorators appear to have been
    lost in this rendering of the source -- confirm before calling them on
    the class.
    """
    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
    # Class-level registries; subclasses get copy-on-write copies (see below).
    yaml_implicit_resolvers = {}
    yaml_path_resolvers = {}
    def __init__(self):
        # Per-instance stacks of currently matching exact/prefix path resolvers.
        self.resolver_exact_paths = []
        self.resolver_prefix_paths = []
    def add_implicit_resolver(cls, tag, regexp, first):
        """Register `regexp` -> `tag` for scalars starting with any char in `first`."""
        # Copy-on-write: don't mutate a registry inherited from a base class.
        if (not ('yaml_implicit_resolvers' in cls.__dict__)):
            implicit_resolvers = {}
            for key in cls.yaml_implicit_resolvers:
                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
            cls.yaml_implicit_resolvers = implicit_resolvers
        # `None` is the wildcard bucket consulted for every value.
        if (first is None):
            first = [None]
        for ch in first:
            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
    def add_path_resolver(cls, tag, path, kind=None):
        """Register `tag` for nodes reached via `path` (sequence of node/index
        checks), optionally restricted to a node `kind`."""
        # Copy-on-write as above.
        if (not ('yaml_path_resolvers' in cls.__dict__)):
            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
        # Normalize each path element to a (node_check, index_check) pair.
        new_path = []
        for element in path:
            if isinstance(element, (list, tuple)):
                if (len(element) == 2):
                    (node_check, index_check) = element
                elif (len(element) == 1):
                    node_check = element[0]
                    index_check = True
                else:
                    raise ResolverError(('Invalid path element: %s' % element))
            else:
                node_check = None
                index_check = element
            # Python types map to the corresponding YAML node classes.
            if (node_check is str):
                node_check = ScalarNode
            elif (node_check is list):
                node_check = SequenceNode
            elif (node_check is dict):
                node_check = MappingNode
            elif ((node_check not in [ScalarNode, SequenceNode, MappingNode]) and (not isinstance(node_check, str)) and (node_check is not None)):
                raise ResolverError(('Invalid node checker: %s' % node_check))
            if ((not isinstance(index_check, (str, int))) and (index_check is not None)):
                raise ResolverError(('Invalid index checker: %s' % index_check))
            new_path.append((node_check, index_check))
        # Normalize the terminal kind the same way.
        if (kind is str):
            kind = ScalarNode
        elif (kind is list):
            kind = SequenceNode
        elif (kind is dict):
            kind = MappingNode
        elif ((kind not in [ScalarNode, SequenceNode, MappingNode]) and (kind is not None)):
            raise ResolverError(('Invalid node kind: %s' % kind))
        cls.yaml_path_resolvers[(tuple(new_path), kind)] = tag
    def descend_resolver(self, current_node, current_index):
        """Push the path-resolver state for one step deeper into the document."""
        if (not self.yaml_path_resolvers):
            return
        exact_paths = {}
        prefix_paths = []
        if current_node:
            depth = len(self.resolver_prefix_paths)
            # Advance each still-matching prefix; fully matched paths become exact.
            for (path, kind) in self.resolver_prefix_paths[(- 1)]:
                if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
                    if (len(path) > depth):
                        prefix_paths.append((path, kind))
                    else:
                        exact_paths[kind] = self.yaml_path_resolvers[(path, kind)]
        else:
            # Document root: seed from every registered path.
            for (path, kind) in self.yaml_path_resolvers:
                if (not path):
                    exact_paths[kind] = self.yaml_path_resolvers[(path, kind)]
                else:
                    prefix_paths.append((path, kind))
        self.resolver_exact_paths.append(exact_paths)
        self.resolver_prefix_paths.append(prefix_paths)
    def ascend_resolver(self):
        """Pop the path-resolver state pushed by the matching descend_resolver."""
        if (not self.yaml_path_resolvers):
            return
        self.resolver_exact_paths.pop()
        self.resolver_prefix_paths.pop()
    def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
        """Return True if path[depth-1] matches the current node/index pair."""
        (node_check, index_check) = path[(depth - 1)]
        # Node check: tag string, node class, or None (wildcard).
        if isinstance(node_check, str):
            if (current_node.tag != node_check):
                return
        elif (node_check is not None):
            if (not isinstance(current_node, node_check)):
                return
        # index_check True means "any key position"; False/None means "a value".
        if ((index_check is True) and (current_index is not None)):
            return
        if (((index_check is False) or (index_check is None)) and (current_index is None)):
            return
        if isinstance(index_check, str):
            if (not (isinstance(current_index, ScalarNode) and (index_check == current_index.value))):
                return
        elif (isinstance(index_check, int) and (not isinstance(index_check, bool))):
            if (index_check != current_index):
                return
        return True
    def resolve(self, kind, value, implicit):
        """Resolve a node of `kind` with scalar `value` to a tag.

        Tries implicit regexp resolvers (for plain scalars), then the active
        path resolvers, then falls back to the per-kind default tag.
        """
        if ((kind is ScalarNode) and implicit[0]):
            # Bucket by first character; '' has its own bucket, None is wildcard.
            if (value == ''):
                resolvers = self.yaml_implicit_resolvers.get('', [])
            else:
                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
            resolvers += self.yaml_implicit_resolvers.get(None, [])
            for (tag, regexp) in resolvers:
                if regexp.match(value):
                    return tag
            implicit = implicit[1]
        if self.yaml_path_resolvers:
            exact_paths = self.resolver_exact_paths[(- 1)]
            if (kind in exact_paths):
                return exact_paths[kind]
            if (None in exact_paths):
                return exact_paths[None]
        if (kind is ScalarNode):
            return self.DEFAULT_SCALAR_TAG
        elif (kind is SequenceNode):
            return self.DEFAULT_SEQUENCE_TAG
        elif (kind is MappingNode):
            return self.DEFAULT_MAPPING_TAG
_reader(nn.GradientReversal)
def GradientReversal_reader(reader, version, obj):
    # Backward-compat loader: serialized versions < 2 lacked the 'lambda'
    # coefficient, so restore the historical default of 1.
    if (version < 2):
        # 'lambda' is a Python keyword, hence setattr instead of attribute syntax.
        setattr(obj, 'lambda', 1)
def test_IndexedArray_getitem():
    """Integer and slice __getitem__ on an IndexedArray must follow the index."""
    content = ak.from_iter([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9], highlevel=False)
    index = ak.index.Index64(np.array([3, 2, 2, 5, 0, 7], dtype=np.int64))
    array = ak.highlevel.Array(ak.contents.IndexedArray(index, content))

    def pick(arr, i):
        return arr[i]

    def window(arr, lo, hi):
        return arr[lo:hi]

    assert ([pick(array, j) for j in range(4)] == [3.3, 2.2, 2.2, 5.5])
    assert (ak.operations.to_list(window(array, 1, 5)) == [2.2, 2.2, 5.5, 0])
def windows_nvToolsExt_path():
    """Locate a nvToolsExt*.dll under NVTOOLSEXT_PATH (or the default Windows
    install dir); return '' when none is found."""
    default_home = 'C:/Program Files/NVIDIA Corporation/NvToolsExt'
    nvtools_home = os.getenv('NVTOOLSEXT_PATH', default_home)
    if os.path.exists(nvtools_home):
        candidates = glob.glob(nvtools_home + '/bin/x64/nvToolsExt*.dll')
        if candidates:
            return candidates[0]
    return ''
class Partition0(nn.Module):
LAYER_SCOPES = ['VisionTransformer/PatchEmbed[patch_embed]/Conv2d[proj]', 'VisionTransformer/Dropout[pos_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[0]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[0]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[1]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[1]/Mlp[mlp]/Dropout[drop]', 
'VisionTransformer/ModuleList[blocks]/Block[1]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[2]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Linear[fc2]']
TENSORS = ['VisionTransformer/Parameter[cls_token]', 'VisionTransformer/Parameter[pos_embed]']
    def __init__(self, layers, tensors, device='cuda:0'):
        """Assemble this pipeline partition from pre-built layers/tensors.

        `layers` maps LAYER_SCOPES entries to modules (registered as l_<idx>);
        `tensors` maps TENSORS entries to parameters/buffers (registered as
        p_<i> / b_<i>). Everything is moved to `device`.
        """
        super().__init__()
        # Register each traced layer under a positional name l_<idx>.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Separate counters for parameters (p_*) and buffers (b_*).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Shape descriptor used by unflatten() on the forward() inputs.
        self.input_structure = [1]
        # Maps the positional names back to the original module paths.
        self.lookup = {'l_0': 'patch_embed.proj', 'l_1': 'pos_drop', 'l_2': 'blocks.0.norm1', 'l_3': 'blocks.0.attn.qkv', 'l_4': 'blocks.0.attn.attn_drop', 'l_5': 'blocks.0.attn.proj', 'l_6': 'blocks.0.attn.proj_drop', 'l_7': 'blocks.0.drop_path', 'l_8': 'blocks.0.norm2', 'l_9': 'blocks.0.mlp.fc1', 'l_10': 'blocks.0.mlp.act', 'l_11': 'blocks.0.mlp.drop', 'l_12': 'blocks.0.mlp.fc2', 'l_13': 'blocks.0.mlp.drop', 'l_14': 'blocks.0.drop_path', 'l_15': 'blocks.1.norm1', 'l_16': 'blocks.1.attn.qkv', 'l_17': 'blocks.1.attn.attn_drop', 'l_18': 'blocks.1.attn.proj', 'l_19': 'blocks.1.attn.proj_drop', 'l_20': 'blocks.1.drop_path', 'l_21': 'blocks.1.norm2', 'l_22': 'blocks.1.mlp.fc1', 'l_23': 'blocks.1.mlp.act', 'l_24': 'blocks.1.mlp.drop', 'l_25': 'blocks.1.mlp.fc2', 'l_26': 'blocks.1.mlp.drop', 'l_27': 'blocks.1.drop_path', 'l_28': 'blocks.2.norm1', 'l_29': 'blocks.2.attn.qkv', 'l_30': 'blocks.2.attn.attn_drop', 'l_31': 'blocks.2.attn.proj', 'l_32': 'blocks.2.attn.proj_drop', 'l_33': 'blocks.2.drop_path', 'l_34': 'blocks.2.norm2', 'l_35': 'blocks.2.mlp.fc1', 'l_36': 'blocks.2.mlp.act', 'l_37': 'blocks.2.mlp.drop', 'l_38': 'blocks.2.mlp.fc2', 'p_0': 'cls_token', 'p_1': 'pos_embed'}
        self.to(self.device)
def forward(self, *args):
x0 = unflatten(args, self.input_structure)[0]
t_0 = x0.shape
t_0 = t_0[0]
t_1 = self.l_0(x0)
t_1 = t_1.flatten(2)
t_1 = t_1.transpose(1, 2)
t_0 = self.p_0.expand(t_0, (- 1), (- 1))
t_1 = (t_0, t_1)
t_1 = torch.cat(t_1, dim=1)
t_1 = (t_1 + self.p_1)
t_1 = self.l_1(t_1)
t_0 = self.l_2(t_1)
t_2 = t_0.shape
t_3 = t_2[0]
t_4 = t_2[1]
t_2 = t_2[2]
t_0 = self.l_3(t_0)
t_5 = (t_2 // 16)
t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5)
t_5 = t_5.permute(2, 0, 3, 1, 4)
t_0 = t_5[0]
t_6 = t_5[1]
t_5 = t_5[2]
t_6 = t_6.transpose((- 2), (- 1))
t_6 = (t_0 t_6)
t_6 = (t_6 * 0.125)
t_6 = t_6.softmax(dim=(- 1))
t_6 = self.l_4(t_6)
t_5 = (t_6 t_5)
t_5 = t_5.transpose(1, 2)
t_2 = t_5.reshape(t_3, t_4, t_2)
t_2 = self.l_5(t_2)
t_2 = self.l_6(t_2)
t_2 = self.l_7(t_2)
t_2 = (t_1 + t_2)
t_1 = self.l_8(t_2)
t_1 = self.l_9(t_1)
t_1 = self.l_10(t_1)
t_1 = self.l_11(t_1)
t_1 = self.l_12(t_1)
t_1 = self.l_13(t_1)
t_1 = self.l_14(t_1)
t_1 = (t_2 + t_1)
t_2 = self.l_15(t_1)
t_4 = t_2.shape
t_3 = t_4[0]
t_5 = t_4[1]
t_4 = t_4[2]
t_2 = self.l_16(t_2)
t_6 = (t_4 // 16)
t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6)
t_6 = t_6.permute(2, 0, 3, 1, 4)
t_2 = t_6[0]
t_0 = t_6[1]
t_6 = t_6[2]
t_0 = t_0.transpose((- 2), (- 1))
t_0 = (t_2 t_0)
t_0 = (t_0 * 0.125)
t_0 = t_0.softmax(dim=(- 1))
t_0 = self.l_17(t_0)
t_6 = (t_0 t_6)
t_6 = t_6.transpose(1, 2)
t_4 = t_6.reshape(t_3, t_5, t_4)
t_4 = self.l_18(t_4)
t_4 = self.l_19(t_4)
t_4 = self.l_20(t_4)
t_4 = (t_1 + t_4)
t_1 = self.l_21(t_4)
t_1 = self.l_22(t_1)
t_1 = self.l_23(t_1)
t_1 = self.l_24(t_1)
t_1 = self.l_25(t_1)
t_1 = self.l_26(t_1)
t_1 = self.l_27(t_1)
t_1 = (t_4 + t_1)
t_4 = self.l_28(t_1)
t_5 = t_4.shape
t_3 = t_5[0]
t_6 = t_5[1]
t_5 = t_5[2]
t_4 = self.l_29(t_4)
t_0 = (t_5 // 16)
t_0 = t_4.reshape(t_3, t_6, 3, 16, t_0)
t_0 = t_0.permute(2, 0, 3, 1, 4)
t_4 = t_0[0]
t_2 = t_0[1]
t_0 = t_0[2]
t_2 = t_2.transpose((- 2), (- 1))
t_2 = (t_4 t_2)
t_2 = (t_2 * 0.125)
t_2 = t_2.softmax(dim=(- 1))
t_2 = self.l_30(t_2)
t_0 = (t_2 t_0)
t_0 = t_0.transpose(1, 2)
t_5 = t_0.reshape(t_3, t_6, t_5)
t_5 = self.l_31(t_5)
t_5 = self.l_32(t_5)
t_5 = self.l_33(t_5)
t_5 = (t_1 + t_5)
t_1 = self.l_34(t_5)
t_1 = self.l_35(t_1)
t_1 = self.l_36(t_1)
t_1 = self.l_37(t_1)
t_1 = self.l_38(t_1)
return list(flatten((t_5, t_1)))
    # Thin wrappers delegating to partition-aware module-level helpers, so
    # state and device handling stay consistent across pipeline partitions.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, state):
        return load_state_dict(self, state)
    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)
    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class ResnetGenerator_UpsampleBilinear(nn.Module):
    """Resnet-based image generator whose decoder upsamples with bilinear
    interpolation followed by a stride-1 convolution (instead of transposed
    convolutions), and scales the tanh output by a learned factor squared.
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        assert n_blocks >= 0
        super(ResnetGenerator_UpsampleBilinear, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        # InstanceNorm carries no affine bias by default, so convs need their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        # Stem: reflection pad + 7x7 conv to ngf channels.
        stages = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
            norm_layer(ngf),
            nn.ReLU(True),
        ]
        n_downsampling = 2
        # Encoder: two stride-2 convs, doubling the channel count each time.
        for step in range(n_downsampling):
            ch = ngf * (2 ** step)
            stages += [
                nn.Conv2d(ch, ch * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                norm_layer(ch * 2),
                nn.ReLU(True),
            ]
        # Bottleneck: n_blocks residual blocks at the widest channel count.
        ch = ngf * (2 ** n_downsampling)
        for _ in range(n_blocks):
            stages.append(ResnetBlock(ch, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias))
        # Decoder: bilinear upsample then conv, halving channels each time.
        for step in range(n_downsampling):
            ch = ngf * (2 ** (n_downsampling - step))
            stages += [
                nn.UpsamplingBilinear2d(scale_factor=2),
                nn.Conv2d(ch, int(ch / 2), kernel_size=3, stride=1, padding=1, bias=use_bias),
                norm_layer(int(ch / 2)),
                nn.ReLU(True),
            ]
        # Head: reflection pad + 7x7 conv to output channels, squashed by tanh.
        stages += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        # Learnable global output scale, initialised uniformly in [1, 2).
        self.scale = torch.nn.parameter.Parameter(torch.Tensor(1))
        init.uniform_(self.scale, 1.0, 2.0)
        self.model = nn.Sequential(*stages)

    def forward(self, input):
        # Square the scale so the effective output gain stays non-negative.
        return self.model(input) * (self.scale * self.scale)
@pytest.mark.parametrize('hidden_units,use_bn', [(hidden_units, use_bn) for hidden_units in [(), (10,)] for use_bn in [True, False]])
def test_DNN(hidden_units, use_bn):
    """Smoke-test the DNN layer over empty/non-empty hidden units, with and
    without batch normalization.

    NOTE(review): the decorator line was truncated to '.parametrize(...)' in
    the source; the '@pytest.mark' prefix is restored here — confirm pytest
    is imported at the top of the file.
    """
    with CustomObjectScope({'DNN': layers.DNN}):
        layer_test(layers.DNN, kwargs={'hidden_units': hidden_units, 'use_bn': use_bn, 'dropout_rate': 0.5}, input_shape=(BATCH_SIZE, EMBEDDING_SIZE))
class TrainOptions(BaseOptions):
    """Training-time command-line options, extending the shared BaseOptions."""

    def initialize(self):
        """Register training-specific arguments on self.parser and mark this
        option set as a training configuration (self.isTrain)."""
        BaseOptions.initialize(self)
        # Display / checkpointing frequencies.
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration')
        # Resuming / warm-starting.
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # Optimization schedule.
        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # Discriminator architecture and losses (pix2pixHD-style multi-scale GAN).
        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
        self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
        # Consumed elsewhere to distinguish train options from test options.
        self.isTrain = True
@register_model
def metaformer_pppf_s12_224(pretrained=False, **kwargs):
    """Build a MetaFormer S12 (PPPF) for 224x224 inputs: Pooling token mixers
    in the first three stages and a SpatialFc mixer in the last stage.

    NOTE(review): the decorator was truncated to '_model' in the source;
    '@register_model' (timm registry convention) is restored here — confirm
    the corresponding import at the top of the file.

    Args:
        pretrained: if True, download and load the released checkpoint.
        **kwargs: forwarded to the MetaFormer constructor.

    Returns:
        The constructed (optionally pretrained) MetaFormer model.
    """
    layers = [2, 2, 6, 2]
    embed_dims = [64, 128, 320, 512]
    token_mixers = [Pooling, Pooling, Pooling, partial(SpatialFc, spatial_shape=[7, 7])]
    mlp_ratios = [4, 4, 4, 4]
    downsamples = [True, True, True, True]
    model = MetaFormer(layers, embed_dims=embed_dims, token_mixers=token_mixers, mlp_ratios=mlp_ratios, norm_layer=GroupNorm, downsamples=downsamples, **kwargs)
    model.default_cfg = _cfg(crop_pct=0.9)
    if pretrained:
        url = model_urls['metaformer_pppf_s12_224']
        checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location='cpu', check_hash=True)
        model.load_state_dict(checkpoint)
    return model
def levelPlot(data, var=None, time=None, levels=(3, 5), target=None, colors=None, **kwargs):
    """Draw a stepped time-series plot colour-banded by threshold levels.

    Parameters
    ----------
    data : array-like or Mapping
        Values to plot, or a dict-like container holding them under ``var``.
    var : str, optional
        Key into ``data`` when ``data`` is dict-like.
    time : array-like or str, optional
        Time values, or a key into ``data``; datetime input enables smart
        time ticks via matplotlib date handling.
    levels : sequence, optional
        Threshold boundaries between colour bands.
    target : optional
        Figure/axes target forwarded to ``set_target``.
    colors : sequence, optional
        One colour per band; repeated if too short.
    **kwargs
        Forwarded to ``fill_between``. ``alpha`` defaults to 0.75; the
        ``legend`` key (popped, not forwarded) toggles a legend.

    Returns
    -------
    The matplotlib axes the bands were drawn on.
    """
    if var is not None:
        try:
            usearr = data[var]
        except KeyError:
            # BUG FIX: the message used '{1}' with a single format argument,
            # which raised IndexError instead of the intended KeyError.
            raise KeyError('Key "{0}" not present in data'.format(var))
    elif not isinstance(data, Mapping):
        usearr = np.asarray(data)
    else:
        raise TypeError('Data appears to be dict-like without a key being given')
    tflag = False
    if time is not None:
        from scipy.stats import mode
        try:
            times = data[time]
        except (KeyError, ValueError, IndexError):
            times = time
        try:
            times = matplotlib.dates.date2num(times)
            tflag = True
        except AttributeError:
            times = np.asarray(time)
        # Extend the time axis by one modal step so the final bin is drawn.
        (stepsize, dum) = mode(np.diff(times), axis=None)
        times = np.hstack([times, (times[(- 1)] + stepsize)])
    else:
        times = np.asarray(range(0, (len(usearr) + 1)))
    if not colors:
        if len(levels) <= 3:
            colors = ['lime', 'yellow', 'crimson', 'saddlebrown']
        else:
            # NOTE(review): 'axes.color_cycle' was removed in matplotlib 1.5;
            # this branch only works on old matplotlib — confirm intended support.
            colors = matplotlib.rcParams['axes.color_cycle']
    else:
        try:
            assert (len(colors) > len(levels))
        except AssertionError:
            # Repeat the supplied colours until there is one per band.
            colors = (list(colors) * int((1 + (len(levels) / len(colors)))))
    kwargs.setdefault('alpha', 0.75)
    # Pop 'legend' so it is not forwarded to fill_between.
    legend = kwargs.pop('legend', False)
    (fig, ax) = set_target(target)
    subset = np.asarray(dmcopy(usearr))

    def fill_between_steps(ax, x, y1, **kwargs):
        # Repeat x/y samples so the fill renders as steps, not linear ramps.
        stepsxx = x.repeat(2)[1:(- 1)]
        stepsyy = y1.repeat(2)
        y2 = np.zeros_like(stepsyy)
        ax.fill_between(stepsxx, stepsyy, y2, **kwargs)
        if (mpl.__version__ < '1.5.0'):
            # Old matplotlib needs a proxy patch to get a legend entry.
            p = plt.Rectangle((0, 0), 0, 0, **kwargs)
            ax.add_patch(p)
    # Lowest band: values at or below levels[0].
    idx = 0
    inds = (usearr > levels[0])
    subset[inds] = np.nan
    kwargs['label'] = u'{0}'.format(levels[idx])
    fill_between_steps(ax, times, subset, color=colors[0], zorder=30, **kwargs)
    # Middle bands: values in (levels[idx-1], levels[idx]].
    for idx in range(1, len(levels)):
        subset = np.asarray(dmcopy(usearr))
        inds = np.bitwise_or((usearr <= levels[(idx - 1)]), (usearr > levels[idx]))
        subset[inds] = np.nan
        kwargs['label'] = u'>{0},{1}'.format(levels[(idx - 1)], levels[idx])
        fill_between_steps(ax, times, subset, color=colors[idx], zorder=(30 - (idx * 2)), **kwargs)
    idx += 1
    # Top band: values above the last level; tolerated to fail only when the
    # colour/level index runs out.
    try:
        inds = (usearr <= levels[(idx - 1)])
        subset = np.asarray(dmcopy(usearr))
        subset[inds] = np.nan
        kwargs['label'] = '>{0}'.format(levels[(- 1)])
        fill_between_steps(ax, times, subset, color=colors[idx], zorder=(30 - (idx * 2)), **kwargs)
    except IndexError:
        # BUG FIX: was a bare 'except:' which also swallowed unrelated errors
        # (and KeyboardInterrupt); only an exhausted index is best-effort here.
        pass
    if tflag:
        try:
            applySmartTimeTicks(ax, data[time])
        except (IndexError, KeyError):
            applySmartTimeTicks(ax, time)
        ax.grid(False, which='minor')
    if legend:
        ncols = (len(levels) + 1)
        if ncols > 3:
            ncols = (ncols // 2)
        ax.legend(loc='upper left', ncol=ncols)
    return ax
class QDQBertForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error when
    the required backends are not installed."""
    _backends = ['pytorch_quantization', 'torch']

    def __init__(self, *args, **kwargs):
        # Reuse the class-level backend list instead of repeating the literal.
        requires_backends(self, self._backends)
def _build_events_df(events):
events = pd.DataFrame(list(events), columns=['start', 'end', 'score'])
events['start'] = events['start'].astype('int64')
events['end'] = events['end'].astype('int64')
return events |
def test():
    """cpp_tasklet should write the value of the input array into the output."""
    src = dace.ndarray((2,), dace.uint32)
    dst = dace.ndarray((1,), dace.uint32)
    src[:] = 5
    dst[:] = 0
    cpp_tasklet(src, dst)
    # The tasklet is expected to propagate the value 5 into dst[0].
    assert dst[0] == 5
class MCPEvent(Structure):
    """ctypes mirror of the native MCP event struct.

    NOTE(review): the field order and types must match the C definition
    exactly; MCPEventData (the payload) is declared elsewhere in this file.
    The timestamp is presumably in seconds — confirm against the C headers.
    """
    # size (bytes), event type code, timestamp, then the event payload.
    _fields_ = [('size', c_uint32), ('event_type', c_int32), ('timestamp', c_double), ('event_data', MCPEventData)]
def simSetExplicitHandling(generalObjectHandle, explicitFlags):
    """Thin ctypes wrapper: forward the call to the native library and
    validate its return code (``_check_return`` raises on failure)."""
    _check_return(lib.simSetExplicitHandling(generalObjectHandle, explicitFlags))
def main(args):
    """Fairseq-style training entry point: set up task/model/criterion and a
    trainer, then run the epoch loop until the learning rate or epoch budget
    stops it.

    NOTE(review): relies on fairseq helpers (tasks, checkpoint_utils,
    distributed_utils, meters, metrics, train) defined/imported elsewhere.
    """
    saver = Saver()
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.batch_size is not None)), 'Must specify batch size either with --max-tokens or --batch-size'
    metrics.reset()
    # Seed numpy and torch for reproducibility.
    np.random.seed(args.seed)
    utils.set_torch_seed(args.seed)
    if distributed_utils.is_master(args):
        # Only the master process checks that the checkpoint directory is writable.
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    logger.info(args)
    task = tasks.setup_task(args)
    # Pre-load validation splits; training data is loaded lazily per epoch.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('task: {} ({})'.format(args.task, task.__class__.__name__))
    logger.info('model: {} ({})'.format(args.arch, model.__class__.__name__))
    logger.info('criterion: {} ({})'.format(args.criterion, criterion.__class__.__name__))
    logger.info('num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad))))
    if (args.quantization_config_path is not None):
        quantizer = quantization_utils.Quantizer(config_path=args.quantization_config_path, max_epoch=args.max_epoch, max_update=args.max_update)
    else:
        quantizer = None
    # Model-parallel runs use the Megatron trainer (note: no quantizer there).
    if (args.model_parallel_size == 1):
        trainer = Trainer(args, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)
    logger.info('training on {} devices (GPUs/TPUs)'.format(args.distributed_world_size))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.batch_size))
    # Restore trainer state and the epoch iterator from the latest checkpoint.
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer, disable_iterator_cache=task.has_sharded_data('train'))
    max_epoch = (args.max_epoch or math.inf)
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    # Main loop: stop when the LR decays to min_lr or max_epoch is exceeded.
    while ((lr > args.min_lr) and (epoch_itr.next_epoch_idx <= max_epoch)):
        (valid_losses, should_stop) = train(args, trainer, task, epoch_itr, saver)
        if should_stop:
            break
        # Step the LR scheduler using the first validation loss.
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        epoch_itr = trainer.get_train_iterator(epoch_itr.next_epoch_idx, load_dataset=task.has_sharded_data('train'), disable_iterator_cache=task.has_sharded_data('train'))
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def register_Ns3AmpduTag_methods(root_module, cls):
    """Register ns3::AmpduTag constructors and methods on its PyBindGen class
    wrapper (auto-generated binding registration code)."""
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::AmpduTag const &', 'arg0')])
    cls.add_constructor([])
    # ns3::Tag virtual interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    # A-MPDU-specific accessors.
    cls.add_method('GetRemainingAmpduDuration', 'ns3::Time', [], is_const=True)
    cls.add_method('GetRemainingNbOfMpdus', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # A-MPDU-specific mutators.
    cls.add_method('SetRemainingAmpduDuration', 'void', [param('ns3::Time', 'duration')])
    cls.add_method('SetRemainingNbOfMpdus', 'void', [param('uint8_t', 'nbofmpdus')])
    return
def LoadCrossNet(Graph, Table, SrcCol, DstCol, EdgeAttrV):
    """Thin SWIG delegate to the _snap extension's LoadCrossNet (presumably
    loads cross-network edges from Table into Graph using SrcCol/DstCol as
    endpoints and EdgeAttrV as edge attributes — see SNAP documentation)."""
    return _snap.LoadCrossNet(Graph, Table, SrcCol, DstCol, EdgeAttrV)
def wrap_objective(objective, data, pdf, stitch_pars, do_grad=False, jit_pieces=None):
    """Build a scalar objective callable for the numpy backend.

    The returned function tensorizes the free parameters, stitches the fixed
    ones back in via ``stitch_pars``, and evaluates ``objective``. Gradients
    are not supported on this backend.
    """
    backend, _ = get_backend()
    if do_grad:
        raise exceptions.Unsupported('Numpy does not support autodifferentiation.')

    def func(pars):
        # Stitch fixed parameters into the tensorized free-parameter vector.
        full_pars = stitch_pars(backend.astensor(pars))
        return objective(full_pars, data, pdf)[0]
    return func
def _sweep_poly_phase(t, poly):
intpoly = polyint(poly)
phase = ((2 * pi) * polyval(intpoly, t))
return phase |
class PNGraph(object):
    """SWIG-generated smart-pointer wrapper for SNAP's directed graph
    (TNGraph). Auto-generated delegation code: every method forwards to the
    _snap C extension; do not hand-edit method bodies."""
    # SWIG ownership flag for the underlying C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Instances are created through PNGraph.New(), never constructed directly.
        raise AttributeError('No constructor defined')
    __repr__ = _swig_repr

    def New():
        # Factory for a new empty directed graph.
        return _snap.PNGraph_New()
    New = staticmethod(New)
    __swig_destroy__ = _snap.delete_PNGraph

    # --- Serialization and smart-pointer plumbing ---
    def Save(self, SOut):
        return _snap.PNGraph_Save(self, SOut)
    def __deref__(self):
        return _snap.PNGraph___deref__(self)
    def __ref__(self):
        return _snap.PNGraph___ref__(self)
    def __call__(self):
        return _snap.PNGraph___call__(self)
    def Empty(self):
        return _snap.PNGraph_Empty(self)
    def Clr(self):
        return _snap.PNGraph_Clr(self)
    def GetRefs(self):
        return _snap.PNGraph_GetRefs(self)
    def Load(self, SIn):
        return _snap.PNGraph_Load(self, SIn)
    def HasFlag(self, Flag):
        return _snap.PNGraph_HasFlag(self, Flag)

    # --- Node operations and node iterators ---
    def GetNodes(self):
        return _snap.PNGraph_GetNodes(self)
    def AddNode(self, *args):
        return _snap.PNGraph_AddNode(self, *args)
    def AddNodeUnchecked(self, NId=(- 1)):
        return _snap.PNGraph_AddNodeUnchecked(self, NId)
    def DelNode(self, *args):
        return _snap.PNGraph_DelNode(self, *args)
    def IsNode(self, NId):
        return _snap.PNGraph_IsNode(self, NId)
    def BegNI(self, *args):
        return _snap.PNGraph_BegNI(self, *args)
    def EndNI(self, *args):
        return _snap.PNGraph_EndNI(self, *args)
    def GetNI(self, *args):
        return _snap.PNGraph_GetNI(self, *args)
    def GetMxNId(self):
        return _snap.PNGraph_GetMxNId(self)

    # --- Edge operations and edge iterators ---
    def GetEdges(self):
        return _snap.PNGraph_GetEdges(self)
    def AddEdge(self, *args):
        return _snap.PNGraph_AddEdge(self, *args)
    def AddEdgeUnchecked(self, SrcNId, DstNId):
        return _snap.PNGraph_AddEdgeUnchecked(self, SrcNId, DstNId)
    def AddEdge2(self, SrcNId, DstNId):
        return _snap.PNGraph_AddEdge2(self, SrcNId, DstNId)
    def DelEdge(self, SrcNId, DstNId, IsDir=True):
        return _snap.PNGraph_DelEdge(self, SrcNId, DstNId, IsDir)
    def IsEdge(self, SrcNId, DstNId, IsDir=True):
        return _snap.PNGraph_IsEdge(self, SrcNId, DstNId, IsDir)
    def BegEI(self, *args):
        return _snap.PNGraph_BegEI(self, *args)
    def EndEI(self, *args):
        return _snap.PNGraph_EndEI(self, *args)
    def GetEI(self, *args):
        return _snap.PNGraph_GetEI(self, *args)

    # --- Random access and bulk queries ---
    def GetRndNId(self, *args):
        return _snap.PNGraph_GetRndNId(self, *args)
    def GetRndNI(self, *args):
        return _snap.PNGraph_GetRndNI(self, *args)
    def GetNIdV(self, NIdV):
        return _snap.PNGraph_GetNIdV(self, NIdV)

    # --- Capacity, maintenance and debugging helpers ---
    def Reserve(self, Nodes, Edges):
        return _snap.PNGraph_Reserve(self, Nodes, Edges)
    def ReserveNIdInDeg(self, NId, InDeg):
        return _snap.PNGraph_ReserveNIdInDeg(self, NId, InDeg)
    def ReserveNIdOutDeg(self, NId, OutDeg):
        return _snap.PNGraph_ReserveNIdOutDeg(self, NId, OutDeg)
    def SortNodeAdjV(self):
        return _snap.PNGraph_SortNodeAdjV(self)
    def Defrag(self, OnlyNodeLinks=False):
        return _snap.PNGraph_Defrag(self, OnlyNodeLinks)
    def IsOk(self, ThrowExcept=True):
        return _snap.PNGraph_IsOk(self, ThrowExcept)
    def Dump(self, *args):
        return _snap.PNGraph_Dump(self, *args)
    def GetSmallGraph(self):
        return _snap.PNGraph_GetSmallGraph(self)
def _evaluate_hparams(text_embeddings, text_labels, label_embeddings, folds, loss_type, hparams):
    """Cross-validated evaluation of one hyper-parameter setting.

    For each fold: tune the label embeddings on the training portion, score
    the held-out texts against the tuned labels, and collect argmax
    predictions. Returns the macro-F1 over the concatenated folds.
    """
    all_predictions = []
    all_references = []
    for test_indexes in folds:
        train_indexes = _get_complement(folds, test_indexes)
        # Every example must land in exactly one of train/test for this fold.
        assert len(train_indexes) + len(test_indexes) == len(text_labels)
        tuned_labels = label_tuning(
            text_embeddings[train_indexes],
            text_labels[train_indexes],
            label_embeddings,
            loss_type=loss_type,
            **hparams,
        )
        # Similarity of each held-out text to each tuned label embedding.
        fold_scores = np.inner(text_embeddings[test_indexes], tuned_labels)
        all_predictions.append(fold_scores.argmax((- 1)))
        all_references.append(text_labels[test_indexes].argmax((- 1)))
    references = np.concatenate(all_references, axis=0)
    predictions = np.concatenate(all_predictions, axis=0)
    return _mf1(references, predictions)
class MinTotalDurationPolicyWithPerf(Policy):
    """Scheduling policy that minimizes the makespan implied by each job's
    remaining steps, formulated as a convex program solved with cvxpy."""

    def __init__(self, solver, num_threads=None):
        # Thread count for the MOSEK solver (defaults to 1 when MOSEK is used).
        self._num_threads = num_threads
        Policy.__init__(self, solver)
        self._name = 'MinTotalDuration_Perf'

    def get_allocation_helper(self, throughputs, scale_factors_array):
        """Solve for an allocation ``x`` maximizing ``inv_M`` — the reciprocal
        of the worst-case remaining time — and return (status, x variable)."""
        x = cp.Variable(throughputs.shape)
        # inv_M = 1/M where M bounds each job's remaining duration.
        inv_M = cp.Variable()
        objective = cp.Maximize(inv_M)
        constraints = self.get_base_constraints(x, scale_factors_array)
        # Each job's effective throughput must cover its remaining steps per unit M.
        constraints.append((cp.sum(cp.multiply(throughputs, x), axis=1) >= (self._num_steps_remaining * inv_M)))
        cvxprob = cp.Problem(objective, constraints)
        kwargs = {}
        if (self._solver == 'MOSEK'):
            import mosek
            if (self._num_threads is None):
                self._num_threads = 1
            kwargs['mosek_params'] = {mosek.iparam.num_threads: self._num_threads}
        result = cvxprob.solve(solver=self._solver, **kwargs)
        return (cvxprob.status, x)

    def get_allocation(self, unflattened_throughputs, scale_factors, num_steps_remaining, cluster_spec):
        """Compute a per-job allocation clipped to [0, 1]; returns None when
        the throughput matrix cannot be flattened."""
        (throughputs, index) = super().flatten(unflattened_throughputs, cluster_spec)
        if (index is None):
            return None
        (m, n) = throughputs.shape
        (job_ids, _) = index
        # Remaining-steps vector ordered to match the flattened throughput rows.
        self._num_steps_remaining = np.array([num_steps_remaining[job_id] for job_id in job_ids])
        if (throughputs is None):
            return None
        scale_factors_array = self.scale_factors_array(scale_factors, job_ids, m, n)
        (status, last_feasible_x) = self.get_allocation_helper(throughputs, scale_factors_array)
        assert (last_feasible_x is not None)
        # Clip solver noise outside [0, 1] before mapping back to job ids.
        return super().unflatten(last_feasible_x.value.clip(min=0.0).clip(max=1.0), index)
def main():
    """Prepare Common Voice TSV splits: read each split, sort utterances by
    length (longest first) when lengths are available, and write the
    processed TSV (plus a transcript .txt for the train split)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, help='Root of Common Voice 7.0 directory.')
    parser.add_argument('--lang', type=str, help='Language abbreviation.')
    parser.add_argument('--out', type=str, help='Path to output directory.')
    parser.add_argument('--accent', type=str, default='none', help='English accent')
    parser.add_argument('--hours', type=float, default=(- 1), help='Maximum hours used.')
    args = parser.parse_args()
    os.makedirs(args.out, exist_ok=True)
    os.makedirs(join(args.out, args.lang), exist_ok=True)
    for s in ['train', 'dev', 'test']:
        data_list = read_tsv(join(args.root, args.lang, (s + '.tsv')), join(args.root, args.lang, 'clips'), args.lang, accent=args.accent, hours=args.hours)
        # BUG FIX: guard against an empty split — indexing data_list[0]
        # unconditionally raised IndexError when filtering left no rows.
        if data_list and (data_list[0].get('len', (- 1)) > 0):
            data_list = sorted(data_list, reverse=True, key=(lambda x: x['len']))
        write_tsv(data_list, join(args.out, args.lang, (s + '.tsv')))
        if (s == 'train'):
            write_txt(data_list, join(args.out, args.lang, (s + '.txt')))
@pytest.mark.parametrize('name,rbf_class', list(rbf_class_mapping.items()))
def test_num_rbf(name, rbf_class, num_rbf=20):
    """Every registered RBF expansion should add a trailing dimension of
    size ``num_rbf``.

    NOTE(review): the decorator was truncated to a bare tuple in the source;
    '@pytest.mark.parametrize' is restored here — confirm pytest is imported.
    """
    rbf = rbf_class(num_rbf=num_rbf)
    y = rbf(torch.linspace(0, 10, 100))
    assert (y.ndim == 2), 'Failed to expand the dimension.'
    assert (y.size(1) == num_rbf), f'Found {y.size(1)} values but expected {num_rbf}.'
class ExactTermMonoid(TermWithCoefficientMonoid):
    """Monoid of exact terms (growth element with an exact coefficient)."""
    Element = ExactTerm

    def _convert_construction_(self, kwds_construction):
        """Adapt construction keywords when converting from a BTermMonoid:
        the B-term-only 'valid_from' keyword is dropped in place."""
        if (('parent' in kwds_construction) and isinstance(kwds_construction['parent'], BTermMonoid)):
            # dict.pop with a default replaces the try/del/except-KeyError dance.
            kwds_construction.pop('valid_from', None)

    def _repr_(self):
        """Short human-readable description of this monoid."""
        return ('Exact Term Monoid %s with coefficients in %s' % (self.growth_group._repr_short_(), self.coefficient_ring))
class BLDLDmat(SpectralMatrix):
    """Tridiagonal mass-like matrix between two LD bases, assembled from the
    squared norms of the underlying basis functions."""

    def assemble(self, method):
        test_fn, trial_fn = self.testfunction, self.trialfunction
        # Both sides must be LD bases for this diagonal structure to hold.
        assert isinstance(test_fn[0], LD)
        assert isinstance(trial_fn[0], LD)
        norms = get_norm_sq(test_fn[0], trial_fn[0], method)
        # Main diagonal sums neighbouring norms; sub-/super-diagonals share
        # the interior norms (the matrix is symmetric).
        diagonals = {
            0: norms[:(- 1)] + norms[1:],
            (- 1): norms[1:(- 1)],
        }
        diagonals[1] = diagonals[(- 1)].copy()
        return diagonals
class BigBirdPegasusForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error when
    torch is not installed."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Reuse the class-level backend list instead of repeating the literal.
        requires_backends(self, self._backends)
@dataclass
class MetaLearnerStage():
    """One stage in a meta-learner pipeline, linked to its predecessor stage.

    NOTE(review): ``field(default_factory=dict)`` only takes effect under
    ``@dataclass``; the decorator appears to have been stripped by the
    extraction (other decorators in this file were truncated the same way)
    and is restored here — confirm ``dataclass`` is imported at file top.
    """
    # Stage identifier; combined with ancestors to form the full name.
    name: str
    # Per-stage configuration; a fresh dict per instance via default_factory.
    params: Dict[(str, Any)] = field(default_factory=dict)
    # Previous stage in the chain, or None for the root stage.
    prev_stage: Optional['MetaLearnerStage'] = None

    def full_name(self) -> MLStageFullName:
        """Tuple of stage names from the root stage down to this one."""
        fn: MLStageFullName
        if (self.prev_stage is None):
            fn = (self.name,)
        else:
            # Recursively prepend all ancestor names.
            fn = (*self.prev_stage.full_name(), self.name)
        return fn
# NOTE: extraction residue (dataset-viewer UI text), not source code —
# commented out to keep the file parseable:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.