code stringlengths 101 5.91M |
|---|
class IntLayerNorm(nn.Module):
    """Integer-only LayerNorm (I-BERT style).

    In ``quant_mode`` the normalization is computed on integer tensors, using a
    dynamically grown power-of-two ``shift`` so the int32 variance accumulator
    does not overflow; otherwise it falls back to plain float LayerNorm.
    Normalization is always over axis 2 (the feature axis).
    """

    def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant='none'):
        super().__init__()
        self.normalized_shape = normalized_shape
        self.eps = eps
        # NOTE(review): weight is initialized to zeros (standard LayerNorm uses
        # ones) — presumably overwritten by a loaded checkpoint; confirm.
        self.weight = nn.Parameter(torch.zeros(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.quant_mode = quant_mode
        if (force_dequant in ['nonlinear', 'layernorm']):
            logger.info('Force dequantize layernorm')
            self.quant_mode = False
        # Power-of-two shift applied before squaring to keep sums inside max_bit bits.
        self.register_buffer('shift', torch.zeros(1))
        self.output_bit = output_bit
        self.max_bit = 32
        self.dim_sqrt = None  # sqrt(feature dim), lazily cached on first forward
        self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)

    def set_shift(self, y_int):
        """Grow ``self.shift`` so that sum(y_int**2) fits in ``max_bit`` bits.

        Monotone: the shift is only ever increased (max with previous value).
        """
        with torch.no_grad():
            y_sq_int = (y_int ** 2)
            var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
            # Smallest power-of-two shift bringing the variance under 2**max_bit.
            shift = torch.log2(torch.sqrt((var_int / (2 ** self.max_bit)))).ceil().max()
            shift_old = self.shift
            self.shift = torch.max(self.shift, shift)
            logger.info(f'Dynamic shift adjustment: {int(shift_old)} -> {int(self.shift)}')

    def overflow_fallback(self, y_int):
        """Recompute the integer variance after enlarging the shift (overflow path)."""
        self.set_shift(y_int)
        y_int_shifted = floor_ste.apply((y_int / (2 ** self.shift)))
        y_sq_int = (y_int_shifted ** 2)
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        return var_int

    def forward(self, x, scaling_factor=None):
        """Normalize ``x`` over axis 2.

        :param x: input tensor (integer-valued when quantized: x = x_int * scaling_factor).
        :param scaling_factor: quantization scale of ``x`` (required in quant_mode).
        :return: (output, output scaling factor or None in float mode).
        """
        if (not self.quant_mode):
            # Float fallback: plain LayerNorm over the feature axis.
            mean = x.mean(axis=2, keepdim=True)
            y = (x - mean)
            var = torch.mean((y ** 2), axis=2, keepdim=True)
            x = (y / torch.sqrt((self.eps + var)))
            x = ((x * self.weight) + self.bias)
            return (x, None)
        if (self.dim_sqrt is None):
            # Cache sqrt(n) once; folded into the output scaling factor below.
            n = torch.tensor(x.shape[2], dtype=torch.float)
            self.dim_sqrt = torch.sqrt(n).to(x.device)
        x_int = (x / scaling_factor)
        mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
        y_int = (x_int - mean_int)
        # Pre-shift before squaring so the int32 accumulator does not overflow.
        y_int_shifted = floor_ste.apply((y_int / (2 ** self.shift)))
        y_sq_int = (y_int_shifted ** 2)
        var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
        if self.training:
            # If the accumulator still overflowed, grow the shift and retry once.
            if (var_int.max() >= (2 ** self.max_bit)):
                var_int = self.overflow_fallback(y_int)
                assert (var_int.max() < ((2 ** self.max_bit) + 0.1)), 'Error detected in overflow handling: `var_int` exceeds `self.max_bit` (the maximum possible bit width)'
        # Undo the pre-square shift when taking the square root.
        std_int = (floor_ste.apply(torch.sqrt(var_int)) * (2 ** self.shift))
        factor = floor_ste.apply(((2 ** 31) / std_int))
        y_int = floor_ste.apply(((y_int * factor) / 2))
        scaling_factor = (self.dim_sqrt / (2 ** 30))
        # Fold bias/weight into the integer domain: out = (y/std) * weight + bias.
        bias = (self.bias.data.detach() / self.weight.data.detach())
        bias_int = floor_ste.apply((bias / scaling_factor))
        y_int = (y_int + bias_int)
        scaling_factor = (scaling_factor * self.weight)
        x = (y_int * scaling_factor)
        return (x, scaling_factor)
class ChunkingIterDataPipe(torch.utils.data.IterDataPipe):
    """Splits every sequence of a wrapped iterable dataset into chunks.

    RETURNN-style chunking: ``chunking`` is parsed into per-data-key chunk
    size and step (see ``_parse_chunking``); each sequence is cut into
    possibly overlapping windows, and non-chunked keys are deep-copied into
    every emitted chunk.
    """

    def __init__(self, dataset: torch.utils.data.IterableDataset, chunking, *, min_chunk_size=0):
        """
        :param dataset: source dataset yielding dicts data_key -> sequence.
        :param chunking: chunk size, or (size, step) tuple, scalar or per-key dict.
        :param min_chunk_size: windows shorter than this are discarded.
        """
        super().__init__()
        from returnn.datasets.basic import Dataset as ReturnnDataset
        self._dataset = dataset
        (self._chunk_size, self._chunk_step, custom_chunk_func) = ReturnnDataset._parse_chunking(chunking)
        self._min_chunk_size = NumbersDict(min_chunk_size)
        assert (not custom_chunk_func), f'Custom chunking function not supported, {chunking!r}'

    def __iter__(self):
        """Yield chunk dicts; every chunked key must produce the same chunk count."""
        chunking_data_keys = list(self._chunk_size.keys())
        for data_dict in self._dataset:
            if (not chunking_data_keys):
                # No explicit keys configured: chunk every key except the blacklist.
                chunking_data_keys = list(data_dict.keys())
                chunking_data_key_black_list = ['seq_tag']
                for key in chunking_data_key_black_list:
                    if (key in chunking_data_keys):
                        chunking_data_keys.remove(key)
                assert chunking_data_keys, 'Dataset produced sequence without any data.'
            data_chunks = {}
            num_chunks = None
            for data_key in chunking_data_keys:
                chunk_size = self._chunk_size[data_key]
                chunk_step = self._chunk_step[data_key]
                min_chunk_size = self._min_chunk_size[data_key]
                data = data_dict[data_key]
                # Sliding windows of chunk_size every chunk_step; drop short tails.
                chunks = [data[start_index:(start_index + chunk_size)] for start_index in range(0, len(data), chunk_step) if (len(data[start_index:(start_index + chunk_size)]) >= min_chunk_size)]
                if (num_chunks is None):
                    num_chunks = len(chunks)
                else:
                    assert (num_chunks == len(chunks)), 'Chunking resulted in different number of chunks for different data keys.'
                data_chunks[data_key] = chunks
            if (num_chunks == 0):
                # Whole sequence shorter than min_chunk_size: skip it.
                continue
            assert num_chunks, 'Bug: no chunk produced from current sequence.'
            for chunk_index in range(num_chunks):
                chunk_data = {data_key: data_chunks[data_key][chunk_index] for data_key in data_chunks.keys()}
                # Keys that were not chunked (e.g. seq_tag) are copied into every chunk.
                non_chunked_data = {data_key: data for (data_key, data) in data_dict.items() if (data_key not in chunk_data)}
                if non_chunked_data:
                    chunk_data.update(deepcopy(non_chunked_data))
                (yield chunk_data)

    def __getitem__(self, index):
        """Random access is not supported for an iterable pipe."""
        raise Exception(f'{self.__class__.__name__}.__getitem__ not supported')

    # NOTE(review): takes `chunking`, not `self` — presumably intended as a
    # @staticmethod (mirrors ReturnnDataset._parse_chunking used in __init__);
    # calling it on an instance would mis-bind `self` as `chunking`. Confirm.
    def _parse_chunking(chunking):
        """Normalize ``chunking`` into (chunk_size, chunk_step) NumbersDicts."""
        if (not isinstance(chunking, (tuple, list))):
            # Bare size given: step defaults to the size (non-overlapping chunks).
            chunking = (chunking, None)
        (chunk_size, chunk_step) = chunking
        if (chunk_size is None):
            chunk_size = 0
        assert isinstance(chunk_size, (int, dict))
        chunk_size = NumbersDict(chunk_size)
        assert (chunk_size.min_value() > 0), 'chunk size must not be negative'
        if (chunk_step in (None, 0)):
            chunk_step = chunk_size
        assert isinstance(chunk_step, (int, dict, NumbersDict))
        chunk_step = NumbersDict(chunk_step)
        assert (sorted(chunk_step.keys()) == sorted(chunk_size.keys()))
        assert (chunk_step.min_value() > 0), 'chunking step must be positive'
        return (chunk_size, chunk_step)
def convert_3d_images_to_uint8(images, drange=(-1, 1), nchwd_to_nhwdc=False, shrink=1):
    """Rescale a batch of 3-D images from ``drange`` to [0, 255] uint8.

    :param images: NCHWD tensor; cast to float32 internally.
    :param drange: (low, high) input dynamic range. BUG FIX: was a mutable
        list default ([-1, 1]), replaced with an equivalent immutable tuple.
    :param nchwd_to_nhwdc: if True, move the channel axis last (NCHWD -> NHWDC).
    :param shrink: integer factor; > 1 enables average-pool downscaling.
    :return: uint8 tensor with values saturated into [0, 255].
    """
    images = tf.cast(images, tf.float32)
    if shrink > 1:
        ksize = [1, 1, shrink, shrink, shrink]
        images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHWD')
    if nchwd_to_nhwdc:
        images = tf.transpose(images, [0, 2, 3, 4, 1])
    # Map drange -> [0, 255]; +0.5 rounds to nearest under the truncating cast.
    scale = 255 / (drange[1] - drange[0])
    images = (images - drange[0]) * scale + 0.5
    return tf.saturate_cast(images, tf.uint8)
class AttnPlainNet(nn.Module):
    """Two-layer feature-broadcast network with an attention-weighted,
    symmetric, row-normalized feature-feature adjacency."""

    def __init__(self, feat_len, num_class, hidden=[10, 10], dropout=[0, 0]):
        super(AttnPlainNet, self).__init__()
        self.feat1 = FeatBrd1d(in_channels=1, out_channels=hidden[0])
        self.acvt1 = nn.Sequential(nn.BatchNorm1d(hidden[0]), nn.Softsign(), nn.Dropout(dropout[0]))
        self.feat2 = FeatBrd1d(in_channels=hidden[0], out_channels=hidden[1])
        self.acvt2 = nn.Sequential(nn.BatchNorm1d(hidden[1]), nn.Softsign(), nn.Dropout(dropout[1]))
        self.classifier = nn.Sequential(nn.Flatten(), nn.Linear(feat_len * hidden[1], num_class))
        # Attention scorers for the sample itself and its neighbors.
        self.att1 = nn.Linear(feat_len, 1, bias=False)
        self.att2 = nn.Linear(feat_len, 1, bias=False)
        self.norm = nn.Sequential(nn.Softmax(dim=0))

    def forward(self, x, neighbor):
        (x, neighbor) = (nf.normalize(x), [nf.normalize(n) for n in neighbor])
        fadj = self.feature_adjacency(x, neighbor)
        x = self.acvt1(self.feat1(x, fadj))
        x = self.acvt2(self.feat2(x, fadj))
        return self.classifier(x)

    def feature_adjacency(self, x, y):
        """Per-sample attention weights over neighbors, combined into a
        symmetric feature-feature adjacency, then signed-sqrt + row-normalized."""
        (B, C, F) = x.shape
        w = [self.norm(torch.einsum('ci,nkl->n', self.att1(x[i]), self.att2(y[i]))) for i in range(B)]
        fadj = torch.stack([torch.einsum('ca, ncb, n -> ab', x[i], y[i], w[i]) for i in range(B)])
        fadj += fadj.transpose(-2, -1)
        return self.row_normalize(self.sgnroot(fadj))

    # BUG FIX: the decorator was mangled to a bare `_grad()` statement, which
    # raised NameError at class creation; restored to @torch.no_grad().
    @torch.no_grad()
    def sgnroot(self, x):
        """Signed square root: sign(x) * sqrt(|x|)."""
        return x.sign() * x.abs().sqrt()

    @torch.no_grad()
    def row_normalize(self, x):
        """L1-normalize each row (epsilon-guarded); NaNs are zeroed."""
        x = x / (x.abs().sum(1, keepdim=True) + 1e-07)
        x[torch.isnan(x)] = 0
        return x
def support_sz(sz):
    """Decorator factory: tag the decorated function with its support size."""
    def _tag(func):
        func.support_sz = sz
        return func
    return _tag
def parse_memlet_subset(array: data.Data, node: Union[(ast.Name, ast.Subscript)], das: Dict[(str, Any)], parsed_slice: Any=None) -> Tuple[(subsets.Range, List[int], List[int])]:
    """Parse an AST name/subscript into a memlet subset over ``array``.

    :param array: data descriptor whose shape bounds the subset.
    :param node: ast.Name (whole array) or ast.Subscript (sliced access).
    :param das: defined arrays/symbols used to resolve slice expressions.
    :param parsed_slice: optional pre-parsed slice that overrides node.slice.
    :return: (subset range, new extra dimensions, array-index dimensions).
    :raises NotImplementedError: unsupported new-axis / array-index placements.
    """
    # Default: the full array, one (0, size-1, 1) range per dimension.
    ndslice = [(0, (s - 1), 1) for s in array.shape]
    extra_dims = []
    arrdims: Dict[(int, str)] = {}
    if isinstance(node, ast.Subscript):
        if parsed_slice:
            # Substitute the externally supplied slice on a shallow copy.
            cnode = copy.copy(node)
            cnode.slice = parsed_slice
        else:
            cnode = node
        # Consecutive subscripts (e.g., A[i][j]) are flattened to a list.
        ast_ndslices = astutils.subscript_to_ast_slice_recursive(cnode)
        offsets = list(range(len(array.shape)))
        subset_array = []
        for (idx, ast_ndslice) in enumerate(ast_ndslices):
            # Restrict the descriptor to the dimensions still unresolved.
            narray = copy.deepcopy(array)
            narray.shape = [s for (i, s) in enumerate(array.shape) if (i in offsets)]
            (ndslice, offsets, new_extra_dims, arrdims) = _fill_missing_slices(das, ast_ndslice, narray, offsets)
            if (new_extra_dims and (idx != (len(ast_ndslices) - 1))):
                raise NotImplementedError('New axes only implemented for last slice')
            if (arrdims and (len(ast_ndslices) != 1)):
                raise NotImplementedError('Array dimensions not implemented for consecutive subscripts')
            extra_dims = new_extra_dims
            subset_array.append(_ndslice_to_subset(ndslice))
        # Compose the consecutive subscripts into a single subset.
        subset = subset_array[0]
        for i in range(1, len(subset_array)):
            subset = subset.compose(subset_array[i])
    else:
        # Bare name: the subset covers the entire array.
        subset = _ndslice_to_subset(ndslice)
    return (subset, extra_dims, arrdims)
def torch_nn_conv1d(self, input):
    """Shape-inference stand-in for nn.Conv1d.forward returning a meta tensor.

    Applies the L_out formula from the torch.nn.Conv1d documentation for
    numeric/'valid' padding; 'same' padding preserves the spatial length.

    :param self: conv-like object with padding, dilation, kernel_size, stride,
        out_channels attributes.
    :param input: (..., C_in, L_in) tensor; only its shape is used.
    :return: empty tensor of the output shape on the 'meta' device.
    """
    l_in = input.shape[-1]
    padding = self.padding
    if padding == 'same':
        # BUG FIX: the original fell through and evaluated the L_out formula
        # with padding == 'same' (indexing the string), raising TypeError.
        # 'same' padding keeps L_out == L_in (torch requires stride 1 for it).
        l_out = l_in
    else:
        if padding == 'valid':
            padding = (0, 0)
        l_out = math.floor((l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1)
    shape = list(input.shape)
    shape[-1] = l_out
    shape[-2] = self.out_channels
    return torch.empty(shape, device='meta')
def parse_options(option, name, value, parser):
    """optparse callback: parse 'key=val,key2,...' into a dict on parser.values.

    Bare keys become True; 'false'/'f'/'0'/'no' (case-insensitive) become
    False, anything else True. Merges into any dict already stored at
    option.dest.
    """
    dest = option.dest
    parsed = dict(getattr(parser.values, dest, {}))
    for item in value.split(','):
        if '=' not in item:
            parsed[item] = True
            continue
        key, raw = item.split('=', 1)
        parsed[key] = raw.lower() not in ('false', 'f', '0', 'no')
    setattr(parser.values, dest, parsed)
class TestF77ReturnInteger(TestReturnInteger):
    """f2py round-trip tests for Fortran-77 integer returns (functions t* and
    intent(out) subroutines s*) at widths 1/2/4/8 bytes."""

    code = '\n       function t0(value)\n         integer value\n         integer t0\n         t0 = value\n       end\n       function t1(value)\n         integer*1 value\n         integer*1 t1\n         t1 = value\n       end\n       function t2(value)\n         integer*2 value\n         integer*2 t2\n         t2 = value\n       end\n       function t4(value)\n         integer*4 value\n         integer*4 t4\n         t4 = value\n       end\n       function t8(value)\n         integer*8 value\n         integer*8 t8\n         t8 = value\n       end\n\n       subroutine s0(t0,value)\n         integer value\n         integer t0\ncf2py    intent(out) t0\n         t0 = value\n       end\n       subroutine s1(t1,value)\n         integer*1 value\n         integer*1 t1\ncf2py    intent(out) t1\n         t1 = value\n       end\n       subroutine s2(t2,value)\n         integer*2 value\n         integer*2 t2\ncf2py    intent(out) t2\n         t2 = value\n       end\n       subroutine s4(t4,value)\n         integer*4 value\n         integer*4 t4\ncf2py    intent(out) t4\n         t4 = value\n       end\n       subroutine s8(t8,value)\n         integer*8 value\n         integer*8 t8\ncf2py    intent(out) t8\n         t8 = value\n       end\n    '

    # BUG FIX: the decorators were mangled to bare `.slow` / `.parametrize`
    # statements (SyntaxError); restored the pytest markers.
    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
    def test_all(self, name):
        """Each wrapper must return its input unchanged."""
        self.check_function(getattr(self.module, name))
def evaluate_weighting(dpr_dict, bm25_dict, qrels, output_dir, output_file, weight_dpr, weight_bm25, measurements):
    """Fuse DPR and BM25 rankings with a per-document weighted sum, then evaluate.

    For every query in dpr_dict, each document's fused score is
    weight_dpr * dpr_score + weight_bm25 * bm25_score (scores read from index 1
    of each stored entry). Returns the fused run and its evaluation measures.
    """
    run = {}
    for query_id in dpr_dict.keys():
        fused = {}
        for doc_id, entry in dpr_dict.get(query_id).items():
            fused[doc_id] = weight_dpr * entry[1]
        for doc_id, entry in bm25_dict.get(query_id).items():
            contribution = weight_bm25 * entry[1]
            if fused.get(doc_id):
                fused[doc_id] = fused[doc_id] + contribution
            else:
                fused[doc_id] = contribution
        run[query_id] = fused
    # Score the fused run against the relevance judgements.
    measures = ranking_eval(qrels, run, output_dir, output_file, measurements)
    return (run, measures)
class Normalize(nn.Module):
    """Map inputs from [0, 1] to [-1, 1] via x -> (x - 0.5) / 0.5."""

    def __init__(self):
        super(Normalize, self).__init__()

    def forward(self, input):
        shifted = input - 0.5
        return shifted / 0.5
def _impl(x, weight, ddof, axis, keepdims, mask_identity, highlevel, behavior, attrs):
    """Standard deviation: sqrt of the (optionally weighted) variance.

    Delegates to ak_var._impl with keepdims/mask_identity forced on, then
    post-processes the result to honor the caller's keepdims and
    mask_identity settings.
    """
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        # Place x and weight on the same backend before reducing.
        (x_layout, weight_layout) = ensure_same_backend(ctx.unwrap(x, allow_record=False, primitive_policy='error'), ctx.unwrap(weight, allow_record=False, allow_unknown=False, primitive_policy='error', none_policy='pass-through'))
        x = ctx.wrap(x_layout)
        weight = ctx.wrap(weight_layout, allow_other=True)
        # sqrt of a negative/None variance would warn; silence and let NaN flow.
        with np.errstate(invalid='ignore', divide='ignore'):
            out = ufuncs.sqrt(ak.operations.ak_var._impl(x, weight, ddof, axis, keepdims=True, mask_identity=True, highlevel=True, behavior=ctx.behavior, attrs=ctx.attrs))
        if (not mask_identity):
            # Replace masked identities with NaN instead of option-type None.
            out = ak.operations.fill_none(out, np.nan, axis=(- 1), behavior=ctx.behavior, highlevel=True, attrs=ctx.attrs)
        if (axis is None):
            if (not keepdims):
                # Full reduction: strip every kept length-1 axis.
                out = out[((0,) * out.ndim)]
        elif (not keepdims):
            # Drop only the reduced axis (kept as length 1 above).
            posaxis = maybe_posaxis(out.layout, axis, 1)
            out = out[(((slice(None, None),) * posaxis) + (0,))]
        return ctx.wrap(maybe_highlevel_to_lowlevel(out), highlevel=highlevel, allow_other=True)
class CaffeTransformer(ModelTransformer):
    """ModelTransformer backed by a Caffe model (prototxt definition plus
    caffemodel weights)."""

    def __init__(self, model_name, model_def, model_data, input_shapes: list = None, output_names: list = None, preprocessor: dict = None):
        """BUG FIX: input_shapes/output_names/preprocessor used mutable default
        arguments ([] / {}), which are shared across all calls; replaced with
        None sentinels normalized to fresh empty containers below."""
        super().__init__(model_name, model_def)
        self.model_data = model_data
        input_shapes = [] if input_shapes is None else input_shapes
        output_names = [] if output_names is None else output_names
        preprocessor = {} if preprocessor is None else preprocessor
        from transform.CaffeConverter import CaffeConverter
        self.converter = CaffeConverter(self.model_name, self.model_def, model_data, input_shapes, output_names, preprocessor)

    def origin_inference(self, inputs: dict):
        """Run reference inference with the original Caffe model."""
        from tools.model_runner import caffe_inference
        return caffe_inference(inputs, self.model_def, self.model_data)
def get_fuzzer_files(fuzzer: Fuzzer) -> Tuple[(List[Path], List[Path])]:
    """Return (coverage_files, crash_files) newly produced by ``fuzzer``.

    Initializes directory watchers over the fuzzer's output tree, then scans
    each watcher's test-case queue starting just past the last index handled
    in a previous call (module-level LAST_INDEX) and buckets new test cases
    by seed type: NORMAL/HANG count toward coverage, CRASH toward crashes.
    """
    coverage_files = []
    crash_files = []
    fuzzer_root_dir = get_fuzzer_root(fuzzer)
    assert fuzzer_root_dir
    if not fuzzer_root_dir.exists():
        return ([], [])
    if utils.fuzzer_has_subdir(FuzzerType(fuzzer)):
        for subdir in fuzzer_root_dir.iterdir():
            if subdir.is_dir():
                # 'autofz' is this tool's own bookkeeping dir, not fuzzer output.
                if subdir.parts[-1] == 'autofz':
                    continue
                watcher.init_watcher(fuzzer, subdir)
    else:
        watcher.init_watcher(fuzzer, fuzzer_root_dir)
    if fuzzer not in watcher.WATCHERS:
        return ([], [])
    for w in watcher.WATCHERS[fuzzer]:
        last_index = LAST_INDEX.get(w, -1)
        queue_len = len(w.test_case_queue)
        for i in range(last_index + 1, queue_len):
            test_case_path = w.test_case_queue[i]
            if w._ignore_test_case(test_case_path):
                continue
            seed_type = w._get_test_case_type(test_case_path)
            if seed_type in (SeedType.NORMAL, SeedType.HANG):
                coverage_files.append(test_case_path)
            elif seed_type == SeedType.CRASH:
                crash_files.append(test_case_path)
            else:
                # Fixed 'unknow' typo; AssertionError survives python -O too.
                raise AssertionError(f'unknown seed type: {seed_type}')
        # BUG FIX: the original stored queue_len, so the next call started at
        # queue_len + 1 and silently skipped one test case per scan; store the
        # index of the last element actually processed instead.
        LAST_INDEX[w] = queue_len - 1
    return (coverage_files, crash_files)
class Payload(object):
    """Base class for protocol payloads that may carry a MIC (message
    integrity code). Subclasses override mic() / _calculateMIC()."""

    def __init__(self, msg):
        self._msg = msg

    def raw(self):
        """Return a copy of the raw payload bytes."""
        return self._msg.payloadBytes[:]

    def mic(self):
        """Return the MIC carried in the payload, or None if there is none."""
        return None

    def _calculateMIC(self):
        """Recompute the MIC from the payload contents (None: unsupported)."""
        return None

    def verifyMIC(self):
        """Return True/False if the carried MIC matches the recomputed one,
        or None when the payload carries no MIC."""
        # BUG FIX: `self.mic` is a method; the original compared the bound
        # method itself (never None) and then crashed in tuple(<method>).
        currentMIC = self.mic()
        if currentMIC is None:
            return None
        calculatedMIC = self._calculateMIC()
        return tuple(currentMIC) == tuple(calculatedMIC)

    def updateMIC(self):
        calculatedMIC = self._calculateMIC()
        if calculatedMIC is not None:
            # NOTE(review): this assignment shadows the mic() method on the
            # instance; subclasses reaching this path probably intend to write
            # the MIC back into the message instead — confirm.
            self.mic = calculatedMIC

    @staticmethod
    def defaultPayload():
        # BUG FIX: was declared without `self`, so calling it on an instance
        # raised TypeError instead of the intended NotImplementedError.
        raise NotImplementedError()

    def print(self, depth=0):
        """Return a human-readable MIC line indented by ``depth``, or '' when
        the payload has no MIC."""
        pad = depth * ' '
        mic = self.mic()  # BUG FIX: call the accessor (was the bound method)
        micString = ''
        if mic is not None:
            micString = pad + 'MIC: ' + hexToStr(mic) + ' '
            try:
                v = self.verifyMIC()
                micString += '(verified)' if v else '(invalid)'
            except MissingKeyException as ex:
                micString += '(not verifyable, missing {})'.format(ex.keyname)
            except Exception as ex:
                micString += '(not verifyable: {})'.format(ex)
            micString += '\n'
        return micString
class PAM_Module(nn.Module):
    """Position attention module (DANet-style spatial self-attention).

    Every spatial position attends to every other position; the attended
    features are blended back via a learned residual gate `gamma`, which
    starts at zero so the module is initially an identity mapping.
    """

    def __init__(self, in_dim):
        super(PAM_Module, self).__init__()
        self.chanel_in = in_dim
        # Query/key projections are reduced to in_dim // 8 channels; the
        # value projection keeps the full channel width.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """Return gamma * attention(x) + x, same shape as the input."""
        batch, channels, height, width = x.size()
        n_pos = height * width
        # (B, n_pos, C') queries vs. (B, C', n_pos) keys -> (B, n_pos, n_pos).
        queries = self.query_conv(x).view(batch, -1, n_pos).permute(0, 2, 1)
        keys = self.key_conv(x).view(batch, -1, n_pos)
        attention = self.softmax(torch.bmm(queries, keys))
        values = self.value_conv(x).view(batch, -1, n_pos)
        attended = torch.bmm(values, attention.permute(0, 2, 1))
        attended = attended.view(batch, channels, height, width)
        return self.gamma * attended + x
def make_open3d_registration_feature(data):
    """Wrap an (N, D) array as an Open3D registration Feature (stored D x N)."""
    feature = o3d.pipelines.registration.Feature()
    feature.data = data.T
    return feature
def get_vmaf_test_sequence(frame_numbers: List[int]):
    """Pair every third frame number with the single 'siggraph_vmaf' camera config."""
    configs = camera_configs['siggraph_vmaf']
    assert len(configs) == 1
    camera = configs[0]
    return [(camera, frame) for frame in frame_numbers[::3]]
def _read_jsonl(path):
    """Yield one decoded JSON object per line of a JSON-lines file."""
    decoder = json.JSONDecoder()
    with open(path) as f:
        for line in f.readlines():
            yield decoder.raw_decode(line)[0]


def _strip_float_suffix(a):
    """Drop a trailing '.0' so float-formatted numeric answers compare as ints."""
    return a[:-2] if a[-2:] == '.0' else a


def load_data(args):
    """Load (questions, answers) for the benchmark named by ``args.dataset``.

    Reads args.dataset_path, normalizes each benchmark's format into two
    parallel lists of strings, stores len(answers) into args.dataset_size,
    and returns (questions, answers).

    :raises NotImplementedError: unknown dataset name.

    Refactor: the repeated JSON-lines decoding and '.0' stripping were
    factored into _read_jsonl / _strip_float_suffix; behavior is unchanged.
    """
    questions = []
    answers = []
    if args.dataset == 'gsm8k':
        for json_res in _read_jsonl(args.dataset_path):
            questions.append(json_res['question'].strip())
            # Gold answer follows the '#### ' marker; drop thousands separators.
            answers.append(json_res['answer'].split('#### ')[-1].replace(',', ''))
    elif args.dataset == 'aqua':
        for json_res in _read_jsonl(args.dataset_path):
            qes = json_res['question'].strip() + ' Answer Choices:'
            for opt in json_res['options']:
                opt = opt.replace(')', ') ')
                qes += f' ({opt}'
            questions.append(qes)
            answers.append(json_res['correct'])
    elif args.dataset == 'svamp':
        with open(args.dataset_path) as f:
            json_data = json.load(f)
        for line in json_data:
            questions.append((line['Body'].strip() + ' ') + line['Question'].strip())
            answers.append(_strip_float_suffix(str(line['Answer'])))
    elif args.dataset == 'asdiv':
        with open(args.dataset_path) as f:
            json_data = json.load(f)['Instances']
        for line in json_data:
            questions.append(line['input'].strip())
            answers.append(line['output'][0])
    elif args.dataset in ('addsub', 'singleeq', 'multiarith'):
        with open(args.dataset_path) as f:
            json_data = json.load(f)
        for line in json_data:
            questions.append(line['sQuestion'].strip())
            answers.append(_strip_float_suffix(str(line['lSolutions'][0])))
    elif args.dataset == 'csqa':
        for json_res in _read_jsonl(args.dataset_path):
            choice = 'Answer Choices:'
            for c in json_res['question']['choices']:
                choice += ' ('
                choice += c['label']
                choice += ') '
                choice += c['text']
            questions.append((json_res['question']['stem'].strip() + ' ') + choice)
            answers.append(json_res['answerKey'])
    elif args.dataset == 'strategyqa':
        # Two distributions of StrategyQA exist; 'task' files use target_scores.
        if 'task' in args.dataset_path:
            with open(args.dataset_path) as f:
                json_data = json.load(f)['examples']
            for line in json_data:
                questions.append(line['input'].strip())
                answers.append('yes' if int(line['target_scores']['Yes']) == 1 else 'no')
        else:
            with open(args.dataset_path, encoding='utf-8') as f:
                json_data = json.load(f)
            for line in json_data:
                questions.append(line['question'].strip())
                answers.append('yes' if line['answer'] else 'no')
    elif args.dataset in ('coin_flip', 'last_letters'):
        with open(args.dataset_path) as f:
            json_data = json.load(f)['examples']
        for line in json_data:
            questions.append(line['question'])
            answers.append(line['answer'])
    elif args.dataset == 'time_zone':
        with open(args.dataset_path) as f:
            json_data = json.load(f)
        for line in json_data:
            questions.append(line['question'].strip())
            answers.append(line['answer'])
    else:
        raise NotImplementedError
    print(f'dataset: {args.dataset}')
    print(f'dataset_size: {len(answers)}')
    args.dataset_size = len(answers)
    return (questions, answers)
class RAFT(nn.Module):
    """RAFT optical-flow network: feature/context encoders, an all-pairs
    correlation volume, and an iteratively refined flow field at 1/8
    resolution that is convex-upsampled back to full resolution."""

    def __init__(self, args):
        super(RAFT, self).__init__()
        self.args = args
        # Model size selects encoder widths and correlation pyramid radius.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # Fill in optional args when the caller's namespace lacks them.
        if ('dropout' not in args._get_kwargs()):
            args.dropout = 0
        if ('alternate_corr' not in args._get_kwargs()):
            args.alternate_corr = False
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=(hdim + cdim), norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=(hdim + cdim), norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)

    def freeze_bn(self):
        """Put every BatchNorm2d into eval mode (frozen running statistics)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def initialize_flow(self, img):
        """Two identical coordinate grids at 1/8 resolution; flow = coords1 - coords0."""
        (N, C, H, W) = img.shape
        coords0 = coords_grid(N, (H // 8), (W // 8)).to(img.device)
        coords1 = coords_grid(N, (H // 8), (W // 8)).to(img.device)
        return (coords0, coords1)

    def upsample_flow(self, flow, mask):
        """Convex upsampling: combine each coarse flow's 3x3 neighborhood with
        learned weights to produce the 8x-resolution flow."""
        (N, _, H, W) = flow.shape
        # 9 convex weights per output pixel within each 8x8 upsampling block.
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)
        # Flow values are scaled by 8 to match the finer pixel grid.
        up_flow = F.unfold((8 * flow), [3, 3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, (8 * H), (8 * W))

    def forward(self, image1, image2, iters=12, flow_init=None, test_mode=True):
        """Estimate flow from image1 to image2 over `iters` refinement steps.

        NOTE(review): test_mode defaults to True here, returning the tuple
        (coarse flow, upsampled flow) after one iteration's upsample; with
        test_mode=False a list of per-iteration flow predictions is returned.
        Confirm callers expect the True default.
        """
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        with autocast(enabled=self.args.mixed_precision):
            (fmap1, fmap2) = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        # Correlation volume; the 'alternate' variant trades speed for memory.
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            # Split context features into GRU hidden state and input features.
            (net, inp) = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)
        (coords0, coords1) = self.initialize_flow(image1)
        if (flow_init is not None):
            coords1 = (coords1 + flow_init)
        flow_predictions = []
        for itr in range(iters):
            # Detach so gradients do not flow through the whole unrolled chain.
            coords1 = coords1.detach()
            corr = corr_fn(coords1)
            flow = (coords1 - coords0)
            with autocast(enabled=self.args.mixed_precision):
                (net, up_mask, delta_flow) = self.update_block(net, inp, corr, flow)
            coords1 = (coords1 + delta_flow)
            if (up_mask is None):
                flow_up = upflow8((coords1 - coords0))
            else:
                flow_up = self.upsample_flow((coords1 - coords0), up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            return ((coords1 - coords0), flow_up)
        return flow_predictions
class FixedBatchSizeBatchSampler():
    """Batch sampler yielding fixed-size batches (last batch may be smaller).

    Wraps a RandomSampler (shuffle=True, reseeded per epoch via set_epoch)
    or a SequentialSampler over ``data_source``.
    """

    def __init__(self, data_source, batch_size: int, shuffle: bool = False, seed: int = 0) -> None:
        # BUG FIX: the default for `seed` was missing entirely (`seed: int=`),
        # a SyntaxError; it now defaults to 0.
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        if shuffle:
            self.generator = torch.Generator()
            self.sampler = RandomSampler(data_source, generator=self.generator)
        else:
            self.sampler = SequentialSampler(data_source)

    def set_epoch(self, epoch: int) -> None:
        """Reseed the shuffling generator so each epoch gets a distinct order."""
        if self.shuffle:
            self.generator.manual_seed(self.seed + epoch)

    def _evaluate_reduced_timestamps(self, batch_indices):
        # NOTE(review): self.reduce_func and self.timestamps are never set in
        # this class — presumably provided by a subclass; confirm.
        return self.reduce_func([self.timestamps[indice] for indice in batch_indices])

    def __iter__(self):
        batch_sampler = BatchSampler(self.sampler, batch_size=self.batch_size, drop_last=False)
        return iter(batch_sampler)

    def __len__(self):
        # Equivalent to len(list(iter(self))) (drop_last=False ceil division),
        # without materializing every batch.
        return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def read_answers(gold_file):
    """Load gold answers keyed by question id from a JSON-lines file."""
    answers = {}
    with tf.io.gfile.GFile(gold_file, 'r') as f:
        for line_no, line in enumerate(f):
            example = json.loads(line)
            # An optional header record may occupy the first line; skip it.
            if line_no == 0 and 'header' in example:
                continue
            for qa in example['qas']:
                answers[qa['qid']] = qa['answers']
    return answers
# BUG FIX: both decorators were mangled — the test marker had lost its '@'
# (a bare `_utils.test(...)` call) and the inner kernel had lost `@ti.kernel`
# entirely, so `init` would run as plain Python instead of a Taichi kernel.
@test_utils.test(arch=supported_archs_taichi_ndarray)
def test_different_shape():
    """A single ndarray kernel must accept arrays of different shapes."""
    n1 = 4
    x = ti.ndarray(dtype=ti.f32, shape=(n1, n1))

    @ti.kernel
    def init(d: ti.i32, arr: ti.types.ndarray()):
        for i, j in arr:
            arr[i, j] = d

    init(2, x)
    assert (x.to_numpy() == (np.ones(shape=(n1, n1)) * 2)).all()
    n2 = 8
    y = ti.ndarray(dtype=ti.f32, shape=(n2, n2))
    init(3, y)
    assert (y.to_numpy() == (np.ones(shape=(n2, n2)) * 3)).all()
def imconvert(img, src, dst):
    """Convert an image between color spaces via OpenCV.

    ``src``/``dst`` are color-space names (e.g. 'bgr', 'rgb'), mapped to the
    cv2.COLOR_<SRC>2<DST> conversion code.
    """
    conversion = f'COLOR_{src.upper()}2{dst.upper()}'
    return cv2.cvtColor(img, getattr(cv2, conversion))
class TestLeftMatrixMinimization(unittest.TestCase):
    """Regression tests for LeftMatrixMinimization.minimize."""

    def test_minimize(self):
        """Small hand-built problem solved with ECOS; checks all three outputs."""
        power_signals_d = np.array([[0.0, 0.0, 0.0, 0.0], [1., 1., 0., 0.], [1., 1., 1., 1.], [1., 1., 1., 0.]])
        rank_k = 4
        weights = np.array([0.0, 0.0, 0., 0.])
        tau = 0.9
        mu_l = 500.0
        initial_l_cs_value = np.array([[0., (- 0.), (- 0.), 0.], [0., (- 0.), (- 0.), 0.], [0., (- 0.), (- 0.), 0.], [0., (- 0.0822263), (- 0.), 0.]])
        initial_r_cs_value = np.array([[7., 11., 11., 8.], [0., (- 1.), (- 1.), (- 1.)], [0., 0., 0., 0.453427], [(- 0.), (- 3.), (- 4.), (- 1.)]])
        initial_beta_value = 0.0
        initial_component_r0 = np.array([1., 2., 4., 5.])
        # Expected regression values; only l_cs changes, r_cs/beta pass through.
        expected_l_cs_value = np.array([[2.610888e-14, (- 1.027025e-14), 1.481367e-14, (- 1.786423e-14)], [0., (- 5.028329e-14), (- 4.090143e-14), 1.891483e-13], [0.1353818, (- 8.877942e-14), 4.614613e-15, (- 1.047267e-14)], [0.2030726, 1.49516e-13, 1.955246e-14, (- 1.573292e-13)]])
        expected_r_cs_value = initial_r_cs_value
        expected_beta_value = initial_beta_value
        left_matrix_minimization = LeftMatrixMinimization(power_signals_d, rank_k, weights, tau, mu_l, solver_type='ECOS')
        (actual_l_cs_value, actual_r_cs_value, actual_beta_value) = left_matrix_minimization.minimize(initial_l_cs_value, initial_r_cs_value, initial_beta_value, initial_component_r0)
        np.testing.assert_almost_equal(actual_l_cs_value, expected_l_cs_value, decimal=6)
        np.testing.assert_array_equal(actual_r_cs_value, expected_r_cs_value)
        np.testing.assert_array_equal(actual_beta_value, expected_beta_value)

    def test_minimize_with_large_data(self):
        """Three-year fixture solved with MOSEK; skipped when MOSEK is absent."""
        input_power_signals_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'three_years_power_signals_d_1.csv'))
        with open(input_power_signals_file_path) as file:
            power_signals_d = np.loadtxt(file, delimiter=',')
        rank_k = 6
        weights_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'three_years_weights.csv'))
        with open(weights_file_path) as file:
            weights = np.loadtxt(file, delimiter=',')
        tau = 0.9
        mu_l = 500.0
        initial_l_cs_value_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'three_years_initial_l_cs_value.csv'))
        with open(initial_l_cs_value_file_path) as file:
            initial_l_cs_value = np.loadtxt(file, delimiter=',')
        initial_r_cs_value_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'three_years_initial_r_cs_value.csv'))
        with open(initial_r_cs_value_file_path) as file:
            initial_r_cs_value = np.loadtxt(file, delimiter=',')
        initial_beta_value = 0.0
        # Expected outputs captured from a previous solver iteration (fixtures).
        l_cs_value_after_iteration_1_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'l_cs_value_after_left_matrix_minimization_iteration_1_NEW.csv'))
        with open(l_cs_value_after_iteration_1_file_path) as file:
            expected_l_cs_value = np.loadtxt(file, delimiter=',')
        r_cs_value_after_iteration_1_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'r_cs_value_after_left_matrix_minimization_iteration_1.csv'))
        with open(r_cs_value_after_iteration_1_file_path) as file:
            expected_r_cs_value = np.loadtxt(file, delimiter=',')
        expected_beta_value = initial_beta_value
        initial_r0_value_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../fixtures/left_matrix_minimization', 'three_years_initial_component_r0.csv'))
        with open(initial_r0_value_file_path) as file:
            initial_component_r0_value = np.loadtxt(file, delimiter=',')
        left_matrix_minimization = LeftMatrixMinimization(power_signals_d, rank_k, weights, tau, mu_l, solver_type='MOSEK')
        try:
            (actual_l_cs_value, actual_r_cs_value, actual_beta_value) = left_matrix_minimization.minimize(initial_l_cs_value, initial_r_cs_value, initial_beta_value, initial_component_r0_value)
        except cvx.SolverError:
            self.skipTest((('This test uses MOSEK solver' + 'because default ECOS solver fails with large data. ') + 'Unless MOSEK is installed, this test fails.'))
        else:
            np.testing.assert_array_almost_equal(actual_l_cs_value, expected_l_cs_value, decimal=2)
            np.testing.assert_array_almost_equal(actual_r_cs_value, expected_r_cs_value, decimal=2)
            np.testing.assert_array_almost_equal(actual_beta_value, expected_beta_value, decimal=2)
class PQLinear(nn.Module):
    """Product-quantized linear layer: the weight matrix is reconstructed on
    the fly from learned centroids plus a fixed per-block assignment table
    (as in fairseq's quantization.pq.modules.PQLinear)."""

    def __init__(self, centroids, assignments, bias, in_features, out_features):
        super(PQLinear, self).__init__()
        self.block_size = centroids.size(1)
        self.n_centroids = centroids.size(0)
        self.in_features = in_features
        self.out_features = out_features
        # Every weight row must decompose into whole blocks, and the
        # assignment table must cover all rows evenly.
        if (self.in_features % self.block_size) != 0:
            raise ValueError('Wrong PQ sizes')
        if (len(assignments) % self.out_features) != 0:
            raise ValueError('Wrong PQ sizes')
        # Centroids are trained; assignments/counts are fixed buffers.
        self.centroids = nn.Parameter(centroids, requires_grad=True)
        self.register_buffer('assignments', assignments)
        self.register_buffer('counts', torch.bincount(assignments).type_as(centroids))
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.register_parameter('bias', None)

    # BUG FIX: the @property decorator was missing; `forward` reads
    # `self.weight` as an attribute, so without it F.linear received the
    # bound method instead of the reconstructed weight tensor.
    @property
    def weight(self):
        """Reconstruct the (out_features, in_features) weight from centroids."""
        return (
            self.centroids[self.assignments]
            .reshape(-1, self.out_features, self.block_size)
            .permute(1, 0, 2)
            .flatten(1, 2)
        )

    def forward(self, x):
        return F.linear(x, self.weight, self.bias)

    def extra_repr(self):
        return f'in_features={self.in_features}, out_features={self.out_features}, n_centroids={self.n_centroids}, block_size={self.block_size}, bias={(self.bias is not None)}'
class InventoryManagementSystemTrackInventory(VirtualFunctionTool):
    """Declarative specification for a virtual 'track inventory' tool: name,
    summary, parameters, returns, and exceptions only — no executable logic."""
    name = 'InventoryManagementSystemTrackInventory'
    summary = 'Track inventory levels and receive notifications when stock levels are low.'
    parameters: List[ArgParameter] = [{'name': 'threshold', 'type': 'integer', 'description': 'The stock level threshold, must be a positive integer.', 'required': True}]
    returns: List[ArgReturn] = [{'name': 'low_stock_items', 'type': 'array', 'description': "A list of objects containing the 'item_id', 'item_name', 'category', and 'quantity' of each item with stock levels below the threshold."}]
    exceptions: List[ArgException] = [{'name': 'InvalidRequestException', 'description': "The 'threshold' parameter is not a positive integer."}]
def mutate_size_func(info):
    """Build a mutation operator for ':'-separated architecture strings.

    The returned function replaces one randomly chosen field of the parent
    string with a random candidate from info['candidates'].
    """
    def _mutate(parent_arch):
        fields = deepcopy(parent_arch).split(':')
        pos = random.randint(0, len(fields) - 1)
        fields[pos] = str(random.choice(info['candidates']))
        return ':'.join(fields)
    return _mutate
class CompositionFilter(Filter):
    """Filter that applies a sequence of sub-filters, left to right."""

    def __init__(self, fs):
        self.fs = fs

    def __call__(self, x, update=True):
        """Pipe x through every sub-filter in order."""
        result = x
        for sub in self.fs:
            result = sub(result)
        return result

    def output_shape(self, input_space):
        """Chain the sub-filters' output_shape transforms over input_space.shape."""
        shape = input_space.shape
        for sub in self.fs:
            shape = sub.output_shape(shape)
        return shape
def Conv1x1BNReLU(in_channels, out_channels):
    """1x1 convolution followed by BatchNorm and in-place ReLU."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1)
    return nn.Sequential(conv, nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
def _read_fmt_chunk(fid, is_big_endian):
    """Parse a WAV 'fmt ' chunk, with ``fid`` positioned at its size field.

    :param fid: binary file object just past the 'fmt ' chunk id.
    :param is_big_endian: True for big-endian (RIFX-style) files.
    :return: (size, format_tag, channels, fs, bytes_per_second, block_align,
              bit_depth).
    :raises ValueError: malformed chunk or unknown wave format.
    """
    # struct prefix selecting byte order.
    if is_big_endian:
        fmt = '>'
    else:
        fmt = '<'
    size = res = struct.unpack((fmt + 'I'), fid.read(4))[0]
    bytes_read = 0
    if (size < 16):
        # A compliant fmt chunk is at least 16 bytes.
        raise ValueError('Binary structure of wave file is not compliant')
    res = struct.unpack((fmt + 'HHIIHH'), fid.read(16))
    bytes_read += 16
    (format_tag, channels, fs, bytes_per_second, block_align, bit_depth) = res
    if ((format_tag == WAVE_FORMAT_EXTENSIBLE) and (size >= (16 + 2))):
        # WAVE_FORMAT_EXTENSIBLE: the real format tag lives in a sub-format GUID.
        ext_chunk_size = struct.unpack((fmt + 'H'), fid.read(2))[0]
        bytes_read += 2
        if (ext_chunk_size >= 22):
            extensible_chunk_data = fid.read(22)
            bytes_read += 22
            # Skip valid-bits (2 bytes) and channel mask (4 bytes) to the GUID.
            raw_guid = extensible_chunk_data[(2 + 4):((2 + 4) + 16)]
            # Known sub-formats share this fixed GUID tail; only the first
            # 4 bytes (the actual format tag) vary.
            if is_big_endian:
                tail = b'\x00\x00\x00\x10\x80\x00\x00\xaa\x008\x9bq'
            else:
                tail = b'\x00\x00\x10\x00\x80\x00\x00\xaa\x008\x9bq'
            if raw_guid.endswith(tail):
                format_tag = struct.unpack((fmt + 'I'), raw_guid[:4])[0]
        else:
            raise ValueError('Binary structure of wave file is not compliant')
    if (format_tag not in KNOWN_WAVE_FORMATS):
        raise ValueError('Unknown wave file format')
    # Skip any remaining, unparsed bytes of the chunk.
    if (size > bytes_read):
        fid.read((size - bytes_read))
    return (size, format_tag, channels, fs, bytes_per_second, block_align, bit_depth)
def evaluate_model(model, config, _logger, cuda_device, eval_tsv, eval_batch_count, use_cache=False):
    """Run *model* in eval mode over *eval_batch_count* validation batches.

    Batches come either from a multiprocess loader queue or, when *use_cache*
    is True, from the module-level ``evaluate_cache`` (filled on the first
    pass over *eval_tsv*).

    Returns:
        dict mapping query_id -> list of (doc_id, score) pairs.
    """
    model.eval()
    validation_results = {}
    fill_cache = False
    cached_batches = None
    # Initialized up-front so the except-branch cleanup below cannot raise a
    # NameError when the failure happens before the queue is created.
    validation_processes = []
    validation_exit = None
    try:
        if use_cache:
            global evaluate_cache
            if eval_tsv not in evaluate_cache:
                fill_cache = True
                evaluate_cache[eval_tsv] = []
            cached_batches = evaluate_cache[eval_tsv]
        if (not use_cache) or fill_cache:
            (validation_queue, validation_processes, validation_exit) = get_multiprocess_batch_queue('eval-batches', multiprocess_validation_loader, glob.glob(eval_tsv), config, _logger, queue_size=200)
            _logger.info('[eval_model] --- Start validation with queue.size:' + str(validation_queue.qsize()))
        else:
            _logger.info('[eval_model] --- Start validation with cache size:' + str(len(cached_batches)))
        with torch.no_grad():
            for i in Tqdm.tqdm(range(0, eval_batch_count), disable=config['tqdm_disabled']):
                if (not use_cache) or fill_cache:
                    batch_orig = validation_queue.get()
                    if fill_cache:
                        cached_batches.append(batch_orig)
                else:
                    batch_orig = cached_batches[i]
                # Deep-copy before moving to the device so the cached copy stays on CPU.
                batch = move_to_device(copy.deepcopy(batch_orig), cuda_device)
                output = model.forward(batch['query_tokens'], batch['doc_tokens'], batch['query_length'], batch['doc_length'])
                output = output.cpu()
                for (sample_i, sample_query_id) in enumerate(batch_orig['query_id']):
                    sample_query_id = int(sample_query_id)
                    sample_doc_id = int(batch_orig['doc_id'][sample_i])
                    if sample_query_id not in validation_results:
                        validation_results[sample_query_id] = []
                    validation_results[sample_query_id].append((sample_doc_id, float(output[sample_i])))
        if (not use_cache) or fill_cache:
            if validation_queue.qsize() != 0:
                _logger.error('validation_queue.qsize() is not empty after evaluation')
            validation_exit.set()
    except BaseException:
        _logger.info('-' * 89)
        _logger.exception('[eval_model] Got exception: ')
        print('----- Attention! - something went wrong in eval_model (see logger) ----- ')
        if (not use_cache) or fill_cache:
            for proc in validation_processes:
                if proc.is_alive():
                    proc.terminate()
        # Bare raise re-raises with the original traceback intact (the old
        # `raise e` rebuilt the frame chain from here).
        raise
    return validation_results
('grammar', 'idiom_ast')
class IdiomAstGrammar():
    """Grammar wrapper that extends a base AST grammar with "idiom" templates.

    Each template (loaded from ``template_file``) describes a recurring AST
    fragment.  For every template a new ``Template{id}`` constructor (and,
    where applicable, a ``..._seq`` fragment variant) is registered on a
    deep copy of the base grammar's AST wrapper, so trees can be produced
    with idioms collapsed into single nodes and later expanded back for
    unparsing.
    """

    def __init__(self, base_grammar, template_file, root_type=None, all_sections_rewritten=False):
        self.base_grammar = registry.construct('grammar', base_grammar)
        self.templates = json.load(open(template_file))
        self.all_sections_rewritten = all_sections_rewritten
        self.pointers = self.base_grammar.pointers
        # Deep copy: template constructors are added here without mutating the
        # base wrapper, which is kept separately for verification/unparsing.
        self.ast_wrapper = copy.deepcopy(self.base_grammar.ast_wrapper)
        self.base_ast_wrapper = self.base_grammar.ast_wrapper
        self.root_type = self.base_grammar.root_type
        if (base_grammar['name'] == 'python'):
            self.root_type = 'mod'
        # Types with exactly one sequential field can host seq-fragment idioms.
        singular_types_with_single_seq_field = set((name for (name, type_info) in self.ast_wrapper.singular_types.items() if ((len(type_info.fields) == 1) and type_info.fields[0].seq)))
        # 'Type-field' keys for every sequential field of every singular type.
        seq_fields = {'{}-{}'.format(name, field.name): SeqField(name, field) for (name, type_info) in self.ast_wrapper.singular_types.items() for field in type_info.fields if field.seq}
        # Group templates by the type their head node attaches to; the second
        # tuple element records the SeqField when the idiom is a seq fragment.
        templates_by_head_type = collections.defaultdict(list)
        for template in self.templates:
            head_type = template['idiom'][0]
            if (head_type in singular_types_with_single_seq_field):
                # Register both as a fragment of the field's element type and
                # as a whole-node template for the head type itself.
                field = self.ast_wrapper.singular_types[head_type].fields[0]
                templates_by_head_type[field.type].append((template, SeqField(head_type, field)))
                templates_by_head_type[head_type].append((template, None))
            elif (head_type in seq_fields):
                seq_field = seq_fields[head_type]
                templates_by_head_type[seq_field.field.type].append((template, seq_field))
            else:
                templates_by_head_type[head_type].append((template, None))
        types_to_replace = {}
        for (head_type, templates) in templates_by_head_type.items():
            (constructors, seq_fragment_constructors) = ([], [])
            for (template, seq_field) in templates:
                if seq_field:
                    if (head_type in self.ast_wrapper.product_types):
                        # Product types get promoted to a sum type (below), so
                        # the fragment is named after the promoted type.
                        seq_type = '{}_plus_templates'.format(head_type)
                    else:
                        seq_type = head_type
                    seq_fragment_constructors.append(self._template_to_constructor(template, '_{}_seq'.format(seq_type), seq_field))
                else:
                    constructors.append(self._template_to_constructor(template, '', seq_field))
            if (head_type in self.ast_wrapper.constructors):
                assert constructors
                assert (not seq_fragment_constructors)
                self.ast_wrapper.add_constructors_to_sum_type(self.ast_wrapper.constructor_to_sum_type[head_type], constructors)
            elif (head_type in self.ast_wrapper.sum_types):
                assert (not constructors)
                assert seq_fragment_constructors
                self.ast_wrapper.add_seq_fragment_type(head_type, seq_fragment_constructors)
            elif (head_type in self.ast_wrapper.product_types):
                # Promote the product type to a sum type so template
                # constructors can live alongside the original constructor.
                orig_prod_type = self.ast_wrapper.product_types[head_type]
                new_constructor_for_prod_type = asdl.Constructor(name=head_type, fields=orig_prod_type.fields)
                self.ast_wrapper.remove_product_type(head_type)
                name = '{}_plus_templates'.format(head_type)
                self.ast_wrapper.add_sum_type(name, asdl.Sum(types=(constructors + [new_constructor_for_prod_type])))
                self.ast_wrapper.add_seq_fragment_type(name, seq_fragment_constructors)
                types_to_replace[head_type] = name
            elif (head_type in self.ast_wrapper.primitive_types):
                raise NotImplementedError('built-in type as head type of idiom unsupported: {}'.format(head_type))
            else:
                raise NotImplementedError('Unable to handle head type of idiom: {}'.format(head_type))
        # Repoint all fields that referenced a promoted product type.
        for constructor_or_product in self.ast_wrapper.singular_types.values():
            for field in constructor_or_product.fields:
                if (field.type in types_to_replace):
                    field.type = types_to_replace[field.type]
        # Pre-instantiate each template with HoleValuePlaceholders so holes
        # can be located later without re-running the conversion.
        self.templates_containing_placeholders = {}
        for (name, constructor) in self.ast_wrapper.singular_types.items():
            if (not hasattr(constructor, 'template')):
                continue
            hole_values = {}
            for field in constructor.fields:
                hole_id = self.get_hole_id(field.name)
                placeholder = ast_util.HoleValuePlaceholder(id=hole_id, field_name=field.name, type=field.type, is_seq=field.seq, is_opt=field.opt)
                if field.seq:
                    hole_values[hole_id] = [placeholder]
                else:
                    hole_values[hole_id] = placeholder
            self.templates_containing_placeholders[name] = constructor.template(hole_values)
        if (root_type is not None):
            if isinstance(root_type, (list, tuple)):
                # Pick the first requested root type that exists in the wrapper.
                for choice in root_type:
                    if ((choice in self.ast_wrapper.singular_types) or (choice in self.ast_wrapper.sum_types)):
                        self.root_type = choice
                        break
            else:
                self.root_type = root_type

    def parse(self, code, section):
        """Parse *code*, applying idiom conversion for the train section
        (or for all sections when ``all_sections_rewritten``)."""
        if (self.all_sections_rewritten or (section == 'train')):
            return self.convert_idiom_ast(code, template_id=None)()
        else:
            return self.base_grammar.parse(code, section)

    def unparse(self, tree, item):
        """Expand all template nodes back to base-grammar AST and unparse."""
        expanded_tree = self._expand_templates(tree)
        self.base_ast_wrapper.verify_ast(expanded_tree)
        return self.base_grammar.unparse(expanded_tree, item)

    def tokenize_field_value(self, field_value):
        """Delegate field-value tokenization to the base grammar."""
        return self.base_grammar.tokenize_field_value(field_value)

    # NOTE(review): first parameter is named `cls` without a @classmethod
    # decorator — works when called as self.get_hole_id(...), but presumably a
    # decorator was lost; verify against upstream.
    def get_hole_id(cls, field):
        """Extract the integer N from a 'holeN' field name."""
        m = re.match('^hole(\\d+)$', field)
        if (not m):
            raise ValueError('Unexpected field name: {}'.format(field))
        return int(m.group(1))

    def _expand_templates(self, tree):
        """Recursively replace Template* nodes with the AST they stand for."""
        if (not isinstance(tree, dict)):
            return tree
        node_type = tree['_type']
        constructor = self.ast_wrapper.constructors.get(node_type)
        expanded_fields = {}
        for (field, value) in tree.items():
            if (field == '_type'):
                continue
            if isinstance(value, (list, tuple)):
                result = []
                for item in value:
                    converted = self._expand_templates(item)
                    # Seq-fragment templates expand to a node whose single seq
                    # field is spliced inline into the surrounding list.
                    if (isinstance(item, dict) and re.match('^Template\\d+_.*_seq$', item['_type'])):
                        item_type_info = self.ast_wrapper.constructors[converted['_type']]
                        assert (len(item_type_info.fields) == 1)
                        assert item_type_info.fields[0].seq
                        result += converted.get(item_type_info.fields[0].name, [])
                    else:
                        result.append(converted)
                expanded_fields[field] = result
            else:
                expanded_fields[field] = self._expand_templates(value)
        if ((constructor is None) or (not hasattr(constructor, 'template'))):
            return {'_type': node_type, **expanded_fields}
        # Template node: instantiate its stored template with the expanded
        # hole values.
        template = constructor.template
        hole_values = {}
        for (field, expanded_value) in expanded_fields.items():
            hole_id = self.get_hole_id(field)
            hole_values[hole_id] = expanded_value
        return template(hole_values)

    def _template_to_constructor(self, template_dict, suffix, seq_field):
        """Build an asdl.Constructor (one 'holeN' field per hole) for a template."""
        # Walk the idiom tree to determine each hole's field type/seq/opt.
        hole_node_types = {}
        stack = [(None, template_dict['idiom'], None)]
        while stack:
            (parent, node, child_index) = stack.pop()
            (node_type, ref_symbols, hole_id, children) = node
            if (hole_id is not None):
                assert (hole_id not in hole_node_types)
                hyphenated_node_type = None
                unhyphenated_node_type = None
                hole_type_str = template_dict['holes'][hole_id]['type']
                if (hole_type_str == 'AddChild'):
                    node_type_for_field_type = node_type
                elif (hole_type_str == 'ReplaceSelf'):
                    # 'Type-field' names carry the field info directly;
                    # otherwise look at the parent node's type.
                    if ('-' in node_type):
                        node_type_for_field_type = node_type
                    else:
                        node_type_for_field_type = parent[0]
                field_info = self._get_field_info_from_name(node_type_for_field_type)
                if (field_info.seq and (hole_type_str == 'ReplaceSelf') and ('-' not in node_type)):
                    # For seq fields, the child position distinguishes a
                    # single-element hole (0) from a remainder-of-seq hole (1).
                    assert (child_index in (0, 1))
                    seq = (child_index == 1)
                else:
                    seq = field_info.seq
                hole_node_types[hole_id] = (field_info.type, seq, field_info.opt)
            stack += [(node, child, i) for (i, child) in enumerate(children)]
        fields = []
        for hole in template_dict['holes']:
            i = hole['id']
            (field_type, seq, opt) = hole_node_types[i]
            field = asdl.Field(type=field_type, name='hole{}'.format(i), seq=seq, opt=opt)
            field.hole_type = HoleType[hole['type']]
            fields.append(field)
        constructor = asdl.Constructor('Template{}{}'.format(template_dict['id'], suffix), fields)
        # Attach the instantiation function used by _expand_templates.
        constructor.template = self.convert_idiom_ast(template_dict['idiom'], template_id=template_dict['id'], seq_field=seq_field)
        return constructor

    def _get_field_info_from_name(self, node_type):
        """Resolve a 'Type-field' or single-field type name to its Field."""
        if ('-' in node_type):
            (type_name, field_name) = node_type.split('-')
            type_info = self.ast_wrapper.singular_types[type_name]
            (field_info,) = [field for field in type_info.fields if (field.name == field_name)]
        else:
            type_info = self.ast_wrapper.singular_types[node_type]
            assert (len(type_info.fields) == 1)
            field_info = type_info.fields[0]
        return field_info

    # NOTE(review): `cls` without @classmethod, as with get_hole_id above.
    def _node_type(cls, node):
        """Return the type label of an idiom-AST node (str or dict head)."""
        if isinstance(node[0], dict):
            if ('nt' in node[0]):
                return node[0]['nt']
            elif ('template_id' in node[0]):
                return 'Template{}'.format(node[0]['template_id'])
        else:
            return node[0]

    def convert_idiom_ast(self, idiom_ast, template_id=None, seq_fragment_type=None, seq_field=None):
        """Build a closure that converts an idiom AST into a tree dict.

        When *template_id* is given, nodes carry a hole slot and the returned
        closure substitutes values from its ``hole_values`` argument.

        Returns:
            A function hole_values -> tree dict (or hole value, for a
            ReplaceSelf hole at the root).
        """
        # Template idiom nodes have 4 elements (incl. hole id); plain 3.
        if (template_id is not None):
            (node_type, ref_symbols, hole, children) = idiom_ast
        else:
            (node_type, ref_symbols, children) = idiom_ast
        is_template_node = False
        extra_types = []
        if isinstance(node_type, dict):
            if seq_fragment_type:
                suffix = '_{}_seq'.format(seq_fragment_type)
            else:
                suffix = ''
            if ('template_id' in node_type):
                node_type = 'Template{}{}'.format(node_type['template_id'], suffix)
                is_template_node = True
            elif (('nt' in node_type) and ('mt' in node_type)):
                # Node annotated with matching template ids ('mt').
                extra_types = ['Template{}{}'.format(i, suffix) for i in node_type['mt']]
                node_type = node_type['nt']
        if (seq_field is None):
            field_infos = self.ast_wrapper.singular_types[node_type].fields
        else:
            field_infos = [seq_field.field]
        # Pair each field with the idiom subtree holding its value.
        children_to_convert = []
        if is_template_node:
            assert (len(children) == len(field_infos))
            for (field, child) in zip(field_infos, children):
                if ((field.hole_type == HoleType.ReplaceSelf) and field.seq):
                    children_to_convert.append((field, child))
                else:
                    assert (not field.seq)
                    # Wrap the child in a synthetic 'Type-field' node so the
                    # per-field conversion below applies uniformly.
                    dummy_node = list(idiom_ast)
                    dummy_node[0] = '{}-{}'.format(node_type, field.name)
                    dummy_node[(- 1)] = [child]
                    children_to_convert.append((field, dummy_node))
        else:
            fields_by_name = {f.name: f for f in field_infos}
            if (len(field_infos) == 0):
                pass
            elif (len(field_infos) == 1):
                children_to_convert.append((field_infos[0], idiom_ast))
            else:
                # Children are labeled 'NodeType-fieldname'; strip the prefix.
                prefix_len = (len(node_type) + 1)
                for child in children:
                    field_name = self._node_type(child)[prefix_len:]
                    children_to_convert.append((fields_by_name[field_name], child))
        assert (set((field.name for (field, _) in children_to_convert)) == set((field.name for field in field_infos)))

        # NOTE(review): mutable default is only read, never mutated, so the
        # shared default dict is safe here.
        def result_creator(hole_values={}):
            # A ReplaceSelf hole at this node: the hole value replaces the
            # whole node.
            if ((template_id is not None) and (hole is not None) and (self.templates[template_id]['holes'][hole]['type'] == 'ReplaceSelf')):
                return hole_values.get(hole, MissingValue)
            result = {}
            for (field, child_node) in children_to_convert:
                if (field.type in self.ast_wrapper.primitive_types):
                    convert = (lambda node: (lambda hole_values: self.convert_builtin_type(field.type, self._node_type(node))))
                else:
                    convert = functools.partial(self.convert_idiom_ast, template_id=template_id)
                if field.seq:
                    # Sequences are encoded as right-nested cons cells ending
                    # in an 'End' node; walk them iteratively.
                    value = []
                    while True:
                        if ((template_id is not None) and (child_node[2] is not None)):
                            # Seq hole: splice the hole's list value.
                            hole_value = hole_values.get(child_node[2], [])
                            assert isinstance(hole_value, list)
                            value += hole_value
                            assert (len(child_node[(- 1)]) == 0)
                            break
                        (child_type, child_children) = (child_node[0], child_node[(- 1)])
                        if (isinstance(child_type, dict) and ('template_id' in child_type)):
                            value.append(convert(child_node, seq_fragment_type=(field.type if field.seq else None))(hole_values))
                            break
                        if (len(child_children) == 1):
                            assert (self._node_type(child_children[0]) == 'End')
                            break
                        elif (len(child_children) == 2):
                            value.append(convert(child_children[0])(hole_values))
                            child_node = child_children[1]
                        else:
                            raise ValueError('Unexpected number of children: {}'.format(len(child_children)))
                    present = bool(value)
                elif field.opt:
                    if ((template_id is not None) and (child_node[2] is not None)):
                        assert (len(child_node[(- 1)]) == 0)
                        present = (child_node[2] in hole_values)
                        value = hole_values.get(child_node[2])
                    else:
                        assert (len(child_node[(- 1)]) == 1)
                        if (self._node_type(child_node[(- 1)][0]) == 'Null'):
                            value = None
                            present = False
                        else:
                            value = convert(child_node[(- 1)][0])(hole_values)
                            present = (value is not MissingValue)
                elif ((template_id is not None) and (child_node[2] is not None)):
                    assert (len(child_node[(- 1)]) == 0)
                    value = hole_values[child_node[2]]
                    present = True
                else:
                    assert (len(child_node[(- 1)]) == 1)
                    value = convert(child_node[(- 1)][0])(hole_values)
                    present = True
                if present:
                    result[field.name] = value
            result['_type'] = node_type
            result['_extra_types'] = extra_types
            return result
        return result_creator

    def convert_builtin_type(self, field_type, value):
        """Map the literal 'Null' to None for singleton fields; pass through otherwise."""
        if ((field_type == 'singleton') and (value == 'Null')):
            return None
        return value
class SAUNet(nn.Module):
    """Shape-Attentive U-Net: DenseNet-121 encoder, a gated "shape stream"
    producing an edge map, and a dual-attention decoder.

    forward() returns (segmentation logits, edge map) and optionally the
    intermediate attention maps.
    """

    def __init__(self, num_classes=4, num_filters=32, pretrained=True, is_deconv=True):
        super(SAUNet, self).__init__()
        self.num_classes = num_classes
        print('SAUNet w/ Shape Stream')
        self.pool = nn.MaxPool2d(2, 2)
        # DenseNet-121 backbone; its feature blocks are re-used as encoder stages.
        self.encoder = torchvision.models.densenet121(pretrained=pretrained)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        # 1x1 convs collapsing encoder stages to single-channel gating signals.
        self.c3 = nn.Conv2d(256, 1, kernel_size=1)
        self.c4 = nn.Conv2d(512, 1, kernel_size=1)
        self.c5 = nn.Conv2d(1024, 1, kernel_size=1)
        # Shape-stream channel reductions interleaved with residual blocks.
        self.d0 = nn.Conv2d(128, 64, kernel_size=1)
        self.res1 = ResBlock(64, 64)
        self.d1 = nn.Conv2d(64, 32, kernel_size=1)
        self.res2 = ResBlock(32, 32)
        self.d2 = nn.Conv2d(32, 16, kernel_size=1)
        self.res3 = ResBlock(16, 16)
        self.d3 = nn.Conv2d(16, 8, kernel_size=1)
        self.fuse = nn.Conv2d(8, 1, kernel_size=1, padding=0, bias=False)
        # Combines predicted edges with the Canny edge channel.
        self.cw = nn.Conv2d(2, 1, kernel_size=1, padding=0, bias=False)
        self.gate1 = gsc.GatedSpatialConv2d(32, 32)
        self.gate2 = gsc.GatedSpatialConv2d(16, 16)
        self.gate3 = gsc.GatedSpatialConv2d(8, 8)
        self.expand = nn.Sequential(nn.Conv2d(1, num_filters, kernel_size=1), Norm2d(num_filters), nn.ReLU(inplace=True))
        # Encoder stages taken from the DenseNet feature extractor.
        self.conv1 = nn.Sequential(self.encoder.features.conv0, self.encoder.features.norm0)
        self.conv2 = self.encoder.features.denseblock1
        self.conv2t = self.encoder.features.transition1
        self.conv3 = self.encoder.features.denseblock2
        self.conv3t = self.encoder.features.transition2
        self.conv4 = self.encoder.features.denseblock3
        self.conv4t = self.encoder.features.transition3
        self.conv5 = nn.Sequential(self.encoder.features.denseblock4, self.encoder.features.norm5)
        self.center = conv3x3_bn_relu(1024, ((num_filters * 8) * 2))
        # Decoder: dual-attention blocks fusing decoder state with skip features.
        self.dec5 = DualAttBlock(inchannels=[512, 1024], outchannels=512)
        self.dec4 = DualAttBlock(inchannels=[512, 512], outchannels=256)
        self.dec3 = DualAttBlock(inchannels=[256, 256], outchannels=128)
        self.dec2 = DualAttBlock(inchannels=[128, 128], outchannels=64)
        self.dec1 = DecoderBlock(64, 48, num_filters, is_deconv)
        self.dec0 = conv3x3_bn_relu((num_filters * 2), num_filters)
        self.final = nn.Conv2d(num_filters, self.num_classes, kernel_size=1)

    def forward(self, x, return_att=False):
        x_size = x.size()
        # ---- encoder ----
        conv1 = self.conv1(x)
        conv2 = self.conv2t(self.conv2(conv1))
        conv3 = self.conv3t(self.conv3(conv2))
        conv4 = self.conv4t(self.conv4(conv3))
        conv5 = self.conv5(conv4)
        # ---- shape stream: refine edges at input resolution, gated by
        # signals derived from deeper encoder stages ----
        ss = F.interpolate(self.d0(conv2), x_size[2:], mode='bilinear', align_corners=True)
        ss = self.res1(ss)
        c3 = F.interpolate(self.c3(conv3), x_size[2:], mode='bilinear', align_corners=True)
        ss = self.d1(ss)
        (ss, g1) = self.gate1(ss, c3)
        ss = self.res2(ss)
        ss = self.d2(ss)
        c4 = F.interpolate(self.c4(conv4), x_size[2:], mode='bilinear', align_corners=True)
        (ss, g2) = self.gate2(ss, c4)
        ss = self.res3(ss)
        ss = self.d3(ss)
        c5 = F.interpolate(self.c5(conv5), x_size[2:], mode='bilinear', align_corners=True)
        (ss, g3) = self.gate3(ss, c5)
        ss = self.fuse(ss)
        ss = F.interpolate(ss, x_size[2:], mode='bilinear', align_corners=True)
        edge_out = self.sigmoid(ss)
        # Per-sample Canny edges on the channel-mean image.
        # NOTE(review): assumes input intensities are roughly [0, 255] so the
        # uint8 cast is meaningful — confirm against the data pipeline.
        im_arr = np.mean(x.cpu().numpy(), axis=1).astype(np.uint8)
        canny = np.zeros((x_size[0], 1, x_size[2], x_size[3]))
        for i in range(x_size[0]):
            canny[i] = cv2.Canny(im_arr[i], 10, 100)
        # NOTE(review): hard-codes .cuda(); breaks on CPU-only runs.
        canny = torch.from_numpy(canny).cuda().float()
        cat = torch.cat([edge_out, canny], dim=1)
        acts = self.cw(cat)
        acts = self.sigmoid(acts)
        edge = self.expand(acts)
        # ---- decoder with skip connections (skips upsampled 2x first) ----
        conv2 = F.interpolate(conv2, scale_factor=2, mode='bilinear', align_corners=True)
        conv3 = F.interpolate(conv3, scale_factor=2, mode='bilinear', align_corners=True)
        conv4 = F.interpolate(conv4, scale_factor=2, mode='bilinear', align_corners=True)
        center = self.center(self.pool(conv5))
        (dec5, att5) = self.dec5([center, conv5])
        (dec4, att4) = self.dec4([dec5, conv4])
        (dec3, att3) = self.dec3([dec4, conv3])
        (dec2, att2) = self.dec2([dec3, conv2])
        dec1 = self.dec1(dec2)
        # Fuse decoder output with the shape-stream edge features.
        dec0 = self.dec0(torch.cat([dec1, edge], dim=1))
        x_out = self.final(dec0)
        # Upsample attention maps to a common resolution for inspection.
        att2 = F.interpolate(att2, scale_factor=2, mode='bilinear', align_corners=True)
        att3 = F.interpolate(att3, scale_factor=4, mode='bilinear', align_corners=True)
        att4 = F.interpolate(att4, scale_factor=8, mode='bilinear', align_corners=True)
        att5 = F.interpolate(att5, scale_factor=16, mode='bilinear', align_corners=True)
        if return_att:
            return (x_out, edge_out, [att2, att3, att4, att5, g1, g2, g3])
        return (x_out, edge_out)

    def pad(self, x, y):
        """Zero-pad *x* symmetrically so its spatial size matches *y*."""
        diffX = (y.shape[3] - x.shape[3])
        diffY = (y.shape[2] - x.shape[2])
        return nn.functional.pad(x, ((diffX // 2), (diffX - (diffX // 2)), (diffY // 2), (diffY - (diffY // 2))))
def log_agent(agent, file_path):
    """Append one JSON line summarizing *agent*'s episode to *file_path*.

    The record captures the question, gold answer, correctness, first reward
    component, halt flag, run error and the full prompt.
    """
    record = {
        'question': agent.question,
        'answer': agent.key,
        'correct': agent.is_correct(),
        'reward': agent.reward()[0],
        'halted': agent.is_halted(),
        'error': agent.run_error,
        'prompt': agent._build_agent_prompt(),
    }
    with open(file_path, 'a') as fh:
        json.dump(record, fh)
        fh.write('\n')
def _format(val: Any, output_format: str='standard', split: bool=False, errors: str='coarse') -> Any:
    """Compact *val* as an EAN code, handling nulls and invalid values.

    Returns a single-element list: [np.nan] for null-like values, the
    compacted code for valid EANs, and — for invalid input — either the raw
    value (errors='ignore') or np.nan.  Raises ValueError when
    errors='raise'.  *output_format* and *split* are accepted for interface
    compatibility.
    """
    val = str(val)
    if val in NULL_VALUES:
        return [np.nan]
    if not validate_ean(val):
        if errors == 'raise':
            raise ValueError(f'Unable to parse value {val}')
        return [val if errors == 'ignore' else np.nan]
    return [ean.compact(val)]
def DFG_python(root_node, index_to_code, states):
    """Extract a data-flow graph from a tree-sitter Python AST.

    Args:
        root_node: current tree-sitter node.
        index_to_code: dict mapping (start_point, end_point) ->
            (token index, code string).
        states: dict mapping variable name -> list of token indices of its
            latest definition(s); copied, so the caller's dict is untouched.

    Returns:
        (DFG, states) where DFG is a list of tuples
        (code, idx, edge_type, [source codes], [source idxs]) sorted by idx,
        with edge_type 'comesFrom' or 'computedFrom'.
    """
    assignment = ['assignment', 'augmented_assignment', 'for_in_clause']
    if_statement = ['if_statement']
    for_statement = ['for_statement']
    while_statement = ['while_statement']
    do_first_statement = ['for_in_clause']
    def_statement = ['default_parameter']
    # Work on a copy so recursion never mutates the caller's state dict.
    states = states.copy()
    # --- leaf tokens (identifiers, literals, strings) ---
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            # Punctuation/keyword token: no data flow.
            return ([], states)
        elif (code in states):
            # Known variable: link to its latest definition sites.
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    # --- default parameter: name [= value] ---
    elif (root_node.type in def_statement):
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_python(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    # --- assignments (incl. augmented and for-in clauses) ---
    elif (root_node.type in assignment):
        if (root_node.type == 'for_in_clause'):
            right_nodes = [root_node.children[(- 1)]]
            left_nodes = [root_node.child_by_field_name('left')]
        else:
            if (root_node.child_by_field_name('right') is None):
                return ([], states)
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            # Fall back to whole sub-trees when tuple arities don't line up.
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
        DFG = []
        # Evaluate RHS first so its reads use pre-assignment state.
        for node in right_nodes:
            (temp, states) = DFG_python(node, index_to_code, states)
            DFG += temp
        for (left_node, right_node) in zip(left_nodes, right_nodes):
            left_tokens_index = tree_to_variable_index(left_node, index_to_code)
            right_tokens_index = tree_to_variable_index(right_node, index_to_code)
            temp = []
            for token1_index in left_tokens_index:
                (idx1, code1) = index_to_code[token1_index]
                temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                states[code1] = [idx1]
            DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # --- if/elif/else: merge the branch states ---
    elif (root_node.type in if_statement):
        DFG = []
        current_states = states.copy()
        others_states = []
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if (child.type not in ['elif_clause', 'else_clause']):
                (temp, current_states) = DFG_python(child, index_to_code, current_states)
                DFG += temp
            else:
                # Each elif/else branch starts from the pre-if state.
                (temp, new_states) = DFG_python(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        # Without an else branch, the if may be skipped entirely, so the
        # original state is a possible outcome too.
        if (tag is False):
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    # --- for loops: processed twice to pick up loop-carried dependencies ---
    elif (root_node.type in for_statement):
        DFG = []
        for i in range(2):
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
            for node in right_nodes:
                (temp, states) = DFG_python(node, index_to_code, states)
                DFG += temp
            for (left_node, right_node) in zip(left_nodes, right_nodes):
                left_tokens_index = tree_to_variable_index(left_node, index_to_code)
                right_tokens_index = tree_to_variable_index(right_node, index_to_code)
                temp = []
                for token1_index in left_tokens_index:
                    (idx1, code1) = index_to_code[token1_index]
                    temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                    states[code1] = [idx1]
                DFG += temp
            if (root_node.children[(- 1)].type == 'block'):
                (temp, states) = DFG_python(root_node.children[(- 1)], index_to_code, states)
                DFG += temp
        # Deduplicate edges accumulated across the two passes.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # --- while loops: same two-pass + dedup scheme as for loops ---
    elif (root_node.type in while_statement):
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # --- generic node: recurse, visiting for_in_clause children first ---
    else:
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
class CustomModel(torch.nn.Module):
    """Stack of bidirectional LSTMs, each followed by a linear layer and ReLU.

    Intermediate linear layers project to ``projection`` features; the last
    one maps back to ``input_size``, so the module preserves the shape of a
    (batch, time, input_size) input.
    """

    def __init__(self, input_size, rnn_size=256, projection=128, layers=2):
        super().__init__()
        self.layers = torch.nn.ModuleList()
        for idx in range(layers):
            lstm_in = input_size if idx == 0 else projection
            linear_out = input_size if idx == layers - 1 else projection
            self.layers.append(torch.nn.LSTM(input_size=lstm_in, hidden_size=rnn_size, bidirectional=True))
            # Bidirectional LSTM doubles the feature dimension.
            self.layers.append(torch.nn.Linear(in_features=rnn_size * 2, out_features=linear_out))
            self.layers.append(torch.nn.ReLU())

    def forward(self, x):
        """Apply the stack to a batch-first tensor (batch, time, features)."""
        # The sub-layers operate time-first; transpose in and back out.
        out = x.transpose(0, 1)
        for layer in self.layers:
            out = layer(out)
            if isinstance(out, tuple):
                # LSTM returns (output, (h, c)); keep only the output sequence.
                out = out[0]
        return out.transpose(0, 1)
def UniversalTransformerEncoderWithLayer(layer=TransformerEncoderLayer):
    """Return a factory that builds a UniversalTransformerEncoder using *layer*.

    The factory forwards all positional and keyword arguments to
    UniversalTransformerEncoder with *layer* prepended.
    """
    def make_encoder(*args, **kwargs):
        return UniversalTransformerEncoder(layer, *args, **kwargs)
    return make_encoder
class _Simplex(Constraint):
    """Constraint: nonnegative entries summing to 1 along the last dimension."""

    def check(self, value):
        # Both conditions must hold element-wise across the whole tensor;
        # the sum check uses an absolute tolerance of 1e-6.
        nonnegative = (value >= 0).all()
        sums_to_one = ((value.sum(-1, True) - 1).abs() < 1e-06).all()
        return nonnegative & sums_to_one
class cd():
    """Context manager that chdirs into *newPath* and restores the old cwd on exit.

    Usage::

        with cd('/some/dir') as ctx:
            ...  # cwd is /some/dir here
    """

    def __init__(self, newPath):
        self.newPath = newPath

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)
        # Returning self (the original returned None) lets callers bind the
        # manager with `as`; `with cd(p):` usage is unaffected.
        return self

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
def QDM_45_7_1_1_9():
    """Return (G, Mb) for a quasi-difference matrix over the cyclic group Z/45.

    Mb is built by transposing the base matrix M and taking, for every
    column, its first 7 cyclic shifts (in order).
    """
    from sage.rings.finite_rings.integer_mod_ring import IntegerModRing as AdditiveCyclic
    G = AdditiveCyclic(45)
    M = [
        [None, None, None, None, None, None, None, None, None],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 27, 16, 7, -1, -27, -16, -7, 3],
        [24, 40, 1, 35, -24, -40, -1, -35, 7],
        [10, 30, 22, 44, -10, -30, -22, -44, 7],
        [5, 18, 14, 33, -5, -18, -14, -33, 3],
        [30, 16, 33, 27, -30, -16, -33, -27, 0],
    ]
    Mb = [cyclic_shift(column, shift) for column in zip(*M) for shift in range(7)]
    return (G, Mb)
def superconduct(wd):
    """Load superconduct.csv from the directory prefix *wd*.

    The header-less CSV's columns are renamed X_0..X_{k-1} for the features,
    with the final column named 'y'.
    """
    df = pd.read_csv(wd + 'superconduct.csv', header=None)
    n_features = len(df.columns) - 1
    df.columns = ['X_' + str(i) for i in range(n_features)] + ['y']
    return df
def check_sampling_strategy(sampling_strategy, y, sampling_type, **kwargs):
    """Validate and normalize *sampling_strategy* for target *y*.

    For 'ensemble'/'bypass' sampling types the strategy is returned as-is;
    otherwise it is resolved (from a string alias, dict, list, float in
    (0, 1], or callable) into an OrderedDict mapping class label to sample
    count, sorted by class label.

    Raises:
        ValueError: for an unknown sampling type, a single-class target, an
            unknown string strategy, or a float outside (0, 1].
    """
    if sampling_type not in SAMPLING_KIND:
        # Fixed the unbalanced quote around {sampling_type} in this message.
        raise ValueError(f"'sampling_type' should be one of {SAMPLING_KIND}. Got '{sampling_type}' instead.")
    if np.unique(y).size <= 1:
        raise ValueError(f"The target 'y' needs to have more than 1 class. Got {np.unique(y).size} class instead")
    if sampling_type in ('ensemble', 'bypass'):
        return sampling_strategy
    if isinstance(sampling_strategy, str):
        if sampling_strategy not in SAMPLING_TARGET_KIND.keys():
            raise ValueError(f"When 'sampling_strategy' is a string, it needs to be one of {SAMPLING_TARGET_KIND}. Got '{sampling_strategy}' instead.")
        return OrderedDict(sorted(SAMPLING_TARGET_KIND[sampling_strategy](y, sampling_type).items()))
    elif isinstance(sampling_strategy, dict):
        return OrderedDict(sorted(_sampling_strategy_dict(sampling_strategy, y, sampling_type).items()))
    elif isinstance(sampling_strategy, list):
        return OrderedDict(sorted(_sampling_strategy_list(sampling_strategy, y, sampling_type).items()))
    elif isinstance(sampling_strategy, Real):
        if sampling_strategy <= 0 or sampling_strategy > 1:
            raise ValueError(f"When 'sampling_strategy' is a float, it should be in the range (0, 1]. Got {sampling_strategy} instead.")
        return OrderedDict(sorted(_sampling_strategy_float(sampling_strategy, y, sampling_type).items()))
    elif callable(sampling_strategy):
        # The callable produces a dict-like strategy given y (plus kwargs).
        sampling_strategy_ = sampling_strategy(y, **kwargs)
        return OrderedDict(sorted(_sampling_strategy_dict(sampling_strategy_, y, sampling_type).items()))
def test_landscape():
    """to_landscape maps (birth, death) pairs to (birth, persistence) pairs."""
    birth_death = np.array([[1, 1], [1, 2]])
    landscape = PersImage.to_landscape(birth_death)
    np.testing.assert_array_equal(landscape, [[1, 0], [1, 1]])
class OnsagerAlgebra(LieAlgebraWithGenerators, IndexedGenerators):
    """The Onsager (Lie) algebra over a base ring R.

    Basis elements are indexed by pairs: (0, m) for the A_m family (m in ZZ)
    and (1, m) for the G_m family (m a positive integer), with generators
    A0 = A[0] and A1 = A[1].
    """

    def __init__(self, R):
        cat = LieAlgebras(R).WithBasis()
        from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
        IndexedGenerators.__init__(self, FiniteEnumeratedSet([0, 1]))
        LieAlgebraWithGenerators.__init__(self, R, index_set=self._indices, names=('A0', 'A1'), category=cat)

    def _repr_(self):
        return 'Onsager algebra over {}'.format(self.base_ring())

    def _latex_(self):
        from sage.misc.latex import latex
        return '\\mathcal{{O}}_{{{}}}'.format(latex(self.base_ring()))

    def _repr_generator(self, m):
        # m = (family, index): family 0 -> A[...], family 1 -> G[...].
        if (m[0] == 0):
            return 'A[{}]'.format(m[1])
        return 'G[{}]'.format(m[1])

    def _latex_generator(self, m):
        if (m[0] == 0):
            return 'A_{{{}}}'.format(m[1])
        return 'G_{{{}}}'.format(m[1])

    # Term rendering delegates to the generator renderers above.
    _repr_term = _repr_generator
    _latex_term = _latex_generator

    # NOTE(review): bare '_method' below looks like a truncated
    # '@cached_method' decorator lost in extraction — verify upstream.
    _method
    def basis(self):
        """Family of basis elements indexed by ZZ (A_m) and PositiveIntegers (G_m)."""
        from sage.rings.integer_ring import ZZ
        from sage.sets.disjoint_union_enumerated_sets import DisjointUnionEnumeratedSets
        from sage.sets.positive_integers import PositiveIntegers
        I = DisjointUnionEnumeratedSets([ZZ, PositiveIntegers()], keepkey=True, facade=True)
        return Family(I, self.monomial, name='Onsager monomial')

    # NOTE(review): same truncated-decorator artifact as above.
    _method
    def lie_algebra_generators(self):
        """The generators A0 = basis[(0,0)] and A1 = basis[(0,1)]."""
        d = {'A0': self.basis()[(0, 0)], 'A1': self.basis()[(0, 1)]}
        return Family(self._names, d.__getitem__)

    def bracket_on_basis(self, x, y):
        """Bracket of basis keys x, y (family, index) pairs.

        [G, G] = 0; [A_m, G_n] = -2*A_{m-n} + 2*A_{m+n};
        [A_m, A_n] = -G_{n-m}.
        """
        if (x[0] == 1):
            return self.zero()
        R = self.base_ring()
        if (y[0] == 1):
            d = {(0, (x[1] - y[1])): R((- 2)), (0, (x[1] + y[1])): R(2)}
            return self.element_class(self, d)
        return self.element_class(self, {(1, (y[1] - x[1])): (- R.one())})

    def _an_element_(self):
        B = self.basis()
        return ((B[(0, 2)] - (2 * B[(0, (- 3))])) + (3 * B[(1, 2)]))

    def some_elements(self):
        B = self.basis()
        return [B[(0, 0)], B[(0, 2)], B[(0, (- 1))], B[(1, 4)], self.an_element()]

    def quantum_group(self, q=None, c=None):
        """q-deformation; q defaults to the generator of Frac(R[q]), c to q."""
        if (q is None):
            from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
            q = PolynomialRing(self.base_ring(), 'q').fraction_field().gen()
        if (c is None):
            c = q
        else:
            c = q.parent()(c)
        return QuantumOnsagerAlgebra(self, q, c)

    def alternating_central_extension(self):
        """The alternating central extension over the same base ring."""
        return OnsagerAlgebraACE(self.base_ring())

    Element = LieAlgebraElement
def simGetObjectMatrix(objectHandle, relativeToObjectHandle):
    """Query an object's 3x4 transformation matrix via the sim C API.

    Args:
        objectHandle: handle of the object to query.
        relativeToObjectHandle: handle the matrix is expressed relative to.

    Returns:
        The 12 matrix entries as a Python list of floats.
    """
    out_matrix = ffi.new('float[12]')
    ret_code = lib.simGetObjectMatrix(objectHandle, relativeToObjectHandle, out_matrix)
    # Raises on a non-success return code from the C library.
    _check_return(ret_code)
    return list(out_matrix)
class TestCaseCoverageFunction(TestCaseChromosomeComputation, CoverageFunction, metaclass=abc.ABCMeta):
    """Abstract base combining test-case chromosome computation with the
    coverage-function interface (see the two base classes)."""
def test_gaussian_filter():
    """float16 input is unsupported by ndimage and must raise RuntimeError."""
    half_precision = numpy.array([1], dtype=numpy.float16)
    with assert_raises(RuntimeError):
        ndimage.gaussian_filter(half_precision, 1.0)
def ttest(A: dace.float32[(M, N, K)], B: dace.float32[(M, N, K)]):
    """DaCe program over symbolic sizes M, N, K.

    Fills ``t`` with 1.0 via nested maps (writing ``s`` before and after the
    assignment — the first write of ``s`` reads ``t`` uninitialized, then is
    overwritten; presumably intentional for dataflow testing — confirm), then
    accumulates ``t += 5*A`` and ``B -= t`` in place.
    """
    # NOTE(review): ``s`` is int32 while ``t`` is A.dtype (float32), so the
    # stores into ``s`` truncate — confirm this is intended.
    s = np.ndarray(shape=(K, N, M), dtype=np.int32)
    t = np.ndarray(A.shape, A.dtype)
    for i in dace.map[0:M]:
        for j in dace.map[0:N]:
            for k in dace.map[0:K]:
                s[(k, j, i)] = t[(i, j, k)]
                t[(i, j, k)] = 1.0
                s[(k, j, i)] = t[(i, j, k)]
    t += (5 * A)
    B -= t
def preprocess_for_lm(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize multi-turn conversations and build LM targets with masked spans.

    Each ``source`` (presumably a list of {'from', 'value'} sentence dicts —
    TODO confirm against callers) is formatted into one conversation string
    with a shared header, tokenized, and copied as targets; per-sentence token
    lengths then drive ``_mask_targets`` to mask out non-learnable spans.
    Not valid for conversation versions 'v1'/'mpt' (asserted).
    """
    assert (conversation_lib.default_conversation.version not in ('v1', 'mpt'))
    conversations = []
    for source in sources:
        header = DEFAULT_CONVERSATION_HEADER
        conversation = sentences_to_formatted_conversation(header, source)
        conversations.append(conversation)
    conversations_tokenized = _tokenize_fn(conversations, tokenizer)
    input_ids = conversations_tokenized['input_ids']
    # Targets start as a deep copy of the inputs; masking mutates them in place.
    targets = copy.deepcopy(input_ids)
    for (target, source) in zip(targets, sources):
        # Per-piece lengths: header first, then each sentence value, so the
        # mask offsets line up with the formatted conversation.
        tokenized_lens = _tokenize_fn(([header] + [s['value'] for s in source]), tokenizer)['input_ids_lens']
        speakers = [sentence['from'] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)
    return dict(input_ids=input_ids, labels=targets)
def print_hparams(hparams, skip_patterns=None, header=None):
    """Print hyperparameters one per line, optionally preceded by a header.

    Keys containing any substring in ``skip_patterns`` are suppressed.
    """
    if header:
        print_out(('%s' % header))
    values = hparams.values()
    for key in sorted(values.keys()):
        skipped = skip_patterns and any(skip_pattern in key for skip_pattern in skip_patterns)
        if not skipped:
            print_out((' %s=%s' % (key, str(values[key]))))
def _minimize_level(G):
    """Find the smallest level at which the congruence data of ``G`` lives.

    ``G`` is a matrix group over Zmod(N).  For each proper divisor ``d`` of N
    (smallest first), count generators-as-elements that reduce to the identity
    mod d; if that count equals the index ratio [Gamma(d) : Gamma(N)], the
    group is the full preimage of its reduction mod d, so it can be replaced
    by that reduction.  Returns ZZ(1)/ZZ(N) when the reduced group is trivial,
    otherwise a new MatrixGroup at the minimized level.
    """
    from sage.groups.matrix_gps.finitely_generated import MatrixGroup
    from .congroup_gamma import Gamma_constructor as Gamma
    Glist = list(G)
    N = G.base_ring().characteristic()
    i = Gamma(N).index()
    # N.divisors()[:-1] excludes N itself — only proper divisors are candidates.
    for d in N.divisors()[:(- 1)]:
        j = Gamma(d).index()
        k = len([g for g in Glist if (g.matrix().change_ring(Zmod(d)) == 1)])
        if (k == (i // j)):
            if (d == 1):
                return ZZ(1)
            G = MatrixGroup([g.matrix().change_ring(Zmod(d)) for g in G.gens()])
            N = d
            break
    # Drop identity generators; matrices must be immutable to be hashable in Set.
    new_gens = [x.matrix() for x in G.gens() if (x.matrix() != 1)]
    all((x.set_immutable() for x in new_gens))
    new_gens = list(Set(new_gens))
    if (not new_gens):
        return ZZ(N)
    return MatrixGroup(new_gens)
def check_readme(overwrite=False):
    """Check (and optionally fix) the model list in the main README.

    Verifies that every model in the auto-configuration name mapping appears in
    the README's delimited model section (modulo ``SPECIAL_MODEL_NAMES`` and
    ``MODELS_NOT_IN_README``) and that the list is sorted; with
    ``overwrite=True`` the section is rewritten in place, otherwise a
    ValueError is raised describing what to do.
    """
    info = LOCALIZED_READMES['README.md']
    (models, start_index, end_index, lines) = _find_text_in_file(os.path.join(REPO_PATH, 'README.md'), info['start_prompt'], info['end_prompt'])
    # Each README entry looks like "**[Name](link)..." — capture the display name.
    models_in_readme = [re.search('\\*\\*\\[([^\\]]*)', line).groups()[0] for line in models.strip().split('\n')]
    model_names_mapping = transformers_module.models.auto.configuration_auto.MODEL_NAMES_MAPPING
    absents = [(key, name) for (key, name) in model_names_mapping.items() if (SPECIAL_MODEL_NAMES.get(name, name) not in models_in_readme)]
    # Remove models that are deliberately excluded from the README.
    absents = [(key, name) for (key, name) in absents if (name not in MODELS_NOT_IN_README)]
    if ((len(absents) > 0) and (not overwrite)):
        print(absents)
        raise ValueError("The main README doesn't contain all models, run `make fix-copies` to fill it with the missing model(s) then complete the generated entries.\nIf the model is not supposed to be in the main README, add it to the list `MODELS_NOT_IN_README` in utils/check_copies.py.\nIf it has a different name in the repo than in the README, map the correspondence in `SPECIAL_MODEL_NAMES` in utils/check_copies.py.")
    new_models = [README_TEMPLATE.format(model_name=name, model_type=key) for (key, name) in absents]
    all_models = (models.strip().split('\n') + new_models)
    # Sort case-insensitively by display name.
    all_models = sorted(all_models, key=(lambda x: re.search('\\*\\*\\[([^\\]]*)', x).groups()[0].lower()))
    all_models = ('\n'.join(all_models) + '\n')
    if (all_models != models):
        if overwrite:
            print('Fixing the main README.')
            with open(os.path.join(REPO_PATH, 'README.md'), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(((lines[:start_index] + [all_models]) + lines[end_index:]))
        else:
            raise ValueError('The main README model list is not properly sorted. Run `make fix-copies` to fix this.')
class MetaLearner(BaseMetaLearner):
    """MAML-style meta reinforcement learner (TensorFlow eager).

    Per task: sample episodes, take one fast (inner) policy-gradient step,
    sample again with the adapted parameters, then meta-optimize the policy
    across tasks with a TRPO-like surrogate objective.
    """
    def __init__(self, sampler, policy, baseline, optimizer, gamma=0.95, fast_lr=0.5, tau=1.0):
        # gamma: discount factor; fast_lr: inner-adaptation step size;
        # tau: GAE lambda-style parameter forwarded to ``episodes.gae``.
        self.sampler = sampler
        self.policy = policy
        self.baseline = baseline
        self.gamma = gamma
        self.fast_lr = fast_lr
        self.tau = tau
        self.optimizer = optimizer
        # Adapted (post-inner-step) parameters, appended once per sampled task.
        self.params = list()
    def inner_loss(self, episodes, params=None):
        """REINFORCE loss with normalized GAE advantages for the inner step."""
        values = self.baseline(episodes)
        advantages = episodes.gae(values, tau=self.tau)
        advantages = weighted_normalize(advantages, weights=episodes.mask)
        pi = self.policy(episodes.observations, params=params)
        log_probs = pi.log_prob(episodes.actions)
        if (len(log_probs.shape) > 2):
            # Multi-dimensional actions: sum log-probs over the action axis.
            log_probs = tf.reduce_sum(log_probs, axis=2)
        loss = (- weighted_mean((log_probs * advantages), axis=0, weights=episodes.mask))
        return loss
    def adapt(self, episodes, first_order=False):
        """One inner gradient step; returns the adapted parameters.

        NOTE(review): ``first_order`` is accepted but unused here — confirm
        whether first-order MAML was intended to alter the tape behavior.
        """
        # Fit the value-function baseline before computing advantages.
        self.baseline.fit(episodes)
        with tf.GradientTape() as tape:
            loss = self.inner_loss(episodes)
        grads = tape.gradient(loss, self.policy.get_trainable_variables())
        params = self.policy.update_params(grads, step_size=self.fast_lr)
        return params
    def sample(self, tasks, first_order=False):
        """For each task: sample pre-update episodes, adapt, sample post-update episodes."""
        episodes = []
        for (idx, task) in enumerate(tasks):
            self.sampler.reset_task(task)
            train_episodes = self.sampler.sample(self.policy, gamma=self.gamma)
            params = self.adapt(train_episodes, first_order=first_order)
            self.params.append(params)
            valid_episodes = self.sampler.sample(self.policy, params=params, gamma=self.gamma)
            episodes.append((train_episodes, valid_episodes))
        return episodes
    def surrogate_loss(self, episodes, old_pis=None):
        """TRPO surrogate: mean ratio-weighted advantage loss, mean KL, and per-task policies."""
        (losses, kls, pis) = ([], [], [])
        if (old_pis is None):
            old_pis = ([None] * len(episodes))
        for ((train_episodes, valid_episodes), old_pi) in zip(episodes, old_pis):
            # Re-adapt on the training episodes so the loss is differentiable
            # through the inner step.
            params = self.adapt(train_episodes)
            pi = self.policy(valid_episodes.observations, params=params)
            pis.append(detach_distribution(pi))
            if (old_pi is None):
                old_pi = detach_distribution(pi)
            values = self.baseline(valid_episodes)
            advantages = valid_episodes.gae(values, tau=self.tau)
            advantages = weighted_normalize(advantages, weights=valid_episodes.mask)
            # Importance ratio between current adapted policy and the frozen one.
            log_ratio = (pi.log_prob(valid_episodes.actions) - old_pi.log_prob(valid_episodes.actions))
            if (len(log_ratio.shape) > 2):
                log_ratio = tf.reduce_sum(log_ratio, axis=2)
            ratio = tf.exp(log_ratio)
            loss = (- weighted_mean((ratio * advantages), axis=0, weights=valid_episodes.mask))
            losses.append(loss)
            mask = valid_episodes.mask
            if (len(valid_episodes.actions.shape) > 2):
                mask = tf.expand_dims(mask, axis=2)
            kl = weighted_mean(old_pi.kl_divergence(pi), axis=0, weights=mask)
            kls.append(kl)
        mean_outer_kl = tf.reduce_mean(tf.stack(kls, axis=0))
        meta_objective = tf.reduce_mean(tf.stack(losses, axis=0))
        return (meta_objective, mean_outer_kl, pis)
    def step(self, episodes):
        """One meta-optimization step on the flattened surrogate gradient."""
        train_vars = self.policy.get_trainable_variables()
        with tf.GradientTape() as tape:
            (old_loss, _, old_pis) = self.surrogate_loss(episodes)
        grads = tape.gradient(old_loss, train_vars)
        grads = flatgrad(grads, train_vars)
        assert np.isfinite(grads).all(), 'gradient not finite'
        self.optimizer.optimize(grads, episodes, self.params)
def plot_SP_histogram_by_class(ax, spcorr, yhat, bins=30):
    """Histogram Spearman correlations on ``ax``, optionally split by predicted class.

    ``spcorr`` is a sequence of (rho, pvalue) pairs; ``yhat`` the model
    predictions.  When yhat is 1-D (or a single column) and rounds to fewer
    than 4 classes, one overlaid histogram is drawn per class; otherwise a
    single overall histogram.  Returns a DataFrame with the fraction of
    p-values <= 0.05 plus mean/std of rho, overall and per class.
    """
    sprho = np.array([x[0] for x in spcorr])
    sppval = np.array([x[1] for x in spcorr])
    measures = {'pval_sig': {}, 'mean': {}, 'std': {}}
    measures['pval_sig']['Overall'] = '{:.2f}'.format(((sppval <= 0.05).sum() / len(sppval)))
    measures['mean']['Overall'] = np.mean(sprho)
    measures['std']['Overall'] = np.std(sprho)
    unique_y = None
    if ((len(yhat.shape) == 1) or (yhat.shape[1] == 1)):
        yhat = yhat.flatten()
        # Round continuous predictions to discrete class labels.
        yhat = np.round(yhat)
        unique_y = np.sort(np.unique(yhat))
    if ((unique_y is not None) and (len(unique_y) < 4)):
        for y in unique_y:
            rho = sprho[(yhat == y)]
            pval = sppval[(yhat == y)]
            measures['pval_sig'][str(int(y))] = '{:.2f}'.format(((pval <= 0.05).sum() / len(pval)))
            measures['mean'][str(int(y))] = np.mean(rho)
            measures['std'][str(int(y))] = np.std(rho)
            # Weights normalize each class histogram to fractions of that class.
            ax.hist(rho, bins=bins, range=((- 1.0), 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=(np.ones(len(rho)) / len(rho)))
    else:
        ax.hist(sprho, bins=bins, range=((- 1.0), 1.0), alpha=0.6, linewidth=0.5, edgecolor='k', weights=(np.ones(len(sprho)) / len(sprho)))
    return pd.DataFrame(measures)
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = ((labels >= 0) & (labels != ignore_index))
inds = torch.nonzero((valid_mask & (labels < label_channels)), as_tuple=False)
if (inds.numel() > 0):
bin_labels[(inds, labels[inds])] = 1
valid_mask = valid_mask.view((- 1), 1).expand(labels.size(0), label_channels).float()
if (label_weights is None):
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view((- 1), 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return (bin_labels, bin_label_weights, valid_mask) |
def parse_sql(toks, start_idx, tables_with_alias, schema):
    """Parse one (possibly parenthesized) SQL statement from ``toks``.

    Returns (index after the statement, nested sql dict with keys 'from',
    'select', 'where', 'groupBy', 'having', 'orderBy', 'limit' and one key per
    set operator in SQL_OPS).  Clause parsers are the module-level
    ``parse_*`` helpers, each returning (next index, parsed clause).
    """
    isBlock = False
    len_ = len(toks)
    idx = start_idx
    sql = {}
    if (toks[idx] == '('):
        isBlock = True
        idx += 1
    # FROM is parsed first (from start_idx) to learn the default tables that
    # the SELECT columns may implicitly refer to; SELECT is then parsed from
    # ``idx`` and the cursor jumps past the FROM clause afterwards.
    (from_end_idx, table_units, conds, default_tables) = parse_from(toks, start_idx, tables_with_alias, schema)
    sql['from'] = {'table_units': table_units, 'conds': conds}
    (_, select_col_units) = parse_select(toks, idx, tables_with_alias, schema, default_tables)
    idx = from_end_idx
    sql['select'] = select_col_units
    (idx, where_conds) = parse_where(toks, idx, tables_with_alias, schema, default_tables)
    sql['where'] = where_conds
    (idx, group_col_units) = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['groupBy'] = group_col_units
    (idx, having_conds) = parse_having(toks, idx, tables_with_alias, schema, default_tables)
    sql['having'] = having_conds
    (idx, order_col_units) = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
    sql['orderBy'] = order_col_units
    (idx, limit_val) = parse_limit(toks, idx)
    sql['limit'] = limit_val
    idx = skip_semicolon(toks, idx)
    if isBlock:
        # A parenthesized sub-statement must be explicitly closed.
        assert (toks[idx] == ')')
        idx += 1
        idx = skip_semicolon(toks, idx)
    # Set operators (INTERSECT/UNION/EXCEPT): default None, recurse on match.
    for op in SQL_OPS:
        sql[op] = None
    if ((idx < len_) and (toks[idx] in SQL_OPS)):
        sql_op = toks[idx]
        idx += 1
        (idx, IUE_sql) = parse_sql(toks, idx, tables_with_alias, schema)
        sql[sql_op] = IUE_sql
    return (idx, sql)
def convex_combination(classes, TP, TOP, P, class_name, modified=False):
    """Convex-combination statistic for one class from confusion-matrix counts.

    ``TP``/``TOP``/``P`` map class name -> true positives / test outcome
    positives / condition positives.  ``modified`` applies the modified
    variant (subtracting TP terms).  Returns the string ``'None'`` on any
    failure (missing key, zero denominator, ...), matching the file's
    error convention.
    """
    try:
        # alpha switches off the TP_sum correction in the binary case.
        alpha = 0 if len(classes) == 2 else 1
        top_total = sum(TOP.values())
        tp_total = sum(TP.values())
        numerator = TOP[class_name] + P[class_name]
        denominator = 2 * top_total
        if modified:
            denominator -= alpha * tp_total
            numerator -= TP[class_name]
        return numerator / denominator
    except Exception:
        return 'None'
class OAuth2ClientCredentialsAuthorizationDef(BaseDef):
    """OAuth2 client-credentials flow: fetch/cache a bearer token and attach it to requests."""
    type: str = Field('OAuth2', const=True)
    grant_type: str = Field('ClientCredentials', const=True)
    # URL of the token endpoint used for the client-credentials grant.
    token_server_url: str
    def build(self, req_data: Dict[(str, Any)], params: Dict[(str, Any)], storage: Optional[Dict[(str, Any)]]=None) -> None:
        """Ensure a valid access token in ``storage`` and set the Authorization header on ``req_data``.

        ``params`` must contain 'client_id'/'client_secret'; ``storage`` caches
        'access_token' and 'expires_at' across calls.  Raises ValueError when
        storage is missing and RuntimeError for non-bearer token types.
        """
        if (storage is None):
            raise ValueError('storage is required for OAuth2')
        # Refresh when no token is cached or the cached one has expired.
        if (('access_token' not in storage) or (storage.get('expires_at', 0) < time())):
            validate_auth({'client_id', 'client_secret'}, params)
            ckey = params['client_id']
            csecret = params['client_secret']
            # HTTP Basic credentials: base64("client_id:client_secret").
            b64cred = b64encode(f'{ckey}:{csecret}'.encode('ascii')).decode()
            headers = {'Authorization': f'Basic {b64cred}', 'Content-Type': 'application/x-www-form-urlencoded'}
            # NOTE(review): ``params`` is rebound here, shadowing the argument
            # — confirm nothing after this point expects the original params.
            params = {'grant_type': 'client_credentials'}
            requests = Request(self.token_server_url)
            response = requests.post(_headers=headers, _data=params)
            resp: Dict[(str, Any)] = json.loads(response.read())
            if (resp['token_type'].lower() != 'bearer'):
                raise RuntimeError('token_type is not bearer')
            access_token = resp['access_token']
            storage['access_token'] = access_token
            if ('expires_in' in resp):
                # Expire 60s early to avoid using a token at its deadline.
                storage['expires_at'] = ((time() + resp['expires_in']) - 60)
        req_data['headers']['Authorization'] = f"Bearer {storage['access_token']}"
def get_erased_3D_path_blocks(path, item=AIR):
    """Map every path position to ``item``, keyed as (x, z, y) — the last two axes are swapped."""
    erased = {}
    for pos in path:
        erased[(pos[0], pos[2], pos[1])] = item
    return erased
def reset_session():
    """Clear the global Keras session, using the TF1 or TF2 backend API as appropriate."""
    clear = K.clear_session if _is_tf_1() else tf.keras.backend.clear_session
    clear()
class PermissionsException(SkyplaneException):
    """Raised when an operation fails due to insufficient cloud permissions."""
    def pretty_print_str(self):
        """Return the exception message formatted with rich console markup."""
        err = f'[bold][red]PermissionsException: {str(self)}[/red][/bold]'
        return err
def extract_ljspeech(data_folder, splits, kmeans_folder, encoder, layer, save_folder, sample_rate=16000, skip_extract=False):
    """Extract discrete speech units for LJSpeech with a wav2vec2 encoder + K-means.

    For each split's JSON manifest, every utterance is resampled to
    ``sample_rate``, encoded, the hidden states of ``layer`` are quantized
    with the pretrained K-means model, and the code sequence is saved as
    ``codes/<key>.npy`` under ``save_folder``.  A config snapshot is pickled
    at the end so completed runs can be skipped.
    """
    logger = setup_logger()
    if skip_extract:
        return
    # Config snapshot used by ``skip`` to detect an already-completed run.
    conf = {'data_folder': data_folder, 'splits': splits, 'save_folder': save_folder, 'kmeans_folder': kmeans_folder, 'encoder': encoder, 'layer': layer}
    save_folder = pl.Path(save_folder)
    if skip(splits, save_folder, conf):
        logger.info('Skipping code extraction, completed in previous run.')
        return
    device = get_device(use_cuda=True)
    save_opt = (save_folder / OPT_FILE)
    data_folder = pl.Path(data_folder)
    kmeans_folder = pl.Path(kmeans_folder)
    kmeans_ckpt = (kmeans_folder / 'kmeans.ckpt')
    encoder_save_path = (kmeans_folder / 'pretrained_models')
    code_folder = (save_folder / 'codes')
    code_folder.mkdir(parents=True, exist_ok=True)
    logger.info(f'Loading encoder: {encoder} ...')
    # Frozen feature extractor; all hidden layers are returned so ``layer``
    # can be selected below.
    encoder = HuggingFaceWav2Vec2(encoder, encoder_save_path.as_posix(), output_all_hiddens=True, output_norm=False, freeze_feature_extractor=True, freeze=True).to(device)
    logger.info(f'Loading K-means model from {kmeans_ckpt} ...')
    kmeans_model = joblib.load(open(kmeans_ckpt, 'rb'))
    kmeans_model.verbose = False
    for split in splits:
        dataset_path = (data_folder / f'{split}.json')
        logger.info(f'Reading dataset from {dataset_path} ...')
        meta_json = json.load(open(dataset_path))
        for key in tqdm(meta_json.keys()):
            item = meta_json[key]
            wav = item['wav']
            with torch.no_grad():
                info = torchaudio.info(wav)
                audio = sb.dataio.dataio.read_audio(wav)
                audio = torchaudio.transforms.Resample(info.sample_rate, sample_rate)(audio)
                audio = audio.unsqueeze(0).to(device)
                feats = encoder.extract_features(audio)
                feats = feats[layer]
                feats = np_array(feats)
            pred = kmeans_model.predict(feats)
            np.save((code_folder / f'{key}.npy'), pred)
    logger.info('Extraction completed.')
    save_pkl(conf, save_opt)
def test__reset_cache_for_result():
    """_reset_cache_for_result must invalidate the result cache plus each chromosome's cache and last execution result."""
    chromosome = MagicMock()
    result = MagicMock(test_case_chromosomes=[chromosome])
    with mock.patch.object(chromosome, 'invalidate_cache') as chromosome_cache_mock, \
         mock.patch.object(chromosome, 'remove_last_execution_result') as chromosome_result_mock, \
         mock.patch.object(result, 'invalidate_cache') as result_cache_mock:
        gen._reset_cache_for_result(result)
    result_cache_mock.assert_called_once()
    chromosome_cache_mock.assert_called_once()
    chromosome_result_mock.assert_called_once()
def save_dataset(dataset: xr.Dataset, outdir: os.PathLike, use_hdf5: Optional[bool]=True, job_type: Optional[str]=None, **kwargs) -> Path:
    """Persist an xarray Dataset under ``outdir`` and return the written path.

    Tries HDF5 first (``dataset.h5`` or ``<job_type>_data.h5``); if the
    dataset contains types h5py cannot store (TypeError), falls back to
    netCDF4 (``dataset.nc`` / ``<job_type>_dataset.nc``), appending when the
    file already exists.
    """
    if use_hdf5:
        fname = ('dataset.h5' if (job_type is None) else f'{job_type}_data.h5')
        outfile = Path(outdir).joinpath(fname)
        try:
            dataset_to_h5pyfile(outfile, dataset=dataset, **kwargs)
        except TypeError:
            log.warning('Unable to save as `.h5` file, falling back to `netCDF4`')
            # Recursive retry with the netCDF branch.
            save_dataset(dataset, outdir=outdir, use_hdf5=False, job_type=job_type, **kwargs)
    else:
        fname = ('dataset.nc' if (job_type is None) else f'{job_type}_dataset.nc')
        outfile = Path(outdir).joinpath(fname)
        # Append to an existing file, otherwise create it.
        mode = ('a' if outfile.is_file() else 'w')
        log.info(f'Saving dataset to: {outfile.as_posix()}')
        outfile.parent.mkdir(exist_ok=True, parents=True)
        dataset.to_netcdf(outfile.as_posix(), mode=mode)
    # NOTE(review): after an h5 failure this still returns the .h5 path even
    # though the data went to .nc — confirm callers tolerate that.
    return outfile
def lower_abbreviation_in_string(string_to_format: str):
    """Lowercase the text inside each well-formed ``[...]`` pair of the string.

    A segment after '[' must contain exactly one ']' to be valid; if any
    segment is malformed the function returns ''.  Segments of length <= 1
    contribute a bare ']' (preserving the original's quirk for degenerate
    brackets).
    """
    pieces = string_to_format.split('[')
    well_formed = True
    out = [pieces[0]]
    for piece in pieces[1:]:
        if len(piece) <= 1:
            # Degenerate segment (empty or a single char): emit ']' only.
            out.append(']')
            continue
        if piece.count(']') != 1:
            well_formed = False
            continue
        abbrev, _, tail = piece.partition(']')
        out.append('[' + abbrev.lower() + ']' + tail)
    return ''.join(out) if well_formed else ''
class SENet(nn.Module):
    """Squeeze-and-excitation attention head over a feature map.

    NOTE(review): ``type_of_connection`` is accepted but never used, and the
    pooled ``mask`` computed in ``forward`` is discarded — confirm whether
    mask-conditioning was intended here.
    """
    def __init__(self, channel, type_of_connection=BasicLearningBlock):
        super(SENet, self).__init__()
        # SE block with reduction ratio 16.
        self.attention = SEBlock(channel, 16)
    def forward(self, feature, mask):
        (_, _, w, _) = feature.size()
        (_, _, mw, _) = mask.size()
        # Downsample the mask to the feature resolution and binarize it
        # (result currently unused — see class note).
        mask = torch.round(F.avg_pool2d(mask, 2, stride=(mw // w)))
        result = self.attention(feature)
        return result
class RouterNetTopo(Topo):
    """Quantum-router network topology built from a JSON config file.

    Extends ``Topo`` with quantum-router/BSM-node construction, automatic
    meet-in-the-middle BSM insertion for quantum connections, and static
    forwarding tables computed via Dijkstra on the quantum-channel graph.
    """
    # Keys used in the JSON configuration, plus node-type names.
    ALL_GROUP = 'groups'
    ASYNC = 'async'
    BSM_NODE = 'BSMNode'
    GROUP = 'group'
    IP = 'ip'
    IS_PARALLEL = 'is_parallel'
    LOOKAHEAD = 'lookahead'
    MEET_IN_THE_MID = 'meet_in_the_middle'
    MEMO_ARRAY_SIZE = 'memo_size'
    PORT = 'port'
    PROC_NUM = 'process_num'
    QUANTUM_ROUTER = 'QuantumRouter'
    def __init__(self, conf_file_name: str):
        # Maps BSM-node name -> list of the (two) router names attached to it.
        self.bsm_to_router_map = {}
        super().__init__(conf_file_name)
    def _load(self, filename):
        """Parse the JSON config and build the whole simulation in dependency order."""
        with open(filename, 'r') as fh:
            config = load(fh)
        self._get_templates(config)
        # Expanding quantum connections must happen before nodes/channels are
        # created, since it appends auto-generated BSM nodes and channels.
        if (not config[self.IS_PARALLEL]):
            self._add_qconnections(config)
        self._add_timeline(config)
        self._map_bsm_routers(config)
        self._add_nodes(config)
        self._add_bsm_node_to_router()
        self._add_qchannels(config)
        self._add_cchannels(config)
        self._add_cconnections(config)
        self._generate_forwarding_table(config)
    def _add_timeline(self, config):
        """Create the (sequential) simulation timeline; parallel mode needs an extra package."""
        stop_time = config.get(Topo.STOP_TIME, float('inf'))
        if config.get(self.IS_PARALLEL, False):
            raise Exception("Please install 'psequence' package for parallel simulations.")
        else:
            self.tl = Timeline(stop_time)
    def _map_bsm_routers(self, config):
        """Record which routers feed each BSM node (quantum-channel destinations)."""
        for qc in config[Topo.ALL_Q_CHANNEL]:
            (src, dst) = (qc[Topo.SRC], qc[Topo.DST])
            if (dst in self.bsm_to_router_map):
                self.bsm_to_router_map[dst].append(src)
            else:
                self.bsm_to_router_map[dst] = [src]
    def _add_nodes(self, config):
        """Instantiate BSMNode/QuantumRouter objects from the node definitions."""
        for node in config[Topo.ALL_NODE]:
            seed = node[Topo.SEED]
            node_type = node[Topo.TYPE]
            name = node[Topo.NAME]
            template_name = node.get(Topo.TEMPLATE, None)
            template = self.templates.get(template_name, {})
            if (node_type == self.BSM_NODE):
                # A BSM node needs to know its attached routers at creation.
                others = self.bsm_to_router_map[name]
                node_obj = BSMNode(name, self.tl, others, component_templates=template)
            elif (node_type == self.QUANTUM_ROUTER):
                memo_size = node.get(self.MEMO_ARRAY_SIZE, 0)
                node_obj = QuantumRouter(name, self.tl, memo_size, component_templates=template)
            else:
                raise ValueError("Unknown type of node '{}'".format(node_type))
            node_obj.set_seed(seed)
            if (node_type in self.nodes):
                self.nodes[node_type].append(node_obj)
            else:
                self.nodes[node_type] = [node_obj]
    def _add_bsm_node_to_router(self):
        """Tell each router pair about the BSM node sitting between them."""
        for bsm in self.bsm_to_router_map:
            (r0_str, r1_str) = self.bsm_to_router_map[bsm]
            r0 = self.tl.get_entity_by_name(r0_str)
            r1 = self.tl.get_entity_by_name(r1_str)
            if (r0 is not None):
                r0.add_bsm_node(bsm, r1_str)
            if (r1 is not None):
                r1.add_bsm_node(bsm, r0_str)
    def _add_qconnections(self, config):
        """Expand high-level quantum connections into BSM node + channels.

        For each meet-in-the-middle connection an auto-named BSM node is
        appended to the config, together with one quantum channel and a pair
        of classical channels (both directions) per endpoint, each covering
        half the end-to-end distance/delay.
        """
        for q_connect in config.get(Topo.ALL_QC_CONNECT, []):
            node1 = q_connect[Topo.CONNECT_NODE_1]
            node2 = q_connect[Topo.CONNECT_NODE_2]
            attenuation = q_connect[Topo.ATTENUATION]
            # BSM sits midway, so each leg is half the distance.
            distance = (q_connect[Topo.DISTANCE] // 2)
            channel_type = q_connect[Topo.TYPE]
            cc_delay = []
            # Collect classical delays defined between the two endpoints,
            # from either individual channels or bidirectional connections;
            # the default converts distance to delay (units per the config
            # conventions — presumably ps from m — TODO confirm).
            for cc in config.get(self.ALL_C_CHANNEL, []):
                if ((cc[self.SRC] == node1) and (cc[self.DST] == node2)):
                    delay = cc.get(self.DELAY, (cc.get(self.DISTANCE, 1000) / 0.0002))
                    cc_delay.append(delay)
                elif ((cc[self.SRC] == node2) and (cc[self.DST] == node1)):
                    delay = cc.get(self.DELAY, (cc.get(self.DISTANCE, 1000) / 0.0002))
                    cc_delay.append(delay)
            for cc in config.get(self.ALL_CC_CONNECT, []):
                if (((cc[self.CONNECT_NODE_1] == node1) and (cc[self.CONNECT_NODE_2] == node2)) or ((cc[self.CONNECT_NODE_1] == node2) and (cc[self.CONNECT_NODE_2] == node1))):
                    delay = cc.get(self.DELAY, (cc.get(self.DISTANCE, 1000) / 0.0002))
                    cc_delay.append(delay)
            if (len(cc_delay) == 0):
                assert 0, q_connect
            cc_delay = (mean(cc_delay) // 2)
            if (channel_type == self.MEET_IN_THE_MID):
                bsm_name = 'BSM.{}.{}.auto'.format(node1, node2)
                bsm_seed = q_connect.get(Topo.SEED, 0)
                bsm_template_name = q_connect.get(Topo.TEMPLATE, None)
                bsm_info = {self.NAME: bsm_name, self.TYPE: self.BSM_NODE, self.SEED: bsm_seed, self.TEMPLATE: bsm_template_name}
                config[self.ALL_NODE].append(bsm_info)
                for src in [node1, node2]:
                    qc_name = 'QC.{}.{}'.format(src, bsm_name)
                    qc_info = {self.NAME: qc_name, self.SRC: src, self.DST: bsm_name, self.DISTANCE: distance, self.ATTENUATION: attenuation}
                    if (self.ALL_Q_CHANNEL not in config):
                        config[self.ALL_Q_CHANNEL] = []
                    config[self.ALL_Q_CHANNEL].append(qc_info)
                    cc_name = 'CC.{}.{}'.format(src, bsm_name)
                    cc_info = {self.NAME: cc_name, self.SRC: src, self.DST: bsm_name, self.DISTANCE: distance, self.DELAY: cc_delay}
                    if (self.ALL_C_CHANNEL not in config):
                        config[self.ALL_C_CHANNEL] = []
                    config[self.ALL_C_CHANNEL].append(cc_info)
                    cc_name = 'CC.{}.{}'.format(bsm_name, src)
                    cc_info = {self.NAME: cc_name, self.SRC: bsm_name, self.DST: src, self.DISTANCE: distance, self.DELAY: cc_delay}
                    config[self.ALL_C_CHANNEL].append(cc_info)
            else:
                raise NotImplementedError('Unknown type of quantum connection')
    def _generate_forwarding_table(self, config):
        """Install static next-hop forwarding rules on every router via Dijkstra.

        Router-to-router edge weights are the summed distances of the two
        quantum-channel legs through their shared BSM node.
        """
        graph = Graph()
        for node in config[Topo.ALL_NODE]:
            if (node[Topo.TYPE] == self.QUANTUM_ROUTER):
                graph.add_node(node[Topo.NAME])
        # costs[bsm] accumulates [router1, router2, total_distance] for the
        # two channels terminating at that BSM node.
        costs = {}
        if config[self.IS_PARALLEL]:
            for qc in config[self.ALL_Q_CHANNEL]:
                (router, bsm) = (qc[self.SRC], qc[self.DST])
                if (bsm not in costs):
                    costs[bsm] = [router, qc[self.DISTANCE]]
                else:
                    costs[bsm] = ([router] + costs[bsm])
                    costs[bsm][(- 1)] += qc[self.DISTANCE]
        else:
            for qc in self.qchannels:
                (router, bsm) = (qc.sender.name, qc.receiver)
                if (bsm not in costs):
                    costs[bsm] = [router, qc.distance]
                else:
                    costs[bsm] = ([router] + costs[bsm])
                    costs[bsm][(- 1)] += qc.distance
        graph.add_weighted_edges_from(costs.values())
        for src in self.nodes[self.QUANTUM_ROUTER]:
            for dst_name in graph.nodes:
                if (src.name == dst_name):
                    continue
                try:
                    # Compute each path in a canonical direction so both
                    # endpoints agree on the same route (reversed for the
                    # lexicographically larger source).
                    if (dst_name > src.name):
                        path = dijkstra_path(graph, src.name, dst_name)
                    else:
                        path = dijkstra_path(graph, dst_name, src.name)[::(- 1)]
                    next_hop = path[1]
                    routing_protocol = src.network_manager.protocol_stack[0]
                    routing_protocol.add_forwarding_rule(dst_name, next_hop)
                except exception.NetworkXNoPath:
                    pass
def __getattr__(name):
    """Module-level attribute hook: route legacy attribute access through the deprecation shim for the private ``_codata`` module."""
    return _sub_module_deprecation(sub_package='constants', module='codata', private_modules=['_codata'], all=__all__, attribute=name)
def test_Interval():
    """Interval normalization: infinite bounds get canonical openness, degenerate/reversed intervals collapse to EmptySet, and adjacent intervals merge."""
    assert (Interval(0, oo) == Interval(0, oo, False, True))
    assert (Interval((- oo), 0) == Interval((- oo), 0, True, False))
    # Reversed or infinite-degenerate endpoints give the empty set.
    for lo, hi in [(oo, (- oo)), (oo, oo), ((- oo), (- oo)), (1, 0)]:
        assert (Interval(lo, hi) == EmptySet())
    assert isinstance(Interval(1, 1), FiniteSet)
    # A single point is empty as soon as either side is open.
    for left_open, right_open in [(False, True), (True, False), (True, True)]:
        assert (Interval(1, 1, left_open, right_open) == EmptySet())
    assert (Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3))
class MLP(torch.nn.Module):
    """MLP regressor over 2048-dim inputs: repeated Linear/BN/ReLU/Dropout stages and a scalar head.

    At inference time the output is clamped to [0, 50]; during training the
    raw prediction is returned.
    """

    def __init__(self, num_mlp_layers=5, emb_dim=300, drop_ratio=0):
        super(MLP, self).__init__()
        self.num_mlp_layers = num_mlp_layers
        self.emb_dim = emb_dim
        self.drop_ratio = drop_ratio

        def _stage(in_features):
            # One hidden stage: Linear -> BatchNorm -> ReLU -> Dropout.
            return [torch.nn.Linear(in_features, self.emb_dim),
                    torch.nn.BatchNorm1d(self.emb_dim),
                    torch.nn.ReLU(),
                    torch.nn.Dropout(p=self.drop_ratio)]

        layers = _stage(2048)
        for _ in range(self.num_mlp_layers - 1):
            layers.extend(_stage(self.emb_dim))
        layers.append(torch.nn.Linear(self.emb_dim, 1))
        self.mlp = torch.nn.Sequential(*layers)

    def forward(self, x):
        prediction = self.mlp(x)
        if self.training:
            return prediction
        # Clip eval-time predictions to the plausible target range.
        return torch.clamp(prediction, min=0, max=50)
def printHistogram(items, title='Items'):
    """Print the 20 slowest entries of ``items`` and a textual histogram of all values.

    Args:
        items: list of (name, value) pairs; SORTED IN PLACE by value, ascending.
        title: label used in the section headers.

    Side effects: mutates ``items`` (sort) and writes to stdout.  Raises
    ValueError (via ``max``/``math.log``) when ``items`` is empty or the
    maximum value is not positive.
    """
    items.sort(key=(lambda item: item[1]))
    maxValue = max([v for (_, v) in items])
    # Shrink the bucket width barH = inc * 10**power (inc cycling through
    # 5, 2, 2.5, 1) until the histogram needs more than 10 buckets.
    power = int(math.ceil(math.log(maxValue, 10)))
    for inc in itertools.cycle((5, 2, 2.5, 1)):
        barH = (inc * (10 ** power))
        N = int(math.ceil((maxValue / barH)))
        if (N > 10):
            break
        elif (inc == 1):
            power -= 1
    # Assign each item to its bucket (sets, so duplicate names collapse).
    histo = [set() for _ in range(N)]
    for (name, v) in items:
        bucket = min(int(((N * v) / maxValue)), (N - 1))
        histo[bucket].add(name)
    barW = 40
    hr = ('-' * (barW + 34))
    print(('\nSlowest %s:' % title))
    print(hr)
    for (name, value) in items[(- 20):]:
        print(('%.2fs: %s' % (value, name)))
    print(('\n%s Times:' % title))
    print(hr)
    # Column widths: pDigits integer digits, pfDigits fractional digits
    # (keeping roughly 3 significant places), cDigits for the counts.
    pDigits = int(math.ceil(math.log(maxValue, 10)))
    pfDigits = max(0, (3 - pDigits))
    if pfDigits:
        pDigits += (pfDigits + 1)
    cDigits = int(math.ceil(math.log(len(items), 10)))
    print(('[%s] :: [%s] :: [%s]' % ('Range'.center((((pDigits + 1) * 2) + 3)), 'Percentage'.center(barW), 'Count'.center(((cDigits * 2) + 1)))))
    print(hr)
    for (i, row) in enumerate(histo):
        pct = (float(len(row)) / len(items))
        w = int((barW * pct))
        print(('[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]' % (pDigits, pfDigits, (i * barH), pDigits, pfDigits, ((i + 1) * barH), ('*' * w), (' ' * (barW - w)), cDigits, len(row), cDigits, len(items))))
# NOTE(review): the line below looks like a mangled route decorator (probably
# "@app.route('/data/<id>', methods=['GET'])") — confirm against the original.
('/data/<id>', methods=['GET'])
def get_item(id):
    """Fetch a single item by primary key from the DynamoDB-style ``table``.

    Returns the item as JSON, or a ('Item not found', 404) response when the
    key does not exist.
    """
    response = table.get_item(Key={'id': id})
    if ('Item' in response):
        return jsonify(response['Item'])
    else:
        return ('Item not found', 404)
def dump_candidates_file_for_split(split):
    """Generate the candidate file for one dataset split, bracketed by cache/ontology setup and teardown.

    Raises RuntimeError for an unknown split name (before any file is generated).
    """
    CacheBackend.init_cache_backend('webqsp')
    OntologyInfo.init_ontology_info()
    # Per-split arguments for generate_candidate_file.
    split_kwargs = {
        'test': {},
        'pdev': {'use_gt_entities': False, 'lnk_split': 'train'},
        'ptrain': {'use_gt_entities': True, 'lnk_split': 'train'},
        'train': {'use_gt_entities': True},
    }
    if split not in split_kwargs:
        raise RuntimeError('invalid split')
    generate_candidate_file(split, **split_kwargs[split])
    CacheBackend.exit_cache_backend()
def DFG_go(root_node, index_to_code, states):
    """Extract a data-flow graph from a tree-sitter parse of Go source.

    Recursively walks ``root_node``; ``index_to_code`` maps (start, end)
    points to (token index, token text); ``states`` maps a variable name to
    the token indices of its most recent definition(s).  Returns (edges,
    new states) where each edge is
    (token, index, 'comesFrom'|'computedFrom', source tokens, source indices).
    """
    # Go node types handled by each specialized branch below.
    assignment = ['assignment_statement']
    def_statement = ['var_spec']
    increment_statement = ['inc_statement']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    enhanced_for_statement = []
    while_statement = []
    do_first_statement = []
    # Copy so the caller's state dict is never mutated.
    states = states.copy()
    if (((len(root_node.children) == 0) or (root_node.type == 'string')) and (root_node.type != 'comment')):
        # Leaf token: link it to its current definition sites, if any.
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            # Keywords/punctuation (type equals text) carry no data flow.
            return ([], states)
        elif (code in states):
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # var declaration: name(s) come from the initializer value, if present.
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_go(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: left side is computed from every token on the right.
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_go(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in increment_statement):
        # x++ / x--: the variable is computed from itself.
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # if/else: merge the states of all branches (a variable may come
        # from any branch, or from before the if when there is no else).
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                # Condition / first body: evaluated on the current states.
                (temp, current_states) = DFG_go(child, index_to_code, current_states)
                DFG += temp
            else:
                flag = True
                (temp, new_states) = DFG_go(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else branch: the pre-if states are also a possible outcome.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in states:
            if (key not in new_states):
                new_states[key] = states[key]
            else:
                new_states[key] += states[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # for loop: process the body twice so loop-carried flow (body ->
        # update -> body) is captured, then de-duplicate edges.
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_go(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'for_clause'):
                if (child.child_by_field_name('update') is not None):
                    (temp, states) = DFG_go(child.child_by_field_name('update'), index_to_code, states)
                    DFG += temp
                flag = True
        # Merge duplicate (token, idx, relation) edges, unioning sources.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: recurse into children, honoring do_first_statement
        # ordering (empty for Go, kept for parity with other languages).
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled Trax Reformer checkpoint to a PyTorch state dict on disk.

    Builds a ReformerModelWithLMHead from ``config_file``, copies the pickled
    Trax weights into it, and saves the resulting state dict.
    """
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)
    # SECURITY: pickle.load executes arbitrary code from the checkpoint file —
    # only run this on checkpoints from a trusted source.
    with open(trax_model_pkl_path, 'rb') as f:
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
# NOTE(review): '_seed', '.slow' and '.parametrize' below look like mangled
# pytest decorators ('@random_seed', '@pytest.mark.slow',
# '@pytest.mark.parametrize') — confirm against the original test module.
_seed
.slow
.parametrize('num_steps, acquisition_rule', [pytest.param(25, DiscreteThompsonSampling(1000, 8), id='DiscreteThompsonSampling'), pytest.param(25, EfficientGlobalOptimization(ParallelContinuousThompsonSampling(), num_query_points=4), id='ParallelContinuousThompsonSampling'), pytest.param(12, EfficientGlobalOptimization(GreedyContinuousThompsonSampling(), num_query_points=4), id='GreedyContinuousThompsonSampling', marks=pytest.mark.skip(reason='too fragile'))])
def test_bayesian_optimizer_with_dgp_finds_minima_of_scaled_branin(num_steps: int, acquisition_rule: AcquisitionRule[(TensorType, SearchSpace, DeepGaussianProcess)]) -> None:
    """End-to-end check that each Thompson-sampling acquisition rule drives a deep-GP optimizer to the scaled Branin minimum."""
    _test_optimizer_finds_minimum(DeepGaussianProcess, num_steps, acquisition_rule, optimize_branin=True)
def read_train_test_directory_to_image(directory, image_shape=(128, 64)):
    """Load every image in a train/test directory, resized to ``image_shape`` (height, width).

    Returns (images uint8 array of shape (N, H, W, 3), ids, camera indices,
    tracklet indices) with the index arrays as int64, in the order produced
    by ``read_train_test_directory_to_str``.
    """
    # cv2.resize takes (width, height), hence the reversed shape; resizing is
    # skipped entirely when images already match the dataset's native shape.
    reshape_fn = ((lambda x: x) if (image_shape == IMAGE_SHAPE[:2]) else (lambda x: cv2.resize(x, image_shape[::(- 1)])))
    (filenames, ids, camera_indices, tracklet_indices) = read_train_test_directory_to_str(directory)
    images = np.zeros((((len(filenames),) + image_shape) + (3,)), np.uint8)
    for (i, filename) in enumerate(filenames):
        if ((i % 1000) == 0):
            print(('Reading %s, %d / %d' % (directory, i, len(filenames))))
        image = cv2.imread(filename, cv2.IMREAD_COLOR)
        images[i] = reshape_fn(image)
    ids = np.asarray(ids, dtype=np.int64)
    camera_indices = np.asarray(camera_indices, dtype=np.int64)
    tracklet_indices = np.asarray(tracklet_indices, dtype=np.int64)
    return (images, ids, camera_indices, tracklet_indices)
class MyDataset(Dataset):
    """Dataset wrapping parallel data/label sequences as tensor pairs.

    Each item is (float32 feature tensor, int64 label tensor).
    """

    def __init__(self, data, label):
        self.data = data
        self.label = label

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample = torch.tensor(self.data[index], dtype=torch.float)
        target = torch.tensor(self.label[index], dtype=torch.long)
        return (sample, target)
# NOTE(review): the line below looks like a registry decorator garbled by
# extraction — presumably '@HEADS.register_module()'; confirm upstream.
_module()
class TPNHead(TSNHead):
    """TSN-style classification head with an extra fully-convolutional test path.

    Inherits in_channels, num_classes, spatial_type, consensus, dropout and
    fc_cls from TSNHead; see that class for their semantics.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # 3D global average pool; only created when spatial_type is 'avg'.
        if (self.spatial_type == 'avg'):
            self.avg_pool3d = nn.AdaptiveAvgPool3d((1, 1, 1))
        else:
            self.avg_pool3d = None
        # Built lazily in forward() once the input's spatial size is known.
        self.avg_pool2d = None
        # Lazily built 1x1x1 conv mirroring fc_cls, for fully-convolutional test.
        self.new_cls = None

    def _init_new_cls(self):
        # Convert the linear classifier fc_cls into an equivalent 1x1x1 Conv3d
        # by copying its weight/bias with added singleton spatial dims.
        self.new_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, 1, 0)
        if next(self.fc_cls.parameters()).is_cuda:
            self.new_cls = self.new_cls.cuda()
        # NOTE(review): copy_ is not wrapped in torch.no_grad(); presumably
        # this is only reached at test time — confirm.
        self.new_cls.weight.copy_(self.fc_cls.weight[(..., None, None, None)])
        self.new_cls.bias.copy_(self.fc_cls.bias)

    def forward(self, x, num_segs=None, fcn_test=False):
        """Compute class scores for feature map x.

        Args:
            x: 5-D feature map (assumed N x C x T x H x W — TODO confirm).
            num_segs: segments per clip; when given, x is reshaped so segment
                consensus can be applied.
            fcn_test: when True, return the dense class-score feature map
                produced by the conv classifier instead of pooled scores.
        """
        if fcn_test:
            if self.avg_pool3d:
                x = self.avg_pool3d(x)
            if (self.new_cls is None):
                self._init_new_cls()
            cls_score_feat_map = self.new_cls(x)
            return cls_score_feat_map
        if (self.avg_pool2d is None):
            # Pool over all spatial positions but keep the temporal dimension.
            kernel_size = (1, x.shape[(- 2)], x.shape[(- 1)])
            self.avg_pool2d = nn.AvgPool3d(kernel_size, stride=1, padding=0)
        if (num_segs is None):
            # NOTE(review): avg_pool3d is None when spatial_type != 'avg', so
            # this path would raise then; presumably num_segs is always
            # provided in that configuration — confirm.
            x = self.avg_pool3d(x)
        else:
            x = self.avg_pool2d(x)
            x = x.reshape((((- 1), num_segs) + x.shape[1:]))
            x = self.consensus(x)
            x = x.squeeze(1)
        if (self.dropout is not None):
            x = self.dropout(x)
        x = x.view(x.size(0), (- 1))
        cls_score = self.fc_cls(x)
        return cls_score
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear((512 * block.expansion), num_classes)
self.linear1 = nn.Linear((512 * block.expansion), num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = ([stride] + ([1] * (num_blocks - 1)))
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = (planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out1 = self.layer1(out)
out2 = self.layer2(out1)
out3 = self.layer3(out2)
out4 = self.layer4(out3)
out5 = F.avg_pool2d(out4, 4)
out5 = out5.view(out5.size(0), (- 1))
out = self.linear(out5)
out_cons = self.linear1(out5)
return (out, out_cons, out5, [out1, out2, out3, out4]) |
def calc_same_padding(kernel_size):
    """Return (left, right) padding amounts for 'same'-style padding.

    Odd kernels pad symmetrically with kernel_size // 2 on each side; even
    kernels pad one less on the right.
    """
    left = kernel_size // 2
    right = left - (kernel_size + 1) % 2
    return (left, right)
def load_original_image(path_images, filename):
    """Load the .jpg counterpart of *filename* from *path_images*.

    The last four characters of *filename* (assumed to be its extension —
    TODO confirm callers always pass a 3-letter extension) are replaced with
    '.jpg' before opening.
    """
    stem = str(filename)[:-4]
    return open_image(path_images / Path(stem + '.jpg'))
class TFSeq2SeqSequenceClassifierOutput(ModelOutput):
    """Output container for TensorFlow seq2seq sequence-classification models.

    NOTE(review): field order matters for ModelOutput's tuple conversion;
    do not reorder these attributes.
    """

    # Classification (or regression) loss; presumably only set when labels
    # are provided — confirm against the model's call().
    loss: Optional[tf.Tensor] = None
    # Raw classification scores (before softmax).
    logits: tf.Tensor = None
    # Cached decoder key/value states for fast autoregressive decoding.
    past_key_values: Optional[List[tf.Tensor]] = None
    # Hidden states of the decoder, one per layer (plus embeddings), if requested.
    decoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Attention weights of the decoder layers, if requested.
    decoder_attentions: Optional[Tuple[tf.Tensor]] = None
    # Final hidden state of the encoder.
    encoder_last_hidden_state: Optional[tf.Tensor] = None
    # Hidden states of the encoder, one per layer (plus embeddings), if requested.
    encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Attention weights of the encoder layers, if requested.
    encoder_attentions: Optional[Tuple[tf.Tensor]] = None
# NOTE(review): the two lines below look like decorators garbled by
# extraction — presumably '@pytest.mark.skipif(...)' markers; confirm upstream.
.skipif(longdouble_longer_than_double, reason='BUG #2376')
.skipif(string_to_longdouble_inaccurate, reason='Need strtold_l')
def test_format():
    # 1 + eps printed with 40 significant digits must differ from '1':
    # the extra long-double precision has to survive formatting.
    o = (1 + LD_INFO.eps)
    assert_(('{0:.40g}'.format(o) != '1'))
def build_variated_query(string, ranges_and_utterances):
    """Splice replacement utterances into *string* at the given ranges.

    Args:
        string: the original query text.
        ranges_and_utterances: iterable of (range, utterance) pairs, where
            range[START]/range[END] delimit the span to replace. Assumed to be
            sorted and non-overlapping — TODO confirm at the call sites.

    Returns:
        The query with each range replaced by its utterance.
    """
    pieces = []
    cursor = 0
    for (rng, utterance) in ranges_and_utterances:
        pieces.append(string[cursor:rng[START]])
        pieces.append(utterance)
        cursor = rng[END]
    pieces.append(string[cursor:])
    return ''.join(pieces)
# NOTE(review): the line below looks like a decorator garbled by extraction —
# presumably '@pytest.mark.parametrize(...)'; confirm upstream.
.parametrize('value, expected', (('On', True), ('F', False), ('/tmp/cert.pem', '/tmp/cert.pem')))
def test_convert_request_tls_verify(value, expected):
    # Boolean-ish strings convert to bool; anything else (e.g. a certificate
    # path) must pass through unchanged.
    assert (callbacks.convert_boolean_string(None, None, value) == expected)
class SSLViT(nn.Module):
    """Self-supervised ViT backbone (MoCo-v3 or MAE) with an MLP head.

    cfg.MODEL.TRANSFER_TYPE selects which backbone parameters remain
    trainable ('prompt', 'adapter', 'linear', 'partial-k', 'side',
    'tinytl-bias', 'end2end', ...); everything else is frozen.
    """

    def __init__(self, cfg):
        super(SSLViT, self).__init__()
        # Prompt configuration is only meaningful for prompt-based transfer.
        if ('prompt' in cfg.MODEL.TRANSFER_TYPE):
            prompt_cfg = cfg.MODEL.PROMPT
        else:
            prompt_cfg = None
        # Keep the encoder in eval mode during training for every transfer
        # type except end-to-end and prompt tuning (see forward()).
        if ((cfg.MODEL.TRANSFER_TYPE != 'end2end') and ('prompt' not in cfg.MODEL.TRANSFER_TYPE)):
            self.froze_enc = True
        else:
            self.froze_enc = False
        if (cfg.MODEL.TRANSFER_TYPE == 'adapter'):
            adapter_cfg = cfg.MODEL.ADAPTER
        else:
            adapter_cfg = None
        self.build_backbone(prompt_cfg, cfg, adapter_cfg)
        self.cfg = cfg
        self.setup_side()
        self.setup_head(cfg)

    def setup_side(self):
        """Create the AlexNet side network for 'side' transfer, else None."""
        if (self.cfg.MODEL.TRANSFER_TYPE != 'side'):
            self.side = None
        else:
            # Learnable blending weight between backbone and side features.
            self.side_alpha = nn.Parameter(torch.tensor(0.0))
            m = models.alexnet(pretrained=True)
            self.side = nn.Sequential(OrderedDict([('features', m.features), ('avgpool', m.avgpool)]))
            # AlexNet avgpool output flattens to 256*6*6 = 9216 features.
            self.side_projection = nn.Linear(9216, self.feat_dim, bias=False)

    def setup_head(self, cfg):
        """Build the MLP classification head on top of the backbone features."""
        self.head = MLP(input_dim=self.feat_dim, mlp_dims=(([self.feat_dim] * self.cfg.MODEL.MLP_NUM) + [cfg.DATA.NUMBER_CLASSES]), special_bias=True)

    def build_backbone(self, prompt_cfg, cfg, adapter_cfg):
        """Instantiate the encoder and freeze parameters per the transfer type.

        Raises:
            ValueError: for an unsupported feature or transfer type.
        """
        if ('moco' in cfg.DATA.FEATURE):
            build_fn = build_mocov3_model
        elif ('mae' in cfg.DATA.FEATURE):
            build_fn = build_mae_model
        else:
            # Previously fell through and crashed with a NameError on
            # build_fn; fail with a clear message instead.
            raise ValueError('feature {} is not supported'.format(cfg.DATA.FEATURE))
        (self.enc, self.feat_dim) = build_fn(cfg.DATA.FEATURE, cfg.DATA.CROPSIZE, prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg=adapter_cfg)
        transfer_type = cfg.MODEL.TRANSFER_TYPE
        if (transfer_type == 'partial-1'):
            # Only the last transformer block (+ final norm) stays trainable.
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        elif (transfer_type == 'partial-2'):
            # Last two blocks stay trainable.
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('blocks.{}'.format((total_layer - 2)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        elif (transfer_type == 'partial-4'):
            # Last four blocks stay trainable.
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('blocks.{}'.format((total_layer - 2)) not in k) and ('blocks.{}'.format((total_layer - 3)) not in k) and ('blocks.{}'.format((total_layer - 4)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        elif ((transfer_type == 'linear') or (transfer_type == 'sidetune')):
            # Fully frozen backbone; only the head (and side network) learn.
            for (k, p) in self.enc.named_parameters():
                p.requires_grad = False
        elif (transfer_type == 'tinytl-bias'):
            # TinyTL: train bias terms only.
            for (k, p) in self.enc.named_parameters():
                if ('bias' not in k):
                    p.requires_grad = False
        elif (transfer_type == 'prompt+bias'):
            for (k, p) in self.enc.named_parameters():
                if (('prompt' not in k) and ('bias' not in k)):
                    p.requires_grad = False
        elif ((transfer_type == 'prompt') and (prompt_cfg.LOCATION == 'below')):
            # Prompts injected below the patch embedding also need the
            # patch-projection weights trainable.
            for (k, p) in self.enc.named_parameters():
                if (('prompt' not in k) and ('patch_embed.proj.weight' not in k) and ('patch_embed.proj.bias' not in k)):
                    p.requires_grad = False
        elif (transfer_type == 'prompt'):
            for (k, p) in self.enc.named_parameters():
                if ('prompt' not in k):
                    p.requires_grad = False
        elif (transfer_type == 'end2end'):
            logger.info('Enable all parameters update during training')
        elif (transfer_type == 'adapter'):
            for (k, p) in self.enc.named_parameters():
                if ('adapter' not in k):
                    p.requires_grad = False
        else:
            raise ValueError('transfer type {} is not supported'.format(transfer_type))
        # Gate and temperature parameters are always trainable, regardless of
        # the transfer type chosen above.
        for (k, p) in self.enc.named_parameters():
            if ('gate' in k):
                p.requires_grad = True
            if ('temp' in k):
                p.requires_grad = True

    def forward(self, x, return_feature=False):
        """Return class logits for x, or (features, features) if requested."""
        if (self.side is not None):
            side_output = self.side(x)
            side_output = side_output.view(side_output.size(0), (- 1))
            side_output = self.side_projection(side_output)
        # Keep a frozen encoder in eval mode (disables dropout/BN updates)
        # even while the rest of the model trains.
        if (self.froze_enc and self.enc.training):
            self.enc.eval()
        x = self.enc(x)
        if (self.side is not None):
            # Sigmoid-gated blend of backbone and side-network features.
            alpha_squashed = torch.sigmoid(self.side_alpha)
            x = ((alpha_squashed * x) + ((1 - alpha_squashed) * side_output))
        if return_feature:
            return (x, x)
        x = self.head(x)
        return x

    def forward_cls_layerwise(self, x):
        """Return per-layer CLS embeddings from the encoder."""
        cls_embeds = self.enc.forward_cls_layerwise(x)
        return cls_embeds

    def get_features(self, x):
        """Return backbone features without applying the head."""
        x = self.enc(x)
        return x
def test_sac():
    """Smoke test: build a SAC agent on InvertedPendulum, run a single
    training step, and confirm one eval episode yields exactly one reward."""
    sac_args = Namespace(env='InvertedPendulum-v2', tb='', parent_folder='/tmp/mrl', layers=(32, 1), num_envs=1, num_eval_envs=1, device='cpu')
    agent = mrl.config_to_agent(make_sac_agent(args=sac_args))
    agent.train(num_steps=1)
    eval_result = agent.eval(num_episodes=1)
    assert len(eval_result.rewards) == 1
def register_conv_template(template: Conversation, override: bool=False):
    """Register *template* in the global conv_templates table.

    Unless *override* is set, re-registering an existing name trips an
    assertion (kept as an assert to preserve the existing error contract).
    """
    if not override:
        assert template.name not in conv_templates, f'{template.name} has been registered.'
    conv_templates[template.name] = template
# NOTE(review): the line below looks like a decorator garbled by extraction —
# presumably '@jitclass(gxpacket_spec)' (numba) or similar; confirm upstream.
(gxpacket_spec)
class GXPacket(object):
    """Gamma-ray packet state: position, direction, energy and frequency in
    rest and co-moving frames, status, shell index, and current time."""

    def __init__(self, location, direction, energy_rf, energy_cmf, nu_rf, nu_cmf, status, shell, time_current):
        self.location = location
        self.direction = direction
        self.energy_rf = energy_rf
        self.energy_cmf = energy_cmf
        self.nu_rf = nu_rf
        self.nu_cmf = nu_cmf
        self.status = status
        self.shell = shell
        self.time_current = time_current
        # Sample the optical depth to the next interaction from an exponential
        # distribution (uses the global, unseeded NumPy RNG).
        self.tau = (- np.log(np.random.random()))

    def get_location_r(self):
        # Euclidean distance of the packet from the origin.
        return np.sqrt((((self.location[0] ** 2.0) + (self.location[1] ** 2.0)) + (self.location[2] ** 2.0)))
def update_config_with_experiment_setting(config: 'OmegaConf', **kwargs) -> 'OmegaConf':
    """Overlay experiment settings from *kwargs* onto *config* in place.

    Recognized keys: k_shots, runs, coreset_ratio, number_transforms,
    dataset ('visa' or 'mvtec'), augment, batch_size, image_size.
    Unrecognized keys are ignored.

    Returns:
        The mutated config (same object).

    Raises:
        ValueError: if 'dataset' names an unsupported dataset.
    """
    if 'k_shots' in kwargs:
        config.project.k_shots = kwargs['k_shots']
    if 'runs' in kwargs:
        config.project.runs = kwargs['runs']
    if ('coreset_ratio' in kwargs) and (kwargs['coreset_ratio'] is not None):
        config.model.coreset.apply = True
        config.model.coreset.coreset_sampling_ratio = kwargs['coreset_ratio']
    if 'number_transforms' in kwargs:
        config.project.number_transforms = kwargs['number_transforms']
    if 'dataset' in kwargs:
        config.dataset.name = kwargs['dataset']
        if config.dataset.name == 'visa':
            config.dataset.path = './datasets/VisA/'
        elif config.dataset.name == 'mvtec':
            config.dataset.path = './datasets/mvtec'
        else:
            # The f-prefix was missing here, so the error message printed the
            # literal placeholder instead of the dataset name.
            raise ValueError(f'Dataset {config.dataset.name} not supported')
    if 'augment' in kwargs:
        config.project.augment = kwargs['augment']
    if 'batch_size' in kwargs:
        config.project.batch_size = kwargs['batch_size']
    if ('image_size' in kwargs) and (kwargs['image_size'] is not None):
        config.dataset.image_size = kwargs['image_size']
    return config
def get_nonspade_norm_layer(opt, norm_type='instance'):
    """Return a closure that wraps a layer with the requested normalization.

    norm_type may be prefixed with 'spectral' (applies spectral_norm first);
    the remainder selects 'batch', 'sync_batch', 'instance',
    'instanceaffine', or 'none'/'' (return the layer unchanged).
    """

    def _out_channels(layer):
        # Conv layers expose out_channels; otherwise fall back to the first
        # dimension of the weight (e.g. nn.Linear).
        if hasattr(layer, 'out_channels'):
            return layer.out_channels
        return layer.weight.size(0)

    def add_norm_layer(layer):
        nonlocal norm_type
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]
        if subnorm_type == 'none' or not subnorm_type:
            return layer
        if getattr(layer, 'bias', None) is not None:
            # The norm layer supplies its own shift, so the wrapped layer's
            # bias is redundant; remove it and re-register as None.
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)
        channels = _out_channels(layer)
        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(channels, affine=True)
        elif subnorm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(channels, affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(channels, affine=False)
        elif subnorm_type == 'instanceaffine':
            norm_layer = nn.InstanceNorm2d(channels, affine=True)
        else:
            raise ValueError(('normalization layer %s is not recognized' % subnorm_type))
        return nn.Sequential(layer, norm_layer)

    return add_norm_layer
def save(w, file_name=None, file_write_mode='w', L2norm_fractional_tolerance=1e-10, log_frame=None, shuffle_widths=default_shuffle_widths):
    """Thin wrapper around rpdmb.save that pins diff=xor and formats=sxs_formats.

    All other arguments are forwarded unchanged; see rpdmb.save for their
    meaning. NOTE(review): shuffle_widths defaults to the shared module-level
    default_shuffle_widths object — presumably never mutated; confirm.
    """
    return rpdmb.save(w, file_name=file_name, file_write_mode=file_write_mode, L2norm_fractional_tolerance=L2norm_fractional_tolerance, log_frame=log_frame, shuffle_widths=shuffle_widths, diff=xor, formats=sxs_formats)
def test_combine_mul_float_tensors():
    """Elementwise multiplication via '*' and both rf.combine spellings
    ('*' and 'mul') must agree and preserve the float32 dtype."""
    feature_dim = Dim(3)
    lhs = Tensor(name='a', raw_tensor=torch.tensor([2.0, 2.0, 2.0]), dims=[feature_dim], dtype='float32')
    rhs = Tensor(name='b', raw_tensor=torch.tensor([1.0, 2.0, 3.0]), dims=[feature_dim], dtype='float32')
    expected = [2.0, 4.0, 6.0]
    for product in ((lhs * rhs), rf.combine(lhs, '*', rhs), rf.combine(lhs, 'mul', rhs)):
        assert product.raw_tensor.tolist() == pytest.approx(expected)
        assert product.dtype == 'float32'
def get_mask_fast(inp: str, bad_words=neg_complex_tokens, min_bad_score=0, aggressive=True):
    """Tokenize *inp* and mark tokens that belong to "bad" words.

    Returns (token_ids, mask) tensors of identical shape, with mask==1 where
    a token should be rewritten. Phase 1 matches known multi-token phrases
    from *bad_words* (a dict: first-token id -> list of token-id sequences);
    phase 2, run when nothing matched or *aggressive* is set, scores whole
    words via word2coef and masks the highest-scoring ones.
    NOTE: bad_words defaults to the shared module-level neg_complex_tokens
    mapping — callers must not mutate it.
    """
    sentences = [tokenizer.encode(inp, add_special_tokens=True)]
    sentences_torch = torch.tensor(sentences)
    masks = torch.zeros_like(sentences_torch)
    for (sent_id, sent) in enumerate(sentences):
        # Phase 1: exact multi-token phrase matching.
        for (first_tok_id, tok) in enumerate(sent):
            for hypothesis in bad_words.get(tok, []):
                if (sent[first_tok_id:(first_tok_id + len(hypothesis))] == hypothesis):
                    for step in range(len(hypothesis)):
                        masks[(sent_id, (first_tok_id + step))] = 1
        # Phase 2: word-level scoring; mask words scoring at least half the
        # sentence maximum (and above min_bad_score).
        if ((sum(masks[sent_id].numpy()) == 0) or aggressive):
            scored_words = []
            for (indices, word) in toks_to_words(sent):
                score = word2coef.get(word)
                # NOTE(review): a score of exactly 0.0 is treated as missing
                # here ('if score' is falsy) — confirm that is intended.
                if score:
                    scored_words.append([indices, word, score])
            if scored_words:
                max_score = max((s[2] for s in scored_words))
                if (max_score > min_bad_score):
                    for (indices, word, score) in scored_words:
                        if (score >= max(min_bad_score, (max_score * 0.5))):
                            masks[(sent_id, indices)] = 1
    return (sentences_torch, masks)
# NOTE(review): the line below looks like a decorator garbled by extraction —
# presumably '@test_utils.test()' (Taichi test helper); confirm upstream.
_utils.test()
def test_redefining_template_args():
    # Rebinding a ti.template() argument inside the kernel must be rejected.
    # NOTE(review): foo has presumably lost an '@ti.kernel' decorator to the
    # same garbling — confirm against upstream.
    def foo(a: ti.template()):
        a = 5
    with pytest.raises(ti.TaichiSyntaxError, match='Kernel argument "a" is immutable in the kernel'):
        foo(1)
class NoMask(Layer):
    """Identity layer that also strips any incoming Keras mask."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def build(self, input_shape):
        super().build(input_shape)

    def call(self, x, mask=None, **kwargs):
        # Pass the inputs through untouched; the incoming mask is ignored.
        return x

    def compute_mask(self, inputs, mask):
        # Drop the mask so downstream layers do not receive one.
        return None
def fht(a, dln, mu, offset=0.0, bias=0.0):
    """Fast Hankel transform of *a* along its last axis (FFTLog-style).

    Args:
        a: input array sampled on a logarithmic grid with spacing dln.
        dln: log-spacing of the grid.
        mu: order of the Hankel transform.
        offset: log-offset between input and output grids.
        bias: exponent of the power-law bias applied around the grid centre.
    """
    xp = array_namespace(a)
    n = a.shape[(- 1)]
    if (bias != 0):
        # Multiply the input by a power-law bias centred on the middle sample.
        j_c = ((n - 1) / 2)
        j = xp.arange(n, dtype=xp.float64)
        a = (a * xp.exp((((- bias) * (j - j_c)) * dln)))
    # Transform coefficients for this grid/order, then the core transform step.
    u = xp.asarray(fhtcoeff(n, dln, mu, offset=offset, bias=bias))
    A = _fhtq(a, u, xp=xp)
    if (bias != 0):
        # Undo the bias on the output grid (shifted by offset); reuses j/j_c
        # computed above, so both branches are taken together.
        A *= xp.exp(((- bias) * (((j - j_c) * dln) + offset)))
    return A
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.