code stringlengths 101 5.91M |
|---|
def register_Ns3RrcConnectionReconfigurationCompleteHeader_methods(root_module, cls):
    """Register the ns-3 RrcConnectionReconfigurationCompleteHeader C++ API on `cls`.

    PyBindGen-style generated binding registration: each call declares one
    constructor or method of the wrapped C++ class.
    """
    # copy constructor
    cls.add_constructor([param('ns3::RrcConnectionReconfigurationCompleteHeader const &', 'arg0')])
    # default constructor
    cls.add_constructor([])
    # virtual deserialization from a buffer iterator; returns bytes consumed
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_virtual=True)
    cls.add_method('GetMessage', 'ns3::LteRrcSap::RrcConnectionReconfigurationCompleted', [], is_const=True)
    cls.add_method('GetRrcTransactionIdentifier', 'uint8_t', [], is_const=True)
    cls.add_method('PreSerialize', 'void', [], is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('SetMessage', 'void', [param('ns3::LteRrcSap::RrcConnectionReconfigurationCompleted', 'msg')])
    return
class Datagen_tree():
    """Batch generator pairing tree-structured code samples with token targets.

    X holds paths to pickled trees and Y the parallel target sequences.
    Batches are produced lazily with one-batch background prefetching.
    NOTE(review): relies on external helpers (read_pickle, tree2binary,
    consult_tree, traverse_label, tree2tensor, GeneratorLen,
    BackgroundGenerator) defined elsewhere in the project — verify their
    contracts there.
    """
    def __init__(self, X, Y, batch_size, code_dic, nl_dic, train=True, binary=False):
        self.X = X
        self.Y = Y
        self.batch_size = batch_size
        self.code_dic = code_dic
        self.nl_dic = nl_dic
        self.train = train
        self.binary = binary
    def __len__(self):
        # Number of batches per epoch.
        return len(range(0, len(self.X), self.batch_size))
    def __call__(self, epoch=0):
        # Prefetch one batch in a background thread while exposing len().
        return GeneratorLen(BackgroundGenerator(self.gen(epoch), 1), len(self))
    def gen(self, epoch):
        """Yield (tree_tensor, padded_targets, raw_tree_labels, raw_targets) batches."""
        if self.train:
            # Epoch-seeded shuffle: a different but reproducible order per epoch.
            np.random.seed(epoch)
            newindex = list(np.random.permutation(len(self.X)))
            X = [self.X[i] for i in newindex]
            Y = [self.Y[i] for i in newindex]
        else:
            X = [x for x in self.X]
            Y = [y for y in self.Y]
        for i in range(0, len(self.X), self.batch_size):
            x = X[i:(i + self.batch_size)]
            y = Y[i:(i + self.batch_size)]
            # Load pickled trees for this batch.
            x_raw = [read_pickle(n) for n in x]
            if self.binary:
                x_raw = tree2binary(x_raw)
            # Map target tokens through the NL vocabulary — presumably token
            # lookups; verify against nl_dic's construction.
            y_raw = [[self.nl_dic[t] for t in s] for s in y]
            x = [consult_tree(n, self.code_dic) for n in x_raw]
            x_raw = [traverse_label(n) for n in x_raw]
            # Pad/truncate targets to at most 100 tokens; -1.0 marks padding.
            y = tf.keras.preprocessing.sequence.pad_sequences(y, min(max([len(s) for s in y]), 100), padding='post', truncating='post', value=(- 1.0))
            (yield (tree2tensor(x), y, x_raw, y_raw))
def accuracy_ent(network, loader, weights, device, adapt=False):
    """Evaluate a network over a loader, returning (weighted accuracy, mean entropy).

    `weights` is either None (uniform) or a flat tensor consumed batch by
    batch. Single-logit outputs are treated as binary (threshold at 0).
    NOTE(review): `adapt` defaults to False, so the `adapt is None` branch is
    never taken with the default — confirm callers pass None when they want
    the plain `network(x)` path.
    """
    total_correct = 0
    total_weight = 0
    entropy_sum = 0
    offset = 0
    network.eval()
    with torch.no_grad():
        for batch_x, batch_y in loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            # Forward pass, optionally with test-time adaptation argument.
            if adapt is None:
                logits = network(batch_x)
            else:
                logits = network(batch_x, adapt)
            # Per-example weights: uniform when none supplied.
            if weights is None:
                batch_weights = torch.ones(len(batch_x))
            else:
                batch_weights = weights[offset:offset + len(batch_x)]
                offset += len(batch_x)
            batch_weights = batch_weights.to(device)
            if logits.size(1) == 1:
                # Binary head: positive logit means class 1.
                hits = logits.gt(0).eq(batch_y).float() * batch_weights.view(-1, 1)
            else:
                hits = logits.argmax(1).eq(batch_y).float() * batch_weights
            total_correct += hits.sum().item()
            total_weight += batch_weights.sum().item()
            entropy_sum += softmax_entropy(logits).sum().item()
    network.train()
    return total_correct / total_weight, entropy_sum / total_weight
class Sampler():
def __init__(self, sp_i_train):
random.seed(42)
self._train = sp_i_train
def step(self, users: int, batch_size: int):
train = self._train
shuffled_list = random.sample(range(users), users)
for start_idx in range(0, users, batch_size):
end_idx = min((start_idx + batch_size), users)
(yield train[shuffled_list[start_idx:end_idx]].toarray()) |
class BatchGenerator():
    """Splits raw weather data into train/val/test WeatherDataset instances.

    NOTE(review): WeatherDataset and AdaptiveNormalizer are project classes
    defined elsewhere — their loading/normalization semantics are not visible
    here.
    """
    def __init__(self, weather_data, val_ratio, test_ratio, normalize_flag, params):
        self.weather_data = weather_data
        self.val_ratio = val_ratio
        self.test_ratio = test_ratio
        self.dataset_params = params
        self.normalize_flag = normalize_flag
        if self.normalize_flag:
            self.normalizer = AdaptiveNormalizer(output_dim=params['output_dim'])
        else:
            self.normalizer = None
        self.data_dict = self.__split_data(self.weather_data)
        self.dataset_dict = self.__create_sets()
    def __split_data(self, in_data):
        """Chronologically split `in_data` into train / val / train_val / test."""
        data_len = len(in_data)
        val_count = int((data_len * self.val_ratio))
        test_count = int((data_len * self.test_ratio))
        train_count = ((data_len - val_count) - test_count)
        # 'test' is None when the test ratio rounds down to zero samples.
        data_dict = {'train': in_data[:train_count], 'val': in_data[train_count:(train_count + val_count)], 'train_val': in_data[:(train_count + val_count)], 'test': (in_data[(train_count + val_count):] if (test_count > 0) else None)}
        return data_dict
    def __create_sets(self):
        """Wrap each non-empty split in a WeatherDataset (None splits stay None)."""
        hurricane_dataset = {}
        for i in ['train', 'val', 'train_val', 'test']:
            if (self.data_dict[i] is not None):
                dataset = WeatherDataset(weather_data=self.data_dict[i], normalizer=self.normalizer, **self.dataset_params)
                hurricane_dataset[i] = dataset
            else:
                hurricane_dataset[i] = None
        return hurricane_dataset
    def num_iter(self, dataset_name):
        # Number of iterations the named split provides per epoch.
        return self.dataset_dict[dataset_name].num_iter
    def generate(self, dataset_name):
        """Yield batches from the named split's dataset."""
        selected_loader = self.dataset_dict[dataset_name]
        (yield from selected_loader.next())
class FileBatchGenerator(BatchGenerator):
    """Index a CSV file into batches addressed by precomputed byte offsets.

    NOTE(review): `super().__init__(batch_size, n_jobs)` does not match the
    BatchGenerator defined earlier in this file; the real base class is most
    likely a different BatchGenerator imported from another module — verify
    against the actual imports.
    """
    def __init__(self, file, n_jobs: int=1, batch_size: Optional[int]=None, read_csv_params: dict=None):
        super().__init__(batch_size, n_jobs)
        self.file = file
        # One (offset, row-count) pair per batch, computed once up front.
        (self.offsets, self.cnts) = get_file_offsets(file, n_jobs, batch_size)
        if (read_csv_params is None):
            read_csv_params = {}
        self.read_csv_params = read_csv_params
    def __len__(self) -> int:
        # One entry per precomputed batch.
        return len(self.cnts)
    def __getitem__(self, idx):
        # Materialize batch `idx` lazily from its stored file offset.
        return FileBatch(self.file, self.offsets[idx], self.cnts[idx], self.read_csv_params)
class DNNLowPChannelShuffleOpsTest(hu.HypothesisTestCase):
    """Hypothesis tests for the quantized (DNNLOWP) ChannelShuffle operator.

    Each test runs Quantize -> ChannelShuffle -> Dequantize and compares the
    result against a pure-NumPy reference shuffle.
    BUGFIX: the `@given`/`@settings` decorator markers were missing — only
    their argument tuples remained, which is a syntax error. Restored per the
    standard hypothesis/caffe2 test structure.
    """

    @given(channels_per_group=st.integers(min_value=1, max_value=5),
           groups=st.sampled_from([1, 4, 8, 9]),
           n=st.integers(0, 2),
           order=st.sampled_from(['NCHW', 'NHWC']),
           **hu.gcs_cpu_only)
    @settings(max_examples=10, deadline=None)
    def test_channel_shuffle(self, channels_per_group, groups, n, order, gc, dc):
        """Quantized channel shuffle must match the reference for both layouts."""
        X = np.round((np.random.rand(n, (channels_per_group * groups), 5, 6) * 255)).astype(np.float32)
        if (n != 0):
            # Pin the full uint8 range so quantization scale is deterministic.
            X[(0, 0, 0, 0)] = 0
            X[(0, 0, 0, 1)] = 255
        if (order == 'NHWC'):
            X = utils.NCHW2NHWC(X)
        net = core.Net('test_net')
        quantize = core.CreateOperator('Quantize', ['X'], ['X_q'], engine='DNNLOWP')
        channel_shuffle = core.CreateOperator('ChannelShuffle', ['X_q'], ['Y_q'], group=groups, kernel=1, order=order, engine='DNNLOWP')
        dequantize = core.CreateOperator('Dequantize', ['Y_q'], ['Y'], engine='DNNLOWP')
        net.Proto().op.extend([quantize, channel_shuffle, dequantize])
        workspace.FeedBlob('X', X)
        workspace.RunNetOnce(net)
        Y = workspace.FetchBlob('Y')

        def channel_shuffle_ref(X):
            # Reference: reshape to (N, G, C/G, H, W), swap group axes, flatten.
            if (order == 'NHWC'):
                X = utils.NHWC2NCHW(X)
            Y_r = X.reshape(X.shape[0], groups, (X.shape[1] // groups), X.shape[2], X.shape[3])
            Y_trns = Y_r.transpose((0, 2, 1, 3, 4))
            Y_reshaped = Y_trns.reshape(X.shape)
            if (order == 'NHWC'):
                Y_reshaped = utils.NCHW2NHWC(Y_reshaped)
            return Y_reshaped

        Y_ref = channel_shuffle_ref(X)
        np.testing.assert_allclose(Y, Y_ref)

    @given(channels_per_group=st.integers(min_value=32, max_value=128),
           n=st.integers(0, 2),
           **hu.gcs_cpu_only)
    @settings(max_examples=10, deadline=None)
    def test_channel_shuffle_fast_path(self, channels_per_group, n, gc, dc):
        """Exercise the NHWC fast path (large channel counts, fixed 4 groups)."""
        order = 'NHWC'
        groups = 4
        X = np.round((np.random.rand(n, (channels_per_group * groups), 5, 6) * 255)).astype(np.float32)
        if (n != 0):
            # Pin the full uint8 range so quantization scale is deterministic.
            X[(0, 0, 0, 0)] = 0
            X[(0, 0, 0, 1)] = 255
        X = utils.NCHW2NHWC(X)
        net = core.Net('test_net')
        quantize = core.CreateOperator('Quantize', ['X'], ['X_q'], engine='DNNLOWP')
        channel_shuffle = core.CreateOperator('ChannelShuffle', ['X_q'], ['Y_q'], group=groups, kernel=1, order=order, engine='DNNLOWP')
        dequantize = core.CreateOperator('Dequantize', ['Y_q'], ['Y'], engine='DNNLOWP')
        net.Proto().op.extend([quantize, channel_shuffle, dequantize])
        workspace.FeedBlob('X', X)
        workspace.RunNetOnce(net)
        Y = workspace.FetchBlob('Y')

        def channel_shuffle_ref(X):
            # Reference: reshape to (N, G, C/G, H, W), swap group axes, flatten.
            if (order == 'NHWC'):
                X = utils.NHWC2NCHW(X)
            Y_r = X.reshape(X.shape[0], groups, (X.shape[1] // groups), X.shape[2], X.shape[3])
            Y_trns = Y_r.transpose((0, 2, 1, 3, 4))
            Y_reshaped = Y_trns.reshape(X.shape)
            if (order == 'NHWC'):
                Y_reshaped = utils.NCHW2NHWC(Y_reshaped)
            return Y_reshaped

        Y_ref = channel_shuffle_ref(X)
        np.testing.assert_allclose(Y, Y_ref)
def force_image_sizes(dataset, image_size=(96, 96)):
    """Resize every image in a tf.data dataset to `image_size`, keeping labels.

    Mapping runs with `AUTO` parallelism (module-level tf.data AUTOTUNE alias).
    """
    def _resize(image, label):
        return tf.image.resize(image, image_size), label

    return dataset.map(_resize, num_parallel_calls=AUTO)
class BlurFunction(Function):
    """Custom autograd blur: depthwise 3x3 convolution with a fixed kernel.

    The backward pass convolves the gradient with the flipped kernel via
    BlurFunctionBackward (defined elsewhere in the file).
    BUGFIX: `forward`/`backward` must be staticmethods — modern PyTorch raises
    at `.apply(...)` time for legacy non-static autograd Functions.
    """

    @staticmethod
    def forward(ctx, input, kernel, kernel_flip):
        # Save both kernels; backward needs the flipped one.
        ctx.save_for_backward(kernel, kernel_flip)
        # One kernel per channel (groups == channels) => depthwise blur.
        output = F.conv2d(input, kernel, padding=1, groups=input.shape[1])
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (kernel, kernel_flip) = ctx.saved_tensors
        grad_input = BlurFunctionBackward.apply(grad_output, kernel, kernel_flip)
        # No gradients for the (fixed) kernels.
        return (grad_input, None, None)
def random_new_basis_modp(N, p, k, LWBModp, TotalBasisModp, elldash, bound):
    """Extend TotalBasisModp to weight k with random products of low-weight forms.

    Sage internal (hecke_series-style machinery): repeatedly multiplies random
    elements of the low-weight basis LWBModp until the echelonized matrix
    TotalBasisModp reaches the rank required for weight k, recording for each
    new basis element the list of [generator-weight, index] factors used.
    Returns that list of "codes" (one per new basis element).
    NOTE(review): mutates TotalBasisModp in place; relies on Sage globals
    (dimension_modular_forms, random_solution, ZZ).
    """
    R = LWBModp[0][0].parent()
    if (k == 0):
        # Weight 0: the constant form 1 spans everything.
        TotalBasisModp[(0, 0)] = 1
        return [[]]
    di = dimension_modular_forms(N, k)
    diminus1 = dimension_modular_forms(N, (k - (p - 1)))
    # mi new basis elements are needed beyond the weight k-(p-1) space.
    mi = (di - diminus1)
    NewBasisCode = []
    rk = diminus1
    for i in range(1, (mi + 1)):
        # Keep trying random products until the rank actually increases.
        while (rk < (diminus1 + i)):
            exps = random_solution((bound // 2), (k // 2))
            TotalBasisi = R(1)
            TotalBasisiCode = []
            for j in range(len(exps)):
                for l in range(exps[j]):
                    # Pick a random generator of weight class j and multiply it in.
                    a = ZZ.random_element(len(LWBModp[j]))
                    TotalBasisi = (TotalBasisi * LWBModp[j][a])
                    TotalBasisiCode.append([j, a])
            # Store the first elldash coefficients and re-echelonize to test rank.
            TotalBasisModp[rk] = [TotalBasisi[j] for j in range(elldash)]
            TotalBasisModp.echelonize()
            rk = TotalBasisModp.rank()
        NewBasisCode.append(TotalBasisiCode)
    return NewBasisCode
# NOTE(review): '_numpy_output' looks like a truncated decorator marker (the
# '@' and the name prefix were lost, likely '@compare_numpy_output(...)'); as
# written this is a bare call expression. The original name cannot be
# recovered from this file — verify against the upstream source.
_numpy_output(validation_func=(lambda a: (- a)))
def test_ufunc_negative_u(A: dace.uint32[10]):
    """Elementwise negation of a uint32 vector via np.negative (DaCe ufunc test)."""
    return np.negative(A)
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) file from Google Drive to `destination`.

    Handles the virus-scan interstitial Google serves for large files by
    re-requesting with the confirm token from the 'download_warning' cookie.
    BUGFIX: the URL literal was truncated in the original ("URL = '", a syntax
    error); restored the canonical Google Drive download endpoint.
    """
    def get_confirm_token(response):
        # Large files set a 'download_warning*' cookie whose value confirms
        # the user accepts downloading without a virus scan.
        for (key, value) in response.cookies.items():
            if key.startswith('download_warning'):
                return value
        return None

    def save_response_content(response, destination):
        # Stream to disk in 32 KiB chunks with a progress bar.
        CHUNK_SIZE = 32768
        with open(destination, 'wb') as f:
            for chunk in tqdm(response.iter_content(CHUNK_SIZE)):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)

    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Second request with the confirm token actually starts the download.
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)
def get_worker_runtimes(jobs, num_jobs=None):
    """Sum busy time per (worker_id, worker_type) up to the num_jobs-th job end.

    Spans starting after the cutoff are ignored; spans crossing it are clipped.
    Returns (runtimes dict, overall execution time).
    """
    if num_jobs is None:
        num_jobs = len(jobs)
    # Cutoff: end time of the num_jobs-th job to finish.
    cutoff = get_job_end_times(jobs)[num_jobs - 1][0]
    overall_execution_time = get_overall_execution_time(jobs, cutoff)
    runtimes = {}
    for job in jobs.values():
        spans = zip(job.start_times, job.end_times, job.worker_ids, job.worker_types)
        for start, end, worker_id, worker_type in spans:
            if start >= cutoff:
                continue
            key = (worker_id, worker_type)
            # Clip the span at the cutoff before accumulating.
            runtimes[key] = runtimes.get(key, 0) + (min(end, cutoff) - start)
    return runtimes, overall_execution_time
class Simulator():
    """Callable wrapper around a task simulator with an optional call budget.

    Mirrors the task's metadata locally and counts how many simulations have
    been requested, raising SimulationBudgetExceeded once `max_calls` would be
    passed.
    """

    def __init__(self, task: Task, simulator: Callable, max_calls: Optional[int]=None):
        self.simulator = simulator
        self.max_calls = max_calls
        self.num_simulations = 0
        # Task metadata copied so callers need not keep the Task around.
        self.name = task.name
        self.dim_data = task.dim_data
        self.dim_parameters = task.dim_parameters
        self.flatten_data = task.flatten_data
        self.unflatten_data = task.unflatten_data

    def __call__(self, parameters: torch.Tensor, **kwargs: Any) -> torch.Tensor:
        """Simulate a batch of parameter vectors and return flattened data.

        A 1-D input is promoted to a single-row batch. Raises
        SimulationBudgetExceeded when the budget would be exceeded.
        """
        if parameters.ndim == 1:
            parameters = parameters.reshape(1, -1)
        assert parameters.ndim == 2
        assert parameters.shape[1] == self.dim_parameters
        batch = parameters.shape[0]
        over_budget = self.max_calls is not None and self.num_simulations + batch > self.max_calls
        if over_budget:
            raise SimulationBudgetExceeded
        data = self.simulator(parameters, **kwargs)
        self.num_simulations += batch
        return self.flatten_data(data)
# NOTE(review): '_utils.test' looks like a truncated decorator marker (likely
# '@test_utils.test(...)' with the '@' and prefix lost); as written this is a
# bare call expression. Left untouched — verify against the upstream source.
_utils.test(require=ti.extension.bls)
def test_gather_1d_trivial():
    """Smoke test: 1-D BLS gather with a single-point (identity) stencil."""
    _test_bls_stencil(1, 128, bs=32, stencil=((0,),))
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand, x4 expansion).

    Norm layers are registered under generated names and exposed via the
    `norm1/2/3` properties.
    BUGFIX: `norm1/2/3` must be properties — `forward` calls `self.norm1(out)`,
    which would pass `out` as an unexpected argument to a plain zero-argument
    method (TypeError). The `@property` decorators were evidently stripped.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, act_cfg=dict(type='ReLU'), conv_cfg=None, norm_cfg=dict(type='BN'), with_cp=False):
        super(Bottleneck, self).__init__()
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.act_cfg = act_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        # Stride is applied in the 3x3 conv (pytorch-style placement).
        self.conv1_stride = 1
        self.conv2_stride = stride
        self.with_cp = with_cp
        (self.norm1_name, norm1) = build_norm_layer(norm_cfg, planes, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(norm_cfg, planes, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(norm_cfg, (planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        # Dilation doubles as padding to keep spatial size (for stride 1).
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(conv_cfg, planes, (planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        self.activate = build_activation_layer(act_cfg)
        self.downsample = downsample

    @property
    def norm1(self):
        """The norm layer after conv1."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """The norm layer after conv2."""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """The norm layer after conv3."""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """conv-norm-act x3 plus (optionally downsampled) identity shortcut."""
        identity = x
        out = self.conv1(x)
        out = self.norm1(out)
        out = self.activate(out)
        out = self.conv2(out)
        out = self.norm2(out)
        out = self.activate(out)
        out = self.conv3(out)
        out = self.norm3(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        out += identity
        out = self.activate(out)
        return out
def filter_not_valid(df_keypoints):
    """Return a copy of the keypoint dataframe with invalid rows removed.

    A row is kept when its decoded pose passes `pose_check_valid` and its
    name does not mark it as a distractor ('-1' or '0000' prefix).
    """
    def _row_is_valid(row):
        coords = pose_utils.load_pose_cords_from_strings(row['keypoints_y'], row['keypoints_x'])
        is_distractor = row['name'].startswith('-1') or row['name'].startswith('0000')
        return pose_check_valid(coords) and not is_distractor

    mask = df_keypoints.apply(_row_is_valid, axis=1)
    return df_keypoints[mask].copy()
def _replace_stopwords(text: Any, value: str, stopwords: Optional[Set[str]]=None) -> Any:
if pd.isna(text):
return text
stopwords = (english_stopwords if (not stopwords) else stopwords)
return ' '.join(((word if (word.lower() not in stopwords) else value) for word in str(text).split())) |
class GraphAppendingTracer(TracerBase):
    """FX tracer that appends newly created nodes onto an existing Graph."""
    def __init__(self, graph: Graph):
        super().__init__()
        # Target graph that subsequent tracing operations extend.
        self.graph = graph
def run_ngram_baseline(train_fpath, test_fpath):
    """Train a TF-IDF unigram RBF-SVM check-worthiness baseline and dump scores.

    Reads tab-separated train/test files, fits on 'tweet_text' vs
    'check_worthiness', and writes one 'topic_id\\ttweet_id\\tscore\\tngram'
    line per test tweet under the baselines data directory.
    """
    train_df = pd.read_csv(train_fpath, sep='\t')
    test_df = pd.read_csv(test_fpath, sep='\t')
    model = Pipeline([('ngrams', TfidfVectorizer(ngram_range=(1, 1))), ('clf', SVC(C=1, gamma=0.75, kernel='rbf', random_state=0))])
    model.fit(train_df['tweet_text'], train_df['check_worthiness'])
    out_path = join(ROOT_DIR, ('baselines/data/task1_ngram_baseline_%s' % os.path.basename(test_fpath)))
    with open(out_path, 'w') as results_file:
        # Decision-function distances serve as ranking scores.
        scores = model.decision_function(test_df['tweet_text'])
        for row_idx, row in test_df.iterrows():
            results_file.write('{}\t{}\t{}\t{}\n'.format(row['topic_id'], row['tweet_id'], scores[row_idx], 'ngram'))
def average_seed(all_seed_dict):
    """Aggregate per-seed results into (mean, std) per metric in RESULTS_TERMS.

    Metrics absent at the top level are looked up in the nested
    'Closed Set Accuracy' (for 'Closed-set OA'/'AA') or 'OSR Accuracy' dicts.
    Values are rounded to two decimals.
    """
    result_mean = {}
    result_std = {}
    for term in RESULTS_TERMS:
        values = []
        for seed_results in all_seed_dict.values():
            if term in seed_results:
                values.append(seed_results[term])
            elif term == 'Closed-set OA':
                values.append(seed_results['Closed Set Accuracy']['OA'])
            elif term == 'Closed-set AA':
                values.append(seed_results['Closed Set Accuracy']['AA'])
            else:
                # Remaining terms live under the OSR accuracy sub-dict.
                values.append(seed_results['OSR Accuracy'][term])
        result_mean[term] = round(np.mean(values), 2)
        result_std[term] = round(np.std(values), 2)
    return result_mean, result_std
class JasperBlock(nn.Module):
    """A Jasper block: `num_sub_blocks` stacked JasperSubBlocks.

    All sub-blocks share the same hyperparameters; only the first adapts the
    input channel count. The final sub-block additionally consumes a residual
    input.
    """

    def __init__(self, num_sub_blocks: int, in_channels: int, out_channels: int, kernel_size: int, stride: int=1, dilation: int=1, bias: bool=True, dropout_p: float=0.2, activation: str='relu') -> None:
        super(JasperBlock, self).__init__()
        padding = self.get_same_padding(kernel_size, stride, dilation)
        sub_blocks = []
        for idx in range(num_sub_blocks):
            # Only the first sub-block sees `in_channels`.
            sub_blocks.append(JasperSubBlock(in_channels=(in_channels if idx == 0 else out_channels), out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, dropout_p=dropout_p, activation=activation))
        self.layers = nn.ModuleList(sub_blocks)

    def get_same_padding(self, kernel_size: int, stride: int, dilation: int):
        """Return the 'same' padding for the given kernel/dilation combo."""
        if stride > 1 and dilation > 1:
            raise ValueError('Only stride OR dilation may be greater than 1')
        return (kernel_size // 2) * dilation

    def forward(self, inputs: Tensor, input_lengths: Tensor, residual: Tensor) -> Tuple[Tensor, Tensor]:
        """Run all sub-blocks; the last one also receives the residual input."""
        for sub_block in self.layers[:-1]:
            inputs, input_lengths = sub_block(inputs, input_lengths)
        return self.layers[-1](inputs, input_lengths, residual)
def scorep_env(tmp_path):
    """Return a copy of the current environment configured for Score-P tracing.

    Profiling is disabled, tracing enabled, and the experiment directory is
    placed under `tmp_path`.
    """
    settings = {
        'SCOREP_ENABLE_PROFILING': 'false',
        'SCOREP_ENABLE_TRACING': 'true',
        'SCOREP_PROFILING_MAX_CALLPATH_DEPTH': '98',
        'SCOREP_TOTAL_MEMORY': '3G',
        'SCOREP_EXPERIMENT_DIRECTORY': str(tmp_path / 'test_bindings_dir'),
    }
    env = os.environ.copy()
    env.update(settings)
    return env
class ResidualCNN(nn.Module):
    """Residual CNN block: (layernorm -> tanh -> dropout -> conv) twice + skip.

    Padding of kernel//2 keeps spatial dimensions so the residual addition is
    shape-compatible.
    FIX: replaced deprecated `F.tanh` with the equivalent `torch.tanh`
    (F.tanh has been deprecated since PyTorch 1.2).
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super(ResidualCNN, self).__init__()
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=(kernel // 2))
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=(kernel // 2))
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)

    def forward(self, x):
        """Apply the two conv stages and add the input back (residual)."""
        residual = x
        x = self.layer_norm1(x)
        x = torch.tanh(x)
        x = self.dropout1(x)
        x = self.cnn1(x)
        x = self.layer_norm2(x)
        x = torch.tanh(x)
        x = self.dropout2(x)
        x = self.cnn2(x)
        x += residual
        return x
def spearman(x, y, impute_nan=True):
    """Column-wise Spearman rank correlation between two 2-D tensors.

    NaNs are optionally imputed to 0 first. If either tensor is entirely
    constant, every column gets the conventional fallback of 0.5; individual
    constant columns get 0.0.
    BUGFIX: the constant-tensor check indexed `[1]`, which crashes on
    single-row inputs; `[0]` is equivalent for length >= 2 and safe for
    length 1.
    """
    if impute_nan:
        x = torch.nan_to_num(x)
        y = torch.nan_to_num(y)
    if (torch.all((y == y[0])) or torch.all((x == x[0]))):
        x = np.array(x.cpu())
        y = np.array(y.cpu())
        # Fallback: rank correlation is undefined for a constant input.
        return np.array([0.5 for (_, _) in zip(np.rollaxis(x, 1), np.rollaxis(y, 1))])
    x = np.array(x.cpu())
    y = np.array(y.cpu())
    # Per-column spearmanr; a constant column (undefined correlation) maps to 0.
    rho = [(spearmanr(xs, ys)[0] if (not ((xs[0] == xs).all() or (ys[0] == ys).all())) else 0.0) for (xs, ys) in zip(np.rollaxis(x, 1), np.rollaxis(y, 1))]
    return np.array(rho)
class FillPlan(BenchmarkPlan):
    """Benchmark plan for the 'fill' operation across container types.

    Registers fill implementations for field/ndarray (default) and sparse
    containers. NOTE(review): Container, DataType, DataSize, MetricType and
    the fill_* functions come from the surrounding benchmark framework.
    """
    def __init__(self, arch: str):
        # Repeat each measurement 10x for stability.
        super().__init__('fill', arch, basic_repeat_times=10)
        fill_container = Container()
        # Add 'sparse' as an extra container variant (no payload needed).
        fill_container.update({'sparse': None})
        self.create_plan(fill_container, DataType(), DataSize(), MetricType())
        self.add_func(['field'], fill_default)
        self.add_func(['ndarray'], fill_default)
        self.add_func(['sparse'], fill_sparse)
# NOTE(review): '_checkpoint_hooks' looks like a truncated decorator marker
# (likely '@register_checkpoint_hooks' with the '@' and prefix lost) — verify
# against the upstream source.
_checkpoint_hooks
class CyclicLRScheduler():
    """Cyclical learning-rate schedule (Smith, 2017).

    The LR oscillates linearly between base_lr and max_lr over a full cycle of
    2*step_size iterations. `mode` picks the amplitude scaling:
      - 'triangular':  constant amplitude
      - 'triangular2': amplitude halved every cycle
      - 'exp_range':   amplitude scaled by gamma**iteration
    A user-supplied scale_fn/scale_mode pair overrides `mode`.
    """
    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000.0, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle'):
        super(CyclicLRScheduler, self).__init__()
        self.losses = []
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size  # iterations per half-cycle
        self.mode = mode
        self.gamma = gamma
        if (scale_fn is None):
            if (self.mode == 'triangular'):
                self.scale_fn = (lambda x: 1.0)
                self.scale_mode = 'cycle'
            elif (self.mode == 'triangular2'):
                # Halve the amplitude each cycle.
                self.scale_fn = (lambda x: (1 / (2.0 ** (x - 1))))
                self.scale_mode = 'cycle'
            elif (self.mode == 'exp_range'):
                # Exponential decay per iteration.
                self.scale_fn = (lambda x: (gamma ** x))
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.0
        self._reset()
    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Optionally override bounds/step size and restart the cycle counter."""
        if (new_base_lr is not None):
            self.base_lr = new_base_lr
        if (new_max_lr is not None):
            self.max_lr = new_max_lr
        if (new_step_size is not None):
            self.step_size = new_step_size
        self.clr_iterations = 0.0
    def __call__(self, epoch):
        """Return (previous LR, next LR) for the upcoming iteration.

        NOTE(review): `self.current_lr` is only assigned in on_batch_end, so
        calling this before the first batch raises AttributeError — confirm
        intended call order.
        """
        old_lr = self.current_lr
        new_lr = self.clr((self.clr_iterations + 1))
        return (old_lr, new_lr)
    def clr(self, clr_iterations):
        """Triangular-wave LR value at the given iteration count."""
        cycle = math.floor((1 + (clr_iterations / (2 * self.step_size))))
        # x goes 0 -> 1 -> 0 across the cycle (distance from the peak).
        x = abs((((clr_iterations / self.step_size) - (2 * cycle)) + 1))
        if (self.scale_mode == 'cycle'):
            return (self.base_lr + (((self.max_lr - self.base_lr) * max(0, (1 - x))) * self.scale_fn(cycle)))
        else:
            return (self.base_lr + (((self.max_lr - self.base_lr) * max(0, (1 - x))) * self.scale_fn(clr_iterations)))
    def on_batch_end(self, opt):
        """Advance one iteration and write the new LR into the optimizer."""
        self.clr_iterations += 1
        lr = self.clr(self.clr_iterations)
        current_lr = opt.param_groups[0]['lr']
        for param_group in opt.param_groups:
            param_group['lr'] = lr
        # Record the LR that was in effect before this update.
        self.current_lr = current_lr
    # NOTE(review): '_as_saver' / '_as_loader' below look like truncated
    # decorator markers (likely '@mark_as_saver' / '@mark_as_loader') — verify
    # against the upstream source.
    _as_saver
    def save(self, path):
        """Persist loss history and iteration counter to `path`."""
        data = {'losses': self.losses, 'clr_iterations': self.clr_iterations}
        torch.save(data, path)
    _as_loader
    def load(self, path, end_of_epoch=False, device=None):
        """Restore loss history and iteration counter from `path`."""
        del end_of_epoch
        del device
        data = torch.load(path)
        self.losses = data['losses']
        self.clr_iterations = data['clr_iterations']
class TokenClassificationArgumentHandlerTestCase(unittest.TestCase):
    """Unit tests for TokenClassificationArgumentHandler input normalization."""
    def setUp(self):
        self.args_parser = TokenClassificationArgumentHandler()
    def test_simple(self):
        """Strings are wrapped into lists; offset mappings follow input arity."""
        string = 'This is a simple input'
        # Single string -> single-element list, no offsets.
        (inputs, offset_mapping) = self.args_parser(string)
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, None)
        # List input passes through unchanged.
        (inputs, offset_mapping) = self.args_parser([string, string])
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, None)
        # Single string with a flat offset mapping gets both wrapped.
        (inputs, offset_mapping) = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)])
        self.assertEqual(inputs, [string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]])
        # Batched input with per-item offset mappings passes through.
        (inputs, offset_mapping) = self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
        self.assertEqual(inputs, [string, string])
        self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
    def test_errors(self):
        """Malformed input/offset combinations raise TypeError or ValueError."""
        string = 'This is a simple input'
        # Multiple positional strings are not accepted.
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]])
        with self.assertRaises(TypeError):
            self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)])
        # Offset mapping arity must match the number of inputs.
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]])
        with self.assertRaises(ValueError):
            self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)])
        with self.assertRaises(ValueError):
            self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]])
        # Offsets without any input are meaningless.
        with self.assertRaises(TypeError):
            self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])
class LinearCodeNearestNeighborDecoder(Decoder):
    """Decode by exhaustive search for the closest codeword (Hamming metric).

    Always correct but enumerates the whole code — exponential in the code
    dimension, so only suitable for small codes. (Sage coding-theory class.)
    """
    def __init__(self, code):
        # Decode from the full ambient space, using the code's default encoder.
        super().__init__(code, code.ambient_space(), code._default_encoder_name)
    def __eq__(self, other):
        # Equal iff both are nearest-neighbor decoders over the same code.
        return (isinstance(other, LinearCodeNearestNeighborDecoder) and (self.code() == other.code()))
    def _repr_(self):
        return ('Nearest neighbor decoder for %s' % self.code())
    def _latex_(self):
        return ('\\textnormal{Nearest neighbor decoder for }%s' % self.code()._latex_())
    def decode_to_code(self, r):
        """Return a codeword nearest to `r`; starts from zero, first-found wins ties."""
        c_min = self.code().zero()
        h_min = r.hamming_weight()
        for c in self.code():
            if ((c - r).hamming_weight() < h_min):
                h_min = (c - r).hamming_weight()
                c_min = c
        c_min.set_immutable()
        return c_min
    def decoding_radius(self):
        """Largest error count always correctable: floor((d - 1) / 2)."""
        return ((self.code().minimum_distance() - 1) // 2)
def _swig_repr(self):
try:
strthis = ('proxy of ' + self.this.__repr__())
except Exception:
strthis = ''
return ('<%s.%s; %s >' % (self.__class__.__module__, self.__class__.__name__, strthis)) |
class AverageMeter():
    """Accumulates per-class intersection/union (and losses) for IoU reporting.

    IMPROVEMENT: the five identical elif branches (differing only in the
    class count) are collapsed into a benchmark -> nclass lookup table;
    behavior for known benchmarks is unchanged, and an unknown benchmark
    still fails later with AttributeError exactly as before.
    """

    # Number of foreground classes per supported benchmark.
    _NCLASS = {'pascal': 20, 'fss': 1000, 'deepglobe': 6, 'isic': 3, 'lung': 1}

    def __init__(self, dataset):
        self.benchmark = dataset.benchmark
        if self.benchmark in self._NCLASS:
            self.class_ids_interest = torch.tensor(dataset.class_ids).cuda()
            self.nclass = self._NCLASS[self.benchmark]
        # Buffers indexed [bg/fg, class]; `ones` guards divisions by zero union.
        self.intersection_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.union_buf = torch.zeros([2, self.nclass]).float().cuda()
        self.ones = torch.ones_like(self.union_buf)
        self.loss_buf = []

    def update(self, inter_b, union_b, class_id, loss):
        """Accumulate one batch's intersection/union per class, and its loss."""
        self.intersection_buf.index_add_(1, class_id, inter_b.float())
        self.union_buf.index_add_(1, class_id, union_b.float())
        if loss is None:
            loss = torch.tensor(0.0)
        self.loss_buf.append(loss)

    def compute_iou(self):
        """Return (mean IoU over foreground classes, FB-IoU), both in percent."""
        # max with ones avoids 0/0 for classes never seen.
        iou = self.intersection_buf.float() / torch.max(torch.stack([self.union_buf, self.ones]), dim=0)[0]
        iou = iou.index_select(1, self.class_ids_interest)
        miou = iou[1].mean() * 100
        fb_iou = (self.intersection_buf.index_select(1, self.class_ids_interest).sum(dim=1) / self.union_buf.index_select(1, self.class_ids_interest).sum(dim=1)).mean() * 100
        return (miou, fb_iou)

    def write_result(self, split, epoch):
        """Log the epoch summary (average loss, mIoU, FB-IoU)."""
        (iou, fb_iou) = self.compute_iou()
        loss_buf = torch.stack(self.loss_buf)
        msg = ('\n*** %s ' % split)
        msg += ('[@Epoch %02d] ' % epoch) if False else ('[ %02d] ' % epoch)
        msg += ('Avg L: %6.5f ' % loss_buf.mean())
        msg += ('mIoU: %5.2f ' % iou)
        msg += ('FB-IoU: %5.2f ' % fb_iou)
        msg += '***\n'
        Logger.info(msg)

    def write_process(self, batch_idx, datalen, epoch, write_batch_idx=20):
        """Log running metrics every `write_batch_idx` batches (epoch -1 = eval)."""
        if (batch_idx % write_batch_idx) == 0:
            msg = ('[Epoch: %02d] ' % epoch) if epoch != -1 else ''
            msg += ('[Batch: %04d/%04d] ' % (batch_idx + 1, datalen))
            (iou, fb_iou) = self.compute_iou()
            if epoch != -1:
                loss_buf = torch.stack(self.loss_buf)
                msg += ('L: %6.5f ' % loss_buf[-1])
                msg += ('Avg L: %6.5f ' % loss_buf.mean())
            msg += ('mIoU: %5.2f | ' % iou)
            msg += ('FB-IoU: %5.2f' % fb_iou)
            Logger.info(msg)
def test_authorization_warning_missing_threshold(result):
    """A single 401 among mixed responses must not trip the status-count check.

    NOTE(review): `result` is a fixture and `make_check` /
    `has_too_many_responses_with_status` are helpers defined elsewhere in the
    test suite.
    """
    result.checks.extend([make_check(201), make_check(401)])
    assert (not has_too_many_responses_with_status(result, 401))
def convert_worldwide_file(filename, short_name):
    """Rewrite a worldwide-9class NER JSON file under a language short name.

    Adds a 'multi_ner' triple ('-', original ner, processed s4 label) to every
    token and writes the result next to the input with the en_ prefix swapped
    for `short_name`.
    """
    assert ('en_worldwide-9class.' in filename)
    if not os.path.exists(filename):
        raise FileNotFoundError(('Cannot convert missing file %s' % filename))
    out_path = filename.replace('en_worldwide-9class.', (short_name + '.worldwide-9class.'))
    with open(filename) as fin:
        doc = json.load(fin)
    for sentence in doc:
        started = False
        for token in sentence:
            # process_label tracks span starts across consecutive tokens.
            (_, s4, started) = process_label((token['text'], token['ner']), started)
            token['multi_ner'] = ('-', token['ner'], s4)
    with open(out_path, 'w') as fout:
        json.dump(doc, fout, indent=2)
class CollateMergedPseudo():
    """Collate source/target point-cloud samples plus target pseudo-labels.

    Builds three MinkowskiEngine SparseCollation batches — source, target
    (true labels) and target (pseudo labels) — while also collecting sampled
    indices and dataset indices for each domain.
    NOTE(review): relies on MinkowskiEngine (ME) collation semantics and on
    the per-sample dict schema produced by the dataset — verify there.
    """
    def __init__(self, device=None):
        # Device onto which coordinates/features are moved before collation.
        self.device = device
    def __call__(self, list_data):
        source_list_data = []
        target_list_data = []
        target_list_pseudo = []
        source_selected = []
        target_selected = []
        source_idx = []
        target_idx = []
        for d in list_data:
            # (coords, features, labels) triples expected by SparseCollation.
            source_list_data.append((d['source_coordinates'].to(self.device), d['source_features'].to(self.device), d['source_labels']))
            target_list_data.append((d['target_coordinates'].to(self.device), d['target_features'].to(self.device), d['target_labels']))
            # Same target geometry, but paired with pseudo labels.
            target_list_pseudo.append((d['target_coordinates'].to(self.device), d['target_features'].to(self.device), d['target_pseudo_labels'].to(self.device)))
            source_selected.append(d['source_sampled_idx'])
            target_selected.append(d['target_sampled_idx'])
            source_idx.append(d['source_idx'].unsqueeze(0))
            target_idx.append(d['target_idx'].unsqueeze(0))
        (source_coordinates_batch, source_features_batch, source_labels_batch) = ME.utils.SparseCollation(dtype=torch.float32, device=self.device)(source_list_data)
        (target_coordinates_batch, target_features_batch, target_labels_batch) = ME.utils.SparseCollation(dtype=torch.float32, device=self.device)(target_list_data)
        # Only the pseudo labels are needed from the third collation; the
        # coordinates/features duplicate the target batch above.
        (_, _, target_pseudo_labels) = ME.utils.SparseCollation(dtype=torch.float32, device=self.device)(target_list_pseudo)
        return {'source_coordinates': source_coordinates_batch, 'source_features': source_features_batch, 'source_labels': source_labels_batch, 'target_coordinates': target_coordinates_batch, 'target_features': target_features_batch, 'target_labels': target_labels_batch, 'target_pseudo_labels': target_pseudo_labels, 'source_sampled': source_selected, 'target_sampled': target_selected, 'source_idx': source_idx, 'target_idx': target_idx}
def run(env, num_envs, total_step, async_):
    """Benchmark raw environment stepping throughput and print FPS.

    `env` selects a suite ('atari'/'mujoco'/'box2d'); single-env mode resets
    on episode end, vectorized mode just steps. FPS accounts for frame skip.
    """
    if env == 'atari':
        task_id = 'PongNoFrameskip-v4'
        frame_skip = 4
        if num_envs == 1:
            env = wrap_deepmind(gym.make(task_id), episode_life=False, clip_rewards=False, frame_stack=4)
        else:
            env = gym.vector.make(task_id, num_envs, async_, (lambda e: wrap_deepmind(e, episode_life=False, clip_rewards=False, frame_stack=4)))
    elif env == 'mujoco':
        task_id = 'Ant-v3'
        frame_skip = 5
        env = gym.make(task_id) if num_envs == 1 else gym.vector.make(task_id, num_envs, async_)
    elif env == 'box2d':
        task_id = 'LunarLander-v2'
        frame_skip = 1
        env = gym.make(task_id) if num_envs == 1 else gym.vector.make(task_id, num_envs, async_)
    else:
        raise NotImplementedError(f'Unknown env {env}')
    env.seed(0)
    env.reset()
    # A single fixed action keeps the benchmark about stepping, not policy.
    action = env.action_space.sample()
    done = False
    start = time.time()
    for _ in tqdm.trange(total_step):
        if num_envs != 1:
            env.step(action)
            continue
        if done:
            done = False
            env.reset()
        else:
            done = env.step(action)[2]
    fps = (frame_skip * total_step * num_envs) / (time.time() - start)
    print(f'FPS = {fps:.2f}')
class XORHyperplaneClassifier(nn.Module):
    """Classifier whose prediction is the XOR of two learned hyperplane tests.

    The two hyperplane slopes are projected through the fixed matrices P1/P2
    at construction time; biases default to zero.
    """
    def __init__(self, x_dim, y_dim, P1, P2, a1=None, a2=None, b1=None, b2=None, ksig=5):
        super(XORHyperplaneClassifier, self).__init__()
        # Slope of the first hyperplane: random (or given) weights projected by P1.
        if (a1 is None):
            self.a1 = Parameter(torch.matmul(torch.randn(int(y_dim), int(x_dim)), torch.t(P1)))
        else:
            assert (a1.shape == (int(y_dim), int(x_dim)))
            self.a1 = Parameter(torch.matmul(torch.Tensor(a1), torch.t(P1)))
        # Slope of the second hyperplane, projected by P2.
        if (a2 is None):
            self.a2 = Parameter(torch.matmul(torch.randn(int(y_dim), int(x_dim)), torch.t(P2)))
        else:
            assert (a2.shape == (int(y_dim), int(x_dim)))
            self.a2 = Parameter(torch.matmul(torch.Tensor(a2), torch.t(P2)))
        # Biases: zero-initialized unless provided.
        if (b1 is None):
            self.b1 = Parameter(torch.Tensor(int(y_dim)))
            nn.init.constant_(self.b1, 0.0)
        else:
            assert (b1.shape == int(y_dim))
            self.b1 = Parameter(torch.Tensor(b1))
        if (b2 is None):
            self.b2 = Parameter(torch.Tensor(int(y_dim)))
            nn.init.constant_(self.b2, 0.0)
        else:
            assert (b2.shape == int(y_dim))
            self.b2 = Parameter(torch.Tensor(b2))
        self.ksig = ksig  # NOTE(review): stored but unused in this block — verify elsewhere.
        # NOTE(review): this bare string appears to be a misplaced docstring
        # for forward(); left in place to preserve the original bytes.
        '\n Perform classification: yhat = (a1^Tx > b1) XOR (a2^Tx > b2)\n Inputs:\n - x : input data sample \n Outputs:\n - yhat : ( p(yhat=0), p(yhat=1) )\n - a1 : slope of 1st hyperplane\n - a2 : slope of 2nd hyperplane\n '
    def forward(self, x):
        """Return ((p0, p1) concatenation, a1, a2) for input batch x.

        NOTE(review): the XOR result is stored in `yhat_class0` and its
        complement in `yhat_class1` — confirm which column callers treat as
        the positive class.
        """
        z1 = F.linear(x, self.a1, ((- 1) * self.b1))
        z2 = F.linear(x, self.a2, ((- 1) * self.b2))
        yhat_class0 = ((z1 > 0) ^ (z2 > 0)).float()
        yhat_class1 = (1.0 - yhat_class0)
        yhat = torch.cat((yhat_class0, yhat_class1), 1)
        return (yhat, self.a1, self.a2)
class RandomPolicy(BasePolicy):
    """Baseline policy: issues a fixed search query when a search bar is
    available, otherwise clicks a uniformly random clickable element."""

    def __init__(self):
        super().__init__()

    def forward(self, observation, available_actions):
        """Return an action string; `observation` is ignored."""
        if available_actions['has_search_bar']:
            return 'search[shoes]'
        action_arg = random.choice(available_actions['clickables'])
        return f'click[{action_arg}]'
def load_conllu(file, treebank_type):
    """Load a CoNLL-U file from *file* into a UDRepresentation.

    Reads the file line by line, validates the 10-column format, resolves
    HEAD references within each sentence (detecting cycles), and optionally
    simplifies the enhanced dependencies according to the boolean flags in
    the *treebank_type* dict (no_gapping, no_control, no_empty_nodes, ...).

    Raises UDError on malformed input.
    """
    # Internal representation classes.
    class UDRepresentation():
        def __init__(self):
            # Characters of all word FORMs (used for span-based alignment).
            self.characters = []
            # UDSpan over `characters` for each (possibly multi-word) token.
            self.tokens = []
            # UDWord instance for every syntactic word.
            self.words = []
            # UDSpan over `characters` for each sentence.
            self.sentences = []
    class UDSpan():
        def __init__(self, start, end):
            self.start = start
            # NOTE: `end` is exclusive, i.e. the span is [start, end).
            self.end = end
    class UDWord():
        def __init__(self, span, columns, is_multiword):
            # Character span of this word (or of its multi-word token).
            self.span = span
            # The 10 CoNLL-U columns of this word.
            self.columns = columns
            # True when this word is part of a multi-word token.
            self.is_multiword = is_multiword
            # Parent UDWord; filled in once the whole sentence is read.
            self.parent = None
            # Children attached via functional deprels; filled in later.
            self.functional_children = []
            # Keep only universal features, in canonical sorted order.
            self.columns[FEATS] = '|'.join(sorted((feat for feat in columns[FEATS].split('|') if (feat.split('=', 1)[0] in UNIVERSAL_FEATURES))))
            # Strip language-specific deprel subtypes ('nsubj:pass' -> 'nsubj').
            self.columns[DEPREL] = columns[DEPREL].split(':')[0]
            self.is_content_deprel = (self.columns[DEPREL] in CONTENT_DEPRELS)
            self.is_functional_deprel = (self.columns[DEPREL] in FUNCTIONAL_DEPRELS)
            # Parse the enhanced-dependency column into (head, steps) pairs.
            self.columns[DEPS] = process_enhanced_deps(columns[DEPS])
    ud = UDRepresentation()
    # `index` counts characters consumed; `sentence_start` is the index of
    # the first word of the sentence currently being read (None = between
    # sentences).
    (index, sentence_start) = (0, None)
    line_idx = 0
    while True:
        line = file.readline()
        line_idx += 1
        if (not line):
            break
        line = _decode(line.rstrip('\r\n'))
        if (sentence_start is None):
            # Comment lines before a sentence are ignored; the first real
            # line opens a new sentence span.
            if line.startswith('#'):
                continue
            ud.sentences.append(UDSpan(index, 0))
            sentence_start = len(ud.words)
        if (not line):
            # Blank line terminates the current sentence: resolve references.
            def process_word(word):
                # 'remapping' marks a word currently on the resolution path,
                # so reaching it again means the HEAD links form a cycle.
                if (word.parent == 'remapping'):
                    raise UDError(('There is a cycle in the sentence that ends at line %d' % line_idx))
                if (word.parent is None):
                    head = int(word.columns[HEAD])
                    if ((head < 0) or (head > (len(ud.words) - sentence_start))):
                        raise UDError("HEAD '{}' points outside of the sentence that ends at line {}".format(_encode(word.columns[HEAD]), line_idx))
                    if head:
                        parent = ud.words[((sentence_start + head) - 1)]
                        word.parent = 'remapping'
                        process_word(parent)
                        word.parent = parent
            position = sentence_start
            for word in ud.words[sentence_start:]:
                process_word(word)
                enhanced_deps = word.columns[DEPS]
                # Replace head indices by the UDWord objects they refer to.
                processed_deps = []
                for (head, steps) in word.columns[DEPS]:
                    if ('.' in head):
                        # Reference to an empty node (ID like '5.1').
                        if treebank_type.get('no_empty_nodes', False):
                            raise UDError('The collapsed CoNLL-U file still contains references to empty nodes at line {}: {}'.format(line_idx, _encode(line)))
                        else:
                            continue
                    hd = int(head)
                    parent = (ud.words[((sentence_start + hd) - 1)] if hd else hd)
                    processed_deps.append((parent, steps))
                enhanced_deps = processed_deps
                if treebank_type.get('no_gapping', False):
                    # Multi-step (gapping) paths fall back to the basic relation.
                    processed_deps = []
                    for (parent, steps) in enhanced_deps:
                        if (len(steps) > 1):
                            processed_deps.append((word.parent, [word.columns[DEPREL]]))
                        elif ((parent, steps) in processed_deps):
                            # no-op: duplicate entry is skipped (recovered
                            # source; likely `pass` originally).
                            True
                        else:
                            processed_deps.append((parent, steps))
                    enhanced_deps = processed_deps
                if treebank_type.get('no_shared_parents_in_coordination', False):
                    # Keep only the (first) conj dependency when present.
                    for (hd, steps) in enhanced_deps:
                        if ((len(steps) == 1) and steps[0].startswith('conj')):
                            enhanced_deps = [(hd, steps)]
                if treebank_type.get('no_shared_dependents_in_coordination', False):
                    # Drop dependencies duplicated via the basic-tree head.
                    processed_deps = []
                    for (hd, steps) in enhanced_deps:
                        duplicate = 0
                        for (hd2, steps2) in enhanced_deps:
                            if ((steps == steps2) and (hd2 == word.columns[HEAD]) and (hd != hd2)):
                                duplicate = 1
                        if (not duplicate):
                            processed_deps.append((hd, steps))
                    enhanced_deps = processed_deps
                if treebank_type.get('no_control', False):
                    # Drop nsubj edges whose parent is an xcomp (control verbs).
                    processed_deps = []
                    for (parent, steps) in enhanced_deps:
                        include = 1
                        if (parent and (parent.columns[DEPREL] == 'xcomp')):
                            for rel in steps:
                                if rel.startswith('nsubj'):
                                    include = 0
                        if include:
                            processed_deps.append((parent, steps))
                    enhanced_deps = processed_deps
                if treebank_type.get('no_external_arguments_of_relative_clauses', False):
                    processed_deps = []
                    for (parent, steps) in enhanced_deps:
                        if (steps[0] == 'ref'):
                            # 'ref' edge: fall back to the basic relation.
                            processed_deps.append((word.parent, [word.columns[DEPREL]]))
                        elif (parent and parent.columns[DEPREL].startswith('acl') and (int(parent.columns[HEAD]) == (position - sentence_start))):
                            # no-op: drop the external argument of the relative
                            # clause (recovered source; likely `pass`).
                            True
                        else:
                            processed_deps.append((parent, steps))
                    enhanced_deps = processed_deps
                if treebank_type.get('no_case_info', False):
                    # Strip lexical (case/marker) extensions from relation labels.
                    processed_deps = []
                    for (hd, steps) in enhanced_deps:
                        processed_steps = []
                        for dep in steps:
                            depparts = dep.split(':')
                            if (depparts[0] in CASE_DEPRELS):
                                if ((len(depparts) == 2) and (not (depparts[1] in UNIVERSAL_DEPREL_EXTENSIONS))):
                                    dep = depparts[0]
                            processed_steps.append(dep)
                        processed_deps.append((hd, processed_steps))
                    enhanced_deps = processed_deps
                position += 1
                word.columns[DEPS] = enhanced_deps
            # Attach functional children once all parents are resolved.
            for word in ud.words[sentence_start:]:
                if (word.parent and word.is_functional_deprel):
                    word.parent.functional_children.append(word)
            # Sentence-level sanity checks.
            if (len(ud.words) == sentence_start):
                raise UDError(('There is a sentence with 0 tokens (possibly a double blank line) at line %d' % line_idx))
            if (len([word for word in ud.words[sentence_start:] if (word.parent is None)]) == 0):
                raise UDError(('There are no roots in the sentence that ends at %d' % line_idx))
            if (not treebank_type.get('multiple_roots_okay', False)):
                if (len([word for word in ud.words[sentence_start:] if (word.parent is None)]) > 1):
                    raise UDError(('There are multiple roots in the sentence that ends at %d' % line_idx))
            ud.sentences[(- 1)].end = index
            sentence_start = None
            continue
        # Regular word/token line: must have exactly 10 tab-separated columns.
        columns = line.split('\t')
        if (len(columns) != 10):
            raise UDError("The CoNLL-U line does not contain 10 tab-separated columns at line {}: '{}'".format(line_idx, _encode(line)))
        if ('.' in columns[ID]):
            # Empty node line (ID like '5.1'): skipped unless forbidden.
            if treebank_type.get('no_empty_nodes', False):
                raise UDError('The collapsed CoNLL-U line still contains empty nodes at line {}: {}'.format(line_idx, _encode(line)))
            else:
                continue
        # Remove space-separator characters (Unicode category Zs) from FORM so
        # alignment is performed on visible characters only.
        columns[FORM] = ''.join(filter((lambda c: (unicodedata.category(c) != 'Zs')), columns[FORM]))
        if (not columns[FORM]):
            raise UDError(('There is an empty FORM in the CoNLL-U file at line %d' % line_idx))
        # Record the token span over the character stream.
        ud.characters.extend(columns[FORM])
        ud.tokens.append(UDSpan(index, (index + len(columns[FORM]))))
        index += len(columns[FORM])
        if ('-' in columns[ID]):
            # Multi-word token (ID like '3-4'): consume the covered word lines.
            try:
                (start, end) = map(int, columns[ID].split('-'))
            except:
                raise UDError("Cannot parse multi-word token ID '{}' at line {}".format(_encode(columns[ID]), line_idx))
            for _ in range(start, (end + 1)):
                word_line = _decode(file.readline().rstrip('\r\n'))
                line_idx += 1
                word_columns = word_line.split('\t')
                if (len(word_columns) != 10):
                    raise UDError("The CoNLL-U line does not contain 10 tab-separated columns at line {}: '{}'".format(line_idx, _encode(word_line)))
                ud.words.append(UDWord(ud.tokens[(- 1)], word_columns, is_multiword=True))
        else:
            # Basic word line: validate its ID sequence and HEAD value.
            try:
                word_id = int(columns[ID])
            except:
                raise UDError("Cannot parse word ID '{}' at line {}".format(_encode(columns[ID]), line_idx))
            if (word_id != ((len(ud.words) - sentence_start) + 1)):
                raise UDError("Incorrect word ID '{}' for word '{}', expected '{}' at line {}".format(_encode(columns[ID]), _encode(columns[FORM]), ((len(ud.words) - sentence_start) + 1), line_idx))
            try:
                head_id = int(columns[HEAD])
            except ValueError as e:
                raise UDError("Cannot parse HEAD '{}' at line {}".format(_encode(columns[HEAD]), line_idx)) from e
            if (head_id < 0):
                raise UDError(('HEAD cannot be negative at line %d' % line_idx))
            ud.words.append(UDWord(ud.tokens[(- 1)], columns, is_multiword=False))
    if (sentence_start is not None):
        raise UDError('The CoNLL-U file does not end with empty line')
    return ud
def init_ddp():
    """Initialise torch.distributed from torchrun-style environment variables.

    Returns (local_rank, world_size). When LOCAL_RANK / WORLD_SIZE are not
    set, assumes a plain single-process run and returns (0, 1) without
    touching the process group.
    """
    env = os.environ
    if 'LOCAL_RANK' not in env or 'WORLD_SIZE' not in env:
        return (0, 1)
    local_rank = int(env['LOCAL_RANK'])
    world_size = int(env['WORLD_SIZE'])
    dist.init_process_group(backend='nccl')
    print(f'Initialized process {local_rank} / {world_size}')
    torch.cuda.set_device(local_rank)
    # Silence stdout on all ranks except rank 0.
    setup_dist_print(local_rank == 0)
    return (local_rank, world_size)
def _ensure_dask_array(array, chunks=None):
    """Return *array* as a dask array, wrapping non-dask inputs.

    Already-dask inputs are returned unchanged; anything else is wrapped
    with ``da.from_array`` using the given *chunks*.
    """
    import dask.array as da
    if not isinstance(array, da.Array):
        array = da.from_array(array, chunks=chunks)
    return array
def register_Ns3EpcS11SapSgwDeleteBearerResponseMessage_methods(root_module, cls):
    """Register Python bindings for ns3::EpcS11SapSgw::DeleteBearerResponseMessage.

    Auto-generated (PyBindGen-style) registration: default and copy
    constructors plus the `bearerContextsRemoved` list attribute.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::EpcS11SapSgw::DeleteBearerResponseMessage const &', 'arg0')])
    cls.add_instance_attribute('bearerContextsRemoved', 'std::list< ns3::EpcS11SapSgw::BearerContextRemovedSgwPgw >', is_const=False)
    return
def skip_torch_module_member(app, what, name, obj, skip, options):
    """Sphinx ``autodoc-skip-member`` hook.

    Skips members inherited from ``torch.nn.Module`` (so subclass pages do
    not repeat the whole Module API), and always skips ``dump_patches``.
    Returns the incoming *skip* decision when it is already truthy.
    """
    return (skip
            or (name == 'dump_patches')
            or (('Module.' in str(obj)) and (name in dir(torch.nn.Module))))
class ToysCalculator(BaseToysCalculator, ToysManager):
    """Toy-based hypothesis-test calculator.

    NOTE(review): the ``@classmethod`` / ``@property`` / setter decorators
    below were restored — the recovered source contained two plain methods
    per name, so the second definition shadowed the first and
    ``self.ntoysnull`` / ``self.ntoysalt`` in ``_get_toys`` would have
    resolved to bound methods instead of integers.
    """

    def __init__(self, input, minimizer, ntoysnull: int=100, ntoysalt: int=100, sampler: Callable=base_sampler, sample: Callable=base_sample):
        """Store the number of toys to generate under each hypothesis.

        Args:
            input: likelihood / loss-builder object forwarded to the base class.
            minimizer: minimizer forwarded to the base class.
            ntoysnull: number of toys for the null hypothesis.
            ntoysalt: number of toys for the alternative hypothesis.
            sampler, sample: toy sampling callables forwarded to the base class.
        """
        super().__init__(input, minimizer, sampler, sample)
        self._ntoysnull = ntoysnull
        self._ntoysalt = ntoysalt

    @classmethod
    def from_yaml(cls, filename: str, input, minimizer, sampler: Callable=base_sampler, sample: Callable=base_sample, **kwargs: Any):
        """Build a calculator and pre-load toy results from a YAML file."""
        ntoysnull = kwargs.get('ntoysnull', 100)
        # Bug fix: the key was misspelled 'ntoysall', silently ignoring the
        # caller's 'ntoysalt' value. The old spelling is kept as a fallback
        # for backward compatibility.
        ntoysalt = kwargs.get('ntoysalt', kwargs.get('ntoysall', 100))
        calculator = cls(input=input, minimizer=minimizer, ntoysnull=ntoysnull, ntoysalt=ntoysalt, sampler=sampler, sample=sample)
        for t in calculator.toysresults_from_yaml(filename):
            calculator.add_toyresult(t)
        return calculator

    @property
    def ntoysnull(self) -> int:
        """Number of toys generated under the null hypothesis."""
        return self._ntoysnull

    @ntoysnull.setter
    def ntoysnull(self, n: int):
        self._ntoysnull = n

    @property
    def ntoysalt(self) -> int:
        """Number of toys generated under the alternative hypothesis."""
        return self._ntoysalt

    @ntoysalt.setter
    def ntoysalt(self, n: int):
        self._ntoysalt = n

    def _get_toys(self, poigen: (POI | POIarray), poieval: ((POI | POIarray) | None)=None, qtilde: bool=False, hypothesis: str='null') -> dict[(POI, ToyResult)]:
        """Collect toy results for each generated POI, generating on demand.

        Args:
            poigen: POI(s) used to generate the toys.
            poieval: POI(s) at which the toys are evaluated; defaults to the
                generation POI itself.
            qtilde: if True, make sure 0.0 is among the evaluation values
                (required by the q-tilde test statistic).
            hypothesis: 'null' or 'alternative'.

        Raises:
            ValueError: for any other *hypothesis* string.
        """
        if (hypothesis not in {'null', 'alternative'}):
            raise ValueError("hypothesis must be 'null' or 'alternative'.")
        ntoys = self.ntoysnull if (hypothesis == 'null') else self.ntoysalt
        ret = {}
        for p in poigen:
            poieval_p = asarray(p) if (poieval is None) else poieval
            if (p not in poieval_p):
                poieval_p = poieval_p.append(p.value)
            if (qtilde and (0.0 not in poieval_p.values)):
                poieval_p = poieval_p.append(0.0)
            # Only generate the toys that are still missing.
            ntogen = max(ntoys - self.ntoys(p, poieval_p), 0)
            if (ntogen > 0):
                print(f'Generating {hypothesis} hypothesis toys for {p}.')
                self.generate_and_fit_toys(ntoys=ntogen, poigen=p, poieval=poieval_p)
            ret[p] = self.get_toyresult(p, poieval_p)
        return ret

    def get_toys_null(self, poigen: (POI | POIarray), poieval: ((POI | POIarray) | None)=None, qtilde: bool=False) -> dict[(POI, ToyResult)]:
        """Toy results generated under the null hypothesis."""
        return self._get_toys(poigen=poigen, poieval=poieval, qtilde=qtilde, hypothesis='null')

    def get_toys_alt(self, poigen: (POI | POIarray), poieval: ((POI | POIarray) | None)=None, qtilde: bool=False) -> dict[(POI, ToyResult)]:
        """Toy results generated under the alternative hypothesis."""
        return self._get_toys(poigen=poigen, poieval=poieval, qtilde=qtilde, hypothesis='alternative')
def test_UnmaskedArray():
    """Concatenating two UnmaskedArrays yields an UnmaskedArray with the
    expected contents and an identical form on the typetracer path."""
    first = ak.contents.unmaskedarray.UnmaskedArray(
        ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])))
    second = ak.contents.unmaskedarray.UnmaskedArray(
        ak.contents.numpyarray.NumpyArray(np.array([7.7, 8.8, 9.9])))
    merged = ak.concatenate([first, second])
    merged_tt = ak.concatenate([first.to_typetracer(), second.to_typetracer()])
    expected = [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
    assert (merged.to_list() == expected)
    assert isinstance(merged.layout, ak.contents.UnmaskedArray)
    assert (merged.layout.form == UnmaskedForm(NumpyForm('float64')))
    assert (merged.layout.form == merged_tt.layout.form)
class NotEq(AttributeFilter):
    """Attribute filter selecting items whose attribute differs from *value*.

    Thin wrapper binding ``operator.ne`` as the comparison of the base
    AttributeFilter.
    """
    def __init__(self, attr: str, value: Any):
        super().__init__(attr=attr, value=value, op=operator.ne)
    def op_as_str(self):
        """Human-readable symbol for this filter's operator."""
        return '!='
def fetch_logged_data(run_id):
    """Fetch params, metrics and 'model' artifact paths for an MLflow run.

    Returns a (params, metrics, artifacts) tuple, where *artifacts* is the
    list of paths listed under the run's 'model' directory.
    """
    client = mlflow.MlflowClient()
    run_data = client.get_run(run_id).data
    model_artifacts = [item.path for item in client.list_artifacts(run_id, 'model')]
    return (run_data.params, run_data.metrics, model_artifacts)
class CheckpointTest(unittest.TestCase):
    """Smoke test for checkpoint loading."""

    def test_load_pretrained(self):
        """Write a dummy checkpoint to disk and load it into fresh params."""
        create_checkpoint('testing', './testing.npz')
        model = models.KNOWN_MODELS['testing'].partial(num_classes=2)
        rng = jax.random.PRNGKey(0)
        input_spec = [((1, 32, 32, 3), jnp.float32)]
        (_, params) = model.init_by_shape(rng, input_spec)
        log = logging.getLogger()
        log.setLevel(logging.INFO)
        checkpoint.load_pretrained(
            pretrained_path='testing.npz',
            init_params=params,
            model_config=models.CONFIGS['testing'],
            logger=log,
        )
class QuaternionAlgebraFactory(UniqueFactory):
    """UniqueFactory producing quaternion algebras (a, b / K).

    Supported call forms (dispatched on which arguments are None):
      * QuaternionAlgebra(D)       -- over QQ, from a discriminant D,
      * QuaternionAlgebra(a, b)    -- invariants a, b over their common
                                      fraction field,
      * QuaternionAlgebra(K, a, b) -- invariants a, b over the base ring K.
    """
    def create_key(self, arg0, arg1=None, arg2=None, names='i,j,k'):
        """Normalise the arguments into the unique cache key (K, a, b, names).

        Raises ValueError when 2 is not invertible in K or when a or b is
        not a unit (the construction needs characteristic != 2 and
        invertible structure constants).
        """
        if ((arg1 is None) and (arg2 is None)):
            # Single argument: a discriminant over the rationals.
            K = QQ
            D = Integer(arg0)
            # Recover invariants (a, b) whose Hilbert conductor is D.
            (a, b) = hilbert_conductor_inverse(D)
            a = Rational(a)
            b = Rational(b)
        elif (arg2 is None):
            # Two arguments: coerce both invariants into one common ring.
            L = []
            for a in [arg0, arg1]:
                if is_RingElement(a):
                    L.append(a)
                elif isinstance(a, int):
                    L.append(Integer(a))
                elif isinstance(a, float):
                    L.append(RR(a))
                else:
                    raise ValueError('a and b must be elements of a ring with characteristic not 2')
            # Common universe of both elements, then its fraction field.
            v = Sequence(L)
            K = v.universe().fraction_field()
            a = K(v[0])
            b = K(v[1])
        else:
            # Three arguments: explicit base ring plus the two invariants.
            K = arg0
            a = K(arg1)
            b = K(arg2)
        if (not K(2).is_unit()):
            raise ValueError(('2 is not invertible in %s' % K))
        if (not (a.is_unit() and b.is_unit())):
            raise ValueError(('defining elements of quaternion algebra (%s, %s) are not invertible in %s' % (a, b, K)))
        names = normalize_names(3, names)
        return (K, a, b, names)
    def create_object(self, version, key, **extra_args):
        """Instantiate the algebra for a key produced by create_key."""
        (K, a, b, names) = key
        return QuaternionAlgebra_ab(K, a, b, names=names)
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None, optimizer_disc=None, save_ckpt_freq=1):
    """Persist a training checkpoint.

    Without DeepSpeed, the checkpoint dict is written (on the master process)
    both to a rolling 'checkpoint.pth' and to an epoch-tagged
    'checkpoint-<epoch>.pth'; for epoch == 'best' only 'checkpoint-best.pth'
    is written. With DeepSpeed, saving is delegated to
    ``model.save_checkpoint``.

    NOTE: `save_ckpt_freq` is currently unused (kept for signature
    compatibility with callers).
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    if (not getattr(args, 'enable_deepspeed', False)):
        epoch_path = output_dir / ('checkpoint-%s.pth' % epoch_name)
        if (epoch == 'best'):
            # Bug fix: the original appended the epoch-tagged path
            # unconditionally, so 'checkpoint-best.pth' was written twice.
            checkpoint_paths = [epoch_path]
        else:
            checkpoint_paths = [output_dir / 'checkpoint.pth', epoch_path]
        to_save = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'args': args}
        if (loss_scaler is not None):
            to_save['scaler'] = loss_scaler.state_dict()
        if (model_ema is not None):
            to_save['model_ema'] = get_state_dict(model_ema)
        if (optimizer_disc is not None):
            to_save['optimizer_disc'] = optimizer_disc.state_dict()
        for checkpoint_path in checkpoint_paths:
            save_on_master(to_save, checkpoint_path)
    else:
        # DeepSpeed handles sharding/IO itself; only pass the client state.
        client_state = {'epoch': epoch}
        if (model_ema is not None):
            client_state['model_ema'] = get_state_dict(model_ema)
        model.save_checkpoint(save_dir=args.output_dir, tag=('checkpoint-%s' % epoch_name), client_state=client_state)
def generate_images(images_dir, examples_dir, pattern='*.py'):
    """Solve every example under *examples_dir* and render gallery images.

    For each example file matching *pattern* (and not excluded by the
    module-level omit lists), either run the custom 'sfepy-view-options'
    commands or solve the PDE, then render one figure per configured view
    into *images_dir*. Intermediate results go to a temporary directory
    that is removed at the end.
    """
    from sfepy.applications import solve_pde
    from sfepy.solvers.ts_solvers import StationarySolver
    prefix = output.prefix
    # Temporary directory for solver output files.
    output_dir = tempfile.mkdtemp()
    trunk = os.path.join(output_dir, 'result')
    options = Struct(output_filename_trunk=trunk, output_format='vtk', save_ebc=False, save_ebc_nodes=False, save_regions=False, save_regions_as_groups=False, solve_not=False)
    # Default view settings; per-example custom options may override them.
    view_options = Struct(step=0, fields=[], fields_map=[], outline=False, isosurfaces=0, show_edges=False, warp=None, factor=1.0, opacity=1.0, color_map='viridis', axes_options=[], axes_visibility=False, grid_vector1=None, grid_vector2=None, max_plots=3, show_labels=False, label_position=[(- 1), (- 1), 0, 0.2], scalar_bar_size=[0.15, 0.06], scalar_bar_position=[0.04, 0.92, 0, (- 1.5)], show_scalar_bars=True, camera=[225, 75, 1], camera_position=None, view_2d=False, force_view_3d=False)
    ensure_path((images_dir + os.path.sep))
    for ex_filename in locate_files(pattern, examples_dir):
        # Module-level omit lists filter out examples with no gallery image.
        if _omit(ex_filename, (omits + omit_images), omit_dirs):
            continue
        output.level = 0
        output.prefix = prefix
        ebase = ex_filename.replace(examples_dir, '')[1:]
        output(('trying "%s"...' % ebase))
        _ebase = ebase.replace(os.path.sep, '/')
        custom_options = custom.get(_ebase)
        if (custom_options and ('sfepy-view-options' in custom_options)):
            # Example driven by explicit shell commands instead of solve_pde().
            try:
                _apply_commands(custom_options, ebase, images_dir)
                filename = custom_options.get('result')
                dim = custom_options.get('dim')
                custom_view_options = custom_options['sfepy-view-options']
            except subprocess.CalledProcessError:
                filename = None
                output('***** failed! *****')
        else:
            custom_view_options = custom_options
            try:
                (problem, state) = solve_pde(ex_filename, options=options)
                # Pick the output suffix: none for stationary problems,
                # the last time step otherwise.
                try:
                    tsolver = problem.get_solver()
                except ValueError:
                    suffix = None
                else:
                    if isinstance(tsolver, StationarySolver):
                        suffix = None
                    else:
                        suffix = (tsolver.ts.suffix % (tsolver.ts.n_step - 1))
                filename = problem.get_output_name(suffix=suffix)
                dim = problem.get_dim()
            except KeyboardInterrupt:
                raise
            # NOTE(review): bare `except:` also swallows SystemExit etc.;
            # consider narrowing to `except Exception:`.
            except:
                filename = None
                output('***** failed! *****')
        if (filename is not None):
            if (custom_view_options is not None):
                views = apply_view_options(custom_view_options, view_options)
            else:
                views = {'': view_options.copy()}
            for (suffix, kwargs) in views.items():
                # 1D/2D results are rendered with the 2D camera unless the
                # example explicitly forces a 3D view.
                if ((dim in (1, 2)) and (not kwargs.force_view_3d)):
                    kwargs.view_2d = True
                    kwargs.scalar_bar_position = [0.04, 0.92, 1.7, 0]
                    if (kwargs.grid_vector1 is None):
                        kwargs.grid_vector1 = [1.2, 0, 0]
                    if (kwargs.grid_vector2 is None):
                        kwargs.grid_vector2 = [0, (- 1.2), 0]
                fig_filename = _get_fig_filename(ebase, images_dir, suffix)
                fname = edit_filename(filename, suffix=suffix)
                output(('displaying results from "%s"' % fname))
                disp_name = fig_filename.replace(sfepy.data_dir, '')
                output(('to "%s"...' % disp_name.lstrip(os.path.sep)))
                run_resview_plot(fname, fig_filename, kwargs)
                output('...done')
    remove_files(output_dir)
    output('...done')
def replay_trace():
    """Replay the module-level job trace one time step per yield.

    Starting at ``initial_time_trace``, each iteration yields the list of
    entries from ``jobs_by_start_time`` whose start time has been reached,
    then advances simulated time by one. Terminates once every job has
    been emitted.
    """
    now = initial_time_trace
    next_job = 0
    while (next_job < len(jobs_by_start_time)):
        arrived = []
        try:
            # Drain every job whose start time is <= the current time.
            while (jobs_by_start_time[next_job][1] <= now):
                arrived.append(jobs_by_start_time[next_job])
                next_job += 1
        except IndexError:
            # Ran off the end of the trace; the outer loop will stop.
            pass
        (yield arrived)
        now += 1
class Narrow(Module):
    """Module selecting a sub-range of the input along one dimension.

    A negative ``length`` counts from the end of the dimension, mirroring
    the classic (Lua) torch Narrow semantics.
    """

    def __init__(self, dimension, offset, length=1):
        super(Narrow, self).__init__()
        self.dimension = dimension
        self.index = offset
        self.length = length

    def _span(self, input):
        # Resolve the effective slice length for this input; negative
        # lengths are relative to the end of the narrowed dimension.
        if self.length >= 0:
            return self.length
        return input.size(self.dimension) - self.index + self.length + 1

    def updateOutput(self, input):
        """Copy the narrowed slice of *input* into self.output."""
        sliced = input.narrow(self.dimension, self.index, self._span(input))
        self.output = self.output.type_as(sliced)
        self.output.resize_as_(sliced).copy_(sliced)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Scatter *gradOutput* back into a zero gradient of input's shape."""
        span = self._span(input)
        self.gradInput = self.gradInput.type_as(input)
        self.gradInput.resize_as_(input).zero_()
        self.gradInput.narrow(self.dimension, self.index, span).copy_(gradOutput)
        return self.gradInput
class AFLModel(BaseModel):
    """Peewee ORM model recording one AFL fuzzer instance."""
    # Seed and output locations (presumably AFL's -i/-o directories — confirm
    # against the code creating these rows).
    seed = peewee.CharField()
    output = peewee.CharField()
    # Group this fuzzer instance belongs to.
    group = peewee.CharField()
    # Target program and the argument string it is fuzzed with.
    program = peewee.CharField()
    argument = peewee.CharField()
    # Whether this instance is the campaign master.
    master = peewee.BooleanField()
    # OS process id of the running fuzzer.
    pid = peewee.IntegerField()
    # Unique identifier of this fuzzer instance.
    fuzzer_id = peewee.IntegerField(unique=True)
def print_network(model, name, out_file=None):
    """Report a model's name, architecture and parameter count.

    Writes three lines (name, repr(model), parameter total) to stdout, or
    to *out_file* when a path is given.
    """
    num_params = sum(p.numel() for p in model.parameters())
    summary = '{}\n{}\nThe number of parameters: {}\n'.format(name, model, num_params)
    if out_file is None:
        # end='' keeps the output byte-identical to three print() calls.
        print(summary, end='')
    else:
        with open(out_file, 'w') as f:
            f.write(summary)
class SpeakerDependentTAVConfig(Config):
    """Config for the speaker-dependent setting using all three target
    modalities (Text + Audio + Video)."""
    # Enable every target modality.
    use_target_text = True
    use_target_audio = True
    use_target_video = True
    # SVM regularisation parameter C used for this setting.
    svm_c = 10.0
class ModulatedDeformConvPack(ModulatedDeformConv):
    """Modulated deformable convolution that predicts its own offsets/mask.

    A side convolution produces, per sampling location, the x/y offsets and
    a modulation mask which are fed into ``modulated_deform_conv`` together
    with this layer's own weights.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=True):
        super(ModulatedDeformConvPack, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, deformable_groups, bias)
        # 3 channels per sampling point and deformable group:
        # (offset_x, offset_y, mask).
        self.conv_offset_mask = nn.Conv2d(self.in_channels, (((self.deformable_groups * 3) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=True)
        # Zero-init: offsets start at 0 and the mask at sigmoid(0) = 0.5.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()
    def forward(self, x):
        offset_mask = self.conv_offset_mask(x)
        # Split the prediction into its three per-point components.
        (offset_x, offset_y, mask) = torch.chunk(offset_mask, 3, dim=1)
        offset = torch.cat((offset_x, offset_y), dim=1)
        # Modulation scalars squashed into (0, 1).
        mask = mask.sigmoid()
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups)
class ForScope(ControlFlow):
    """Control-flow element rendering an SDFG loop as a C++ ``for`` statement."""
    # Name of the loop iteration variable.
    itervar: str
    # The loop guard state (its parent SDFG is used for unparsing).
    guard: SDFGState
    # Initialisation expression source (None when there is no init part).
    init: str
    # Loop condition (None produces an empty condition).
    condition: CodeBlock
    # Update expression source (None produces an empty update part).
    update: str
    # The loop body block.
    body: GeneralBlock
    # Interstate edges entering the loop; they carry the init assignments.
    init_edges: List[InterstateEdge]
    def as_cpp(self, codegen, symbols) -> str:
        """Emit ``preinit; for (init; cond; update) { body }`` C++ code."""
        sdfg = self.guard.parent
        defined_vars = codegen.dispatcher.defined_vars
        init = ''
        if (self.init is not None):
            if defined_vars.has(self.itervar):
                # Already declared: plain assignment in the init clause.
                init = self.itervar
            else:
                # Declare the iteration variable with its inferred type.
                init = f'{symbols[self.itervar]} {self.itervar}'
            init += (' = ' + unparse_interstate_edge(self.init_edges[0].data.assignments[self.itervar], sdfg, codegen=codegen))
        preinit = ''
        if self.init_edges:
            # Assignments other than the loop variable must run before the loop.
            for edge in self.init_edges:
                for (k, v) in edge.data.assignments.items():
                    if (k != self.itervar):
                        cppinit = unparse_interstate_edge(v, sdfg, codegen=codegen)
                        preinit += f'''{k} = {cppinit};
'''
        if (self.condition is not None):
            cond = unparse_interstate_edge(self.condition.code[0], sdfg, codegen=codegen)
        else:
            cond = ''
        update = ''
        if (self.update is not None):
            update = f'{self.itervar} = {self.update}'
        expr = f'''{preinit}
for ({init}; {cond}; {update}) {{
'''
        expr += _clean_loop_body(self.body.as_cpp(codegen, symbols))
        expr += '\n}\n'
        return expr
    def first_state(self) -> SDFGState:
        # NOTE(review): likely a @property in the original source (decorators
        # appear stripped in this recovered file) — confirm upstream.
        return self.guard
    def children(self) -> List[ControlFlow]:
        return [self.body]
def bi_interaction(x_h, x_l):
    """Bidirectional fusion of a high- and a low-resolution feature map.

    Each map is added to the other one upsampled (via the module-level
    ``upsample``) to its own spatial size; returns (o_h, o_l).
    """
    high_size = (int(x_h.shape[-2]), int(x_h.shape[-1]))
    low_size = (int(x_l.shape[-2]), int(x_l.shape[-1]))
    out_high = x_h + upsample(x_l, high_size)
    out_low = x_l + upsample(x_h, low_size)
    return (out_high, out_low)
class CommonVoiceDataset(Dataset):
    """Common Voice speech dataset read from tab-separated split files.

    Each split file is a TSV whose first row is a header and whose first two
    columns are the audio file name (relative to *path*) and its transcript;
    transcripts are tokenised with *tokenizer*.
    """

    def __init__(self, split, tokenizer, bucket_size, path, ascending=False, ratio=1.0, offset=0, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Bug fix: the accumulators were previously re-created inside the
        # loop over split files, so every file after the first discarded the
        # samples gathered so far. Initialise them once, up front.
        (file_list, text) = ([], [])
        for s in split:
            with open(s, 'r') as fp:
                rows = csv.reader(fp, delimiter='\t')
                for (i, row) in enumerate(rows):
                    if (i == 0):
                        continue  # skip the TSV header row
                    file_list.append(join(path, row[0]))
                    text.append(tokenizer.encode(row[1]))
        print(f'Found {len(file_list)} samples.')
        if (ratio < 1.0):
            # Deterministic subsampling: keep every (1/ratio)-th sample,
            # starting at `offset`.
            print(f'Ratio = {ratio}, offset = {offset}')
            skip = int((1.0 / ratio))
            (file_list, text) = (file_list[offset::skip], text[offset::skip])
        total_len = 0.0
        for f in file_list:
            # NOTE(review): bytes / 32000 as seconds assumes 16-bit, 16 kHz
            # mono audio — confirm against the corpus format.
            total_len += (getsize(f) / 32000.0)
        print('Total audio len = {:.2f} mins = {:.2f} hours'.format((total_len / 60.0), (total_len / 3600.0)))
        (self.file_list, self.text) = (file_list, text)

    def __getitem__(self, index):
        """Return one sample, or a bucket of `bucket_size` samples."""
        if (self.bucket_size > 1):
            # Clamp so the bucket never runs past the end of the dataset.
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])

    def __len__(self):
        return len(self.file_list)
# NOTE(review): stray top-level call — almost certainly the remnant of a
# decorator line (e.g. `@..._module()`) that belonged to the class defined
# directly below and was stripped during source recovery; confirm against
# the original source before removing.
_module()
class ResNetDec(nn.Module):
    """ResNet-style decoder producing a single-channel output map.

    Four residual stages (256 -> 128 -> 64 -> midplanes channels), each one
    starting with a 2x nearest-neighbour upsample, followed by a 2x
    deconvolution and a final plain convolution down to one channel.
    """
    def __init__(self, block, layers, in_channels, kernel_size=3, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='LeakyReLU', negative_slope=0.2, inplace=True), with_spectral_norm=False, late_downsample=False):
        super().__init__()
        # `block` is passed as a string key; only the basic decoder block
        # is currently supported.
        if (block == 'BasicBlockDec'):
            block = BasicBlockDec
        else:
            raise NotImplementedError(f'{block} is not implemented.')
        self.kernel_size = kernel_size
        self.inplanes = in_channels
        # Wider final stage when the matching encoder downsampled late.
        self.midplanes = (64 if late_downsample else 32)
        self.layer1 = self._make_layer(block, 256, layers[0], conv_cfg, norm_cfg, act_cfg, with_spectral_norm)
        self.layer2 = self._make_layer(block, 128, layers[1], conv_cfg, norm_cfg, act_cfg, with_spectral_norm)
        self.layer3 = self._make_layer(block, 64, layers[2], conv_cfg, norm_cfg, act_cfg, with_spectral_norm)
        self.layer4 = self._make_layer(block, self.midplanes, layers[3], conv_cfg, norm_cfg, act_cfg, with_spectral_norm)
        # Deconvolution doubles the spatial size before the output conv.
        self.conv1 = ConvModule(self.midplanes, 32, 4, stride=2, padding=1, conv_cfg=dict(type='Deconv'), norm_cfg=norm_cfg, act_cfg=act_cfg, with_spectral_norm=with_spectral_norm)
        self.conv2 = ConvModule(32, 1, self.kernel_size, padding=(self.kernel_size // 2), act_cfg=None)
    def init_weights(self):
        """Initialise norm layers; zero the last BN weight of each block."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                constant_init(m.weight, 1)
                constant_init(m.bias, 0)
        for m in self.modules():
            if isinstance(m, BasicBlockDec):
                # Zero-init the residual branch's final BN weight.
                constant_init(m.conv2.bn.weight, 0)
    def _make_layer(self, block, planes, num_blocks, conv_cfg, norm_cfg, act_cfg, with_spectral_norm):
        # First block upsamples (stride=2 with a nearest-neighbour + 1x1 conv
        # shortcut); the remaining blocks keep the resolution.
        upsample = nn.Sequential(nn.UpsamplingNearest2d(scale_factor=2), ConvModule(self.inplanes, (planes * block.expansion), 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None, with_spectral_norm=with_spectral_norm))
        layers = [block(self.inplanes, planes, kernel_size=self.kernel_size, stride=2, interpolation=upsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_spectral_norm=with_spectral_norm)]
        self.inplanes = (planes * block.expansion)
        for _ in range(1, num_blocks):
            layers.append(block(self.inplanes, planes, kernel_size=self.kernel_size, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg, with_spectral_norm=with_spectral_norm))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Run the four decoder stages and the two output convolutions."""
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.conv1(x)
        x = self.conv2(x)
        return x
def _variables_recursive(R, include=None, exclude=None):
    """Return the degree-one generators of the ring *R*.

    By default the generators come from ``R.variable_names_recursive()``
    (falling back to ``R.gens()``, then to an empty list). *include*
    overrides the generator list; *exclude* removes elements from it. The
    ring's multiplicative identity is always filtered out. Specifying both
    *include* and *exclude* raises RuntimeError.
    """
    if include is not None and exclude is not None:
        raise RuntimeError('include and exclude cannot both be specified')
    if include is not None:
        generators = [R(g) for g in include]
    else:
        # Coercing the names can itself raise AttributeError, so the whole
        # comprehension stays inside the try block.
        try:
            generators = [R(g) for g in R.variable_names_recursive()]
        except AttributeError:
            try:
                generators = R.gens()
            except (NotImplementedError, AttributeError):
                generators = []
    if exclude is not None:
        generators = [g for g in generators if g not in exclude]
    return [g for g in generators if g != R.one()]
def str_visible_len(s):
    """Return the number of visible characters of *s*.

    ANSI/CSI terminal escape sequences (colors, cursor movement, ...) are
    stripped before measuring, so the result matches the on-screen width
    in characters.
    """
    import re
    ansi_escape = re.compile('[\x1b\x9b][\\[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-PRZcf-nqry=><]')
    return len(ansi_escape.sub('', s))
def test_testsuite_statement_checked_coverage_calculation(plus_three_test):
    """Checked-statement coverage of a suite holding the plus-three test
    case should be roughly 4 of 8 statements."""
    module_name = 'tests.fixtures.linecoverage.plus'
    suite = tsc.TestSuiteChromosome()
    suite.add_test_case_chromosome(tcc.TestCaseChromosome(test_case=plus_three_test))
    config.configuration.statistics_output.coverage_metrics = [config.CoverageMetric.CHECKED]
    tracer = ExecutionTracer()
    tracer.current_thread_identifier = threading.current_thread().ident
    with install_import_hook(module_name, tracer):
        # Import (or re-import) the module under the instrumenting hook.
        importlib.reload(importlib.import_module(module_name))
        executor = TestCaseExecutor(tracer)
        executor.add_observer(StatementSlicingObserver(tracer))
        fitness = TestSuiteStatementCheckedCoverageFunction(executor)
        assert fitness.compute_coverage(suite) == pytest.approx((4 / 8), 0.1, 0.1)
def t_div(u: fenics.Function, n: fenics.FacetNormal) -> ufl_expr.Expr:
    """Tangential divergence of *u* on a surface with normal *n*.

    Computed as the full divergence minus the normal-normal component of
    the gradient, i.e. div(u) - n . (grad(u) n).
    """
    normal_component = fenics.inner(fenics.grad(u) * n, n)
    return fenics.div(u) - normal_component
# NOTE(review): stray top-level expression — looks like the remnant of a
# stripped `@require_torch`-style decorator for the test class defined
# directly below; confirm against the original source before removing.
_torch
class MLukeTokenizerIntegrationTests(unittest.TestCase):
tokenizer_class = MLukeTokenizer
from_pretrained_kwargs = {'cls_token': '<s>'}
def setUpClass(cls):
cls.tokenizer = MLukeTokenizer.from_pretrained('studio-ousia/mluke-base', return_token_type_ids=True)
cls.entity_classification_tokenizer = MLukeTokenizer.from_pretrained('studio-ousia/mluke-base', return_token_type_ids=True, task='entity_classification')
cls.entity_pair_tokenizer = MLukeTokenizer.from_pretrained('studio-ousia/mluke-base', return_token_type_ids=True, task='entity_pair_classification')
cls.entity_span_tokenizer = MLukeTokenizer.from_pretrained('studio-ousia/mluke-base', return_token_type_ids=True, task='entity_span_classification')
    def test_single_text_no_padding_or_truncation(self):
        """Single sentence with entity annotations, no padding/truncation:
        checks the decoded tokens and all entity-related encoding fields."""
        tokenizer = self.tokenizer
        sentence = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and (Afghanistan).'
        entities = ['en:ISO 639-3', 'DUMMY_ENTITY', 'ja:', 'en:Afghanistan']
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]
        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)
        # Full decoded sequence plus the token slices covering each span.
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s> ISO 639-3 uses the code fas for the dialects spoken across Iran and ( Afghanistan ).</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][1:5], spaces_between_special_tokens=False), 'ISO 639-3')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][17], spaces_between_special_tokens=False), 'Iran')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][19:25], spaces_between_special_tokens=False), '')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][26], spaces_between_special_tokens=False), 'Afghanistan')
        # Entity ids resolve through the vocabulary; unknowns map to [UNK].
        self.assertEqual(encoding['entity_ids'], [tokenizer.entity_vocab['en:ISO 639-3'], tokenizer.entity_vocab['[UNK]'], tokenizer.entity_vocab['ja:'], tokenizer.entity_vocab['en:Afghanistan']])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0, 0])
        # Per-entity token positions, padded with -1.
        self.assertEqual(encoding['entity_position_ids'], [[1, 2, 3, 4, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [17, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [19, 20, 21, 22, 23, 24, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [26, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])
    def test_single_text_only_entity_spans_no_padding_or_truncation(self):
        """Same single-sentence scenario, verifying the span-driven fields
        (decoded slices, entity ids and position lists)."""
        tokenizer = self.tokenizer
        sentence = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and (Afghanistan).'
        entities = ['en:ISO 639-3', 'DUMMY_ENTITY', 'ja:', 'en:Afghanistan']
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]
        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s> ISO 639-3 uses the code fas for the dialects spoken across Iran and ( Afghanistan ).</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][1:5], spaces_between_special_tokens=False), 'ISO 639-3')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][17], spaces_between_special_tokens=False), 'Iran')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][20:25], spaces_between_special_tokens=False), '')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][26], spaces_between_special_tokens=False), 'Afghanistan')
        self.assertEqual(encoding['entity_ids'], [tokenizer.entity_vocab['en:ISO 639-3'], tokenizer.entity_vocab['[UNK]'], tokenizer.entity_vocab['ja:'], tokenizer.entity_vocab['en:Afghanistan']])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0, 0])
        # Per-entity token positions, padded with -1.
        self.assertEqual(encoding['entity_position_ids'], [[1, 2, 3, 4, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [17, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [19, 20, 21, 22, 23, 24, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [26, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])
    def test_single_text_padding_pytorch_tensors(self):
        """Padding to fixed lengths with return_tensors='pt': only the
        resulting tensor shapes are verified."""
        tokenizer = self.tokenizer
        sentence = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and (Afghanistan).'
        entities = ['en:ISO 639-3', 'DUMMY_ENTITY', 'ja:', 'en:Afghanistan']
        spans = [(0, 9), (59, 63), (68, 75), (77, 88)]
        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True, padding='max_length', max_length=30, max_entity_length=16, return_tensors='pt')
        # Token-level fields padded to max_length=30.
        self.assertEqual(encoding['input_ids'].shape, (1, 30))
        self.assertEqual(encoding['attention_mask'].shape, (1, 30))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 30))
        # Entity-level fields padded to max_entity_length=16.
        self.assertEqual(encoding['entity_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 16))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))
    def test_text_pair_no_padding_or_truncation(self):
        """Sentence pair with per-sentence entity annotations: checks the
        decoded pair sequence and the merged entity fields."""
        tokenizer = self.tokenizer
        sentence = 'ISO 639-3 uses the code fas'
        sentence_pair = 'for the dialects spoken across Iran and (Afghanistan).'
        entities = ['en:ISO 639-3']
        entities_pair = ['DUMMY_ENTITY', 'ja:', 'en:Afghanistan']
        spans = [(0, 9)]
        spans_pair = [(31, 35), (40, 47), (49, 60)]
        encoding = tokenizer(sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True)
        # Pair encoding inserts the </s></s> separator between sentences.
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s> ISO 639-3 uses the code fas</s></s> for the dialects spoken across Iran and ( Afghanistan ).</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][1:5], spaces_between_special_tokens=False), 'ISO 639-3')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][19], spaces_between_special_tokens=False), 'Iran')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][21:27], spaces_between_special_tokens=False), '')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][28], spaces_between_special_tokens=False), 'Afghanistan')
        self.assertEqual(encoding['entity_ids'], [tokenizer.entity_vocab['en:ISO 639-3'], tokenizer.entity_vocab['[UNK]'], tokenizer.entity_vocab['ja:'], tokenizer.entity_vocab['en:Afghanistan']])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0, 0])
        # Per-entity token positions (second sentence offset by the pair
        # separator), padded with -1.
        self.assertEqual(encoding['entity_position_ids'], [[1, 2, 3, 4, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [19, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [21, 22, 23, 24, 25, 26, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [28, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])
def test_text_pair_only_entity_spans_no_padding_or_truncation(self):
    """Tokenize a sentence pair with named entities and character spans;
    verify the decoded text, the entity ids, and the per-mention token
    position ids (right-padded with -1)."""
    tokenizer = self.tokenizer
    first = 'ISO 639-3 uses the code fas'
    second = 'for the dialects spoken across Iran and (Afghanistan).'
    encoding = tokenizer(
        first,
        second,
        entities=['en:ISO 639-3'],
        entities_pair=['DUMMY_ENTITY', 'ja:', 'en:Afghanistan'],
        entity_spans=[(0, 9)],
        entity_spans_pair=[(31, 35), (40, 47), (49, 60)],
        return_token_type_ids=True,
    )

    def decoded(piece):
        return tokenizer.decode(piece, spaces_between_special_tokens=False)

    self.assertEqual(
        decoded(encoding['input_ids']),
        '<s> ISO 639-3 uses the code fas</s></s> for the dialects spoken across Iran and ( Afghanistan ).</s>',
    )
    self.assertEqual(decoded(encoding['input_ids'][1:5]), 'ISO 639-3')
    self.assertEqual(decoded(encoding['input_ids'][19]), 'Iran')
    self.assertEqual(decoded(encoding['input_ids'][21:27]), '')
    self.assertEqual(decoded(encoding['input_ids'][28]), 'Afghanistan')
    vocab = tokenizer.entity_vocab
    self.assertEqual(
        encoding['entity_ids'],
        [vocab['en:ISO 639-3'], vocab['[UNK]'], vocab['ja:'], vocab['en:Afghanistan']],
    )

    def padded(positions):
        # A mention's token positions, right-padded with -1 to length 30.
        return positions + [-1] * (30 - len(positions))

    self.assertEqual(
        encoding['entity_position_ids'],
        [padded([1, 2, 3, 4]), padded([19]), padded([21, 22, 23, 24, 25, 26]), padded([28])],
    )
def test_text_pair_padding_pytorch_tensors(self):
    """Pad a sentence pair to fixed token/entity lengths and check that
    every returned PyTorch tensor has the expected shape."""
    tokenizer = self.tokenizer
    encoding = tokenizer(
        'ISO 639-3 uses the code fas',
        'for the dialects spoken across Iran and (Afghanistan).',
        entities=['en:ISO 639-3'],
        entities_pair=['DUMMY_ENTITY', 'ja:', 'en:Afghanistan'],
        entity_spans=[(0, 9)],
        entity_spans_pair=[(31, 35), (40, 47), (49, 60)],
        return_token_type_ids=True,
        padding='max_length',
        max_length=40,
        max_entity_length=16,
        return_tensors='pt',
    )
    # Token-level tensors are padded to max_length.
    for field in ('input_ids', 'attention_mask', 'token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 40))
    # Entity-level tensors are padded to max_entity_length.
    for field in ('entity_ids', 'entity_attention_mask', 'entity_token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 16))
    self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))
def test_entity_classification_no_padding_or_truncation(self):
    """The entity-classification tokenizer wraps the single target mention
    in <ent> markers and maps it to the [MASK] entity id."""
    tokenizer = self.entity_classification_tokenizer
    sentence = 'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.'
    encoding = tokenizer(sentence, entity_spans=[(15, 34)], return_token_type_ids=True)
    for field in ('input_ids', 'attention_mask', 'token_type_ids'):
        self.assertEqual(len(encoding[field]), 23)

    def decoded(piece):
        return tokenizer.decode(piece, spaces_between_special_tokens=False)

    self.assertEqual(
        decoded(encoding['input_ids']),
        '<s> Japanese is an<ent>East Asian language<ent>spoken by about 128 million people, primarily in Japan.</s>',
    )
    self.assertEqual(decoded(encoding['input_ids'][4:9]), '<ent>East Asian language<ent>')
    mask_id = tokenizer.entity_vocab['[MASK]']
    self.assertEqual(encoding['entity_ids'], [mask_id])
    self.assertEqual(encoding['entity_attention_mask'], [1])
    self.assertEqual(encoding['entity_token_type_ids'], [0])

    def padded(positions):
        # A mention's token positions, right-padded with -1 to length 30.
        return positions + [-1] * (30 - len(positions))

    self.assertEqual(encoding['entity_position_ids'], [padded([4, 5, 6, 7, 8])])
def test_entity_classification_padding_pytorch_tensors(self):
    """Default 'max_length' padding pads tokens to the model max (512)
    while the single entity stays length 1."""
    tokenizer = self.entity_classification_tokenizer
    encoding = tokenizer(
        'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.',
        entity_spans=[(15, 34)],
        return_token_type_ids=True,
        padding='max_length',
        return_tensors='pt',
    )
    for field in ('input_ids', 'attention_mask', 'token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 512))
    for field in ('entity_ids', 'entity_attention_mask', 'entity_token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 1))
    self.assertEqual(
        encoding['entity_position_ids'].shape,
        (1, tokenizer.max_entity_length, tokenizer.max_mention_length),
    )
def test_entity_pair_classification_no_padding_or_truncation(self):
    """The entity-pair tokenizer wraps the two target mentions in
    <ent>/<ent2> markers and maps them to [MASK]/[MASK2]."""
    tokenizer = self.entity_pair_tokenizer
    sentence = 'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.'
    encoding = tokenizer(sentence, entity_spans=[(0, 8), (84, 89)], return_token_type_ids=True)

    def decoded(piece):
        return tokenizer.decode(piece, spaces_between_special_tokens=False)

    self.assertEqual(
        decoded(encoding['input_ids']),
        '<s><ent>Japanese<ent>is an East Asian language spoken by about 128 million people, primarily in<ent2>Japan<ent2>.</s>',
    )
    self.assertEqual(decoded(encoding['input_ids'][1:4]), '<ent>Japanese<ent>')
    self.assertEqual(decoded(encoding['input_ids'][20:23]), '<ent2>Japan<ent2>')
    vocab = tokenizer.entity_vocab
    self.assertEqual(encoding['entity_ids'], [vocab['[MASK]'], vocab['[MASK2]']])
    self.assertEqual(encoding['entity_attention_mask'], [1, 1])
    self.assertEqual(encoding['entity_token_type_ids'], [0, 0])

    def padded(positions):
        # A mention's token positions, right-padded with -1 to length 30.
        return positions + [-1] * (30 - len(positions))

    self.assertEqual(encoding['entity_position_ids'], [padded([1, 2, 3]), padded([20, 21, 22])])
def test_entity_pair_classification_padding_pytorch_tensors(self):
    """Padded entity-pair encoding: tokens pad to max_length, entities stay
    at exactly two."""
    tokenizer = self.entity_pair_tokenizer
    encoding = tokenizer(
        'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.',
        entity_spans=[(0, 8), (84, 89)],
        return_token_type_ids=True,
        padding='max_length',
        max_length=30,
        return_tensors='pt',
    )
    for field in ('input_ids', 'attention_mask', 'token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 30))
    for field in ('entity_ids', 'entity_attention_mask', 'entity_token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 2))
    self.assertEqual(
        encoding['entity_position_ids'].shape,
        (1, tokenizer.max_entity_length, tokenizer.max_mention_length),
    )
def test_entity_span_classification_no_padding_or_truncation(self):
    """The span-classification tokenizer leaves the text untouched, assigns
    [MASK] to every span, and reports span start/end token positions."""
    tokenizer = self.entity_span_tokenizer
    sentence = 'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.'
    encoding = tokenizer(sentence, entity_spans=[(0, 8), (15, 34), (84, 89)], return_token_type_ids=True)
    self.assertEqual(
        tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False),
        '<s> Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.</s>',
    )
    mask_id = tokenizer.entity_vocab['[MASK]']
    self.assertEqual(encoding['entity_ids'], [mask_id] * 3)
    self.assertEqual(encoding['entity_attention_mask'], [1] * 3)
    self.assertEqual(encoding['entity_token_type_ids'], [0] * 3)

    def padded(positions):
        # A mention's token positions, right-padded with -1 to length 30.
        return positions + [-1] * (30 - len(positions))

    self.assertEqual(encoding['entity_position_ids'], [padded([1]), padded([4, 5, 6]), padded([18])])
    self.assertEqual(encoding['entity_start_positions'], [1, 4, 18])
    self.assertEqual(encoding['entity_end_positions'], [1, 6, 18])
def test_entity_span_classification_padding_pytorch_tensors(self):
    """Padded span-classification encoding: tokens pad to max_length and all
    entity-level tensors (including start/end positions) pad to
    max_entity_length."""
    tokenizer = self.entity_span_tokenizer
    encoding = tokenizer(
        'Japanese is an East Asian language spoken by about 128 million people, primarily in Japan.',
        entity_spans=[(0, 8), (15, 34), (84, 89)],
        return_token_type_ids=True,
        padding='max_length',
        max_length=30,
        max_entity_length=16,
        return_tensors='pt',
    )
    for field in ('input_ids', 'attention_mask', 'token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 30))
    for field in ('entity_ids', 'entity_attention_mask', 'entity_token_type_ids'):
        self.assertEqual(encoding[field].shape, (1, 16))
    self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))
    for field in ('entity_start_positions', 'entity_end_positions'):
        self.assertEqual(encoding[field].shape, (1, 16))
def _process_image_files_batch(coder, thread_index, ranges, name, filenames, texts, labels, num_shards):
    """Process one thread's contiguous slice of images into TFRecord shards.

    Args:
        coder: image-coder helper forwarded to ``_process_image``.
        thread_index: this worker's index into ``ranges``.
        ranges: list of ``[start, end)`` image-index pairs, one per thread.
        name: split name used in shard filenames (e.g. 'train').
        filenames, texts, labels: parallel lists indexed by image number.
        num_shards: TOTAL shard count across all threads; must be divisible
            by the number of threads.
    """
    num_threads = len(ranges)
    # Each thread must write an equal number of whole shards.
    assert (not (num_shards % num_threads))
    num_shards_per_batch = int((num_shards / num_threads))
    # Split this thread's [start, end) range evenly into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], (num_shards_per_batch + 1)).astype(int)
    num_files_in_thread = (ranges[thread_index][1] - ranges[thread_index][0])
    counter = 0
    for s in xrange(num_shards_per_batch):
        # Global shard index yields names like 'train-00002-of-00010'.
        shard = ((thread_index * num_shards_per_batch) + s)
        output_filename = ('%s-%.5d-of-%.5d' % (name, shard, num_shards))
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        # NOTE(review): the writer is never explicitly closed; relies on GC
        # flushing the TFRecord file -- confirm this matches upstream intent.
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[(s + 1)], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            text = texts[i]
            (image_buffer, height, width) = _process_image(filename, coder)
            example = _convert_to_example(filename, image_buffer, label, text, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            # Progress heartbeat every 1000 images processed by this thread.
            if (not (counter % 1000)):
                print(('%s [thread %d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)))
                sys.stdout.flush()
        print(('%s [thread %d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file)))
        sys.stdout.flush()
        shard_counter = 0
    # NOTE(review): the final message says 'shards' but prints
    # num_files_in_thread (the image count) -- confirm intended wording.
    print(('%s [thread %d]: Wrote %d images to %d shards.' % (datetime.now(), thread_index, counter, num_files_in_thread)))
    sys.stdout.flush()
def load_soba_json(json_file, image_root, dataset_name=None):
    """Load a SOBA (shadow-object association) annotation JSON into the
    Detectron2 list-of-dicts dataset format.

    Args:
        json_file: path to the SOBA-format annotation file.
        image_root: directory containing the images referenced in the JSON.
        dataset_name: optional registered dataset name; when given, its
            MetadataCatalog entry is populated with class names, keypoint
            metadata, and id remapping tables.

    Returns:
        list[dict]: one record per image carrying 'annotations' (per-instance
        objects) and 'association_anno' (object-shadow association instances).
    """
    from pysobatools.soba import SOBA
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # The SOBA loader prints while parsing; silence its stdout.
    with contextlib.redirect_stdout(io.StringIO()):
        soba_api = SOBA(json_file)
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    id_map = None
    if (dataset_name is not None):
        # Populate dataset metadata: class names and Object/Shadow keypoints.
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(soba_api.getCatIds())
        association_ids = soba_api.getAssoIds()
        cats = soba_api.loadCats(cat_ids)
        association = soba_api.loadAsso(association_ids)
        thing_classes = [c['name'] for c in sorted(cats, key=(lambda x: x['id']))]
        association_classes = [c['name'] for c in sorted(association, key=(lambda x: x['id']))]
        meta.association_classes = association_classes
        meta.thing_classes = thing_classes
        meta.keypoint_names = ['Object', 'Shadow']
        meta.keypoint_flip_map = {'Object': 'Shadow'}
        meta.keypoint_connection_rules = [('Object', 'Shadow', (255, 255, 255))]
        # Remap non-contiguous category ids to [0, #categories).
        if (not ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids)))):
            if ('soba' not in dataset_name):
                logger.warning("\nCategory ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n")
            id_map = {v: i for (i, v) in enumerate(cat_ids)}
        association_id_map = {v: i for (i, v) in enumerate(association_ids)}
        meta.association_dataset_id_to_contiguous_id = association_id_map
        meta.thing_dataset_id_to_contiguous_id = id_map
    # Gather per-image annotation lists in a fixed (sorted) image order.
    img_ids = sorted(list(soba_api.imgs.keys()))
    imgs = soba_api.loadImgs(img_ids)
    anns = [soba_api.imgToAnns[img_id] for img_id in img_ids]
    assoAnns = [soba_api.imgToAssoAnns[img_id] for img_id in img_ids]
    if ('minival' not in json_file):
        # Sanity check (skipped for minival splits, which may repeat ids).
        ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
        # NOTE(review): asso_ann_ids is collected but never checked for
        # uniqueness -- confirm whether a matching assert was intended.
        asso_ann_ids = [assoAnn['id'] for anns_per_image in assoAnns for assoAnn in anns_per_image]
        assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique!".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    imgs_asso_anns = list(zip(imgs, assoAnns))
    logger.info('Loaded {} images in SOBA format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    DENSEPOSE_KEYS = ['dp_x', 'dp_y', 'dp_I', 'dp_U', 'dp_V', 'dp_masks']
    num_instances_without_valid_segmentation = 0
    for ((img_dict, anno_dict_list), (_, asso_anno_dict_list)) in zip(imgs_anns, imgs_asso_anns):
        record = {}
        record['file_name'] = os.path.join(image_root, img_dict['file_name'])
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        image_id = record['image_id'] = img_dict['id']
        # --- per-instance object annotations ---
        objs = []
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            assert (anno.get('ignore', 0) == 0)
            obj = {field: anno[field] for field in (['iscrowd', 'bbox', 'keypoints', 'category_id', 'association', 'light', 'relation'] + DENSEPOSE_KEYS) if (field in anno)}
            segm = anno.get('segmentation', None)
            if segm:
                if (not isinstance(segm, dict)):
                    # Polygon format: drop invalid polygons (< 3 points).
                    segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                    if (len(segm) == 0):
                        num_instances_without_valid_segmentation += 1
                        continue
                obj['segmentation'] = segm
            keypts = anno.get('keypoints', None)
            if keypts:
                # Shift x/y by 0.5 (COCO stores integer pixel indices); every
                # third value is the visibility flag and is left untouched.
                for (idx, v) in enumerate(keypts):
                    if ((idx % 3) != 2):
                        keypts[idx] = (v + 0.5)
                obj['keypoints'] = keypts
            obj['bbox_mode'] = BoxMode.XYWH_ABS
            if id_map:
                obj['category_id'] = id_map[obj['category_id']]
            objs.append(obj)
        record['annotations'] = objs
        # --- object-shadow association annotations (same processing) ---
        objs = []
        for anno in asso_anno_dict_list:
            assert (anno['image_id'] == image_id)
            assert (anno.get('ignore', 0) == 0)
            obj = {field: anno[field] for field in (['iscrowd', 'bbox', 'light', 'keypoints', 'category_id'] + DENSEPOSE_KEYS) if (field in anno)}
            segm = anno.get('segmentation', None)
            if segm:
                if (not isinstance(segm, dict)):
                    segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                    if (len(segm) == 0):
                        num_instances_without_valid_segmentation += 1
                        continue
                obj['segmentation'] = segm
            keypts = anno.get('keypoints', None)
            if keypts:
                for (idx, v) in enumerate(keypts):
                    if ((idx % 3) != 2):
                        keypts[idx] = (v + 0.5)
                obj['keypoints'] = keypts
            obj['bbox_mode'] = BoxMode.XYWH_ABS
            if id_map:
                obj['category_id'] = id_map[obj['category_id']]
            objs.append(obj)
        record['association_anno'] = objs
        dataset_dicts.append(record)
    if (num_instances_without_valid_segmentation > 0):
        logger.warn('Filtered out {} instances without valid segmentation. There might be issues in your dataset generation process.'.format(num_instances_without_valid_segmentation))
    return dataset_dicts
# NOTE(review): this bare call looks like a stripped decorator (upstream
# taichi tests use `@test_utils.test()` above each test function) -- confirm.
_utils.test()
def test_listcomp():
    """Exercise nested Python list comprehensions inside Taichi code.

    NOTE(review): upstream this test decorates ``identity`` with @ti.func
    and ``foo`` with @ti.kernel; the decorators appear stripped here.
    """
    def identity(dt, n: ti.template()):
        # Build an n x n identity matrix of dtype `dt` with a nested listcomp.
        return ti.Matrix([[ti.cast(int((i == j)), dt) for j in range(n)] for i in range(n)])
    def foo(n: ti.template()) -> ti.i32:
        a = identity(ti.i32, n)
        # Flatten the matrix rows into a single list.
        b = [j for i in a for j in i]
        ret = 0
        for i in ti.static(range(n)):
            for j in ti.static(range(n)):
                ret += ((i * j) * b[((i * n) + j)])
        return ret
    # b is the identity, so only i == j terms survive: 0 + 1 + 4 + 9 + 16.
    assert (foo(5) == (((1 + 4) + 9) + 16))
def draw(G, c, x, ax, draw_edge=True, font_size=0, pos=None, cmap=None, max_group_num=None, draw_nodes_kwd=None, draw_edges_kwd=None, draw_labels_kwd=None, layout_algorithm=None):
    """Draw a clustered graph: colored member nodes on top, small grey
    'residual' square nodes underneath, and optionally edges and labels.

    Args:
        G: networkx graph to draw.
        c, x: clustering data forwarded to ``classify_nodes`` /
            ``set_node_colors``.
        ax: matplotlib axes to draw on.
        draw_edge: also draw edges of the subgraph induced by the drawn nodes.
        font_size: draw node labels at this size when > 0.
        pos: optional precomputed node positions; computed with
            ``calc_node_pos`` when None.
        cmap: colormap forwarded to ``set_node_colors``.
        max_group_num: forwarded to ``classify_nodes``.
        draw_nodes_kwd / draw_edges_kwd / draw_labels_kwd: extra keyword args
            for the corresponding networkx draw helpers. Bug fix: these were
            mutable default arguments (shared dicts across calls); fresh
            defaults are now created per call. Passing an explicit dict
            behaves exactly as before.
        layout_algorithm: layout name forwarded to ``calc_node_pos``.

    Returns:
        tuple: ``(ax, pos)`` -- the axes drawn on and the positions used.
    """
    # Create fresh per-call defaults instead of shared mutable defaults.
    if draw_nodes_kwd is None:
        draw_nodes_kwd = {}
    if draw_edges_kwd is None:
        draw_edges_kwd = {'edge_color': '#adadad'}
    if draw_labels_kwd is None:
        draw_labels_kwd = {}
    (colored_nodes, muted_nodes, residuals) = classify_nodes(G, c, x, max_group_num)
    (node_colors, node_edge_colors) = set_node_colors(c, x, cmap, colored_nodes)
    if (pos is None):
        pos = calc_node_pos(G, layout_algorithm)
    # Main (colored) nodes, drawn on top of everything (zorder 3).
    nodes = nx.draw_networkx_nodes(G, pos, node_color=[node_colors[d] for d in colored_nodes], nodelist=colored_nodes, ax=ax, **draw_nodes_kwd)
    if (nodes is not None):
        nodes.set_zorder(3)
        nodes.set_edgecolor([node_edge_colors[r] for r in colored_nodes])
    # Residual nodes: light-grey squares at 10% node size, underneath (zorder 1).
    draw_nodes_kwd_residual = draw_nodes_kwd.copy()
    draw_nodes_kwd_residual['node_size'] = (0.1 * draw_nodes_kwd.get('node_size', 100))
    nodes = nx.draw_networkx_nodes(G, pos, node_color='#efefef', nodelist=residuals, node_shape='s', ax=ax, **draw_nodes_kwd_residual)
    if (nodes is not None):
        nodes.set_zorder(1)
        nodes.set_edgecolor('#4d4d4d')
    if draw_edge:
        nx.draw_networkx_edges(G.subgraph((colored_nodes + residuals)), pos, ax=ax, **draw_edges_kwd)
    if (font_size > 0):
        nx.draw_networkx_labels(G, pos, ax=ax, font_size=font_size, **draw_labels_kwd)
    ax.axis('off')
    return (ax, pos)
class OldNSZ(BaseSMTEncoder):
    """SMT encoder variant modelling LLVM fast-math flags by constraining the
    operands/result directly: nnan/ninf exclude NaN/Inf values and nsz
    excludes negative-zero operands."""

    def _float_binary_operator(self, term, op):
        """Encode a float binary op, adding flag-derived side conditions.

        Args:
            term: IR term carrying ``x``, ``y`` and a ``flags`` collection.
            op: binary callable over the two z3 FP values.

        Returns:
            The z3 expression ``op(x, y)``.
        """
        x = self.eval(term.x)
        y = self.eval(term.y)
        if ('nnan' in term.flags):
            # Neither operand nor the result may be NaN.
            self.add_defs(z3.Not(z3.fpIsNaN(x)), z3.Not(z3.fpIsNaN(y)), z3.Not(z3.fpIsNaN(op(x, y))))
        if ('ninf' in term.flags):
            # Neither operand nor the result may be infinite.
            self.add_defs(z3.Not(z3.fpIsInf(x)), z3.Not(z3.fpIsInf(y)), z3.Not(z3.fpIsInf(op(x, y))))
        if ('nsz' in term.flags):
            # Operands may not be negative zero.
            nz = z3.fpMinusZero(_ty_sort(self.type(term)))
            self.add_defs(z3.Not((x == nz)), z3.Not((y == nz)))
        # Bug fix: the original had a duplicated trailing `return op(x, y)`
        # (dead/redundant code); a single exit is equivalent.
        return op(x, y)
def visualize_ner_str(text, pipe, select=None, colors=None):
    """Run *pipe* over raw *text* and render the resulting document's named
    entities via ``visualize_ner_doc``."""
    visualize_ner_doc(pipe(text), pipe.lang, select, colors)
def callback_image(data):
    """ROS image-subscriber callback: run pose estimation on the incoming
    frame and publish the detected humans.

    Relies on module-level globals defined elsewhere in this file:
    ``cv_bridge``, ``tf_lock``, ``pose_estimator``, ``resize_out_ratio``,
    ``humans_to_msg`` and ``pub_pose``.
    """
    try:
        cv_image = cv_bridge.imgmsg_to_cv2(data, 'bgr8')
    except CvBridgeError as e:
        rospy.logerr(('[tf-pose-estimation] Converting Image Error. ' + str(e)))
        return
    # Non-blocking acquire: drop this frame if inference is already running.
    acquired = tf_lock.acquire(False)
    if (not acquired):
        return
    try:
        humans = pose_estimator.inference(cv_image, resize_to_default=True, upsample_size=resize_out_ratio)
    finally:
        tf_lock.release()
    # Echo the source image size and header so subscribers can map the
    # detections back onto the original frame.
    msg = humans_to_msg(humans)
    msg.image_w = data.width
    msg.image_h = data.height
    msg.header = data.header
    pub_pose.publish(msg)
def validate_kr_rrn(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate South Korean resident registration numbers (RRN).

    Accepts a single string, a (pandas or dask) Series, or a DataFrame.
    For a DataFrame, validates ``column`` when given, otherwise every cell.
    Returns a bool for a string input, element-wise booleans otherwise.
    """
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(rrn.is_valid)
        return df.applymap(rrn.is_valid)
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(rrn.is_valid)
    # Plain string input.
    return rrn.is_valid(df)
def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False):
    """Write random toy translation data into *data_dir*.

    Creates train/valid/test ``.in``/``.out`` files, each holding
    *num_examples* lines of 1..*maxlen* space-separated lowercase letters.
    When *alignment* is True, also writes ``.align`` files with random
    'src-tgt' index pairs per sentence pair.
    """

    def _write_split(filename):
        # Random codepoints in [97, 122] -> 'a'..'z'.
        codes = (97 + torch.floor((26 * torch.rand((num_examples * maxlen)))).int())
        with open(os.path.join(data_dir, filename), 'w') as out:
            cursor = 0
            for _ in range(num_examples):
                length = random.randint(1, maxlen)
                out.write(' '.join(map(chr, codes[cursor:(cursor + length)])) + '\n')
                cursor += length

    def _write_alignments(src_name, tgt_name, out_name):
        with open(os.path.join(data_dir, src_name), 'r') as src_f, open(os.path.join(data_dir, tgt_name), 'r') as tgt_f, open(os.path.join(data_dir, out_name), 'w') as out:
            for (src_line, tgt_line) in zip(src_f, tgt_f):
                n_src = len(src_line.split())
                n_tgt = len(tgt_line.split())
                mean_len = ((n_src + n_tgt) // 2)
                count = random.randint((mean_len // 2), (2 * mean_len))
                # Random token-index pairings between source and target.
                src_idx = torch.floor((torch.rand(count) * n_src)).int()
                tgt_idx = torch.floor((torch.rand(count) * n_tgt)).int()
                out.write(' '.join('{}-{}'.format(s, t) for (s, t) in zip(src_idx, tgt_idx)) + '\n')

    for split in ('train', 'valid', 'test'):
        _write_split(split + '.in')
        _write_split(split + '.out')
    if alignment:
        for split in ('train', 'valid', 'test'):
            _write_alignments(split + '.in', split + '.out', split + '.align')
class SGD(torch.optim.Optimizer):
    """SGD with momentum/Nesterov whose weight decay for conv parameters is
    overridden per task from the global ``config_task`` (separate decay
    tables for 3x3 and 1x1 kernels)."""
    def __init__(self, params, lr=0.1, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        # Nesterov requires classical momentum and no dampening.
        if (nesterov and ((momentum <= 0) or (dampening != 0))):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGD, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate the 'nesterov' option.
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable re-evaluating the model; its return
                value is passed through as the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if (p.grad is None):
                    continue
                d_p = p.grad.data
                param_state = self.state[p]
                siz = p.grad.size()
                # Conv weights (4-D grads): pick the task-specific decay by
                # kernel size (dim 2): 3 -> 3x3 table, 1 -> 1x1 table.
                if (len(siz) > 3):
                    if (siz[2] == 3):
                        weight_decay = config_task.decay3x3[config_task.task]
                    elif (siz[2] == 1):
                        weight_decay = config_task.decay1x1[config_task.task]
                if (weight_decay != 0):
                    # NOTE(review): legacy Tensor.add_(scalar, tensor) call
                    # signature (positional alpha); newer torch requires
                    # add_(tensor, alpha=scalar) -- confirm target version.
                    d_p.add_(weight_decay, p.data)
                if (momentum != 0):
                    if ('momentum_buffer' not in param_state):
                        # First step: seed the buffer with the raw gradient.
                        buf = param_state['momentum_buffer'] = d_p.clone()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_((1 - dampening), d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                p.data.add_((- group['lr']), d_p)
        return loss
def sample_procedural_objects(task_base, num_samples, mass=0.1):
    """Import *num_samples* randomly chosen procedural meshes into the scene.

    Each sampled asset directory provides a collision mesh (``*_coll.obj``,
    made dynamic/respondable but invisible) and a visual mesh (display-only,
    parented under the collision shape). Returns the collision shapes,
    each parented under *task_base*.
    """
    assets_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../assets/procedural_objects')
    chosen = np.random.choice(os.listdir(assets_dir), num_samples, replace=False)
    objects = []
    for name in chosen:
        mesh_dir = os.path.join(assets_dir, name)
        resp = Shape.import_mesh(os.path.join(mesh_dir, (name + '_coll.obj')), scaling_factor=0.005)
        vis = Shape.import_mesh(os.path.join(mesh_dir, (name + '.obj')), scaling_factor=0.005)
        # Collision shape simulates physics but is hidden; visual shape is
        # render-only and follows the collision shape via parenting.
        resp.set_renderable(False)
        vis.set_renderable(True)
        vis.set_parent(resp)
        vis.set_dynamic(False)
        vis.set_respondable(False)
        resp.set_dynamic(True)
        resp.set_mass(mass)
        resp.set_respondable(True)
        resp.set_model(True)
        resp.set_parent(task_base)
        objects.append(resp)
    return objects
def generate_data(data_dir='heterogeneous_example_data', verbose=True):
    """Run (or resume) the heterogeneous-effect simulation/estimation sweep.

    For every (sample size, illicit_exit, nonmedical_incidence_delta) cell,
    simulates an opioid RCT and then fits a causal forest, checkpointing all
    results into ``<data_dir>/heterogeneous_example_data.json`` after each
    cell so interrupted runs resume where they stopped.

    Args:
        data_dir: output directory (created if missing).
        verbose: print progress headers and per-cell counters.
    """
    if (data_dir[(- 1)] != '/'):
        data_dir += '/'
    if (not os.path.exists(data_dir)):
        os.makedirs(data_dir)
    data_filename = (data_dir + 'heterogeneous_example_data.json')
    sample_sizes = [100, 200, 500, 1000, 2000]
    illicit_exits = np.arange(0.28, (0.36 + 1e-06), 0.04)
    nonmedical_incidence_deltas = np.arange((- 0.12), ((- 0.1) + 1e-06), 0.01)
    # Resume from the cache when present; otherwise start fresh. The narrow
    # except replaces a bare `except:` (OSError covers a missing/unreadable
    # file; ValueError covers json.JSONDecodeError).
    try:
        with open(data_filename) as f:
            data = json.load(f)
        print('DATA FILE FOUND')
    except (OSError, ValueError):
        print('DATA FILE NOT FOUND')
        data = {}
    # Ensure the nested dict skeleton exists without clobbering cached cells.
    for n in sample_sizes:
        data[str(n)] = data.get(str(n), {})
        for illicit_exit in illicit_exits:
            key = '{:.2f}'.format(illicit_exit)
            # Bug fix: the original did `data.get(key, {})`, looking `key` up
            # at the TOP level, which reset data[str(n)][key] to {} and
            # defeated the resume logic; look inside data[str(n)] instead.
            data[str(n)][key] = data[str(n)].get(key, {})
            for delta in nonmedical_incidence_deltas:
                key_ = '{:.2f}'.format(delta)
                data[str(n)][key][key_] = data[str(n)][key].get(key_, {})
    # Phase 1: simulate every cell missing 'true_effects'.
    for n in sample_sizes:
        for (i, illicit_exit) in enumerate(illicit_exits):
            for (j, delta) in enumerate(nonmedical_incidence_deltas):
                one_run_data = data[str(n)]['{:.2f}'.format(illicit_exit)]['{:.2f}'.format(delta)]
                if ('true_effects' in one_run_data):
                    continue
                if verbose:
                    # Bug fix: the row stride is the inner-loop length
                    # (number of deltas), not the number of illicit exits.
                    sim_number = (((i * len(nonmedical_incidence_deltas)) + j) + 1)
                    total_num_sims = (len(illicit_exits) * len(nonmedical_incidence_deltas))
                    if (sim_number == 1):
                        print('\nSIMULATING: SAMPLE SIZE {}'.format(n))
                    print(' Running simulation {}/{}'.format(sim_number, total_num_sims))
                experiment = DynamicsExperiment(name='opioid_rct', description='Randomized experiment reducing nonmedical incidence of opioid use in 2015.', simulator=opioid, simulator_config=opioid.Config(illicit_exit=illicit_exit), intervention=opioid_intervention, state_sampler=sample_initial_states, propensity_scorer=0.5, outcome_extractor=overdose_deaths, covariate_builder=(lambda run: run.initial_state.values()))
                d = experiment.run(num_samples=n, nonmedical_incidence_delta=delta)
                # JSON cannot hold numpy arrays; store plain nested tuples.
                one_run_data['covariates'] = tuple((tuple(x) for x in d.covariates.astype(float)))
                one_run_data['treatments'] = tuple(d.treatments.astype(float))
                one_run_data['outcomes'] = tuple(d.outcomes.astype(float))
                one_run_data['true_effects'] = tuple(d.true_effects.astype(float))
                # Checkpoint after every cell (file now closed deterministically).
                with open(data_filename, 'w') as f:
                    json.dump(data, f)
    # Phase 2: estimate effects for every cell missing 'estimated_effects'.
    for n in sample_sizes:
        for (i, illicit_exit) in enumerate(illicit_exits):
            for (j, delta) in enumerate(nonmedical_incidence_deltas):
                one_run_data = data[str(n)]['{:.2f}'.format(illicit_exit)]['{:.2f}'.format(delta)]
                if ('estimated_effects' in one_run_data):
                    continue
                if verbose:
                    sim_number = (((i * len(nonmedical_incidence_deltas)) + j) + 1)
                    total_num_sims = (len(illicit_exits) * len(nonmedical_incidence_deltas))
                    if (sim_number == 1):
                        print('\nESTIMATING: SAMPLE SIZE {}'.format(n))
                    print(' Running estimation {}/{}'.format(sim_number, total_num_sims))
                estimate = causal_forest.estimate_treatment_effect(np.array(one_run_data['covariates']), np.array(one_run_data['treatments']), np.array(one_run_data['outcomes']))
                one_run_data['estimated_effects'] = tuple(estimate.individual_effects)
                with open(data_filename, 'w') as f:
                    json.dump(data, f)
def getsourcelines(obj):
    """Return ``(source_lines, start_lineno)`` for *obj*, evaluated inside
    the project's inspect context manager."""
    ctx = _InspectContextManager()
    with ctx:
        return inspect.getsourcelines(obj)
def register_Ns3ExtendedSupportedRatesIE_methods(root_module, cls):
    """Register constructors and methods of ns3::ExtendedSupportedRatesIE
    with PyBindGen (auto-generated ns-3 Python binding scaffolding)."""
    cls.add_constructor([param('ns3::ExtendedSupportedRatesIE const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SupportedRates *', 'rates')])
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint16_t', [], is_const=True)
    cls.add_method('Serialize', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetSupportedRates', 'void', [param('ns3::SupportedRates *', 'rates')])
    return
class Block35(nn.Module):
    """Inception-ResNet-A block (35x35 grid): three parallel convolution
    branches whose concatenated output is projected back to 320 channels
    and added to the input as a scaled residual, followed by ReLU."""

    def __init__(self, scale=1.0):
        super(Block35, self).__init__()
        self.scale = scale
        # Branch 0: 1x1 conv.
        self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)
        # Branch 1: 1x1 -> 3x3 convs.
        self.branch1 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1),
        )
        # Branch 2: 1x1 -> 3x3 -> 3x3 convs.
        self.branch2 = nn.Sequential(
            BasicConv2d(320, 32, kernel_size=1, stride=1),
            BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),
            BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1),
        )
        # Project the 32 + 32 + 64 = 128 concatenated channels back to 320.
        self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        branches = (self.branch0(x), self.branch1(x), self.branch2(x))
        residual = self.conv2d(torch.cat(branches, 1))
        return self.relu((residual * self.scale) + x)
def universal_sentence_embedding(sentences, mask, sqrt=True):
    """Mean-pool token embeddings over each sentence's unmasked positions.

    :param sentences: float tensor; the bmm below implies shape
        (batch, seq_len, dim).
    :param mask: (batch, seq_len) tensor whose nonzero entries mark real
        (non-padding) tokens.
    :param sqrt: divide the masked sum by sqrt(length) instead of length.
    :return: (batch, dim) tensor of pooled sentence embeddings.
    """
    # (batch, dim, seq) x (batch, seq, 1) -> masked sum per sentence.
    weights = mask.float().unsqueeze(-1)
    pooled = torch.bmm(sentences.permute(0, 2, 1), weights).squeeze(-1)
    lengths = mask.sum(dim=1).view(-1, 1).float()
    scale = lengths.sqrt() if sqrt else lengths
    pooled /= scale
    return pooled
def register_Ns3RipRte_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::RipRte with
    PyBindGen (auto-generated ns-3 Python binding scaffolding)."""
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::RipRte const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetNextHop', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetPrefix', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetRouteMetric', 'uint32_t', [], is_const=True)
    cls.add_method('GetRouteTag', 'uint16_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSubnetMask', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetNextHop', 'void', [param('ns3::Ipv4Address', 'nextHop')])
    cls.add_method('SetPrefix', 'void', [param('ns3::Ipv4Address', 'prefix')])
    cls.add_method('SetRouteMetric', 'void', [param('uint32_t', 'routeMetric')])
    cls.add_method('SetRouteTag', 'void', [param('uint16_t', 'routeTag')])
    cls.add_method('SetSubnetMask', 'void', [param('ns3::Ipv4Mask', 'subnetMask')])
    return
def register_Ns3DsrDsrOptionSR_methods(root_module, cls):
    """Register constructors, methods and the OPT_NUMBER attribute of
    ns3::dsr::DsrOptionSR with PyBindGen (auto-generated scaffolding)."""
    cls.add_constructor([param('ns3::dsr::DsrOptionSR const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetOptionNumber', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Process', 'uint8_t', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ptr< ns3::Packet >', 'dsrP'), param('ns3::Ipv4Address', 'ipv4Address'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Header const &', 'ipv4Header'), param('uint8_t', 'protocol'), param('bool &', 'isPromisc'), param('ns3::Ipv4Address', 'promiscSource')], is_virtual=True)
    cls.add_static_attribute('OPT_NUMBER', 'uint8_t const', is_const=True)
    return
def get_class_weight_from_file(n_class, weight_filename=None, add_bg_loss=False):
    """Build a per-class loss-weight tensor of length *n_class*.

    Without *weight_filename*, every class gets weight 1. Otherwise the CSV
    (columns ``class_id`` and ``weight``) is sorted by class id and its
    weights applied; when *add_bg_loss* is False the last class (assumed
    background) is zeroed out so it contributes no loss.
    """
    weights = torch.ones(n_class)
    if not weight_filename:
        return weights
    import pandas as pd
    table = pd.read_csv(weight_filename)
    table.sort_values('class_id', inplace=True)
    weights *= torch.FloatTensor(table.weight.values)
    if (not add_bg_loss):
        # Silence the background class (highest id) in the loss.
        weights[(n_class - 1)] = 0
    return weights
def _check_special_BC_cases(dg, n, check_letter_list, check_twist_list, hope_letter_list, conn_vert_list=False):
    """Check whether digraph ``dg`` realizes one of the special B/C-type
    quiver mutation classes described by the parallel ``check_letter_list``
    / ``check_twist_list`` / ``hope_letter_list`` entries; return the
    matching QuiverMutationType or the string 'unknown'.

    NOTE(review): helper from Sage's cluster-algebra mutation-type
    recognition code -- confirm list semantics against that module.
    """
    # Disconnected quivers never match one of these special cases.
    if (not dg.is_connected()):
        return 'unknown'
    # Classify dg as type A / affine A / D, optionally retrieving the
    # connecting vertices when the caller supplies candidate vertex lists.
    if conn_vert_list:
        mut_type = _connected_mutation_type_AAtildeD(dg, ret_conn_vert=True)
        if (not (mut_type == 'unknown')):
            (mut_type, conn_verts) = mut_type
    else:
        mut_type = _connected_mutation_type_AAtildeD(dg, ret_conn_vert=False)
        conn_verts = []
    if (not (mut_type == 'unknown')):
        # Try each candidate special case in turn.
        for i in range(len(check_letter_list)):
            check_letter = check_letter_list[i]
            check_twist = check_twist_list[i]
            hope_letter = hope_letter_list[i]
            if conn_vert_list:
                conn_vert = set(conn_vert_list[i])
            else:
                conn_vert = set()
            # Type A_3 without twist is isomorphic to D_3: downgrade the hope
            # letter and complement the connecting-vertex set accordingly.
            if ((hope_letter == 'D') and (mut_type._letter == 'A') and (mut_type._rank == 3) and (not mut_type._twist)):
                hope_letter = 'A'
                if conn_vert_list:
                    conn_verts = list(set(dg).difference(conn_verts))
            if ((mut_type._letter == hope_letter) and (not mut_type._twist) and conn_vert.issubset(conn_verts)):
                # Multi-letter families (e.g. 'BC') force twist 1; a twisted
                # (affine) type has rank one less than the vertex count.
                if (len(check_letter) > 1):
                    check_twist = 1
                if check_twist:
                    n -= 1
                return QuiverMutationType([check_letter, n, check_twist])
    return 'unknown'
def test_brent_underflow_in_root_bracketing():
    """Brent-style solvers must find the root of exp(x) - exp(root) even
    when exp() underflows or overflows near the bracket endpoints."""
    # Each scenario is (a, b, root): bracket endpoints and the known root.
    scenarios = (
        (-450.0, -350.0, -400.0),  # exp underflows toward the endpoints
        (350.0, 450.0, 400.0),     # exp overflows toward the endpoints
    )
    for a, b, root in scenarios:
        target = np.exp(root)

        def objective(x, target=target):
            return np.exp(x) - target

        for solver in (zeros.brenth, zeros.brentq):
            assert_allclose(root, solver(objective, a, b))
def run():
    """Distributed training entry point: builds dataloaders, model, loss,
    optimizer, scheduler and writers from ``config``, then delegates to
    ``Multi_Trainer_dist_MIR``.

    Relies on module-level ``args`` (rank / world size / master address
    from the CLI) and ``config``; not runnable standalone.
    """
    logger = config.get_logger('train')
    # Avoid HuggingFace tokenizer thread pools deadlocking forked workers.
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    if (config['visualizer']['type'] != ''):
        visualizer = config.initialize(name='visualizer', module=module_vis, exp_name=config['name'], web_dir=config._web_log_dir)
    else:
        visualizer = None
    torch.cuda.set_device(args.local_rank)
    # NOTE(review): 9339 appears to act as a sentinel for "no master
    # address configured" -- confirm against the argument parser.
    if (args.master_address != 9339):
        print('DistributedDataParallel')
        torch.distributed.init_process_group(backend='nccl', init_method='tcp://{}:{}'.format(args.master_address, args.master_port), rank=args.rank, world_size=args.world_size)
    device = torch.device(f'cuda:{args.local_rank}')
    if (args.rank == 0):
        print('world_size', args.world_size, flush=True)
        print('local_rank: ', args.local_rank, flush=True)
    tokenizer = transformers.AutoTokenizer.from_pretrained(config['arch']['args']['text_params']['model'], TOKENIZERS_PARALLELISM=False)
    (data_loader, valid_data_loader) = init_dataloaders(config, module_data)
    if (args.rank == 0):
        print('Train dataset: ', [x.n_samples for x in data_loader], ' samples')
        print('Val dataset: ', [x.n_samples for x in valid_data_loader], ' samples')
    model = config.initialize('arch', module_arch)
    if (args.local_rank == 0):
        logger.info(model)
    loss = config.initialize(name='loss', module=module_loss)
    metrics = [getattr(module_metric, met) for met in config['metrics']]
    # Only optimize parameters that still require gradients (frozen parts skipped).
    trainable_params = filter((lambda p: p.requires_grad), model.parameters())
    # Optimizer and (optionally) scheduler classes are looked up on the
    # `transformers` package by name.
    optimizer = config.initialize('optimizer', transformers, trainable_params)
    lr_scheduler = None
    if ('lr_scheduler' in config._config):
        if hasattr(transformers, config._config['lr_scheduler']['type']):
            lr_scheduler = config.initialize('lr_scheduler', transformers, optimizer)
        else:
            print('lr scheduler not found')
    if config['trainer']['neptune']:
        writer = ex
    else:
        writer = None
    # Rank 0 always gets a TensorBoard writer (overriding neptune's `ex`).
    if (args.rank == 0):
        writer = SummaryWriter(log_dir=str(config.tf_dir))
    trainer = Multi_Trainer_dist_MIR(args, model, loss, metrics, optimizer, config=config, data_loader=data_loader, valid_data_loader=valid_data_loader, lr_scheduler=lr_scheduler, visualizer=visualizer, writer=writer, tokenizer=tokenizer, max_samples_per_epoch=config['trainer']['max_samples_per_epoch'])
    trainer.train()
class ToTensor(object):
    """Paired transform: convert an image and its ground truth to tensors
    with the same `F.to_tensor` conversion applied to each."""

    def __call__(self, img, gt):
        img_tensor = F.to_tensor(img)
        gt_tensor = F.to_tensor(gt)
        return (img_tensor, gt_tensor)
def test_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = (Poly1.domain + (random((2,)) * 0.25))
w1 = (Poly1.window + (random((2,)) * 0.25))
p1 = Poly1(coef, domain=d1, window=w1)
d2 = (Poly2.domain + (random((2,)) * 0.25))
w2 = (Poly2.window + (random((2,)) * 0.25))
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x)) |
def mp_fit(epochs: int, learn: Learner, callbacks: Optional[CallbackList]=None, metrics: OptMetrics=None) -> None:
    """fastai-style fit loop: train ``learn.model`` for ``epochs`` epochs,
    driving every hook through a ``CallbackHandler``.

    Hook order is the contract: on_train_begin -> per epoch
    (on_epoch_begin, per-batch on_batch_begin / on_batch_end, optional
    validation, on_epoch_end) -> on_train_end, which always runs via the
    ``finally`` clause (receiving the exception, if any).
    """
    assert (len(learn.data.train_dl) != 0), f'''Your training dataloader is empty, can't train a model.
Use a smaller batch size (batch size={learn.data.train_dl.batch_size} for {len(learn.data.train_dl.dataset)} elements).'''
    cb_handler = CallbackHandler(callbacks, metrics)
    pbar = master_bar(range(epochs))
    cb_handler.on_train_begin(epochs, pbar=pbar, metrics=metrics)
    # `exception` stays False on a clean run; set to the raised exception
    # otherwise so on_train_end can report it.
    exception = False
    try:
        for epoch in pbar:
            learn.model.train()
            cb_handler.set_dl(learn.data.train_dl)
            cb_handler.on_epoch_begin()
            for (xb, yb) in progress_bar(learn.data.train_dl, parent=pbar):
                # Callbacks may transform the batch (device moves, mixup, ...).
                (xb, yb) = cb_handler.on_batch_begin(xb, yb)
                loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
                # A truthy on_batch_end requests early stop of the epoch.
                if cb_handler.on_batch_end(loss):
                    break
            if ((not cb_handler.skip_validate) and (not learn.data.empty_val)):
                val_loss = validate(learn.model, learn.data.valid_dl, loss_func=learn.loss_func, cb_handler=cb_handler, pbar=pbar)
            else:
                val_loss = None
            # A truthy on_epoch_end requests early stop of training.
            if cb_handler.on_epoch_end(val_loss):
                break
    except Exception as e:
        exception = e
        raise
    finally:
        # Always notify callbacks that training ended.
        cb_handler.on_train_end(exception)
class ThroughputTable(PerformanceTable):
    """Performance table whose measurements are throughputs (tokens/second)."""

    def __init__(self, percentiles, unit='tok/s', reverse_percentiles=True):
        # Higher throughput is better, hence percentiles reversed by default.
        super().__init__(percentiles, unit, reverse_percentiles)
        # Only the native unit is supported; its conversion factor is identity.
        self.unit_convert = {'tok/s': 1}
class _netD(nn.Module):
    """DCGAN-style discriminator producing one sigmoid score per sample.

    Channel counts come from the module-level globals ``nc`` (input
    channels) and ``ndf`` (base feature width).  The four stride-2 convs
    followed by a 4x4 valid conv suggest a 64x64 input -- TODO confirm.
    """

    def __init__(self, ngpu):
        super(_netD, self).__init__()
        self.ngpu = ngpu
        # Stem: nc -> ndf, no batch-norm on the first layer (DCGAN convention).
        layers = [
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Three downsampling stages doubling the channel count each time.
        for mult in (1, 2, 4):
            layers += [
                nn.Conv2d(ndf * mult, ndf * mult * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(ndf * mult * 2),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Head: collapse the remaining 4x4 map to a single probability.
        layers += [
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        # Split the batch across GPUs only for multi-GPU CUDA float input.
        if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output.view(-1, 1)
def test():
    """local_index must agree between positive and negative axis specs and
    compose correctly under zip."""
    array = ak.highlevel.Array(
        [[[0.0, 1.1, 2.2], []], [[3.3, 4.4]], [], [[5.5], [], [6.6, 7.7, 8.8, 9.9]]]
    )
    expected_by_axis = {
        0: [0, 1, 2, 3],
        1: [[0, 1], [0], [], [0, 1, 2]],
        2: [[[0, 1, 2], []], [[0, 1]], [], [[0], [], [0, 1, 2, 3]]],
    }
    # The array has depth 3, so axis k and axis k - 3 name the same level.
    for axis, expected in expected_by_axis.items():
        assert to_list(ak.operations.local_index(array, axis=axis)) == expected
        assert to_list(ak.operations.local_index(array, axis=(axis - 3))) == expected
    zipped = ak.operations.zip(
        [ak.operations.local_index(array, axis=axis) for axis in (0, 1, 2)]
    )
    # Zipping the three index levels yields full (outer, middle, inner) paths.
    assert to_list(zipped) == [
        [[(0, 0, 0), (0, 0, 1), (0, 0, 2)], []],
        [[(1, 0, 0), (1, 0, 1)]],
        [],
        [[(3, 0, 0)], [], [(3, 2, 0), (3, 2, 1), (3, 2, 2), (3, 2, 3)]],
    ]
def _apply_fc_weight_for_sum_match(model, input, dim_in, dim_out, scope, name):
    """Project ``input`` through a scoped fully-connected layer applied
    along axis 2, then squeeze out the leading singleton dimension."""
    fc_blob = brew.fc(model, input, s(scope, name), dim_in=dim_in, dim_out=dim_out, axis=2)
    # Squeeze dim 0 in place (the output blob name is reused).
    return model.net.Squeeze(fc_blob, fc_blob, dims=[0])
def run_ete(paths, dataset, short_name, command_args, extra_args):
    """Run the full end-to-end NLP pipeline on one dataset split:
    tokenize -> (optional) MWT expansion -> POS -> lemma -> depparse ->
    evaluation against the gold CoNLL-U file.

    Each stage reads the previous stage's output from ``ETE_DATA_DIR``
    and runs its model in predict mode via that stage's ``main()``.

    Args:
        paths: dict of data directories (TOKENIZE/MWT/LEMMA/ETE/WORDVEC).
        dataset: split name, e.g. 'dev' or 'test'.
        short_name: treebank shorthand such as 'en_ewt' (language_package).
        command_args: parsed CLI namespace (alternate test data, charlm).
        extra_args: extra arguments forwarded verbatim to every stage.
    """
    (short_language, package) = short_name.split('_', 1)
    tokenize_dir = paths['TOKENIZE_DATA_DIR']
    mwt_dir = paths['MWT_DATA_DIR']
    lemma_dir = paths['LEMMA_DATA_DIR']
    ete_dir = paths['ETE_DATA_DIR']
    wordvec_dir = paths['WORDVEC_DIR']
    # Optionally evaluate on a different treebank's data than was trained on.
    if (command_args and command_args.test_data):
        test_short_name = treebank_to_short_name(command_args.test_data)
    else:
        test_short_name = short_name
    tokenizer_type = '--txt_file'
    tokenizer_file = f'{tokenize_dir}/{test_short_name}.{dataset}.txt'
    tokenizer_output = f'{ete_dir}/{short_name}.{dataset}.tokenizer.conllu'
    tokenizer_args = ['--mode', 'predict', tokenizer_type, tokenizer_file, '--lang', short_language, '--conll_file', tokenizer_output, '--shorthand', short_name]
    tokenizer_args = (tokenizer_args + extra_args)
    logger.info('----- TOKENIZER ')
    logger.info('Running tokenizer step with args: {}'.format(tokenizer_args))
    tokenizer.main(tokenizer_args)
    # MWT expansion only makes sense if the training data contained MWTs.
    mwt_train_file = f'{mwt_dir}/{short_name}.train.in.conllu'
    logger.info('----- MWT ')
    if check_mwt(mwt_train_file):
        mwt_output = f'{ete_dir}/{short_name}.{dataset}.mwt.conllu'
        mwt_args = ['--eval_file', tokenizer_output, '--output_file', mwt_output, '--lang', short_language, '--shorthand', short_name, '--mode', 'predict']
        mwt_args = (mwt_args + extra_args)
        logger.info('Running mwt step with args: {}'.format(mwt_args))
        mwt_expander.main(mwt_args)
    else:
        logger.info('No MWT in training data. Skipping')
        # Pass the tokenizer output straight through to the POS stage.
        mwt_output = tokenizer_output
    logger.info('----- POS ')
    pos_output = f'{ete_dir}/{short_name}.{dataset}.pos.conllu'
    pos_args = ['--wordvec_dir', wordvec_dir, '--eval_file', mwt_output, '--output_file', pos_output, '--lang', short_language, '--shorthand', short_name, '--mode', 'predict', '--no_gold_labels']
    pos_charlm_args = build_pos_charlm_args(short_language, package, command_args.charlm)
    pos_args = (((pos_args + wordvec_args(short_language, package, extra_args)) + pos_charlm_args) + extra_args)
    logger.info('Running pos step with args: {}'.format(pos_args))
    tagger.main(pos_args)
    logger.info('----- LEMMA ')
    lemma_train_file = f'{lemma_dir}/{short_name}.train.in.conllu'
    lemma_output = f'{ete_dir}/{short_name}.{dataset}.lemma.conllu'
    lemma_args = ['--eval_file', pos_output, '--output_file', lemma_output, '--shorthand', short_name, '--mode', 'predict']
    # Fall back to the identity lemmatizer for languages without lemmas.
    if check_lemmas(lemma_train_file):
        lemma_charlm_args = build_lemma_charlm_args(short_language, package, command_args.charlm)
        lemma_args = ((lemma_args + lemma_charlm_args) + extra_args)
        logger.info('Running lemmatizer step with args: {}'.format(lemma_args))
        lemmatizer.main(lemma_args)
    else:
        lemma_args = (lemma_args + extra_args)
        logger.info('No lemmas in training data')
        logger.info('Running identity lemmatizer step with args: {}'.format(lemma_args))
        identity_lemmatizer.main(lemma_args)
    logger.info('----- DEPPARSE ')
    depparse_output = f'{ete_dir}/{short_name}.{dataset}.depparse.conllu'
    # NOTE(review): depparse passes `short_name` as --lang while every
    # other stage passes `short_language` -- confirm this is intentional.
    depparse_args = ['--wordvec_dir', wordvec_dir, '--eval_file', lemma_output, '--output_file', depparse_output, '--lang', short_name, '--shorthand', short_name, '--mode', 'predict']
    depparse_charlm_args = build_depparse_charlm_args(short_language, package, command_args.charlm)
    depparse_args = (((depparse_args + wordvec_args(short_language, package, extra_args)) + depparse_charlm_args) + extra_args)
    logger.info('Running depparse step with args: {}'.format(depparse_args))
    parser.main(depparse_args)
    logger.info('----- EVALUATION ')
    gold_file = f'{tokenize_dir}/{test_short_name}.{dataset}.gold.conllu'
    ete_file = depparse_output
    results = common.run_eval_script(gold_file, ete_file)
    logger.info('{} {} models on {} {} data:\n{}'.format(RESULTS_STRING, short_name, test_short_name, dataset, results))
def bit_width_of(value):
    """Return the number of bits needed to represent the non-negative
    integer ``value`` (0 -> 0 bits, 1 -> 1, 255 -> 8, 256 -> 9).

    The original shift loop capped at 64 iterations and silently fell off
    the end -- returning ``None`` -- for values wider than 64 bits, and
    likewise for negative values (whose arithmetic right shift never
    reaches zero).  Delegating to ``int.bit_length`` is correct for any
    width; negatives are now rejected explicitly.

    Raises:
        ValueError: if ``value`` is negative.
    """
    if value < 0:
        raise ValueError(f'bit_width_of expects a non-negative value, got {value}')
    return value.bit_length()
def select_primitives(primitive_parse):
    """Greedily select the subset of detected primitives that best
    explains the image pixels.

    Primitives are added one at a time (candidate chosen by
    `_get_next_primitive_key`) for as long as each addition improves the
    coverage reward by more than ``params.PRIMITIVE_SELECTION_MIN_GAIN``.

    Returns a new ``PrimitiveParse`` restricted to the selected
    primitives, or the input unchanged when it contains no primitives.
    """
    assert isinstance(primitive_parse, PrimitiveParse)
    if (len(primitive_parse.primitives) == 0):
        logging.error('No primitive detected.')
        return primitive_parse
    # Precompute, per primitive, the pixels it covers (with per-kind
    # tolerances for lines and circles).
    pixels_dict = _get_pixels_dict(primitive_parse, params.LINE_EPS, params.CIRCLE_EPS)
    selected_primitives = {}
    remaining_primitives = primitive_parse.primitives.copy()
    reward = 0
    while (len(remaining_primitives) > 0):
        # Candidate expected to add the most coverage given the selection.
        key = _get_next_primitive_key(selected_primitives, remaining_primitives, pixels_dict)
        updated_selected_primitives = selected_primitives.copy()
        updated_selected_primitives[key] = remaining_primitives[key]
        new_reward = _evaluate_reward(updated_selected_primitives, pixels_dict)
        if ((new_reward - reward) > params.PRIMITIVE_SELECTION_MIN_GAIN):
            selected_primitives = updated_selected_primitives
            del remaining_primitives[key]
            reward = new_reward
        else:
            # Marginal gain too small: stop the greedy selection.
            break
    new_primitive_parse = _get_primitive_parse(primitive_parse.image_segment_parse, selected_primitives)
    return new_primitive_parse
def save_checkpoint(state, save, epoch):
    """Serialize ``state`` with ``torch.save`` to ``<save>/checkpt-<epoch>.pth``.

    Args:
        state: any torch-serializable object (typically a state dict).
        save: target directory; created if it does not exist.
        epoch: epoch number, zero-padded to four digits in the filename.

    The original check-then-create (`os.path.exists` + `os.makedirs`) was
    racy when several processes save concurrently; ``exist_ok=True`` makes
    directory creation atomic with respect to that race.
    """
    os.makedirs(save, exist_ok=True)
    filename = os.path.join(save, ('checkpt-%04d.pth' % epoch))
    torch.save(state, filename)
def build_many(target, args, processes=None):
    """Run ``target`` over each element of ``args`` in parallel worker
    processes and return the results in input order.

    At most ``processes`` workers (default: CPU count) run at once; as
    each finishes, its slot is refilled from the task iterator.  A worker
    that exits with a non-zero status raises ``WorkerDiedException``; an
    exception raised inside ``target`` is wrapped, shipped back through
    the result queue, and re-raised in the parent.

    NOTE(review): uses `RemoteExceptionWrapper` / `WorkerDiedException`
    defined elsewhere in this file, and POSIX ``os.wait`` / fork
    semantics -- not portable to Windows.
    """
    from multiprocessing import Process, Queue, cpu_count, set_start_method
    # macOS defaults to 'spawn'; this code relies on fork semantics.
    if (os.uname().sysname == 'Darwin'):
        set_start_method('fork', force=True)
    from queue import Empty
    if (processes is None):
        processes = cpu_count()
    # Each slot is either None (free) or a (Process, (idx, arg)) pair.
    workers = ([None] * processes)
    tasks = enumerate(args)
    results = []
    result_queue = Queue()
    def run_worker(target, queue, idx, task):
        # Runs in the child: report (idx, result), or (None, wrapped exc)
        # so the parent can distinguish failure from success.
        try:
            result = target(task)
        except BaseException as exc:
            queue.put((None, RemoteExceptionWrapper(exc)))
        else:
            queue.put((idx, result))
    def bring_out_yer_dead(w, task, exitcode):
        # Reap one worker: return (w, task) while it is still running,
        # None once it has been joined and its result collected.
        if ((w is None) or (exitcode is None)):
            return (w, task)
        if (w._popen.returncode is None):
            # Propagate the status observed via os.wait() onto the Process
            # object so `w.exitcode` reads correctly below.
            w._popen.returncode = exitcode
        if (exitcode != 0):
            raise WorkerDiedException(f'worker for {task[1]} died with non-zero exit code {w.exitcode}')
        try:
            result = result_queue.get_nowait()
            if (result[0] is None):
                # The child shipped an exception instead of a result.
                exception = result[1]
                raise WorkerDiedException('', original_exception=exception)
            else:
                results.append(result)
        except Empty:
            pass
        w.join()
        return None
    def wait_for_one():
        # Block until any child exits; (None, None) when no children remain.
        try:
            (pid, sts) = os.wait()
        except OSError as exc:
            if (exc.errno != errno.ECHILD):
                raise
            else:
                return (None, None)
        # Signal deaths are encoded as negative exit codes.
        if os.WIFSIGNALED(sts):
            exitcode = (- os.WTERMSIG(sts))
        else:
            exitcode = os.WEXITSTATUS(sts)
        return (pid, exitcode)
    def reap_workers(waited_pid=None, waited_exitcode=None):
        # Reap finished workers and refill free slots from the task
        # iterator; returns True when no workers remain and tasks ran out.
        all_done = True
        for (idx, w) in enumerate(workers):
            if (w is not None):
                (w, task) = w
                if (w.pid == waited_pid):
                    exitcode = waited_exitcode
                else:
                    exitcode = w.exitcode
                w = bring_out_yer_dead(w, task, exitcode)
            if (w is None):
                try:
                    task = next(tasks)
                except StopIteration:
                    pass
                else:
                    w = Process(target=run_worker, args=((target, result_queue) + task))
                    w.start()
                    w = (w, task)
            workers[idx] = w
            if (w is not None):
                all_done = False
        return all_done
    waited_pid = None
    waited_exitcode = None
    worker_exc = None
    try:
        while True:
            try:
                if reap_workers(waited_pid, waited_exitcode):
                    break
            except WorkerDiedException as exc:
                worker_exc = exc
                break
            (waited_pid, waited_exitcode) = wait_for_one()
    finally:
        try:
            # On any exit path, tear down whatever workers are still alive.
            remaining_workers = [w for w in workers if (w is not None)]
            for (w, _) in remaining_workers:
                try:
                    w.terminate()
                except OSError as exc:
                    # ESRCH: already gone; anything else is a real error.
                    if (exc.errno != errno.ESRCH):
                        raise
            for (w, _) in remaining_workers:
                w.join()
        finally:
            if (worker_exc is not None):
                # Prefer re-raising the original exception from the child.
                if (worker_exc.original_exception is not None):
                    raise worker_exc.original_exception
                else:
                    raise worker_exc
    # Drain any results still sitting in the queue after the last reap.
    while True:
        try:
            results.append(result_queue.get_nowait())
        except Empty:
            break
    # Restore input order: results arrived tagged with their task index.
    return [r[1] for r in sorted(results, key=(lambda r: r[0]))]
class FileRequired(DataRequired):
    """Validator requiring the field to hold an uploaded file.

    Like ``DataRequired``, but the data must specifically be a truthy
    ``FileStorage`` instance.
    """

    def __call__(self, form, field):
        data = field.data
        if isinstance(data, FileStorage) and data:
            return
        # No file present: halt the validation chain with a message.
        raise StopValidation(self.message or field.gettext('This field is required.'))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.